1 //===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the PPCISelLowering class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "PPCISelLowering.h"
15 #include "MCTargetDesc/PPCPredicates.h"
16 #include "PPC.h"
17 #include "PPCCCState.h"
18 #include "PPCCallingConv.h"
19 #include "PPCFrameLowering.h"
20 #include "PPCInstrInfo.h"
21 #include "PPCMachineFunctionInfo.h"
22 #include "PPCPerfectShuffle.h"
23 #include "PPCRegisterInfo.h"
24 #include "PPCSubtarget.h"
25 #include "PPCTargetMachine.h"
26 #include "llvm/ADT/APFloat.h"
27 #include "llvm/ADT/APInt.h"
28 #include "llvm/ADT/ArrayRef.h"
29 #include "llvm/ADT/DenseMap.h"
30 #include "llvm/ADT/None.h"
31 #include "llvm/ADT/STLExtras.h"
32 #include "llvm/ADT/SmallPtrSet.h"
33 #include "llvm/ADT/SmallSet.h"
34 #include "llvm/ADT/SmallVector.h"
35 #include "llvm/ADT/Statistic.h"
36 #include "llvm/ADT/StringRef.h"
37 #include "llvm/ADT/StringSwitch.h"
38 #include "llvm/CodeGen/CallingConvLower.h"
39 #include "llvm/CodeGen/ISDOpcodes.h"
40 #include "llvm/CodeGen/MachineBasicBlock.h"
41 #include "llvm/CodeGen/MachineFrameInfo.h"
42 #include "llvm/CodeGen/MachineFunction.h"
43 #include "llvm/CodeGen/MachineInstr.h"
44 #include "llvm/CodeGen/MachineInstrBuilder.h"
45 #include "llvm/CodeGen/MachineJumpTableInfo.h"
46 #include "llvm/CodeGen/MachineLoopInfo.h"
47 #include "llvm/CodeGen/MachineMemOperand.h"
48 #include "llvm/CodeGen/MachineOperand.h"
49 #include "llvm/CodeGen/MachineRegisterInfo.h"
50 #include "llvm/CodeGen/MachineValueType.h"
51 #include "llvm/CodeGen/RuntimeLibcalls.h"
52 #include "llvm/CodeGen/SelectionDAG.h"
53 #include "llvm/CodeGen/SelectionDAGNodes.h"
54 #include "llvm/CodeGen/TargetInstrInfo.h"
55 #include "llvm/CodeGen/TargetLowering.h"
56 #include "llvm/CodeGen/TargetRegisterInfo.h"
57 #include "llvm/CodeGen/ValueTypes.h"
58 #include "llvm/IR/CallSite.h"
59 #include "llvm/IR/CallingConv.h"
60 #include "llvm/IR/Constant.h"
61 #include "llvm/IR/Constants.h"
62 #include "llvm/IR/DataLayout.h"
63 #include "llvm/IR/DebugLoc.h"
64 #include "llvm/IR/DerivedTypes.h"
65 #include "llvm/IR/Function.h"
66 #include "llvm/IR/GlobalValue.h"
67 #include "llvm/IR/IRBuilder.h"
68 #include "llvm/IR/Instructions.h"
69 #include "llvm/IR/Intrinsics.h"
70 #include "llvm/IR/Module.h"
71 #include "llvm/IR/Type.h"
72 #include "llvm/IR/Use.h"
73 #include "llvm/IR/Value.h"
74 #include "llvm/MC/MCExpr.h"
75 #include "llvm/MC/MCRegisterInfo.h"
76 #include "llvm/Support/AtomicOrdering.h"
77 #include "llvm/Support/BranchProbability.h"
78 #include "llvm/Support/Casting.h"
79 #include "llvm/Support/CodeGen.h"
80 #include "llvm/Support/CommandLine.h"
81 #include "llvm/Support/Compiler.h"
82 #include "llvm/Support/Debug.h"
83 #include "llvm/Support/ErrorHandling.h"
84 #include "llvm/Support/Format.h"
85 #include "llvm/Support/KnownBits.h"
86 #include "llvm/Support/MathExtras.h"
87 #include "llvm/Support/raw_ostream.h"
88 #include "llvm/Target/TargetMachine.h"
89 #include "llvm/Target/TargetOptions.h"
90 #include <algorithm>
91 #include <cassert>
92 #include <cstdint>
93 #include <iterator>
94 #include <list>
95 #include <utility>
96 #include <vector>
97 
98 using namespace llvm;
99 
100 #define DEBUG_TYPE "ppc-lowering"
101 
102 static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
103 cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);
104 
105 static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
106 cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden);
107 
108 static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
109 cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);
110 
111 static cl::opt<bool> DisableSCO("disable-ppc-sco",
112 cl::desc("disable sibling call optimization on ppc"), cl::Hidden);
113 
114 STATISTIC(NumTailCalls, "Number of tail calls");
115 STATISTIC(NumSiblingCalls, "Number of sibling calls");
116 
117 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int);
118 
119 // FIXME: Remove this once the bug has been fixed!
120 extern cl::opt<bool> ANDIGlueBug;
121 
122 PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
123                                      const PPCSubtarget &STI)
124     : TargetLowering(TM), Subtarget(STI) {
125   // Use _setjmp/_longjmp instead of setjmp/longjmp.
126   setUseUnderscoreSetJmp(true);
127   setUseUnderscoreLongJmp(true);
128 
129   // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
130   // arguments are at least 4/8 bytes aligned.
131   bool isPPC64 = Subtarget.isPPC64();
132   setMinStackArgumentAlignment(isPPC64 ? 8:4);
133 
134   // Set up the register classes.
135   addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
136   if (!useSoftFloat()) {
137     addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
138     addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
139   }
140 
141   // Match BITREVERSE to customized fast code sequence in the td file.
142   setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
143   setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
144 
145   // Sub-word ATOMIC_CMP_SWAP need to ensure that the input is zero-extended.
146   setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
147 
148   // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
149   for (MVT VT : MVT::integer_valuetypes()) {
150     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
151     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
152   }
153 
154   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
155 
156   // PowerPC has pre-inc load and store's.
157   setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
158   setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
159   setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
160   setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
161   setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
162   setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
163   setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
164   setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
165   setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
166   setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
167   setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
168   setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
169   setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
170   setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
171 
172   if (Subtarget.useCRBits()) {
173     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
174 
175     if (isPPC64 || Subtarget.hasFPCVT()) {
176       setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
177       AddPromotedToType (ISD::SINT_TO_FP, MVT::i1,
178                          isPPC64 ? MVT::i64 : MVT::i32);
179       setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
180       AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
181                         isPPC64 ? MVT::i64 : MVT::i32);
182     } else {
183       setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
184       setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
185     }
186 
187     // PowerPC does not support direct load/store of condition registers.
188     setOperationAction(ISD::LOAD, MVT::i1, Custom);
189     setOperationAction(ISD::STORE, MVT::i1, Custom);
190 
191     // FIXME: Remove this once the ANDI glue bug is fixed:
192     if (ANDIGlueBug)
193       setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
194 
195     for (MVT VT : MVT::integer_valuetypes()) {
196       setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
197       setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
198       setTruncStoreAction(VT, MVT::i1, Expand);
199     }
200 
201     addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
202   }
203 
204   // This is used in the ppcf128->int sequence.  Note it has different semantics
205   // from FP_ROUND:  that rounds to nearest, this rounds to zero.
206   setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);
207 
208   // We do not currently implement these libm ops for PowerPC.
209   setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
210   setOperationAction(ISD::FCEIL,  MVT::ppcf128, Expand);
211   setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
212   setOperationAction(ISD::FRINT,  MVT::ppcf128, Expand);
213   setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
214   setOperationAction(ISD::FREM, MVT::ppcf128, Expand);
215 
216   // PowerPC has no SREM/UREM instructions unless we are on P9
217   // On P9 we may use a hardware instruction to compute the remainder.
218   // The instructions are not legalized directly because in the cases where the
219   // result of both the remainder and the division is required it is more
220   // efficient to compute the remainder from the result of the division rather
221   // than use the remainder instruction.
222   if (Subtarget.isISA3_0()) {
223     setOperationAction(ISD::SREM, MVT::i32, Custom);
224     setOperationAction(ISD::UREM, MVT::i32, Custom);
225     setOperationAction(ISD::SREM, MVT::i64, Custom);
226     setOperationAction(ISD::UREM, MVT::i64, Custom);
227   } else {
228     setOperationAction(ISD::SREM, MVT::i32, Expand);
229     setOperationAction(ISD::UREM, MVT::i32, Expand);
230     setOperationAction(ISD::SREM, MVT::i64, Expand);
231     setOperationAction(ISD::UREM, MVT::i64, Expand);
232   }
233 
234   if (Subtarget.hasP9Vector()) {
235     setOperationAction(ISD::ABS, MVT::v4i32, Legal);
236     setOperationAction(ISD::ABS, MVT::v8i16, Legal);
237     setOperationAction(ISD::ABS, MVT::v16i8, Legal);
238   }
239 
240   // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
241   setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
242   setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
243   setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
244   setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
245   setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
246   setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
247   setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
248   setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
249 
250   // We don't support sin/cos/sqrt/fmod/pow
251   setOperationAction(ISD::FSIN , MVT::f64, Expand);
252   setOperationAction(ISD::FCOS , MVT::f64, Expand);
253   setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
254   setOperationAction(ISD::FREM , MVT::f64, Expand);
255   setOperationAction(ISD::FPOW , MVT::f64, Expand);
256   setOperationAction(ISD::FMA  , MVT::f64, Legal);
257   setOperationAction(ISD::FSIN , MVT::f32, Expand);
258   setOperationAction(ISD::FCOS , MVT::f32, Expand);
259   setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
260   setOperationAction(ISD::FREM , MVT::f32, Expand);
261   setOperationAction(ISD::FPOW , MVT::f32, Expand);
262   setOperationAction(ISD::FMA  , MVT::f32, Legal);
263 
264   setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
265 
266   // If we're enabling GP optimizations, use hardware square root
267   if (!Subtarget.hasFSQRT() &&
268       !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
269         Subtarget.hasFRE()))
270     setOperationAction(ISD::FSQRT, MVT::f64, Expand);
271 
272   if (!Subtarget.hasFSQRT() &&
273       !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
274         Subtarget.hasFRES()))
275     setOperationAction(ISD::FSQRT, MVT::f32, Expand);
276 
277   if (Subtarget.hasFCPSGN()) {
278     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
279     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
280   } else {
281     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
282     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
283   }
284 
285   if (Subtarget.hasFPRND()) {
286     setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
287     setOperationAction(ISD::FCEIL,  MVT::f64, Legal);
288     setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
289     setOperationAction(ISD::FROUND, MVT::f64, Legal);
290 
291     setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
292     setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
293     setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
294     setOperationAction(ISD::FROUND, MVT::f32, Legal);
295   }
296 
297   // PowerPC does not have BSWAP, but we can use vector BSWAP instruction xxbrd
298   // to speed up scalar BSWAP64.
299   // CTPOP or CTTZ were introduced in P8/P9 respectivelly
300   setOperationAction(ISD::BSWAP, MVT::i32  , Expand);
301   if (Subtarget.isISA3_0()) {
302     setOperationAction(ISD::BSWAP, MVT::i64  , Custom);
303     setOperationAction(ISD::CTTZ , MVT::i32  , Legal);
304     setOperationAction(ISD::CTTZ , MVT::i64  , Legal);
305   } else {
306     setOperationAction(ISD::BSWAP, MVT::i64  , Expand);
307     setOperationAction(ISD::CTTZ , MVT::i32  , Expand);
308     setOperationAction(ISD::CTTZ , MVT::i64  , Expand);
309   }
310 
311   if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
312     setOperationAction(ISD::CTPOP, MVT::i32  , Legal);
313     setOperationAction(ISD::CTPOP, MVT::i64  , Legal);
314   } else {
315     setOperationAction(ISD::CTPOP, MVT::i32  , Expand);
316     setOperationAction(ISD::CTPOP, MVT::i64  , Expand);
317   }
318 
319   // PowerPC does not have ROTR
320   setOperationAction(ISD::ROTR, MVT::i32   , Expand);
321   setOperationAction(ISD::ROTR, MVT::i64   , Expand);
322 
323   if (!Subtarget.useCRBits()) {
324     // PowerPC does not have Select
325     setOperationAction(ISD::SELECT, MVT::i32, Expand);
326     setOperationAction(ISD::SELECT, MVT::i64, Expand);
327     setOperationAction(ISD::SELECT, MVT::f32, Expand);
328     setOperationAction(ISD::SELECT, MVT::f64, Expand);
329   }
330 
331   // PowerPC wants to turn select_cc of FP into fsel when possible.
332   setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
333   setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
334 
335   // PowerPC wants to optimize integer setcc a bit
336   if (!Subtarget.useCRBits())
337     setOperationAction(ISD::SETCC, MVT::i32, Custom);
338 
339   // PowerPC does not have BRCOND which requires SetCC
340   if (!Subtarget.useCRBits())
341     setOperationAction(ISD::BRCOND, MVT::Other, Expand);
342 
343   setOperationAction(ISD::BR_JT,  MVT::Other, Expand);
344 
345   // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
346   setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
347 
348   // PowerPC does not have [U|S]INT_TO_FP
349   setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
350   setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
351 
352   if (Subtarget.hasDirectMove() && isPPC64) {
353     setOperationAction(ISD::BITCAST, MVT::f32, Legal);
354     setOperationAction(ISD::BITCAST, MVT::i32, Legal);
355     setOperationAction(ISD::BITCAST, MVT::i64, Legal);
356     setOperationAction(ISD::BITCAST, MVT::f64, Legal);
357   } else {
358     setOperationAction(ISD::BITCAST, MVT::f32, Expand);
359     setOperationAction(ISD::BITCAST, MVT::i32, Expand);
360     setOperationAction(ISD::BITCAST, MVT::i64, Expand);
361     setOperationAction(ISD::BITCAST, MVT::f64, Expand);
362   }
363 
364   // We cannot sextinreg(i1).  Expand to shifts.
365   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
366 
367   // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
368   // SjLj exception handling but a light-weight setjmp/longjmp replacement to
369   // support continuation, user-level threading, and etc.. As a result, no
370   // other SjLj exception interfaces are implemented and please don't build
371   // your own exception handling based on them.
372   // LLVM/Clang supports zero-cost DWARF exception handling.
373   setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
374   setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
375 
376   // We want to legalize GlobalAddress and ConstantPool nodes into the
377   // appropriate instructions to materialize the address.
378   setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
379   setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
380   setOperationAction(ISD::BlockAddress,  MVT::i32, Custom);
381   setOperationAction(ISD::ConstantPool,  MVT::i32, Custom);
382   setOperationAction(ISD::JumpTable,     MVT::i32, Custom);
383   setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
384   setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
385   setOperationAction(ISD::BlockAddress,  MVT::i64, Custom);
386   setOperationAction(ISD::ConstantPool,  MVT::i64, Custom);
387   setOperationAction(ISD::JumpTable,     MVT::i64, Custom);
388 
389   // TRAP is legal.
390   setOperationAction(ISD::TRAP, MVT::Other, Legal);
391 
392   // TRAMPOLINE is custom lowered.
393   setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
394   setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
395 
396   // VASTART needs to be custom lowered to use the VarArgsFrameIndex
397   setOperationAction(ISD::VASTART           , MVT::Other, Custom);
398 
399   if (Subtarget.isSVR4ABI()) {
400     if (isPPC64) {
401       // VAARG always uses double-word chunks, so promote anything smaller.
402       setOperationAction(ISD::VAARG, MVT::i1, Promote);
403       AddPromotedToType (ISD::VAARG, MVT::i1, MVT::i64);
404       setOperationAction(ISD::VAARG, MVT::i8, Promote);
405       AddPromotedToType (ISD::VAARG, MVT::i8, MVT::i64);
406       setOperationAction(ISD::VAARG, MVT::i16, Promote);
407       AddPromotedToType (ISD::VAARG, MVT::i16, MVT::i64);
408       setOperationAction(ISD::VAARG, MVT::i32, Promote);
409       AddPromotedToType (ISD::VAARG, MVT::i32, MVT::i64);
410       setOperationAction(ISD::VAARG, MVT::Other, Expand);
411     } else {
412       // VAARG is custom lowered with the 32-bit SVR4 ABI.
413       setOperationAction(ISD::VAARG, MVT::Other, Custom);
414       setOperationAction(ISD::VAARG, MVT::i64, Custom);
415     }
416   } else
417     setOperationAction(ISD::VAARG, MVT::Other, Expand);
418 
419   if (Subtarget.isSVR4ABI() && !isPPC64)
420     // VACOPY is custom lowered with the 32-bit SVR4 ABI.
421     setOperationAction(ISD::VACOPY            , MVT::Other, Custom);
422   else
423     setOperationAction(ISD::VACOPY            , MVT::Other, Expand);
424 
425   // Use the default implementation.
426   setOperationAction(ISD::VAEND             , MVT::Other, Expand);
427   setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
428   setOperationAction(ISD::STACKRESTORE      , MVT::Other, Custom);
429   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
430   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64  , Custom);
431   setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
432   setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
433   setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
434   setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);
435 
436   // We want to custom lower some of our intrinsics.
437   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
438 
439   // To handle counter-based loop conditions.
440   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);
441 
442   setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
443   setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
444   setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
445   setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
446 
447   // Comparisons that require checking two conditions.
448   setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
449   setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
450   setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
451   setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
452   setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
453   setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
454   setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
455   setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
456   setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
457   setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
458   setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
459   setCondCodeAction(ISD::SETONE, MVT::f64, Expand);
460 
461   if (Subtarget.has64BitSupport()) {
462     // They also have instructions for converting between i64 and fp.
463     setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
464     setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
465     setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
466     setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
467     // This is just the low 32 bits of a (signed) fp->i64 conversion.
468     // We cannot do this with Promote because i64 is not a legal type.
469     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
470 
471     if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
472       setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
473   } else {
474     // PowerPC does not have FP_TO_UINT on 32-bit implementations.
475     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
476   }
477 
478   // With the instructions enabled under FPCVT, we can do everything.
479   if (Subtarget.hasFPCVT()) {
480     if (Subtarget.has64BitSupport()) {
481       setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
482       setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
483       setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
484       setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
485     }
486 
487     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
488     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
489     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
490     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
491   }
492 
493   if (Subtarget.use64BitRegs()) {
494     // 64-bit PowerPC implementations can support i64 types directly
495     addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
496     // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
497     setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
498     // 64-bit PowerPC wants to expand i128 shifts itself.
499     setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
500     setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
501     setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
502   } else {
503     // 32-bit PowerPC wants to expand i64 shifts itself.
504     setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
505     setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
506     setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
507   }
508 
509   if (Subtarget.hasAltivec()) {
510     // First set operation action for all vector types to expand. Then we
511     // will selectively turn on ones that can be effectively codegen'd.
512     for (MVT VT : MVT::vector_valuetypes()) {
513       // add/sub are legal for all supported vector VT's.
514       setOperationAction(ISD::ADD, VT, Legal);
515       setOperationAction(ISD::SUB, VT, Legal);
516 
517       // Vector instructions introduced in P8
518       if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
519         setOperationAction(ISD::CTPOP, VT, Legal);
520         setOperationAction(ISD::CTLZ, VT, Legal);
521       }
522       else {
523         setOperationAction(ISD::CTPOP, VT, Expand);
524         setOperationAction(ISD::CTLZ, VT, Expand);
525       }
526 
527       // Vector instructions introduced in P9
528       if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
529         setOperationAction(ISD::CTTZ, VT, Legal);
530       else
531         setOperationAction(ISD::CTTZ, VT, Expand);
532 
533       // We promote all shuffles to v16i8.
534       setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
535       AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);
536 
537       // We promote all non-typed operations to v4i32.
538       setOperationAction(ISD::AND   , VT, Promote);
539       AddPromotedToType (ISD::AND   , VT, MVT::v4i32);
540       setOperationAction(ISD::OR    , VT, Promote);
541       AddPromotedToType (ISD::OR    , VT, MVT::v4i32);
542       setOperationAction(ISD::XOR   , VT, Promote);
543       AddPromotedToType (ISD::XOR   , VT, MVT::v4i32);
544       setOperationAction(ISD::LOAD  , VT, Promote);
545       AddPromotedToType (ISD::LOAD  , VT, MVT::v4i32);
546       setOperationAction(ISD::SELECT, VT, Promote);
547       AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
548       setOperationAction(ISD::SELECT_CC, VT, Promote);
549       AddPromotedToType (ISD::SELECT_CC, VT, MVT::v4i32);
550       setOperationAction(ISD::STORE, VT, Promote);
551       AddPromotedToType (ISD::STORE, VT, MVT::v4i32);
552 
553       // No other operations are legal.
554       setOperationAction(ISD::MUL , VT, Expand);
555       setOperationAction(ISD::SDIV, VT, Expand);
556       setOperationAction(ISD::SREM, VT, Expand);
557       setOperationAction(ISD::UDIV, VT, Expand);
558       setOperationAction(ISD::UREM, VT, Expand);
559       setOperationAction(ISD::FDIV, VT, Expand);
560       setOperationAction(ISD::FREM, VT, Expand);
561       setOperationAction(ISD::FNEG, VT, Expand);
562       setOperationAction(ISD::FSQRT, VT, Expand);
563       setOperationAction(ISD::FLOG, VT, Expand);
564       setOperationAction(ISD::FLOG10, VT, Expand);
565       setOperationAction(ISD::FLOG2, VT, Expand);
566       setOperationAction(ISD::FEXP, VT, Expand);
567       setOperationAction(ISD::FEXP2, VT, Expand);
568       setOperationAction(ISD::FSIN, VT, Expand);
569       setOperationAction(ISD::FCOS, VT, Expand);
570       setOperationAction(ISD::FABS, VT, Expand);
571       setOperationAction(ISD::FFLOOR, VT, Expand);
572       setOperationAction(ISD::FCEIL,  VT, Expand);
573       setOperationAction(ISD::FTRUNC, VT, Expand);
574       setOperationAction(ISD::FRINT,  VT, Expand);
575       setOperationAction(ISD::FNEARBYINT, VT, Expand);
576       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
577       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
578       setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
579       setOperationAction(ISD::MULHU, VT, Expand);
580       setOperationAction(ISD::MULHS, VT, Expand);
581       setOperationAction(ISD::UMUL_LOHI, VT, Expand);
582       setOperationAction(ISD::SMUL_LOHI, VT, Expand);
583       setOperationAction(ISD::UDIVREM, VT, Expand);
584       setOperationAction(ISD::SDIVREM, VT, Expand);
585       setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
586       setOperationAction(ISD::FPOW, VT, Expand);
587       setOperationAction(ISD::BSWAP, VT, Expand);
588       setOperationAction(ISD::VSELECT, VT, Expand);
589       setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
590       setOperationAction(ISD::ROTL, VT, Expand);
591       setOperationAction(ISD::ROTR, VT, Expand);
592 
593       for (MVT InnerVT : MVT::vector_valuetypes()) {
594         setTruncStoreAction(VT, InnerVT, Expand);
595         setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
596         setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
597         setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
598       }
599     }
600 
601     // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
602     // with merges, splats, etc.
603     setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
604 
605     setOperationAction(ISD::AND   , MVT::v4i32, Legal);
606     setOperationAction(ISD::OR    , MVT::v4i32, Legal);
607     setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
608     setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
609     setOperationAction(ISD::SELECT, MVT::v4i32,
610                        Subtarget.useCRBits() ? Legal : Expand);
611     setOperationAction(ISD::STORE , MVT::v4i32, Legal);
612     setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
613     setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
614     setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
615     setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
616     setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
617     setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
618     setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
619     setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
620 
621     addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
622     addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
623     addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
624     addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);
625 
626     setOperationAction(ISD::MUL, MVT::v4f32, Legal);
627     setOperationAction(ISD::FMA, MVT::v4f32, Legal);
628 
629     if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
630       setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
631       setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
632     }
633 
634     if (Subtarget.hasP8Altivec())
635       setOperationAction(ISD::MUL, MVT::v4i32, Legal);
636     else
637       setOperationAction(ISD::MUL, MVT::v4i32, Custom);
638 
639     setOperationAction(ISD::MUL, MVT::v8i16, Custom);
640     setOperationAction(ISD::MUL, MVT::v16i8, Custom);
641 
642     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
643     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);
644 
645     setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
646     setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
647     setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
648     setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
649 
650     // Altivec does not contain unordered floating-point compare instructions
651     setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
652     setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
653     setCondCodeAction(ISD::SETO,   MVT::v4f32, Expand);
654     setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);
655 
656     if (Subtarget.hasVSX()) {
657       setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
658       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
659       if (Subtarget.hasP8Vector()) {
660         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
661         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
662       }
663       if (Subtarget.hasDirectMove() && isPPC64) {
664         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
665         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
666         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
667         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
668         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
669         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
670         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
671         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
672       }
673       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
674 
675       setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
676       setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
677       setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
678       setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
679       setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
680 
681       setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
682 
683       setOperationAction(ISD::MUL, MVT::v2f64, Legal);
684       setOperationAction(ISD::FMA, MVT::v2f64, Legal);
685 
686       setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
687       setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
688 
689       setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
690       setOperationAction(ISD::VSELECT, MVT::v8i16, Legal);
691       setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
692       setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
693       setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);
694 
695       // Share the Altivec comparison restrictions.
696       setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
697       setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
698       setCondCodeAction(ISD::SETO,   MVT::v2f64, Expand);
699       setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);
700 
701       setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
702       setOperationAction(ISD::STORE, MVT::v2f64, Legal);
703 
704       setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);
705 
706       if (Subtarget.hasP8Vector())
707         addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);
708 
709       addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);
710 
711       addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
712       addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
713       addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);
714 
715       if (Subtarget.hasP8Altivec()) {
716         setOperationAction(ISD::SHL, MVT::v2i64, Legal);
717         setOperationAction(ISD::SRA, MVT::v2i64, Legal);
718         setOperationAction(ISD::SRL, MVT::v2i64, Legal);
719 
720         // 128 bit shifts can be accomplished via 3 instructions for SHL and
721         // SRL, but not for SRA because of the instructions available:
722         // VS{RL} and VS{RL}O. However due to direct move costs, it's not worth
723         // doing
724         setOperationAction(ISD::SHL, MVT::v1i128, Expand);
725         setOperationAction(ISD::SRL, MVT::v1i128, Expand);
726         setOperationAction(ISD::SRA, MVT::v1i128, Expand);
727 
728         setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
729       }
730       else {
731         setOperationAction(ISD::SHL, MVT::v2i64, Expand);
732         setOperationAction(ISD::SRA, MVT::v2i64, Expand);
733         setOperationAction(ISD::SRL, MVT::v2i64, Expand);
734 
735         setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
736 
737         // VSX v2i64 only supports non-arithmetic operations.
738         setOperationAction(ISD::ADD, MVT::v2i64, Expand);
739         setOperationAction(ISD::SUB, MVT::v2i64, Expand);
740       }
741 
742       setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
743       AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64);
744       setOperationAction(ISD::STORE, MVT::v2i64, Promote);
745       AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64);
746 
747       setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);
748 
749       setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
750       setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
751       setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
752       setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
753 
754       // Vector operation legalization checks the result type of
755       // SIGN_EXTEND_INREG, overall legalization checks the inner type.
756       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
757       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
758       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
759       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
760 
761       setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
762       setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
763       setOperationAction(ISD::FABS, MVT::v4f32, Legal);
764       setOperationAction(ISD::FABS, MVT::v2f64, Legal);
765 
766       if (Subtarget.hasDirectMove())
767         setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
768       setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
769 
770       addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
771     }
772 
773     if (Subtarget.hasP8Altivec()) {
774       addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
775       addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
776     }
777 
778     if (Subtarget.hasP9Vector()) {
779       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
780       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
781 
782       // 128 bit shifts can be accomplished via 3 instructions for SHL and
783       // SRL, but not for SRA because of the instructions available:
784       // VS{RL} and VS{RL}O.
785       setOperationAction(ISD::SHL, MVT::v1i128, Legal);
786       setOperationAction(ISD::SRL, MVT::v1i128, Legal);
787       setOperationAction(ISD::SRA, MVT::v1i128, Expand);
788     }
789 
790     if (Subtarget.hasP9Altivec()) {
791       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
792       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
793     }
794   }
795 
796   if (Subtarget.hasQPX()) {
797     setOperationAction(ISD::FADD, MVT::v4f64, Legal);
798     setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
799     setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
800     setOperationAction(ISD::FREM, MVT::v4f64, Expand);
801 
802     setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
803     setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);
804 
805     setOperationAction(ISD::LOAD  , MVT::v4f64, Custom);
806     setOperationAction(ISD::STORE , MVT::v4f64, Custom);
807 
808     setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
809     setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);
810 
811     if (!Subtarget.useCRBits())
812       setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
813     setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);
814 
815     setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f64, Legal);
816     setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f64, Expand);
817     setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f64, Expand);
818     setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f64, Expand);
819     setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f64, Custom);
820     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
821     setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);
822 
823     setOperationAction(ISD::FP_TO_SINT , MVT::v4f64, Legal);
824     setOperationAction(ISD::FP_TO_UINT , MVT::v4f64, Expand);
825 
826     setOperationAction(ISD::FP_ROUND , MVT::v4f32, Legal);
827     setOperationAction(ISD::FP_ROUND_INREG , MVT::v4f32, Expand);
828     setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);
829 
830     setOperationAction(ISD::FNEG , MVT::v4f64, Legal);
831     setOperationAction(ISD::FABS , MVT::v4f64, Legal);
832     setOperationAction(ISD::FSIN , MVT::v4f64, Expand);
833     setOperationAction(ISD::FCOS , MVT::v4f64, Expand);
834     setOperationAction(ISD::FPOW , MVT::v4f64, Expand);
835     setOperationAction(ISD::FLOG , MVT::v4f64, Expand);
836     setOperationAction(ISD::FLOG2 , MVT::v4f64, Expand);
837     setOperationAction(ISD::FLOG10 , MVT::v4f64, Expand);
838     setOperationAction(ISD::FEXP , MVT::v4f64, Expand);
839     setOperationAction(ISD::FEXP2 , MVT::v4f64, Expand);
840 
841     setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
842     setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);
843 
844     setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
845     setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);
846 
847     addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);
848 
849     setOperationAction(ISD::FADD, MVT::v4f32, Legal);
850     setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
851     setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
852     setOperationAction(ISD::FREM, MVT::v4f32, Expand);
853 
854     setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
855     setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);
856 
857     setOperationAction(ISD::LOAD  , MVT::v4f32, Custom);
858     setOperationAction(ISD::STORE , MVT::v4f32, Custom);
859 
860     if (!Subtarget.useCRBits())
861       setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
862     setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
863 
864     setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f32, Legal);
865     setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f32, Expand);
866     setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f32, Expand);
867     setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f32, Expand);
868     setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f32, Custom);
869     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
870     setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
871 
872     setOperationAction(ISD::FP_TO_SINT , MVT::v4f32, Legal);
873     setOperationAction(ISD::FP_TO_UINT , MVT::v4f32, Expand);
874 
875     setOperationAction(ISD::FNEG , MVT::v4f32, Legal);
876     setOperationAction(ISD::FABS , MVT::v4f32, Legal);
877     setOperationAction(ISD::FSIN , MVT::v4f32, Expand);
878     setOperationAction(ISD::FCOS , MVT::v4f32, Expand);
879     setOperationAction(ISD::FPOW , MVT::v4f32, Expand);
880     setOperationAction(ISD::FLOG , MVT::v4f32, Expand);
881     setOperationAction(ISD::FLOG2 , MVT::v4f32, Expand);
882     setOperationAction(ISD::FLOG10 , MVT::v4f32, Expand);
883     setOperationAction(ISD::FEXP , MVT::v4f32, Expand);
884     setOperationAction(ISD::FEXP2 , MVT::v4f32, Expand);
885 
886     setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
887     setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
888 
889     setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
890     setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);
891 
892     addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);
893 
894     setOperationAction(ISD::AND , MVT::v4i1, Legal);
895     setOperationAction(ISD::OR , MVT::v4i1, Legal);
896     setOperationAction(ISD::XOR , MVT::v4i1, Legal);
897 
898     if (!Subtarget.useCRBits())
899       setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
900     setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);
901 
902     setOperationAction(ISD::LOAD  , MVT::v4i1, Custom);
903     setOperationAction(ISD::STORE , MVT::v4i1, Custom);
904 
905     setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4i1, Custom);
906     setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4i1, Expand);
907     setOperationAction(ISD::CONCAT_VECTORS , MVT::v4i1, Expand);
908     setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4i1, Expand);
909     setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4i1, Custom);
910     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
911     setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);
912 
913     setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
914     setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);
915 
916     addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);
917 
918     setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
919     setOperationAction(ISD::FCEIL,  MVT::v4f64, Legal);
920     setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
921     setOperationAction(ISD::FROUND, MVT::v4f64, Legal);
922 
923     setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
924     setOperationAction(ISD::FCEIL,  MVT::v4f32, Legal);
925     setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
926     setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
927 
928     setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
929     setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
930 
931     // These need to set FE_INEXACT, and so cannot be vectorized here.
932     setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
933     setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
934 
935     if (TM.Options.UnsafeFPMath) {
936       setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
937       setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
938 
939       setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
940       setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
941     } else {
942       setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
943       setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);
944 
945       setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
946       setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
947     }
948   }
949 
950   if (Subtarget.has64BitSupport())
951     setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
952 
953   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);
954 
955   if (!isPPC64) {
956     setOperationAction(ISD::ATOMIC_LOAD,  MVT::i64, Expand);
957     setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
958   }
959 
960   setBooleanContents(ZeroOrOneBooleanContent);
961 
962   if (Subtarget.hasAltivec()) {
963     // Altivec instructions set fields to all zeros or all ones.
964     setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
965   }
966 
967   if (!isPPC64) {
968     // These libcalls are not available in 32-bit.
969     setLibcallName(RTLIB::SHL_I128, nullptr);
970     setLibcallName(RTLIB::SRL_I128, nullptr);
971     setLibcallName(RTLIB::SRA_I128, nullptr);
972   }
973 
974   setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);
975 
976   // We have target-specific dag combine patterns for the following nodes:
977   setTargetDAGCombine(ISD::SHL);
978   setTargetDAGCombine(ISD::SRA);
979   setTargetDAGCombine(ISD::SRL);
980   setTargetDAGCombine(ISD::SINT_TO_FP);
981   setTargetDAGCombine(ISD::BUILD_VECTOR);
982   if (Subtarget.hasFPCVT())
983     setTargetDAGCombine(ISD::UINT_TO_FP);
984   setTargetDAGCombine(ISD::LOAD);
985   setTargetDAGCombine(ISD::STORE);
986   setTargetDAGCombine(ISD::BR_CC);
987   if (Subtarget.useCRBits())
988     setTargetDAGCombine(ISD::BRCOND);
989   setTargetDAGCombine(ISD::BSWAP);
990   setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
991   setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
992   setTargetDAGCombine(ISD::INTRINSIC_VOID);
993 
994   setTargetDAGCombine(ISD::SIGN_EXTEND);
995   setTargetDAGCombine(ISD::ZERO_EXTEND);
996   setTargetDAGCombine(ISD::ANY_EXTEND);
997 
998   if (Subtarget.useCRBits()) {
999     setTargetDAGCombine(ISD::TRUNCATE);
1000     setTargetDAGCombine(ISD::SETCC);
1001     setTargetDAGCombine(ISD::SELECT_CC);
1002   }
1003 
1004   // Use reciprocal estimates.
1005   if (TM.Options.UnsafeFPMath) {
1006     setTargetDAGCombine(ISD::FDIV);
1007     setTargetDAGCombine(ISD::FSQRT);
1008   }
1009 
1010   // Darwin long double math library functions have $LDBL128 appended.
1011   if (Subtarget.isDarwin()) {
1012     setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
1013     setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
1014     setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
1015     setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
1016     setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
1017     setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
1018     setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
1019     setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
1020     setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
1021     setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
1022   }
1023 
1024   // With 32 condition bits, we don't need to sink (and duplicate) compares
1025   // aggressively in CodeGenPrep.
1026   if (Subtarget.useCRBits()) {
1027     setHasMultipleConditionRegisters();
1028     setJumpIsExpensive();
1029   }
1030 
1031   setMinFunctionAlignment(2);
1032   if (Subtarget.isDarwin())
1033     setPrefFunctionAlignment(4);
1034 
1035   switch (Subtarget.getDarwinDirective()) {
1036   default: break;
1037   case PPC::DIR_970:
1038   case PPC::DIR_A2:
1039   case PPC::DIR_E500mc:
1040   case PPC::DIR_E5500:
1041   case PPC::DIR_PWR4:
1042   case PPC::DIR_PWR5:
1043   case PPC::DIR_PWR5X:
1044   case PPC::DIR_PWR6:
1045   case PPC::DIR_PWR6X:
1046   case PPC::DIR_PWR7:
1047   case PPC::DIR_PWR8:
1048   case PPC::DIR_PWR9:
1049     setPrefFunctionAlignment(4);
1050     setPrefLoopAlignment(4);
1051     break;
1052   }
1053 
1054   if (Subtarget.enableMachineScheduler())
1055     setSchedulingPreference(Sched::Source);
1056   else
1057     setSchedulingPreference(Sched::Hybrid);
1058 
1059   computeRegisterProperties(STI.getRegisterInfo());
1060 
1061   // The Freescale cores do better with aggressive inlining of memcpy and
1062   // friends. GCC uses same threshold of 128 bytes (= 32 word stores).
1063   if (Subtarget.getDarwinDirective() == PPC::DIR_E500mc ||
1064       Subtarget.getDarwinDirective() == PPC::DIR_E5500) {
1065     MaxStoresPerMemset = 32;
1066     MaxStoresPerMemsetOptSize = 16;
1067     MaxStoresPerMemcpy = 32;
1068     MaxStoresPerMemcpyOptSize = 8;
1069     MaxStoresPerMemmove = 32;
1070     MaxStoresPerMemmoveOptSize = 8;
1071   } else if (Subtarget.getDarwinDirective() == PPC::DIR_A2) {
1072     // The A2 also benefits from (very) aggressive inlining of memcpy and
1073     // friends. The overhead of a the function call, even when warm, can be
1074     // over one hundred cycles.
1075     MaxStoresPerMemset = 128;
1076     MaxStoresPerMemcpy = 128;
1077     MaxStoresPerMemmove = 128;
1078     MaxLoadsPerMemcmp = 128;
1079   } else {
1080     MaxLoadsPerMemcmp = 8;
1081     MaxLoadsPerMemcmpOptSize = 4;
1082   }
1083 }
1084 
1085 /// getMaxByValAlign - Helper for getByValTypeAlignment to determine
1086 /// the desired ByVal argument alignment.
1087 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
1088                              unsigned MaxMaxAlign) {
1089   if (MaxAlign == MaxMaxAlign)
1090     return;
1091   if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1092     if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
1093       MaxAlign = 32;
1094     else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
1095       MaxAlign = 16;
1096   } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1097     unsigned EltAlign = 0;
1098     getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
1099     if (EltAlign > MaxAlign)
1100       MaxAlign = EltAlign;
1101   } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1102     for (auto *EltTy : STy->elements()) {
1103       unsigned EltAlign = 0;
1104       getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
1105       if (EltAlign > MaxAlign)
1106         MaxAlign = EltAlign;
1107       if (MaxAlign == MaxMaxAlign)
1108         break;
1109     }
1110   }
1111 }
1112 
1113 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1114 /// function arguments in the caller parameter area.
1115 unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
1116                                                   const DataLayout &DL) const {
1117   // Darwin passes everything on 4 byte boundary.
1118   if (Subtarget.isDarwin())
1119     return 4;
1120 
1121   // 16byte and wider vectors are passed on 16byte boundary.
1122   // The rest is 8 on PPC64 and 4 on PPC32 boundary.
1123   unsigned Align = Subtarget.isPPC64() ? 8 : 4;
1124   if (Subtarget.hasAltivec() || Subtarget.hasQPX())
1125     getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ? 32 : 16);
1126   return Align;
1127 }
1128 
1129 bool PPCTargetLowering::useSoftFloat() const {
1130   return Subtarget.useSoftFloat();
1131 }
1132 
1133 const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
1134   switch ((PPCISD::NodeType)Opcode) {
1135   case PPCISD::FIRST_NUMBER:    break;
1136   case PPCISD::FSEL:            return "PPCISD::FSEL";
1137   case PPCISD::FCFID:           return "PPCISD::FCFID";
1138   case PPCISD::FCFIDU:          return "PPCISD::FCFIDU";
1139   case PPCISD::FCFIDS:          return "PPCISD::FCFIDS";
1140   case PPCISD::FCFIDUS:         return "PPCISD::FCFIDUS";
1141   case PPCISD::FCTIDZ:          return "PPCISD::FCTIDZ";
1142   case PPCISD::FCTIWZ:          return "PPCISD::FCTIWZ";
1143   case PPCISD::FCTIDUZ:         return "PPCISD::FCTIDUZ";
1144   case PPCISD::FCTIWUZ:         return "PPCISD::FCTIWUZ";
1145   case PPCISD::FRE:             return "PPCISD::FRE";
1146   case PPCISD::FRSQRTE:         return "PPCISD::FRSQRTE";
1147   case PPCISD::STFIWX:          return "PPCISD::STFIWX";
1148   case PPCISD::VMADDFP:         return "PPCISD::VMADDFP";
1149   case PPCISD::VNMSUBFP:        return "PPCISD::VNMSUBFP";
1150   case PPCISD::VPERM:           return "PPCISD::VPERM";
1151   case PPCISD::XXSPLT:          return "PPCISD::XXSPLT";
1152   case PPCISD::VECINSERT:       return "PPCISD::VECINSERT";
1153   case PPCISD::XXREVERSE:       return "PPCISD::XXREVERSE";
1154   case PPCISD::XXPERMDI:        return "PPCISD::XXPERMDI";
1155   case PPCISD::VECSHL:          return "PPCISD::VECSHL";
1156   case PPCISD::CMPB:            return "PPCISD::CMPB";
1157   case PPCISD::Hi:              return "PPCISD::Hi";
1158   case PPCISD::Lo:              return "PPCISD::Lo";
1159   case PPCISD::TOC_ENTRY:       return "PPCISD::TOC_ENTRY";
1160   case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
1161   case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
1162   case PPCISD::DYNALLOC:        return "PPCISD::DYNALLOC";
1163   case PPCISD::DYNAREAOFFSET:   return "PPCISD::DYNAREAOFFSET";
1164   case PPCISD::GlobalBaseReg:   return "PPCISD::GlobalBaseReg";
1165   case PPCISD::SRL:             return "PPCISD::SRL";
1166   case PPCISD::SRA:             return "PPCISD::SRA";
1167   case PPCISD::SHL:             return "PPCISD::SHL";
1168   case PPCISD::SRA_ADDZE:       return "PPCISD::SRA_ADDZE";
1169   case PPCISD::CALL:            return "PPCISD::CALL";
1170   case PPCISD::CALL_NOP:        return "PPCISD::CALL_NOP";
1171   case PPCISD::MTCTR:           return "PPCISD::MTCTR";
1172   case PPCISD::BCTRL:           return "PPCISD::BCTRL";
1173   case PPCISD::BCTRL_LOAD_TOC:  return "PPCISD::BCTRL_LOAD_TOC";
1174   case PPCISD::RET_FLAG:        return "PPCISD::RET_FLAG";
1175   case PPCISD::READ_TIME_BASE:  return "PPCISD::READ_TIME_BASE";
1176   case PPCISD::EH_SJLJ_SETJMP:  return "PPCISD::EH_SJLJ_SETJMP";
1177   case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
1178   case PPCISD::MFOCRF:          return "PPCISD::MFOCRF";
1179   case PPCISD::MFVSR:           return "PPCISD::MFVSR";
1180   case PPCISD::MTVSRA:          return "PPCISD::MTVSRA";
1181   case PPCISD::MTVSRZ:          return "PPCISD::MTVSRZ";
1182   case PPCISD::SINT_VEC_TO_FP:  return "PPCISD::SINT_VEC_TO_FP";
1183   case PPCISD::UINT_VEC_TO_FP:  return "PPCISD::UINT_VEC_TO_FP";
1184   case PPCISD::ANDIo_1_EQ_BIT:  return "PPCISD::ANDIo_1_EQ_BIT";
1185   case PPCISD::ANDIo_1_GT_BIT:  return "PPCISD::ANDIo_1_GT_BIT";
1186   case PPCISD::VCMP:            return "PPCISD::VCMP";
1187   case PPCISD::VCMPo:           return "PPCISD::VCMPo";
1188   case PPCISD::LBRX:            return "PPCISD::LBRX";
1189   case PPCISD::STBRX:           return "PPCISD::STBRX";
1190   case PPCISD::LFIWAX:          return "PPCISD::LFIWAX";
1191   case PPCISD::LFIWZX:          return "PPCISD::LFIWZX";
1192   case PPCISD::LXSIZX:          return "PPCISD::LXSIZX";
1193   case PPCISD::STXSIX:          return "PPCISD::STXSIX";
1194   case PPCISD::VEXTS:           return "PPCISD::VEXTS";
1195   case PPCISD::SExtVElems:      return "PPCISD::SExtVElems";
1196   case PPCISD::LXVD2X:          return "PPCISD::LXVD2X";
1197   case PPCISD::STXVD2X:         return "PPCISD::STXVD2X";
1198   case PPCISD::COND_BRANCH:     return "PPCISD::COND_BRANCH";
1199   case PPCISD::BDNZ:            return "PPCISD::BDNZ";
1200   case PPCISD::BDZ:             return "PPCISD::BDZ";
1201   case PPCISD::MFFS:            return "PPCISD::MFFS";
1202   case PPCISD::FADDRTZ:         return "PPCISD::FADDRTZ";
1203   case PPCISD::TC_RETURN:       return "PPCISD::TC_RETURN";
1204   case PPCISD::CR6SET:          return "PPCISD::CR6SET";
1205   case PPCISD::CR6UNSET:        return "PPCISD::CR6UNSET";
1206   case PPCISD::PPC32_GOT:       return "PPCISD::PPC32_GOT";
1207   case PPCISD::PPC32_PICGOT:    return "PPCISD::PPC32_PICGOT";
1208   case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
1209   case PPCISD::LD_GOT_TPREL_L:  return "PPCISD::LD_GOT_TPREL_L";
1210   case PPCISD::ADD_TLS:         return "PPCISD::ADD_TLS";
1211   case PPCISD::ADDIS_TLSGD_HA:  return "PPCISD::ADDIS_TLSGD_HA";
1212   case PPCISD::ADDI_TLSGD_L:    return "PPCISD::ADDI_TLSGD_L";
1213   case PPCISD::GET_TLS_ADDR:    return "PPCISD::GET_TLS_ADDR";
1214   case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
1215   case PPCISD::ADDIS_TLSLD_HA:  return "PPCISD::ADDIS_TLSLD_HA";
1216   case PPCISD::ADDI_TLSLD_L:    return "PPCISD::ADDI_TLSLD_L";
1217   case PPCISD::GET_TLSLD_ADDR:  return "PPCISD::GET_TLSLD_ADDR";
1218   case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
1219   case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
1220   case PPCISD::ADDI_DTPREL_L:   return "PPCISD::ADDI_DTPREL_L";
1221   case PPCISD::VADD_SPLAT:      return "PPCISD::VADD_SPLAT";
1222   case PPCISD::SC:              return "PPCISD::SC";
1223   case PPCISD::CLRBHRB:         return "PPCISD::CLRBHRB";
1224   case PPCISD::MFBHRBE:         return "PPCISD::MFBHRBE";
1225   case PPCISD::RFEBB:           return "PPCISD::RFEBB";
1226   case PPCISD::XXSWAPD:         return "PPCISD::XXSWAPD";
1227   case PPCISD::SWAP_NO_CHAIN:   return "PPCISD::SWAP_NO_CHAIN";
1228   case PPCISD::QVFPERM:         return "PPCISD::QVFPERM";
1229   case PPCISD::QVGPCI:          return "PPCISD::QVGPCI";
1230   case PPCISD::QVALIGNI:        return "PPCISD::QVALIGNI";
1231   case PPCISD::QVESPLATI:       return "PPCISD::QVESPLATI";
1232   case PPCISD::QBFLT:           return "PPCISD::QBFLT";
1233   case PPCISD::QVLFSb:          return "PPCISD::QVLFSb";
1234   }
1235   return nullptr;
1236 }
1237 
1238 EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
1239                                           EVT VT) const {
1240   if (!VT.isVector())
1241     return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;
1242 
1243   if (Subtarget.hasQPX())
1244     return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());
1245 
1246   return VT.changeVectorElementTypeToInteger();
1247 }
1248 
1249 bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
1250   assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
1251   return true;
1252 }
1253 
1254 //===----------------------------------------------------------------------===//
1255 // Node matching predicates, for use by the tblgen matching code.
1256 //===----------------------------------------------------------------------===//
1257 
1258 /// isFloatingPointZero - Return true if this is 0.0 or -0.0.
1259 static bool isFloatingPointZero(SDValue Op) {
1260   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
1261     return CFP->getValueAPF().isZero();
1262   else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
1263     // Maybe this has already been legalized into the constant pool?
1264     if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
1265       if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
1266         return CFP->getValueAPF().isZero();
1267   }
1268   return false;
1269 }
1270 
1271 /// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
1272 /// true if Op is undef or if it matches the specified value.
1273 static bool isConstantOrUndef(int Op, int Val) {
1274   return Op < 0 || Op == Val;
1275 }
1276 
1277 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
1278 /// VPKUHUM instruction.
1279 /// The ShuffleKind distinguishes between big-endian operations with
1280 /// two different inputs (0), either-endian operations with two identical
1281 /// inputs (1), and little-endian operations with two different inputs (2).
1282 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1283 bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1284                                SelectionDAG &DAG) {
1285   bool IsLE = DAG.getDataLayout().isLittleEndian();
1286   if (ShuffleKind == 0) {
1287     if (IsLE)
1288       return false;
1289     for (unsigned i = 0; i != 16; ++i)
1290       if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
1291         return false;
1292   } else if (ShuffleKind == 2) {
1293     if (!IsLE)
1294       return false;
1295     for (unsigned i = 0; i != 16; ++i)
1296       if (!isConstantOrUndef(N->getMaskElt(i), i*2))
1297         return false;
1298   } else if (ShuffleKind == 1) {
1299     unsigned j = IsLE ? 0 : 1;
1300     for (unsigned i = 0; i != 8; ++i)
1301       if (!isConstantOrUndef(N->getMaskElt(i),    i*2+j) ||
1302           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j))
1303         return false;
1304   }
1305   return true;
1306 }
1307 
1308 /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
1309 /// VPKUWUM instruction.
1310 /// The ShuffleKind distinguishes between big-endian operations with
1311 /// two different inputs (0), either-endian operations with two identical
1312 /// inputs (1), and little-endian operations with two different inputs (2).
1313 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1314 bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1315                                SelectionDAG &DAG) {
1316   bool IsLE = DAG.getDataLayout().isLittleEndian();
1317   if (ShuffleKind == 0) {
1318     if (IsLE)
1319       return false;
1320     for (unsigned i = 0; i != 16; i += 2)
1321       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+2) ||
1322           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+3))
1323         return false;
1324   } else if (ShuffleKind == 2) {
1325     if (!IsLE)
1326       return false;
1327     for (unsigned i = 0; i != 16; i += 2)
1328       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
1329           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1))
1330         return false;
1331   } else if (ShuffleKind == 1) {
1332     unsigned j = IsLE ? 0 : 2;
1333     for (unsigned i = 0; i != 8; i += 2)
1334       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
1335           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
1336           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
1337           !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1))
1338         return false;
1339   }
1340   return true;
1341 }
1342 
1343 /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
1344 /// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
1345 /// current subtarget.
1346 ///
1347 /// The ShuffleKind distinguishes between big-endian operations with
1348 /// two different inputs (0), either-endian operations with two identical
1349 /// inputs (1), and little-endian operations with two different inputs (2).
1350 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1351 bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1352                                SelectionDAG &DAG) {
1353   const PPCSubtarget& Subtarget =
1354     static_cast<const PPCSubtarget&>(DAG.getSubtarget());
1355   if (!Subtarget.hasP8Vector())
1356     return false;
1357 
1358   bool IsLE = DAG.getDataLayout().isLittleEndian();
1359   if (ShuffleKind == 0) {
1360     if (IsLE)
1361       return false;
1362     for (unsigned i = 0; i != 16; i += 4)
1363       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+4) ||
1364           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+5) ||
1365           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+6) ||
1366           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+7))
1367         return false;
1368   } else if (ShuffleKind == 2) {
1369     if (!IsLE)
1370       return false;
1371     for (unsigned i = 0; i != 16; i += 4)
1372       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
1373           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1) ||
1374           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+2) ||
1375           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+3))
1376         return false;
1377   } else if (ShuffleKind == 1) {
1378     unsigned j = IsLE ? 0 : 4;
1379     for (unsigned i = 0; i != 8; i += 4)
1380       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
1381           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
1382           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+j+2) ||
1383           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+j+3) ||
1384           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
1385           !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1) ||
1386           !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
1387           !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
1388         return false;
1389   }
1390   return true;
1391 }
1392 
1393 /// isVMerge - Common function, used to match vmrg* shuffles.
1394 ///
1395 static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
1396                      unsigned LHSStart, unsigned RHSStart) {
1397   if (N->getValueType(0) != MVT::v16i8)
1398     return false;
1399   assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
1400          "Unsupported merge size!");
1401 
1402   for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
1403     for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
1404       if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
1405                              LHSStart+j+i*UnitSize) ||
1406           !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
1407                              RHSStart+j+i*UnitSize))
1408         return false;
1409     }
1410   return true;
1411 }
1412 
1413 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
1414 /// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
1415 /// The ShuffleKind distinguishes between big-endian merges with two
1416 /// different inputs (0), either-endian merges with two identical inputs (1),
1417 /// and little-endian merges with two different inputs (2).  For the latter,
1418 /// the input operands are swapped (see PPCInstrAltivec.td).
1419 bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1420                              unsigned ShuffleKind, SelectionDAG &DAG) {
1421   if (DAG.getDataLayout().isLittleEndian()) {
1422     if (ShuffleKind == 1) // unary
1423       return isVMerge(N, UnitSize, 0, 0);
1424     else if (ShuffleKind == 2) // swapped
1425       return isVMerge(N, UnitSize, 0, 16);
1426     else
1427       return false;
1428   } else {
1429     if (ShuffleKind == 1) // unary
1430       return isVMerge(N, UnitSize, 8, 8);
1431     else if (ShuffleKind == 0) // normal
1432       return isVMerge(N, UnitSize, 8, 24);
1433     else
1434       return false;
1435   }
1436 }
1437 
1438 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
1439 /// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
1440 /// The ShuffleKind distinguishes between big-endian merges with two
1441 /// different inputs (0), either-endian merges with two identical inputs (1),
1442 /// and little-endian merges with two different inputs (2).  For the latter,
1443 /// the input operands are swapped (see PPCInstrAltivec.td).
1444 bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1445                              unsigned ShuffleKind, SelectionDAG &DAG) {
1446   if (DAG.getDataLayout().isLittleEndian()) {
1447     if (ShuffleKind == 1) // unary
1448       return isVMerge(N, UnitSize, 8, 8);
1449     else if (ShuffleKind == 2) // swapped
1450       return isVMerge(N, UnitSize, 8, 24);
1451     else
1452       return false;
1453   } else {
1454     if (ShuffleKind == 1) // unary
1455       return isVMerge(N, UnitSize, 0, 0);
1456     else if (ShuffleKind == 0) // normal
1457       return isVMerge(N, UnitSize, 0, 16);
1458     else
1459       return false;
1460   }
1461 }
1462 
1463 /**
1464  * \brief Common function used to match vmrgew and vmrgow shuffles
1465  *
1466  * The indexOffset determines whether to look for even or odd words in
1467  * the shuffle mask. This is based on the of the endianness of the target
1468  * machine.
1469  *   - Little Endian:
1470  *     - Use offset of 0 to check for odd elements
1471  *     - Use offset of 4 to check for even elements
1472  *   - Big Endian:
1473  *     - Use offset of 0 to check for even elements
1474  *     - Use offset of 4 to check for odd elements
1475  * A detailed description of the vector element ordering for little endian and
1476  * big endian can be found at
1477  * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
1478  * Targeting your applications - what little endian and big endian IBM XL C/C++
1479  * compiler differences mean to you
1480  *
1481  * The mask to the shuffle vector instruction specifies the indices of the
1482  * elements from the two input vectors to place in the result. The elements are
1483  * numbered in array-access order, starting with the first vector. These vectors
1484  * are always of type v16i8, thus each vector will contain 16 elements of size
1485  * 8. More info on the shuffle vector can be found in the
1486  * http://llvm.org/docs/LangRef.html#shufflevector-instruction
1487  * Language Reference.
1488  *
1489  * The RHSStartValue indicates whether the same input vectors are used (unary)
1490  * or two different input vectors are used, based on the following:
1491  *   - If the instruction uses the same vector for both inputs, the range of the
1492  *     indices will be 0 to 15. In this case, the RHSStart value passed should
1493  *     be 0.
1494  *   - If the instruction has two different vectors then the range of the
1495  *     indices will be 0 to 31. In this case, the RHSStart value passed should
1496  *     be 16 (indices 0-15 specify elements in the first vector while indices 16
1497  *     to 31 specify elements in the second vector).
1498  *
1499  * \param[in] N The shuffle vector SD Node to analyze
1500  * \param[in] IndexOffset Specifies whether to look for even or odd elements
1501  * \param[in] RHSStartValue Specifies the starting index for the righthand input
1502  * vector to the shuffle_vector instruction
1503  * \return true iff this shuffle vector represents an even or odd word merge
1504  */
1505 static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
1506                      unsigned RHSStartValue) {
1507   if (N->getValueType(0) != MVT::v16i8)
1508     return false;
1509 
1510   for (unsigned i = 0; i < 2; ++i)
1511     for (unsigned j = 0; j < 4; ++j)
1512       if (!isConstantOrUndef(N->getMaskElt(i*4+j),
1513                              i*RHSStartValue+j+IndexOffset) ||
1514           !isConstantOrUndef(N->getMaskElt(i*4+j+8),
1515                              i*RHSStartValue+j+IndexOffset+8))
1516         return false;
1517   return true;
1518 }
1519 
1520 /**
1521  * \brief Determine if the specified shuffle mask is suitable for the vmrgew or
1522  * vmrgow instructions.
1523  *
1524  * \param[in] N The shuffle vector SD Node to analyze
1525  * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
1526  * \param[in] ShuffleKind Identify the type of merge:
1527  *   - 0 = big-endian merge with two different inputs;
1528  *   - 1 = either-endian merge with two identical inputs;
1529  *   - 2 = little-endian merge with two different inputs (inputs are swapped for
1530  *     little-endian merges).
1531  * \param[in] DAG The current SelectionDAG
1532  * \return true iff this shuffle mask
1533  */
1534 bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
1535                               unsigned ShuffleKind, SelectionDAG &DAG) {
1536   if (DAG.getDataLayout().isLittleEndian()) {
1537     unsigned indexOffset = CheckEven ? 4 : 0;
1538     if (ShuffleKind == 1) // Unary
1539       return isVMerge(N, indexOffset, 0);
1540     else if (ShuffleKind == 2) // swapped
1541       return isVMerge(N, indexOffset, 16);
1542     else
1543       return false;
1544   }
1545   else {
1546     unsigned indexOffset = CheckEven ? 0 : 4;
1547     if (ShuffleKind == 1) // Unary
1548       return isVMerge(N, indexOffset, 0);
1549     else if (ShuffleKind == 0) // Normal
1550       return isVMerge(N, indexOffset, 16);
1551     else
1552       return false;
1553   }
1554   return false;
1555 }
1556 
1557 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
1558 /// amount, otherwise return -1.
1559 /// The ShuffleKind distinguishes between big-endian operations with two
1560 /// different inputs (0), either-endian operations with two identical inputs
1561 /// (1), and little-endian operations with two different inputs (2).  For the
1562 /// latter, the input operands are swapped (see PPCInstrAltivec.td).
1563 int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
1564                              SelectionDAG &DAG) {
1565   if (N->getValueType(0) != MVT::v16i8)
1566     return -1;
1567 
1568   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1569 
1570   // Find the first non-undef value in the shuffle mask.
1571   unsigned i;
1572   for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
1573     /*search*/;
1574 
1575   if (i == 16) return -1;  // all undef.
1576 
1577   // Otherwise, check to see if the rest of the elements are consecutively
1578   // numbered from this value.
1579   unsigned ShiftAmt = SVOp->getMaskElt(i);
1580   if (ShiftAmt < i) return -1;
1581 
1582   ShiftAmt -= i;
1583   bool isLE = DAG.getDataLayout().isLittleEndian();
1584 
1585   if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
1586     // Check the rest of the elements to see if they are consecutive.
1587     for (++i; i != 16; ++i)
1588       if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
1589         return -1;
1590   } else if (ShuffleKind == 1) {
1591     // Check the rest of the elements to see if they are consecutive.
1592     for (++i; i != 16; ++i)
1593       if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
1594         return -1;
1595   } else
1596     return -1;
1597 
1598   if (isLE)
1599     ShiftAmt = 16 - ShiftAmt;
1600 
1601   return ShiftAmt;
1602 }
1603 
1604 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
1605 /// specifies a splat of a single element that is suitable for input to
1606 /// VSPLTB/VSPLTH/VSPLTW.
1607 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
1608   assert(N->getValueType(0) == MVT::v16i8 &&
1609          (EltSize == 1 || EltSize == 2 || EltSize == 4));
1610 
1611   // The consecutive indices need to specify an element, not part of two
1612   // different elements.  So abandon ship early if this isn't the case.
1613   if (N->getMaskElt(0) % EltSize != 0)
1614     return false;
1615 
1616   // This is a splat operation if each element of the permute is the same, and
1617   // if the value doesn't reference the second vector.
1618   unsigned ElementBase = N->getMaskElt(0);
1619 
1620   // FIXME: Handle UNDEF elements too!
1621   if (ElementBase >= 16)
1622     return false;
1623 
1624   // Check that the indices are consecutive, in the case of a multi-byte element
1625   // splatted with a v16i8 mask.
1626   for (unsigned i = 1; i != EltSize; ++i)
1627     if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
1628       return false;
1629 
1630   for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
1631     if (N->getMaskElt(i) < 0) continue;
1632     for (unsigned j = 0; j != EltSize; ++j)
1633       if (N->getMaskElt(i+j) != N->getMaskElt(j))
1634         return false;
1635   }
1636   return true;
1637 }
1638 
1639 /// Check that the mask is shuffling N byte elements. Within each N byte
1640 /// element of the mask, the indices could be either in increasing or
1641 /// decreasing order as long as they are consecutive.
1642 /// \param[in] N the shuffle vector SD Node to analyze
1643 /// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/
1644 /// Word/DoubleWord/QuadWord).
1645 /// \param[in] StepLen the delta indices number among the N byte element, if
1646 /// the mask is in increasing/decreasing order then it is 1/-1.
1647 /// \return true iff the mask is shuffling N byte elements.
1648 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
1649                                    int StepLen) {
1650   assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
1651          "Unexpected element width.");
1652   assert((StepLen == 1 || StepLen == -1) && "Unexpected element width.");
1653 
1654   unsigned NumOfElem = 16 / Width;
1655   unsigned MaskVal[16]; //  Width is never greater than 16
1656   for (unsigned i = 0; i < NumOfElem; ++i) {
1657     MaskVal[0] = N->getMaskElt(i * Width);
1658     if ((StepLen == 1) && (MaskVal[0] % Width)) {
1659       return false;
1660     } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
1661       return false;
1662     }
1663 
1664     for (unsigned int j = 1; j < Width; ++j) {
1665       MaskVal[j] = N->getMaskElt(i * Width + j);
1666       if (MaskVal[j] != MaskVal[j-1] + StepLen) {
1667         return false;
1668       }
1669     }
1670   }
1671 
1672   return true;
1673 }
1674 
1675 bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
1676                           unsigned &InsertAtByte, bool &Swap, bool IsLE) {
1677   if (!isNByteElemShuffleMask(N, 4, 1))
1678     return false;
1679 
1680   // Now we look at mask elements 0,4,8,12
1681   unsigned M0 = N->getMaskElt(0) / 4;
1682   unsigned M1 = N->getMaskElt(4) / 4;
1683   unsigned M2 = N->getMaskElt(8) / 4;
1684   unsigned M3 = N->getMaskElt(12) / 4;
1685   unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
1686   unsigned BigEndianShifts[] = { 3, 0, 1, 2 };
1687 
1688   // Below, let H and L be arbitrary elements of the shuffle mask
1689   // where H is in the range [4,7] and L is in the range [0,3].
1690   // H, 1, 2, 3 or L, 5, 6, 7
1691   if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
1692       (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
1693     ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
1694     InsertAtByte = IsLE ? 12 : 0;
1695     Swap = M0 < 4;
1696     return true;
1697   }
1698   // 0, H, 2, 3 or 4, L, 6, 7
1699   if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
1700       (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
1701     ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
1702     InsertAtByte = IsLE ? 8 : 4;
1703     Swap = M1 < 4;
1704     return true;
1705   }
1706   // 0, 1, H, 3 or 4, 5, L, 7
1707   if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
1708       (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
1709     ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
1710     InsertAtByte = IsLE ? 4 : 8;
1711     Swap = M2 < 4;
1712     return true;
1713   }
1714   // 0, 1, 2, H or 4, 5, 6, L
1715   if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
1716       (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
1717     ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
1718     InsertAtByte = IsLE ? 0 : 12;
1719     Swap = M3 < 4;
1720     return true;
1721   }
1722 
1723   // If both vector operands for the shuffle are the same vector, the mask will
1724   // contain only elements from the first one and the second one will be undef.
1725   if (N->getOperand(1).isUndef()) {
1726     ShiftElts = 0;
1727     Swap = true;
1728     unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
1729     if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
1730       InsertAtByte = IsLE ? 12 : 0;
1731       return true;
1732     }
1733     if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
1734       InsertAtByte = IsLE ? 8 : 4;
1735       return true;
1736     }
1737     if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
1738       InsertAtByte = IsLE ? 4 : 8;
1739       return true;
1740     }
1741     if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
1742       InsertAtByte = IsLE ? 0 : 12;
1743       return true;
1744     }
1745   }
1746 
1747   return false;
1748 }
1749 
1750 bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
1751                                bool &Swap, bool IsLE) {
1752   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
1753   // Ensure each byte index of the word is consecutive.
1754   if (!isNByteElemShuffleMask(N, 4, 1))
1755     return false;
1756 
1757   // Now we look at mask elements 0,4,8,12, which are the beginning of words.
1758   unsigned M0 = N->getMaskElt(0) / 4;
1759   unsigned M1 = N->getMaskElt(4) / 4;
1760   unsigned M2 = N->getMaskElt(8) / 4;
1761   unsigned M3 = N->getMaskElt(12) / 4;
1762 
1763   // If both vector operands for the shuffle are the same vector, the mask will
1764   // contain only elements from the first one and the second one will be undef.
1765   if (N->getOperand(1).isUndef()) {
1766     assert(M0 < 4 && "Indexing into an undef vector?");
1767     if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
1768       return false;
1769 
1770     ShiftElts = IsLE ? (4 - M0) % 4 : M0;
1771     Swap = false;
1772     return true;
1773   }
1774 
1775   // Ensure each word index of the ShuffleVector Mask is consecutive.
1776   if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)
1777     return false;
1778 
1779   if (IsLE) {
1780     if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
1781       // Input vectors don't need to be swapped if the leading element
1782       // of the result is one of the 3 left elements of the second vector
1783       // (or if there is no shift to be done at all).
1784       Swap = false;
1785       ShiftElts = (8 - M0) % 8;
1786     } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
1787       // Input vectors need to be swapped if the leading element
1788       // of the result is one of the 3 left elements of the first vector
1789       // (or if we're shifting by 4 - thereby simply swapping the vectors).
1790       Swap = true;
1791       ShiftElts = (4 - M0) % 4;
1792     }
1793 
1794     return true;
1795   } else {                                          // BE
1796     if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
1797       // Input vectors don't need to be swapped if the leading element
1798       // of the result is one of the 4 elements of the first vector.
1799       Swap = false;
1800       ShiftElts = M0;
1801     } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
1802       // Input vectors need to be swapped if the leading element
1803       // of the result is one of the 4 elements of the right vector.
1804       Swap = true;
1805       ShiftElts = M0 - 4;
1806     }
1807 
1808     return true;
1809   }
1810 }
1811 
1812 bool static isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) {
1813   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
1814 
1815   if (!isNByteElemShuffleMask(N, Width, -1))
1816     return false;
1817 
1818   for (int i = 0; i < 16; i += Width)
1819     if (N->getMaskElt(i) != i + Width - 1)
1820       return false;
1821 
1822   return true;
1823 }
1824 
1825 bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) {
1826   return isXXBRShuffleMaskHelper(N, 2);
1827 }
1828 
1829 bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) {
1830   return isXXBRShuffleMaskHelper(N, 4);
1831 }
1832 
1833 bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) {
1834   return isXXBRShuffleMaskHelper(N, 8);
1835 }
1836 
1837 bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) {
1838   return isXXBRShuffleMaskHelper(N, 16);
1839 }
1840 
1841 /// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap
1842 /// if the inputs to the instruction should be swapped and set \p DM to the
1843 /// value for the immediate.
1844 /// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI
1845 /// AND element 0 of the result comes from the first input (LE) or second input
1846 /// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered.
1847 /// \return true iff the given mask of shuffle node \p N is a XXPERMDI shuffle
1848 /// mask.
1849 bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
1850                                bool &Swap, bool IsLE) {
1851   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
1852 
1853   // Ensure each byte index of the double word is consecutive.
1854   if (!isNByteElemShuffleMask(N, 8, 1))
1855     return false;
1856 
1857   unsigned M0 = N->getMaskElt(0) / 8;
1858   unsigned M1 = N->getMaskElt(8) / 8;
1859   assert(((M0 | M1) < 4) && "A mask element out of bounds?");
1860 
1861   // If both vector operands for the shuffle are the same vector, the mask will
1862   // contain only elements from the first one and the second one will be undef.
1863   if (N->getOperand(1).isUndef()) {
1864     if ((M0 | M1) < 2) {
1865       DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
1866       Swap = false;
1867       return true;
1868     } else
1869       return false;
1870   }
1871 
1872   if (IsLE) {
1873     if (M0 > 1 && M1 < 2) {
1874       Swap = false;
1875     } else if (M0 < 2 && M1 > 1) {
1876       M0 = (M0 + 2) % 4;
1877       M1 = (M1 + 2) % 4;
1878       Swap = true;
1879     } else
1880       return false;
1881 
1882     // Note: if control flow comes here that means Swap is already set above
1883     DM = (((~M1) & 1) << 1) + ((~M0) & 1);
1884     return true;
1885   } else { // BE
1886     if (M0 < 2 && M1 > 1) {
1887       Swap = false;
1888     } else if (M0 > 1 && M1 < 2) {
1889       M0 = (M0 + 2) % 4;
1890       M1 = (M1 + 2) % 4;
1891       Swap = true;
1892     } else
1893       return false;
1894 
1895     // Note: if control flow comes here that means Swap is already set above
1896     DM = (M0 << 1) + (M1 & 1);
1897     return true;
1898   }
1899 }
1900 
1901 
1902 /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
1903 /// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
1904 unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize,
1905                                 SelectionDAG &DAG) {
1906   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1907   assert(isSplatShuffleMask(SVOp, EltSize));
1908   if (DAG.getDataLayout().isLittleEndian())
1909     return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
1910   else
1911     return SVOp->getMaskElt(0) / EltSize;
1912 }
1913 
1914 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
1915 /// by using a vspltis[bhw] instruction of the specified element size, return
1916 /// the constant being splatted.  The ByteSize field indicates the number of
1917 /// bytes of each element [124] -> [bhw].
1918 SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
1919   SDValue OpVal(nullptr, 0);
1920 
1921   // If ByteSize of the splat is bigger than the element size of the
1922   // build_vector, then we have a case where we are checking for a splat where
1923   // multiple elements of the buildvector are folded together into a single
1924   // logical element of the splat (e.g. "vsplish 1" to splat {0,1}*8).
1925   unsigned EltSize = 16/N->getNumOperands();
1926   if (EltSize < ByteSize) {
1927     unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
1928     SDValue UniquedVals[4];
1929     assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
1930 
1931     // See if all of the elements in the buildvector agree across.
1932     for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
1933       if (N->getOperand(i).isUndef()) continue;
1934       // If the element isn't a constant, bail fully out.
1935       if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();
1936 
1937       if (!UniquedVals[i&(Multiple-1)].getNode())
1938         UniquedVals[i&(Multiple-1)] = N->getOperand(i);
1939       else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
1940         return SDValue();  // no match.
1941     }
1942 
1943     // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
1944     // either constant or undef values that are identical for each chunk.  See
1945     // if these chunks can form into a larger vspltis*.
1946 
1947     // Check to see if all of the leading entries are either 0 or -1.  If
1948     // neither, then this won't fit into the immediate field.
1949     bool LeadingZero = true;
1950     bool LeadingOnes = true;
1951     for (unsigned i = 0; i != Multiple-1; ++i) {
1952       if (!UniquedVals[i].getNode()) continue;  // Must have been undefs.
1953 
1954       LeadingZero &= isNullConstant(UniquedVals[i]);
1955       LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
1956     }
1957     // Finally, check the least significant entry.
1958     if (LeadingZero) {
1959       if (!UniquedVals[Multiple-1].getNode())
1960         return DAG.getTargetConstant(0, SDLoc(N), MVT::i32);  // 0,0,0,undef
1961       int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
1962       if (Val < 16)                                   // 0,0,0,4 -> vspltisw(4)
1963         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
1964     }
1965     if (LeadingOnes) {
1966       if (!UniquedVals[Multiple-1].getNode())
1967         return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
1968       int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
1969       if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
1970         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
1971     }
1972 
1973     return SDValue();
1974   }
1975 
1976   // Check to see if this buildvec has a single non-undef value in its elements.
1977   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
1978     if (N->getOperand(i).isUndef()) continue;
1979     if (!OpVal.getNode())
1980       OpVal = N->getOperand(i);
1981     else if (OpVal != N->getOperand(i))
1982       return SDValue();
1983   }
1984 
1985   if (!OpVal.getNode()) return SDValue();  // All UNDEF: use implicit def.
1986 
1987   unsigned ValSizeInBytes = EltSize;
1988   uint64_t Value = 0;
1989   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
1990     Value = CN->getZExtValue();
1991   } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
1992     assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
1993     Value = FloatToBits(CN->getValueAPF().convertToFloat());
1994   }
1995 
1996   // If the splat value is larger than the element value, then we can never do
1997   // this splat.  The only case that we could fit the replicated bits into our
1998   // immediate field for would be zero, and we prefer to use vxor for it.
1999   if (ValSizeInBytes < ByteSize) return SDValue();
2000 
2001   // If the element value is larger than the splat value, check if it consists
2002   // of a repeated bit pattern of size ByteSize.
2003   if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
2004     return SDValue();
2005 
2006   // Properly sign extend the value.
2007   int MaskVal = SignExtend32(Value, ByteSize * 8);
2008 
2009   // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
2010   if (MaskVal == 0) return SDValue();
2011 
2012   // Finally, if this value fits in a 5 bit sext field, return it
2013   if (SignExtend32<5>(MaskVal) == MaskVal)
2014     return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
2015   return SDValue();
2016 }
2017 
2018 /// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift
2019 /// amount, otherwise return -1.
2020 int PPC::isQVALIGNIShuffleMask(SDNode *N) {
2021   EVT VT = N->getValueType(0);
2022   if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1)
2023     return -1;
2024 
2025   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2026 
2027   // Find the first non-undef value in the shuffle mask.
2028   unsigned i;
2029   for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i)
2030     /*search*/;
2031 
2032   if (i == 4) return -1;  // all undef.
2033 
2034   // Otherwise, check to see if the rest of the elements are consecutively
2035   // numbered from this value.
2036   unsigned ShiftAmt = SVOp->getMaskElt(i);
2037   if (ShiftAmt < i) return -1;
2038   ShiftAmt -= i;
2039 
2040   // Check the rest of the elements to see if they are consecutive.
2041   for (++i; i != 4; ++i)
2042     if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
2043       return -1;
2044 
2045   return ShiftAmt;
2046 }
2047 
2048 //===----------------------------------------------------------------------===//
2049 //  Addressing Mode Selection
2050 //===----------------------------------------------------------------------===//
2051 
2052 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit
2053 /// or 64-bit immediate, and if the value can be accurately represented as a
2054 /// sign extension from a 16-bit value.  If so, this returns true and the
2055 /// immediate.
2056 bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
2057   if (!isa<ConstantSDNode>(N))
2058     return false;
2059 
2060   Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
2061   if (N->getValueType(0) == MVT::i32)
2062     return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
2063   else
2064     return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
2065 }
2066 bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
2067   return isIntS16Immediate(Op.getNode(), Imm);
2068 }
2069 
2070 /// SelectAddressRegReg - Given the specified addressed, check to see if it
2071 /// can be represented as an indexed [r+r] operation.  Returns false if it
2072 /// can be more efficiently represented with [r+imm].
2073 bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
2074                                             SDValue &Index,
2075                                             SelectionDAG &DAG) const {
2076   int16_t imm = 0;
2077   if (N.getOpcode() == ISD::ADD) {
2078     if (isIntS16Immediate(N.getOperand(1), imm))
2079       return false;    // r+i
2080     if (N.getOperand(1).getOpcode() == PPCISD::Lo)
2081       return false;    // r+i
2082 
2083     Base = N.getOperand(0);
2084     Index = N.getOperand(1);
2085     return true;
2086   } else if (N.getOpcode() == ISD::OR) {
2087     if (isIntS16Immediate(N.getOperand(1), imm))
2088       return false;    // r+i can fold it if we can.
2089 
2090     // If this is an or of disjoint bitfields, we can codegen this as an add
2091     // (for better address arithmetic) if the LHS and RHS of the OR are provably
2092     // disjoint.
2093     KnownBits LHSKnown, RHSKnown;
2094     DAG.computeKnownBits(N.getOperand(0), LHSKnown);
2095 
2096     if (LHSKnown.Zero.getBoolValue()) {
2097       DAG.computeKnownBits(N.getOperand(1), RHSKnown);
2098       // If all of the bits are known zero on the LHS or RHS, the add won't
2099       // carry.
2100       if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
2101         Base = N.getOperand(0);
2102         Index = N.getOperand(1);
2103         return true;
2104       }
2105     }
2106   }
2107 
2108   return false;
2109 }
2110 
2111 // If we happen to be doing an i64 load or store into a stack slot that has
2112 // less than a 4-byte alignment, then the frame-index elimination may need to
2113 // use an indexed load or store instruction (because the offset may not be a
2114 // multiple of 4). The extra register needed to hold the offset comes from the
2115 // register scavenger, and it is possible that the scavenger will need to use
2116 // an emergency spill slot. As a result, we need to make sure that a spill slot
2117 // is allocated when doing an i64 load/store into a less-than-4-byte-aligned
2118 // stack slot.
2119 static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
2120   // FIXME: This does not handle the LWA case.
2121   if (VT != MVT::i64)
2122     return;
2123 
2124   // NOTE: We'll exclude negative FIs here, which come from argument
2125   // lowering, because there are no known test cases triggering this problem
2126   // using packed structures (or similar). We can remove this exclusion if
2127   // we find such a test case. The reason why this is so test-case driven is
2128   // because this entire 'fixup' is only to prevent crashes (from the
2129   // register scavenger) on not-really-valid inputs. For example, if we have:
2130   //   %a = alloca i1
2131   //   %b = bitcast i1* %a to i64*
2132   //   store i64* a, i64 b
2133   // then the store should really be marked as 'align 1', but is not. If it
2134   // were marked as 'align 1' then the indexed form would have been
2135   // instruction-selected initially, and the problem this 'fixup' is preventing
2136   // won't happen regardless.
2137   if (FrameIdx < 0)
2138     return;
2139 
2140   MachineFunction &MF = DAG.getMachineFunction();
2141   MachineFrameInfo &MFI = MF.getFrameInfo();
2142 
2143   unsigned Align = MFI.getObjectAlignment(FrameIdx);
2144   if (Align >= 4)
2145     return;
2146 
2147   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2148   FuncInfo->setHasNonRISpills();
2149 }
2150 
2151 /// Returns true if the address N can be represented by a base register plus
2152 /// a signed 16-bit displacement [r+imm], and if it is not better
2153 /// represented as reg+reg.  If \p Alignment is non-zero, only accept
2154 /// displacements that are multiples of that value.
2155 bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
2156                                             SDValue &Base,
2157                                             SelectionDAG &DAG,
2158                                             unsigned Alignment) const {
2159   // FIXME dl should come from parent load or store, not from address
2160   SDLoc dl(N);
2161   // If this can be more profitably realized as r+r, fail.
2162   if (SelectAddressRegReg(N, Disp, Base, DAG))
2163     return false;
2164 
2165   if (N.getOpcode() == ISD::ADD) {
2166     int16_t imm = 0;
2167     if (isIntS16Immediate(N.getOperand(1), imm) &&
2168         (!Alignment || (imm % Alignment) == 0)) {
2169       Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2170       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2171         Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2172         fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2173       } else {
2174         Base = N.getOperand(0);
2175       }
2176       return true; // [r+i]
2177     } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
2178       // Match LOAD (ADD (X, Lo(G))).
2179       assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
2180              && "Cannot handle constant offsets yet!");
2181       Disp = N.getOperand(1).getOperand(0);  // The global address.
2182       assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
2183              Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
2184              Disp.getOpcode() == ISD::TargetConstantPool ||
2185              Disp.getOpcode() == ISD::TargetJumpTable);
2186       Base = N.getOperand(0);
2187       return true;  // [&g+r]
2188     }
2189   } else if (N.getOpcode() == ISD::OR) {
2190     int16_t imm = 0;
2191     if (isIntS16Immediate(N.getOperand(1), imm) &&
2192         (!Alignment || (imm % Alignment) == 0)) {
2193       // If this is an or of disjoint bitfields, we can codegen this as an add
2194       // (for better address arithmetic) if the LHS and RHS of the OR are
2195       // provably disjoint.
2196       KnownBits LHSKnown;
2197       DAG.computeKnownBits(N.getOperand(0), LHSKnown);
2198 
2199       if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
2200         // If all of the bits are known zero on the LHS or RHS, the add won't
2201         // carry.
2202         if (FrameIndexSDNode *FI =
2203               dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2204           Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2205           fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2206         } else {
2207           Base = N.getOperand(0);
2208         }
2209         Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2210         return true;
2211       }
2212     }
2213   } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
2214     // Loading from a constant address.
2215 
2216     // If this address fits entirely in a 16-bit sext immediate field, codegen
2217     // this as "d, 0"
2218     int16_t Imm;
2219     if (isIntS16Immediate(CN, Imm) && (!Alignment || (Imm % Alignment) == 0)) {
2220       Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
2221       Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2222                              CN->getValueType(0));
2223       return true;
2224     }
2225 
2226     // Handle 32-bit sext immediates with LIS + addr mode.
2227     if ((CN->getValueType(0) == MVT::i32 ||
2228          (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
2229         (!Alignment || (CN->getZExtValue() % Alignment) == 0)) {
2230       int Addr = (int)CN->getZExtValue();
2231 
2232       // Otherwise, break this down into an LIS + disp.
2233       Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);
2234 
2235       Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
2236                                    MVT::i32);
2237       unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
2238       Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
2239       return true;
2240     }
2241   }
2242 
2243   Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
2244   if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
2245     Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2246     fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2247   } else
2248     Base = N;
2249   return true;      // [r+0]
2250 }
2251 
2252 /// SelectAddressRegRegOnly - Given the specified addressed, force it to be
2253 /// represented as an indexed [r+r] operation.
2254 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
2255                                                 SDValue &Index,
2256                                                 SelectionDAG &DAG) const {
2257   // Check to see if we can easily represent this as an [r+r] address.  This
2258   // will fail if it thinks that the address is more profitably represented as
2259   // reg+imm, e.g. where imm = 0.
2260   if (SelectAddressRegReg(N, Base, Index, DAG))
2261     return true;
2262 
2263   // If the address is the result of an add, we will utilize the fact that the
2264   // address calculation includes an implicit add.  However, we can reduce
2265   // register pressure if we do not materialize a constant just for use as the
2266   // index register.  We only get rid of the add if it is not an add of a
2267   // value and a 16-bit signed constant and both have a single use.
2268   int16_t imm = 0;
2269   if (N.getOpcode() == ISD::ADD &&
2270       (!isIntS16Immediate(N.getOperand(1), imm) ||
2271        !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
2272     Base = N.getOperand(0);
2273     Index = N.getOperand(1);
2274     return true;
2275   }
2276 
2277   // Otherwise, do it the hard way, using R0 as the base register.
2278   Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2279                          N.getValueType());
2280   Index = N;
2281   return true;
2282 }
2283 
2284 /// getPreIndexedAddressParts - returns true by value, base pointer and
2285 /// offset pointer and addressing mode by reference if the node's address
2286 /// can be legally represented as pre-indexed load / store address.
2287 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
2288                                                   SDValue &Offset,
2289                                                   ISD::MemIndexedMode &AM,
2290                                                   SelectionDAG &DAG) const {
2291   if (DisablePPCPreinc) return false;
2292 
2293   bool isLoad = true;
2294   SDValue Ptr;
2295   EVT VT;
2296   unsigned Alignment;
2297   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2298     Ptr = LD->getBasePtr();
2299     VT = LD->getMemoryVT();
2300     Alignment = LD->getAlignment();
2301   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
2302     Ptr = ST->getBasePtr();
2303     VT  = ST->getMemoryVT();
2304     Alignment = ST->getAlignment();
2305     isLoad = false;
2306   } else
2307     return false;
2308 
2309   // PowerPC doesn't have preinc load/store instructions for vectors (except
2310   // for QPX, which does have preinc r+r forms).
2311   if (VT.isVector()) {
2312     if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) {
2313       return false;
2314     } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) {
2315       AM = ISD::PRE_INC;
2316       return true;
2317     }
2318   }
2319 
2320   if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
2321     // Common code will reject creating a pre-inc form if the base pointer
2322     // is a frame index, or if N is a store and the base pointer is either
2323     // the same as or a predecessor of the value being stored.  Check for
2324     // those situations here, and try with swapped Base/Offset instead.
2325     bool Swap = false;
2326 
2327     if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
2328       Swap = true;
2329     else if (!isLoad) {
2330       SDValue Val = cast<StoreSDNode>(N)->getValue();
2331       if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
2332         Swap = true;
2333     }
2334 
2335     if (Swap)
2336       std::swap(Base, Offset);
2337 
2338     AM = ISD::PRE_INC;
2339     return true;
2340   }
2341 
2342   // LDU/STU can only handle immediates that are a multiple of 4.
2343   if (VT != MVT::i64) {
2344     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 0))
2345       return false;
2346   } else {
2347     // LDU/STU need an address with at least 4-byte alignment.
2348     if (Alignment < 4)
2349       return false;
2350 
2351     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 4))
2352       return false;
2353   }
2354 
2355   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2356     // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
2357     // sext i32 to i64 when addr mode is r+i.
2358     if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
2359         LD->getExtensionType() == ISD::SEXTLOAD &&
2360         isa<ConstantSDNode>(Offset))
2361       return false;
2362   }
2363 
2364   AM = ISD::PRE_INC;
2365   return true;
2366 }
2367 
2368 //===----------------------------------------------------------------------===//
2369 //  LowerOperation implementation
2370 //===----------------------------------------------------------------------===//
2371 
2372 /// Return true if we should reference labels using a PICBase, set the HiOpFlags
2373 /// and LoOpFlags to the target MO flags.
2374 static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
2375                                unsigned &HiOpFlags, unsigned &LoOpFlags,
2376                                const GlobalValue *GV = nullptr) {
2377   HiOpFlags = PPCII::MO_HA;
2378   LoOpFlags = PPCII::MO_LO;
2379 
2380   // Don't use the pic base if not in PIC relocation model.
2381   if (IsPIC) {
2382     HiOpFlags |= PPCII::MO_PIC_FLAG;
2383     LoOpFlags |= PPCII::MO_PIC_FLAG;
2384   }
2385 
2386   // If this is a reference to a global value that requires a non-lazy-ptr, make
2387   // sure that instruction lowering adds it.
2388   if (GV && Subtarget.hasLazyResolverStub(GV)) {
2389     HiOpFlags |= PPCII::MO_NLP_FLAG;
2390     LoOpFlags |= PPCII::MO_NLP_FLAG;
2391 
2392     if (GV->hasHiddenVisibility()) {
2393       HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
2394       LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
2395     }
2396   }
2397 }
2398 
2399 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
2400                              SelectionDAG &DAG) {
2401   SDLoc DL(HiPart);
2402   EVT PtrVT = HiPart.getValueType();
2403   SDValue Zero = DAG.getConstant(0, DL, PtrVT);
2404 
2405   SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
2406   SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);
2407 
2408   // With PIC, the first instruction is actually "GR+hi(&G)".
2409   if (isPIC)
2410     Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
2411                      DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);
2412 
2413   // Generate non-pic code that has direct accesses to the constant pool.
2414   // The address of the global is just (hi(&g)+lo(&g)).
2415   return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
2416 }
2417 
2418 static void setUsesTOCBasePtr(MachineFunction &MF) {
2419   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2420   FuncInfo->setUsesTOCBasePtr();
2421 }
2422 
2423 static void setUsesTOCBasePtr(SelectionDAG &DAG) {
2424   setUsesTOCBasePtr(DAG.getMachineFunction());
2425 }
2426 
2427 static SDValue getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, bool Is64Bit,
2428                            SDValue GA) {
2429   EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
2430   SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT) :
2431                 DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);
2432 
2433   SDValue Ops[] = { GA, Reg };
2434   return DAG.getMemIntrinsicNode(
2435       PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
2436       MachinePointerInfo::getGOT(DAG.getMachineFunction()), 0,
2437       MachineMemOperand::MOLoad);
2438 }
2439 
2440 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
2441                                              SelectionDAG &DAG) const {
2442   EVT PtrVT = Op.getValueType();
2443   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
2444   const Constant *C = CP->getConstVal();
2445 
2446   // 64-bit SVR4 ABI code is always position-independent.
2447   // The actual address of the GlobalValue is stored in the TOC.
2448   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
2449     setUsesTOCBasePtr(DAG);
2450     SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0);
2451     return getTOCEntry(DAG, SDLoc(CP), true, GA);
2452   }
2453 
2454   unsigned MOHiFlag, MOLoFlag;
2455   bool IsPIC = isPositionIndependent();
2456   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2457 
2458   if (IsPIC && Subtarget.isSVR4ABI()) {
2459     SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(),
2460                                            PPCII::MO_PIC_FLAG);
2461     return getTOCEntry(DAG, SDLoc(CP), false, GA);
2462   }
2463 
2464   SDValue CPIHi =
2465     DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag);
2466   SDValue CPILo =
2467     DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag);
2468   return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG);
2469 }
2470 
2471 // For 64-bit PowerPC, prefer the more compact relative encodings.
2472 // This trades 32 bits per jump table entry for one or two instructions
2473 // on the jump site.
2474 unsigned PPCTargetLowering::getJumpTableEncoding() const {
2475   if (isJumpTableRelative())
2476     return MachineJumpTableInfo::EK_LabelDifference32;
2477 
2478   return TargetLowering::getJumpTableEncoding();
2479 }
2480 
2481 bool PPCTargetLowering::isJumpTableRelative() const {
2482   if (Subtarget.isPPC64())
2483     return true;
2484   return TargetLowering::isJumpTableRelative();
2485 }
2486 
2487 SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table,
2488                                                     SelectionDAG &DAG) const {
2489   if (!Subtarget.isPPC64())
2490     return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
2491 
2492   switch (getTargetMachine().getCodeModel()) {
2493   case CodeModel::Small:
2494   case CodeModel::Medium:
2495     return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
2496   default:
2497     return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(),
2498                        getPointerTy(DAG.getDataLayout()));
2499   }
2500 }
2501 
2502 const MCExpr *
2503 PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
2504                                                 unsigned JTI,
2505                                                 MCContext &Ctx) const {
2506   if (!Subtarget.isPPC64())
2507     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2508 
2509   switch (getTargetMachine().getCodeModel()) {
2510   case CodeModel::Small:
2511   case CodeModel::Medium:
2512     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2513   default:
2514     return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
2515   }
2516 }
2517 
2518 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
2519   EVT PtrVT = Op.getValueType();
2520   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
2521 
2522   // 64-bit SVR4 ABI code is always position-independent.
2523   // The actual address of the GlobalValue is stored in the TOC.
2524   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
2525     setUsesTOCBasePtr(DAG);
2526     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
2527     return getTOCEntry(DAG, SDLoc(JT), true, GA);
2528   }
2529 
2530   unsigned MOHiFlag, MOLoFlag;
2531   bool IsPIC = isPositionIndependent();
2532   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2533 
2534   if (IsPIC && Subtarget.isSVR4ABI()) {
2535     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
2536                                         PPCII::MO_PIC_FLAG);
2537     return getTOCEntry(DAG, SDLoc(GA), false, GA);
2538   }
2539 
2540   SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
2541   SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
2542   return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG);
2543 }
2544 
2545 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
2546                                              SelectionDAG &DAG) const {
2547   EVT PtrVT = Op.getValueType();
2548   BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
2549   const BlockAddress *BA = BASDN->getBlockAddress();
2550 
2551   // 64-bit SVR4 ABI code is always position-independent.
2552   // The actual BlockAddress is stored in the TOC.
2553   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
2554     setUsesTOCBasePtr(DAG);
2555     SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
2556     return getTOCEntry(DAG, SDLoc(BASDN), true, GA);
2557   }
2558 
2559   unsigned MOHiFlag, MOLoFlag;
2560   bool IsPIC = isPositionIndependent();
2561   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2562   SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
2563   SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
2564   return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG);
2565 }
2566 
2567 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
2568                                               SelectionDAG &DAG) const {
2569   // FIXME: TLS addresses currently use medium model code sequences,
2570   // which is the most useful form.  Eventually support for small and
2571   // large models could be added if users need it, at the cost of
2572   // additional complexity.
2573   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2574   if (DAG.getTarget().useEmulatedTLS())
2575     return LowerToTLSEmulatedModel(GA, DAG);
2576 
2577   SDLoc dl(GA);
2578   const GlobalValue *GV = GA->getGlobal();
2579   EVT PtrVT = getPointerTy(DAG.getDataLayout());
2580   bool is64bit = Subtarget.isPPC64();
2581   const Module *M = DAG.getMachineFunction().getFunction().getParent();
2582   PICLevel::Level picLevel = M->getPICLevel();
2583 
2584   TLSModel::Model Model = getTargetMachine().getTLSModel(GV);
2585 
2586   if (Model == TLSModel::LocalExec) {
2587     SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
2588                                                PPCII::MO_TPREL_HA);
2589     SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
2590                                                PPCII::MO_TPREL_LO);
2591     SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64)
2592                              : DAG.getRegister(PPC::R2, MVT::i32);
2593 
2594     SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
2595     return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
2596   }
2597 
2598   if (Model == TLSModel::InitialExec) {
2599     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
2600     SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
2601                                                 PPCII::MO_TLS);
2602     SDValue GOTPtr;
2603     if (is64bit) {
2604       setUsesTOCBasePtr(DAG);
2605       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
2606       GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl,
2607                            PtrVT, GOTReg, TGA);
2608     } else
2609       GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT);
2610     SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl,
2611                                    PtrVT, TGA, GOTPtr);
2612     return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
2613   }
2614 
2615   if (Model == TLSModel::GeneralDynamic) {
2616     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
2617     SDValue GOTPtr;
2618     if (is64bit) {
2619       setUsesTOCBasePtr(DAG);
2620       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
2621       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
2622                                    GOTReg, TGA);
2623     } else {
2624       if (picLevel == PICLevel::SmallPIC)
2625         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
2626       else
2627         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
2628     }
2629     return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT,
2630                        GOTPtr, TGA, TGA);
2631   }
2632 
2633   if (Model == TLSModel::LocalDynamic) {
2634     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
2635     SDValue GOTPtr;
2636     if (is64bit) {
2637       setUsesTOCBasePtr(DAG);
2638       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
2639       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
2640                            GOTReg, TGA);
2641     } else {
2642       if (picLevel == PICLevel::SmallPIC)
2643         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
2644       else
2645         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
2646     }
2647     SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
2648                                   PtrVT, GOTPtr, TGA, TGA);
2649     SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
2650                                       PtrVT, TLSAddr, TGA);
2651     return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
2652   }
2653 
2654   llvm_unreachable("Unknown TLS model!");
2655 }
2656 
2657 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
2658                                               SelectionDAG &DAG) const {
2659   EVT PtrVT = Op.getValueType();
2660   GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
2661   SDLoc DL(GSDN);
2662   const GlobalValue *GV = GSDN->getGlobal();
2663 
2664   // 64-bit SVR4 ABI code is always position-independent.
2665   // The actual address of the GlobalValue is stored in the TOC.
2666   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
2667     setUsesTOCBasePtr(DAG);
2668     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
2669     return getTOCEntry(DAG, DL, true, GA);
2670   }
2671 
2672   unsigned MOHiFlag, MOLoFlag;
2673   bool IsPIC = isPositionIndependent();
2674   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV);
2675 
2676   if (IsPIC && Subtarget.isSVR4ABI()) {
2677     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
2678                                             GSDN->getOffset(),
2679                                             PPCII::MO_PIC_FLAG);
2680     return getTOCEntry(DAG, DL, false, GA);
2681   }
2682 
2683   SDValue GAHi =
2684     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
2685   SDValue GALo =
2686     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);
2687 
2688   SDValue Ptr = LowerLabelRef(GAHi, GALo, IsPIC, DAG);
2689 
2690   // If the global reference is actually to a non-lazy-pointer, we have to do an
2691   // extra load to get the address of the global.
2692   if (MOHiFlag & PPCII::MO_NLP_FLAG)
2693     Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2694   return Ptr;
2695 }
2696 
2697 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
2698   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
2699   SDLoc dl(Op);
2700 
2701   if (Op.getValueType() == MVT::v2i64) {
2702     // When the operands themselves are v2i64 values, we need to do something
2703     // special because VSX has no underlying comparison operations for these.
2704     if (Op.getOperand(0).getValueType() == MVT::v2i64) {
2705       // Equality can be handled by casting to the legal type for Altivec
2706       // comparisons, everything else needs to be expanded.
2707       if (CC == ISD::SETEQ || CC == ISD::SETNE) {
2708         return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
2709                  DAG.getSetCC(dl, MVT::v4i32,
2710                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)),
2711                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)),
2712                    CC));
2713       }
2714 
2715       return SDValue();
2716     }
2717 
2718     // We handle most of these in the usual way.
2719     return Op;
2720   }
2721 
2722   // If we're comparing for equality to zero, expose the fact that this is
2723   // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
2724   // fold the new nodes.
2725   if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG))
2726     return V;
2727 
2728   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2729     // Leave comparisons against 0 and -1 alone for now, since they're usually
2730     // optimized.  FIXME: revisit this when we can custom lower all setcc
2731     // optimizations.
2732     if (C->isAllOnesValue() || C->isNullValue())
2733       return SDValue();
2734   }
2735 
2736   // If we have an integer seteq/setne, turn it into a compare against zero
2737   // by xor'ing the rhs with the lhs, which is faster than setting a
2738   // condition register, reading it back out, and masking the correct bit.  The
2739   // normal approach here uses sub to do this instead of xor.  Using xor exposes
2740   // the result to other bit-twiddling opportunities.
2741   EVT LHSVT = Op.getOperand(0).getValueType();
2742   if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
2743     EVT VT = Op.getValueType();
2744     SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
2745                                 Op.getOperand(1));
2746     return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
2747   }
2748   return SDValue();
2749 }
2750 
2751 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
2752   SDNode *Node = Op.getNode();
2753   EVT VT = Node->getValueType(0);
2754   EVT PtrVT = getPointerTy(DAG.getDataLayout());
2755   SDValue InChain = Node->getOperand(0);
2756   SDValue VAListPtr = Node->getOperand(1);
2757   const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2758   SDLoc dl(Node);
2759 
2760   assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
2761 
2762   // gpr_index
2763   SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
2764                                     VAListPtr, MachinePointerInfo(SV), MVT::i8);
2765   InChain = GprIndex.getValue(1);
2766 
2767   if (VT == MVT::i64) {
2768     // Check if GprIndex is even
2769     SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
2770                                  DAG.getConstant(1, dl, MVT::i32));
2771     SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
2772                                 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
2773     SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
2774                                           DAG.getConstant(1, dl, MVT::i32));
2775     // Align GprIndex to be even if it isn't
2776     GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
2777                            GprIndex);
2778   }
2779 
2780   // fpr index is 1 byte after gpr
2781   SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
2782                                DAG.getConstant(1, dl, MVT::i32));
2783 
2784   // fpr
2785   SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
2786                                     FprPtr, MachinePointerInfo(SV), MVT::i8);
2787   InChain = FprIndex.getValue(1);
2788 
2789   SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
2790                                        DAG.getConstant(8, dl, MVT::i32));
2791 
2792   SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
2793                                         DAG.getConstant(4, dl, MVT::i32));
2794 
2795   // areas
2796   SDValue OverflowArea =
2797       DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
2798   InChain = OverflowArea.getValue(1);
2799 
2800   SDValue RegSaveArea =
2801       DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
2802   InChain = RegSaveArea.getValue(1);
2803 
2804   // select overflow_area if index > 8
2805   SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
2806                             DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);
2807 
2808   // adjustment constant gpr_index * 4/8
2809   SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
2810                                     VT.isInteger() ? GprIndex : FprIndex,
2811                                     DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
2812                                                     MVT::i32));
2813 
2814   // OurReg = RegSaveArea + RegConstant
2815   SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
2816                                RegConstant);
2817 
2818   // Floating types are 32 bytes into RegSaveArea
2819   if (VT.isFloatingPoint())
2820     OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
2821                          DAG.getConstant(32, dl, MVT::i32));
2822 
2823   // increase {f,g}pr_index by 1 (or 2 if VT is i64)
2824   SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
2825                                    VT.isInteger() ? GprIndex : FprIndex,
2826                                    DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
2827                                                    MVT::i32));
2828 
2829   InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
2830                               VT.isInteger() ? VAListPtr : FprPtr,
2831                               MachinePointerInfo(SV), MVT::i8);
2832 
2833   // determine if we should load from reg_save_area or overflow_area
2834   SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
2835 
2836   // increase overflow_area by 4/8 if gpr/fpr > 8
2837   SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
2838                                           DAG.getConstant(VT.isInteger() ? 4 : 8,
2839                                           dl, MVT::i32));
2840 
2841   OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
2842                              OverflowAreaPlusN);
2843 
2844   InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
2845                               MachinePointerInfo(), MVT::i32);
2846 
2847   return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
2848 }
2849 
2850 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
2851   assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
2852 
2853   // We have to copy the entire va_list struct:
2854   // 2*sizeof(char) + 2 Byte alignment + 2*sizeof(char*) = 12 Byte
2855   return DAG.getMemcpy(Op.getOperand(0), Op,
2856                        Op.getOperand(1), Op.getOperand(2),
2857                        DAG.getConstant(12, SDLoc(Op), MVT::i32), 8, false, true,
2858                        false, MachinePointerInfo(), MachinePointerInfo());
2859 }
2860 
2861 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
2862                                                   SelectionDAG &DAG) const {
2863   return Op.getOperand(0);
2864 }
2865 
2866 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
2867                                                 SelectionDAG &DAG) const {
2868   SDValue Chain = Op.getOperand(0);
2869   SDValue Trmp = Op.getOperand(1); // trampoline
2870   SDValue FPtr = Op.getOperand(2); // nested function
2871   SDValue Nest = Op.getOperand(3); // 'nest' parameter value
2872   SDLoc dl(Op);
2873 
2874   EVT PtrVT = getPointerTy(DAG.getDataLayout());
2875   bool isPPC64 = (PtrVT == MVT::i64);
2876   Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
2877 
2878   TargetLowering::ArgListTy Args;
2879   TargetLowering::ArgListEntry Entry;
2880 
2881   Entry.Ty = IntPtrTy;
2882   Entry.Node = Trmp; Args.push_back(Entry);
2883 
2884   // TrampSize == (isPPC64 ? 48 : 40);
2885   Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
2886                                isPPC64 ? MVT::i64 : MVT::i32);
2887   Args.push_back(Entry);
2888 
2889   Entry.Node = FPtr; Args.push_back(Entry);
2890   Entry.Node = Nest; Args.push_back(Entry);
2891 
2892   // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
2893   TargetLowering::CallLoweringInfo CLI(DAG);
2894   CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
2895       CallingConv::C, Type::getVoidTy(*DAG.getContext()),
2896       DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args));
2897 
2898   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
2899   return CallResult.second;
2900 }
2901 
2902 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
2903   MachineFunction &MF = DAG.getMachineFunction();
2904   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2905   EVT PtrVT = getPointerTy(MF.getDataLayout());
2906 
2907   SDLoc dl(Op);
2908 
2909   if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) {
2910     // vastart just stores the address of the VarArgsFrameIndex slot into the
2911     // memory location argument.
2912     SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
2913     const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2914     return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
2915                         MachinePointerInfo(SV));
2916   }
2917 
2918   // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
2919   // We suppose the given va_list is already allocated.
2920   //
2921   // typedef struct {
2922   //  char gpr;     /* index into the array of 8 GPRs
2923   //                 * stored in the register save area
2924   //                 * gpr=0 corresponds to r3,
2925   //                 * gpr=1 to r4, etc.
2926   //                 */
2927   //  char fpr;     /* index into the array of 8 FPRs
2928   //                 * stored in the register save area
2929   //                 * fpr=0 corresponds to f1,
2930   //                 * fpr=1 to f2, etc.
2931   //                 */
2932   //  char *overflow_arg_area;
2933   //                /* location on stack that holds
2934   //                 * the next overflow argument
2935   //                 */
2936   //  char *reg_save_area;
2937   //               /* where r3:r10 and f1:f8 (if saved)
2938   //                * are stored
2939   //                */
2940   // } va_list[1];
2941 
2942   SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
2943   SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);
2944   SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
2945                                             PtrVT);
2946   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
2947                                  PtrVT);
2948 
2949   uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
2950   SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT);
2951 
2952   uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
2953   SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT);
2954 
2955   uint64_t FPROffset = 1;
2956   SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT);
2957 
2958   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2959 
2960   // Store first byte : number of int regs
2961   SDValue firstStore =
2962       DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1),
2963                         MachinePointerInfo(SV), MVT::i8);
2964   uint64_t nextOffset = FPROffset;
2965   SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
2966                                   ConstFPROffset);
2967 
2968   // Store second byte : number of float regs
2969   SDValue secondStore =
2970       DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
2971                         MachinePointerInfo(SV, nextOffset), MVT::i8);
2972   nextOffset += StackOffset;
2973   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
2974 
2975   // Store second word : arguments given on stack
2976   SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
2977                                     MachinePointerInfo(SV, nextOffset));
2978   nextOffset += FrameOffset;
2979   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
2980 
2981   // Store third word : arguments given in registers
2982   return DAG.getStore(thirdStore, dl, FR, nextPtr,
2983                       MachinePointerInfo(SV, nextOffset));
2984 }
2985 
2986 #include "PPCGenCallingConv.inc"
2987 
2988 // Function whose sole purpose is to kill compiler warnings
2989 // stemming from unused functions included from PPCGenCallingConv.inc.
2990 CCAssignFn *PPCTargetLowering::useFastISelCCs(unsigned Flag) const {
2991   return Flag ? CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS;
2992 }
2993 
2994 bool llvm::CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
2995                                       CCValAssign::LocInfo &LocInfo,
2996                                       ISD::ArgFlagsTy &ArgFlags,
2997                                       CCState &State) {
2998   return true;
2999 }
3000 
3001 bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
3002                                              MVT &LocVT,
3003                                              CCValAssign::LocInfo &LocInfo,
3004                                              ISD::ArgFlagsTy &ArgFlags,
3005                                              CCState &State) {
3006   static const MCPhysReg ArgRegs[] = {
3007     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
3008     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3009   };
3010   const unsigned NumArgRegs = array_lengthof(ArgRegs);
3011 
3012   unsigned RegNum = State.getFirstUnallocated(ArgRegs);
3013 
3014   // Skip one register if the first unallocated register has an even register
3015   // number and there are still argument registers available which have not been
3016   // allocated yet. RegNum is actually an index into ArgRegs, which means we
3017   // need to skip a register if RegNum is odd.
3018   if (RegNum != NumArgRegs && RegNum % 2 == 1) {
3019     State.AllocateReg(ArgRegs[RegNum]);
3020   }
3021 
3022   // Always return false here, as this function only makes sure that the first
3023   // unallocated register has an odd register number and does not actually
3024   // allocate a register for the current argument.
3025   return false;
3026 }
3027 
3028 bool
3029 llvm::CC_PPC32_SVR4_Custom_SkipLastArgRegsPPCF128(unsigned &ValNo, MVT &ValVT,
3030                                                   MVT &LocVT,
3031                                                   CCValAssign::LocInfo &LocInfo,
3032                                                   ISD::ArgFlagsTy &ArgFlags,
3033                                                   CCState &State) {
3034   static const MCPhysReg ArgRegs[] = {
3035     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
3036     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3037   };
3038   const unsigned NumArgRegs = array_lengthof(ArgRegs);
3039 
3040   unsigned RegNum = State.getFirstUnallocated(ArgRegs);
3041   int RegsLeft = NumArgRegs - RegNum;
3042 
3043   // Skip if there is not enough registers left for long double type (4 gpr regs
3044   // in soft float mode) and put long double argument on the stack.
3045   if (RegNum != NumArgRegs && RegsLeft < 4) {
3046     for (int i = 0; i < RegsLeft; i++) {
3047       State.AllocateReg(ArgRegs[RegNum + i]);
3048     }
3049   }
3050 
3051   return false;
3052 }
3053 
3054 bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
3055                                                MVT &LocVT,
3056                                                CCValAssign::LocInfo &LocInfo,
3057                                                ISD::ArgFlagsTy &ArgFlags,
3058                                                CCState &State) {
3059   static const MCPhysReg ArgRegs[] = {
3060     PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
3061     PPC::F8
3062   };
3063 
3064   const unsigned NumArgRegs = array_lengthof(ArgRegs);
3065 
3066   unsigned RegNum = State.getFirstUnallocated(ArgRegs);
3067 
3068   // If there is only one Floating-point register left we need to put both f64
3069   // values of a split ppc_fp128 value on the stack.
3070   if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) {
3071     State.AllocateReg(ArgRegs[RegNum]);
3072   }
3073 
3074   // Always return false here, as this function only makes sure that the two f64
3075   // values a ppc_fp128 value is split into are both passed in registers or both
3076   // passed on the stack and does not actually allocate a register for the
3077   // current argument.
3078   return false;
3079 }
3080 
3081 /// FPR - The set of FP registers that should be allocated for arguments,
3082 /// on Darwin.
3083 static const MCPhysReg FPR[] = {PPC::F1,  PPC::F2,  PPC::F3, PPC::F4, PPC::F5,
3084                                 PPC::F6,  PPC::F7,  PPC::F8, PPC::F9, PPC::F10,
3085                                 PPC::F11, PPC::F12, PPC::F13};
3086 
3087 /// QFPR - The set of QPX registers that should be allocated for arguments.
3088 static const MCPhysReg QFPR[] = {
3089     PPC::QF1, PPC::QF2, PPC::QF3,  PPC::QF4,  PPC::QF5,  PPC::QF6, PPC::QF7,
3090     PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13};
3091 
3092 /// CalculateStackSlotSize - Calculates the size reserved for this argument on
3093 /// the stack.
3094 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
3095                                        unsigned PtrByteSize) {
3096   unsigned ArgSize = ArgVT.getStoreSize();
3097   if (Flags.isByVal())
3098     ArgSize = Flags.getByValSize();
3099 
3100   // Round up to multiples of the pointer size, except for array members,
3101   // which are always packed.
3102   if (!Flags.isInConsecutiveRegs())
3103     ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3104 
3105   return ArgSize;
3106 }
3107 
3108 /// CalculateStackSlotAlignment - Calculates the alignment of this argument
3109 /// on the stack.
3110 static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
3111                                             ISD::ArgFlagsTy Flags,
3112                                             unsigned PtrByteSize) {
3113   unsigned Align = PtrByteSize;
3114 
3115   // Altivec parameters are padded to a 16 byte boundary.
3116   if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3117       ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3118       ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3119       ArgVT == MVT::v1i128)
3120     Align = 16;
3121   // QPX vector types stored in double-precision are padded to a 32 byte
3122   // boundary.
3123   else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1)
3124     Align = 32;
3125 
3126   // ByVal parameters are aligned as requested.
3127   if (Flags.isByVal()) {
3128     unsigned BVAlign = Flags.getByValAlign();
3129     if (BVAlign > PtrByteSize) {
3130       if (BVAlign % PtrByteSize != 0)
3131           llvm_unreachable(
3132             "ByVal alignment is not a multiple of the pointer size");
3133 
3134       Align = BVAlign;
3135     }
3136   }
3137 
3138   // Array members are always packed to their original alignment.
3139   if (Flags.isInConsecutiveRegs()) {
3140     // If the array member was split into multiple registers, the first
3141     // needs to be aligned to the size of the full type.  (Except for
3142     // ppcf128, which is only aligned as its f64 components.)
3143     if (Flags.isSplit() && OrigVT != MVT::ppcf128)
3144       Align = OrigVT.getStoreSize();
3145     else
3146       Align = ArgVT.getStoreSize();
3147   }
3148 
3149   return Align;
3150 }
3151 
3152 /// CalculateStackSlotUsed - Return whether this argument will use its
3153 /// stack slot (instead of being passed in registers).  ArgOffset,
3154 /// AvailableFPRs, and AvailableVRs must hold the current argument
3155 /// position, and will be updated to account for this argument.
3156 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT,
3157                                    ISD::ArgFlagsTy Flags,
3158                                    unsigned PtrByteSize,
3159                                    unsigned LinkageSize,
3160                                    unsigned ParamAreaSize,
3161                                    unsigned &ArgOffset,
3162                                    unsigned &AvailableFPRs,
3163                                    unsigned &AvailableVRs, bool HasQPX) {
3164   bool UseMemory = false;
3165 
3166   // Respect alignment of argument on the stack.
3167   unsigned Align =
3168     CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
3169   ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
3170   // If there's no space left in the argument save area, we must
3171   // use memory (this check also catches zero-sized arguments).
3172   if (ArgOffset >= LinkageSize + ParamAreaSize)
3173     UseMemory = true;
3174 
3175   // Allocate argument on the stack.
3176   ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
3177   if (Flags.isInConsecutiveRegsLast())
3178     ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3179   // If we overran the argument save area, we must use memory
3180   // (this check catches arguments passed partially in memory)
3181   if (ArgOffset > LinkageSize + ParamAreaSize)
3182     UseMemory = true;
3183 
3184   // However, if the argument is actually passed in an FPR or a VR,
3185   // we don't use memory after all.
3186   if (!Flags.isByVal()) {
3187     if (ArgVT == MVT::f32 || ArgVT == MVT::f64 ||
3188         // QPX registers overlap with the scalar FP registers.
3189         (HasQPX && (ArgVT == MVT::v4f32 ||
3190                     ArgVT == MVT::v4f64 ||
3191                     ArgVT == MVT::v4i1)))
3192       if (AvailableFPRs > 0) {
3193         --AvailableFPRs;
3194         return false;
3195       }
3196     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3197         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3198         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3199         ArgVT == MVT::v1i128)
3200       if (AvailableVRs > 0) {
3201         --AvailableVRs;
3202         return false;
3203       }
3204   }
3205 
3206   return UseMemory;
3207 }
3208 
3209 /// EnsureStackAlignment - Round stack frame size up from NumBytes to
3210 /// ensure minimum alignment required for target.
3211 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
3212                                      unsigned NumBytes) {
3213   unsigned TargetAlign = Lowering->getStackAlignment();
3214   unsigned AlignMask = TargetAlign - 1;
3215   NumBytes = (NumBytes + AlignMask) & ~AlignMask;
3216   return NumBytes;
3217 }
3218 
3219 SDValue PPCTargetLowering::LowerFormalArguments(
3220     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3221     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3222     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3223   if (Subtarget.isSVR4ABI()) {
3224     if (Subtarget.isPPC64())
3225       return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins,
3226                                          dl, DAG, InVals);
3227     else
3228       return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins,
3229                                          dl, DAG, InVals);
3230   } else {
3231     return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins,
3232                                        dl, DAG, InVals);
3233   }
3234 }
3235 
3236 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
3237     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3238     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3239     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3240 
3241   // 32-bit SVR4 ABI Stack Frame Layout:
3242   //              +-----------------------------------+
3243   //        +-->  |            Back chain             |
3244   //        |     +-----------------------------------+
3245   //        |     | Floating-point register save area |
3246   //        |     +-----------------------------------+
3247   //        |     |    General register save area     |
3248   //        |     +-----------------------------------+
3249   //        |     |          CR save word             |
3250   //        |     +-----------------------------------+
3251   //        |     |         VRSAVE save word          |
3252   //        |     +-----------------------------------+
3253   //        |     |         Alignment padding         |
3254   //        |     +-----------------------------------+
3255   //        |     |     Vector register save area     |
3256   //        |     +-----------------------------------+
3257   //        |     |       Local variable space        |
3258   //        |     +-----------------------------------+
3259   //        |     |        Parameter list area        |
3260   //        |     +-----------------------------------+
3261   //        |     |           LR save word            |
3262   //        |     +-----------------------------------+
3263   // SP-->  +---  |            Back chain             |
3264   //              +-----------------------------------+
3265   //
3266   // Specifications:
3267   //   System V Application Binary Interface PowerPC Processor Supplement
3268   //   AltiVec Technology Programming Interface Manual
3269 
3270   MachineFunction &MF = DAG.getMachineFunction();
3271   MachineFrameInfo &MFI = MF.getFrameInfo();
3272   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3273 
3274   EVT PtrVT = getPointerTy(MF.getDataLayout());
3275   // Potential tail calls could cause overwriting of argument stack slots.
3276   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3277                        (CallConv == CallingConv::Fast));
3278   unsigned PtrByteSize = 4;
3279 
3280   // Assign locations to all of the incoming arguments.
3281   SmallVector<CCValAssign, 16> ArgLocs;
3282   PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
3283                  *DAG.getContext());
3284 
3285   // Reserve space for the linkage area on the stack.
3286   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3287   CCInfo.AllocateStack(LinkageSize, PtrByteSize);
3288   if (useSoftFloat())
3289     CCInfo.PreAnalyzeFormalArguments(Ins);
3290 
3291   CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
3292   CCInfo.clearWasPPCF128();
3293 
3294   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3295     CCValAssign &VA = ArgLocs[i];
3296 
3297     // Arguments stored in registers.
3298     if (VA.isRegLoc()) {
3299       const TargetRegisterClass *RC;
3300       EVT ValVT = VA.getValVT();
3301 
3302       switch (ValVT.getSimpleVT().SimpleTy) {
3303         default:
3304           llvm_unreachable("ValVT not supported by formal arguments Lowering");
3305         case MVT::i1:
3306         case MVT::i32:
3307           RC = &PPC::GPRCRegClass;
3308           break;
3309         case MVT::f32:
3310           if (Subtarget.hasP8Vector())
3311             RC = &PPC::VSSRCRegClass;
3312           else
3313             RC = &PPC::F4RCRegClass;
3314           break;
3315         case MVT::f64:
3316           if (Subtarget.hasVSX())
3317             RC = &PPC::VSFRCRegClass;
3318           else
3319             RC = &PPC::F8RCRegClass;
3320           break;
3321         case MVT::v16i8:
3322         case MVT::v8i16:
3323         case MVT::v4i32:
3324           RC = &PPC::VRRCRegClass;
3325           break;
3326         case MVT::v4f32:
3327           RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass;
3328           break;
3329         case MVT::v2f64:
3330         case MVT::v2i64:
3331           RC = &PPC::VRRCRegClass;
3332           break;
3333         case MVT::v4f64:
3334           RC = &PPC::QFRCRegClass;
3335           break;
3336         case MVT::v4i1:
3337           RC = &PPC::QBRCRegClass;
3338           break;
3339       }
3340 
3341       // Transform the arguments stored in physical registers into virtual ones.
3342       unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3343       SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
3344                                             ValVT == MVT::i1 ? MVT::i32 : ValVT);
3345 
3346       if (ValVT == MVT::i1)
3347         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);
3348 
3349       InVals.push_back(ArgValue);
3350     } else {
3351       // Argument stored in memory.
3352       assert(VA.isMemLoc());
3353 
3354       unsigned ArgSize = VA.getLocVT().getStoreSize();
3355       int FI = MFI.CreateFixedObject(ArgSize, VA.getLocMemOffset(),
3356                                      isImmutable);
3357 
3358       // Create load nodes to retrieve arguments from the stack.
3359       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3360       InVals.push_back(
3361           DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo()));
3362     }
3363   }
3364 
3365   // Assign locations to all of the incoming aggregate by value arguments.
3366   // Aggregates passed by value are stored in the local variable space of the
3367   // caller's stack frame, right above the parameter list area.
3368   SmallVector<CCValAssign, 16> ByValArgLocs;
3369   CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
3370                       ByValArgLocs, *DAG.getContext());
3371 
3372   // Reserve stack space for the allocations in CCInfo.
3373   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
3374 
3375   CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);
3376 
3377   // Area that is at least reserved in the caller of this function.
3378   unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
3379   MinReservedArea = std::max(MinReservedArea, LinkageSize);
3380 
3381   // Set the size that is at least reserved in caller of this function.  Tail
3382   // call optimized function's reserved stack space needs to be aligned so that
3383   // taking the difference between two stack areas will result in an aligned
3384   // stack.
3385   MinReservedArea =
3386       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
3387   FuncInfo->setMinReservedArea(MinReservedArea);
3388 
3389   SmallVector<SDValue, 8> MemOps;
3390 
3391   // If the function takes variable number of arguments, make a frame index for
3392   // the start of the first vararg value... for expansion of llvm.va_start.
3393   if (isVarArg) {
3394     static const MCPhysReg GPArgRegs[] = {
3395       PPC::R3, PPC::R4, PPC::R5, PPC::R6,
3396       PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3397     };
3398     const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
3399 
3400     static const MCPhysReg FPArgRegs[] = {
3401       PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
3402       PPC::F8
3403     };
3404     unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
3405 
3406     if (useSoftFloat())
3407        NumFPArgRegs = 0;
3408 
3409     FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
3410     FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));
3411 
3412     // Make room for NumGPArgRegs and NumFPArgRegs.
3413     int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
3414                 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;
3415 
3416     FuncInfo->setVarArgsStackOffset(
3417       MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
3418                             CCInfo.getNextStackOffset(), true));
3419 
3420     FuncInfo->setVarArgsFrameIndex(MFI.CreateStackObject(Depth, 8, false));
3421     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3422 
3423     // The fixed integer arguments of a variadic function are stored to the
3424     // VarArgsFrameIndex on the stack so that they may be loaded by
3425     // dereferencing the result of va_next.
3426     for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
3427       // Get an existing live-in vreg, or add a new one.
3428       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
3429       if (!VReg)
3430         VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
3431 
3432       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3433       SDValue Store =
3434           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3435       MemOps.push_back(Store);
3436       // Increment the address by four for the next argument to store
3437       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
3438       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3439     }
3440 
3441     // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
3442     // is set.
3443     // The double arguments are stored to the VarArgsFrameIndex
3444     // on the stack.
3445     for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
3446       // Get an existing live-in vreg, or add a new one.
3447       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
3448       if (!VReg)
3449         VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
3450 
3451       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
3452       SDValue Store =
3453           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3454       MemOps.push_back(Store);
3455       // Increment the address by eight for the next argument to store
3456       SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
3457                                          PtrVT);
3458       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3459     }
3460   }
3461 
3462   if (!MemOps.empty())
3463     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3464 
3465   return Chain;
3466 }
3467 
3468 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
3469 // value to MVT::i64 and then truncate to the correct register size.
3470 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
3471                                              EVT ObjectVT, SelectionDAG &DAG,
3472                                              SDValue ArgVal,
3473                                              const SDLoc &dl) const {
3474   if (Flags.isSExt())
3475     ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
3476                          DAG.getValueType(ObjectVT));
3477   else if (Flags.isZExt())
3478     ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
3479                          DAG.getValueType(ObjectVT));
3480 
3481   return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
3482 }
3483 
3484 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
3485     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3486     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3487     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3488   // TODO: add description of PPC stack frame format, or at least some docs.
3489   //
3490   bool isELFv2ABI = Subtarget.isELFv2ABI();
3491   bool isLittleEndian = Subtarget.isLittleEndian();
3492   MachineFunction &MF = DAG.getMachineFunction();
3493   MachineFrameInfo &MFI = MF.getFrameInfo();
3494   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3495 
3496   assert(!(CallConv == CallingConv::Fast && isVarArg) &&
3497          "fastcc not supported on varargs functions");
3498 
3499   EVT PtrVT = getPointerTy(MF.getDataLayout());
3500   // Potential tail calls could cause overwriting of argument stack slots.
3501   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3502                        (CallConv == CallingConv::Fast));
3503   unsigned PtrByteSize = 8;
3504   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3505 
3506   static const MCPhysReg GPR[] = {
3507     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
3508     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
3509   };
3510   static const MCPhysReg VR[] = {
3511     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
3512     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
3513   };
3514 
3515   const unsigned Num_GPR_Regs = array_lengthof(GPR);
3516   const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
3517   const unsigned Num_VR_Regs  = array_lengthof(VR);
3518   const unsigned Num_QFPR_Regs = Num_FPR_Regs;
3519 
3520   // Do a first pass over the arguments to determine whether the ABI
3521   // guarantees that our caller has allocated the parameter save area
3522   // on its stack frame.  In the ELFv1 ABI, this is always the case;
3523   // in the ELFv2 ABI, it is true if this is a vararg function or if
3524   // any parameter is located in a stack slot.
3525 
3526   bool HasParameterArea = !isELFv2ABI || isVarArg;
3527   unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
3528   unsigned NumBytes = LinkageSize;
3529   unsigned AvailableFPRs = Num_FPR_Regs;
3530   unsigned AvailableVRs = Num_VR_Regs;
3531   for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3532     if (Ins[i].Flags.isNest())
3533       continue;
3534 
3535     if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
3536                                PtrByteSize, LinkageSize, ParamAreaSize,
3537                                NumBytes, AvailableFPRs, AvailableVRs,
3538                                Subtarget.hasQPX()))
3539       HasParameterArea = true;
3540   }
3541 
3542   // Add DAG nodes to load the arguments or copy them out of registers.  On
3543   // entry to a function on PPC, the arguments start after the linkage area,
3544   // although the first ones are often in registers.
3545 
3546   unsigned ArgOffset = LinkageSize;
3547   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
3548   unsigned &QFPR_idx = FPR_idx;
3549   SmallVector<SDValue, 8> MemOps;
3550   Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
3551   unsigned CurArgIdx = 0;
3552   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
3553     SDValue ArgVal;
3554     bool needsLoad = false;
3555     EVT ObjectVT = Ins[ArgNo].VT;
3556     EVT OrigVT = Ins[ArgNo].ArgVT;
3557     unsigned ObjSize = ObjectVT.getStoreSize();
3558     unsigned ArgSize = ObjSize;
3559     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
3560     if (Ins[ArgNo].isOrigArg()) {
3561       std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
3562       CurArgIdx = Ins[ArgNo].getOrigArgIndex();
3563     }
3564     // We re-align the argument offset for each argument, except when using the
3565     // fast calling convention, when we need to make sure we do that only when
3566     // we'll actually use a stack slot.
3567     unsigned CurArgOffset, Align;
3568     auto ComputeArgOffset = [&]() {
3569       /* Respect alignment of argument on the stack.  */
3570       Align = CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
3571       ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
3572       CurArgOffset = ArgOffset;
3573     };
3574 
3575     if (CallConv != CallingConv::Fast) {
3576       ComputeArgOffset();
3577 
3578       /* Compute GPR index associated with argument offset.  */
3579       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
3580       GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
3581     }
3582 
3583     // FIXME the codegen can be much improved in some cases.
3584     // We do not have to keep everything in memory.
3585     if (Flags.isByVal()) {
3586       assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
3587 
3588       if (CallConv == CallingConv::Fast)
3589         ComputeArgOffset();
3590 
3591       // ObjSize is the true size, ArgSize rounded up to multiple of registers.
3592       ObjSize = Flags.getByValSize();
3593       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3594       // Empty aggregate parameters do not take up registers.  Examples:
3595       //   struct { } a;
3596       //   union  { } b;
3597       //   int c[0];
3598       // etc.  However, we have to provide a place-holder in InVals, so
3599       // pretend we have an 8-byte item at the current address for that
3600       // purpose.
3601       if (!ObjSize) {
3602         int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
3603         SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3604         InVals.push_back(FIN);
3605         continue;
3606       }
3607 
3608       // Create a stack object covering all stack doublewords occupied
3609       // by the argument.  If the argument is (fully or partially) on
3610       // the stack, or if the argument is fully in registers but the
3611       // caller has allocated the parameter save anyway, we can refer
3612       // directly to the caller's stack frame.  Otherwise, create a
3613       // local copy in our own frame.
3614       int FI;
3615       if (HasParameterArea ||
3616           ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
3617         FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true);
3618       else
3619         FI = MFI.CreateStackObject(ArgSize, Align, false);
3620       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3621 
3622       // Handle aggregates smaller than 8 bytes.
3623       if (ObjSize < PtrByteSize) {
3624         // The value of the object is its address, which differs from the
3625         // address of the enclosing doubleword on big-endian systems.
3626         SDValue Arg = FIN;
3627         if (!isLittleEndian) {
3628           SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
3629           Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
3630         }
3631         InVals.push_back(Arg);
3632 
3633         if (GPR_idx != Num_GPR_Regs) {
3634           unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3635           FuncInfo->addLiveInAttr(VReg, Flags);
3636           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3637           SDValue Store;
3638 
3639           if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
3640             EVT ObjType = (ObjSize == 1 ? MVT::i8 :
3641                            (ObjSize == 2 ? MVT::i16 : MVT::i32));
3642             Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
3643                                       MachinePointerInfo(&*FuncArg), ObjType);
3644           } else {
3645             // For sizes that don't fit a truncating store (3, 5, 6, 7),
3646             // store the whole register as-is to the parameter save area
3647             // slot.
3648             Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
3649                                  MachinePointerInfo(&*FuncArg));
3650           }
3651 
3652           MemOps.push_back(Store);
3653         }
3654         // Whether we copied from a register or not, advance the offset
3655         // into the parameter save area by a full doubleword.
3656         ArgOffset += PtrByteSize;
3657         continue;
3658       }
3659 
3660       // The value of the object is its address, which is the address of
3661       // its first stack doubleword.
3662       InVals.push_back(FIN);
3663 
3664       // Store whatever pieces of the object are in registers to memory.
3665       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
3666         if (GPR_idx == Num_GPR_Regs)
3667           break;
3668 
3669         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3670         FuncInfo->addLiveInAttr(VReg, Flags);
3671         SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3672         SDValue Addr = FIN;
3673         if (j) {
3674           SDValue Off = DAG.getConstant(j, dl, PtrVT);
3675           Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
3676         }
3677         SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
3678                                      MachinePointerInfo(&*FuncArg, j));
3679         MemOps.push_back(Store);
3680         ++GPR_idx;
3681       }
3682       ArgOffset += ArgSize;
3683       continue;
3684     }
3685 
3686     switch (ObjectVT.getSimpleVT().SimpleTy) {
3687     default: llvm_unreachable("Unhandled argument type!");
3688     case MVT::i1:
3689     case MVT::i32:
3690     case MVT::i64:
3691       if (Flags.isNest()) {
3692         // The 'nest' parameter, if any, is passed in R11.
3693         unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
3694         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
3695 
3696         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
3697           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
3698 
3699         break;
3700       }
3701 
3702       // These can be scalar arguments or elements of an integer array type
3703       // passed directly.  Clang may use those instead of "byval" aggregate
3704       // types to avoid forcing arguments to memory unnecessarily.
3705       if (GPR_idx != Num_GPR_Regs) {
3706         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3707         FuncInfo->addLiveInAttr(VReg, Flags);
3708         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
3709 
3710         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
3711           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
3712           // value to MVT::i64 and then truncate to the correct register size.
3713           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
3714       } else {
3715         if (CallConv == CallingConv::Fast)
3716           ComputeArgOffset();
3717 
3718         needsLoad = true;
3719         ArgSize = PtrByteSize;
3720       }
3721       if (CallConv != CallingConv::Fast || needsLoad)
3722         ArgOffset += 8;
3723       break;
3724 
3725     case MVT::f32:
3726     case MVT::f64:
3727       // These can be scalar arguments or elements of a float array type
3728       // passed directly.  The latter are used to implement ELFv2 homogenous
3729       // float aggregates.
3730       if (FPR_idx != Num_FPR_Regs) {
3731         unsigned VReg;
3732 
3733         if (ObjectVT == MVT::f32)
3734           VReg = MF.addLiveIn(FPR[FPR_idx],
3735                               Subtarget.hasP8Vector()
3736                                   ? &PPC::VSSRCRegClass
3737                                   : &PPC::F4RCRegClass);
3738         else
3739           VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
3740                                                 ? &PPC::VSFRCRegClass
3741                                                 : &PPC::F8RCRegClass);
3742 
3743         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
3744         ++FPR_idx;
3745       } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
3746         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
3747         // once we support fp <-> gpr moves.
3748 
3749         // This can only ever happen in the presence of f32 array types,
3750         // since otherwise we never run out of FPRs before running out
3751         // of GPRs.
3752         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3753         FuncInfo->addLiveInAttr(VReg, Flags);
3754         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
3755 
3756         if (ObjectVT == MVT::f32) {
3757           if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
3758             ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
3759                                  DAG.getConstant(32, dl, MVT::i32));
3760           ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
3761         }
3762 
3763         ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
3764       } else {
3765         if (CallConv == CallingConv::Fast)
3766           ComputeArgOffset();
3767 
3768         needsLoad = true;
3769       }
3770 
3771       // When passing an array of floats, the array occupies consecutive
3772       // space in the argument area; only round up to the next doubleword
3773       // at the end of the array.  Otherwise, each float takes 8 bytes.
3774       if (CallConv != CallingConv::Fast || needsLoad) {
3775         ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
3776         ArgOffset += ArgSize;
3777         if (Flags.isInConsecutiveRegsLast())
3778           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3779       }
3780       break;
3781     case MVT::v4f32:
3782     case MVT::v4i32:
3783     case MVT::v8i16:
3784     case MVT::v16i8:
3785     case MVT::v2f64:
3786     case MVT::v2i64:
3787     case MVT::v1i128:
3788       if (!Subtarget.hasQPX()) {
3789       // These can be scalar arguments or elements of a vector array type
3790       // passed directly.  The latter are used to implement ELFv2 homogenous
3791       // vector aggregates.
3792       if (VR_idx != Num_VR_Regs) {
3793         unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
3794         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
3795         ++VR_idx;
3796       } else {
3797         if (CallConv == CallingConv::Fast)
3798           ComputeArgOffset();
3799 
3800         needsLoad = true;
3801       }
3802       if (CallConv != CallingConv::Fast || needsLoad)
3803         ArgOffset += 16;
3804       break;
3805       } // not QPX
3806 
3807       assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 &&
3808              "Invalid QPX parameter type");
3809       /* fall through */
3810 
3811     case MVT::v4f64:
3812     case MVT::v4i1:
3813       // QPX vectors are treated like their scalar floating-point subregisters
3814       // (except that they're larger).
3815       unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32;
3816       if (QFPR_idx != Num_QFPR_Regs) {
3817         const TargetRegisterClass *RC;
3818         switch (ObjectVT.getSimpleVT().SimpleTy) {
3819         case MVT::v4f64: RC = &PPC::QFRCRegClass; break;
3820         case MVT::v4f32: RC = &PPC::QSRCRegClass; break;
3821         default:         RC = &PPC::QBRCRegClass; break;
3822         }
3823 
3824         unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC);
3825         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
3826         ++QFPR_idx;
3827       } else {
3828         if (CallConv == CallingConv::Fast)
3829           ComputeArgOffset();
3830         needsLoad = true;
3831       }
3832       if (CallConv != CallingConv::Fast || needsLoad)
3833         ArgOffset += Sz;
3834       break;
3835     }
3836 
3837     // We need to load the argument to a virtual register if we determined
3838     // above that we ran out of physical registers of the appropriate type.
3839     if (needsLoad) {
3840       if (ObjSize < ArgSize && !isLittleEndian)
3841         CurArgOffset += ArgSize - ObjSize;
3842       int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
3843       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3844       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
3845     }
3846 
3847     InVals.push_back(ArgVal);
3848   }
3849 
3850   // Area that is at least reserved in the caller of this function.
3851   unsigned MinReservedArea;
3852   if (HasParameterArea)
3853     MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
3854   else
3855     MinReservedArea = LinkageSize;
3856 
3857   // Set the size that is at least reserved in caller of this function.  Tail
3858   // call optimized functions' reserved stack space needs to be aligned so that
3859   // taking the difference between two stack areas will result in an aligned
3860   // stack.
3861   MinReservedArea =
3862       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
3863   FuncInfo->setMinReservedArea(MinReservedArea);
3864 
3865   // If the function takes variable number of arguments, make a frame index for
3866   // the start of the first vararg value... for expansion of llvm.va_start.
3867   if (isVarArg) {
3868     int Depth = ArgOffset;
3869 
3870     FuncInfo->setVarArgsFrameIndex(
3871       MFI.CreateFixedObject(PtrByteSize, Depth, true));
3872     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3873 
3874     // If this function is vararg, store any remaining integer argument regs
3875     // to their spots on the stack so that they may be loaded by dereferencing
3876     // the result of va_next.
3877     for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
3878          GPR_idx < Num_GPR_Regs; ++GPR_idx) {
3879       unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3880       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3881       SDValue Store =
3882           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3883       MemOps.push_back(Store);
3884       // Increment the address by four for the next argument to store
3885       SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
3886       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3887     }
3888   }
3889 
3890   if (!MemOps.empty())
3891     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3892 
3893   return Chain;
3894 }
3895 
3896 SDValue PPCTargetLowering::LowerFormalArguments_Darwin(
3897     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3898     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3899     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3900   // TODO: add description of PPC stack frame format, or at least some docs.
3901   //
3902   MachineFunction &MF = DAG.getMachineFunction();
3903   MachineFrameInfo &MFI = MF.getFrameInfo();
3904   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3905 
3906   EVT PtrVT = getPointerTy(MF.getDataLayout());
3907   bool isPPC64 = PtrVT == MVT::i64;
3908   // Potential tail calls could cause overwriting of argument stack slots.
3909   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3910                        (CallConv == CallingConv::Fast));
3911   unsigned PtrByteSize = isPPC64 ? 8 : 4;
3912   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3913   unsigned ArgOffset = LinkageSize;
3914   // Area that is at least reserved in caller of this function.
3915   unsigned MinReservedArea = ArgOffset;
3916 
3917   static const MCPhysReg GPR_32[] = {           // 32-bit registers.
3918     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
3919     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3920   };
3921   static const MCPhysReg GPR_64[] = {           // 64-bit registers.
3922     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
3923     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
3924   };
3925   static const MCPhysReg VR[] = {
3926     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
3927     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
3928   };
3929 
3930   const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
3931   const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
3932   const unsigned Num_VR_Regs  = array_lengthof( VR);
3933 
3934   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
3935 
3936   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
3937 
3938   // In 32-bit non-varargs functions, the stack space for vectors is after the
3939   // stack space for non-vectors.  We do not use this space unless we have
3940   // too many vectors to fit in registers, something that only occurs in
3941   // constructed examples:), but we have to walk the arglist to figure
3942   // that out...for the pathological case, compute VecArgOffset as the
3943   // start of the vector parameter area.  Computing VecArgOffset is the
3944   // entire point of the following loop.
3945   unsigned VecArgOffset = ArgOffset;
3946   if (!isVarArg && !isPPC64) {
3947     for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
3948          ++ArgNo) {
3949       EVT ObjectVT = Ins[ArgNo].VT;
3950       ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
3951 
3952       if (Flags.isByVal()) {
3953         // ObjSize is the true size, ArgSize rounded up to multiple of regs.
3954         unsigned ObjSize = Flags.getByValSize();
3955         unsigned ArgSize =
3956                 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3957         VecArgOffset += ArgSize;
3958         continue;
3959       }
3960 
3961       switch(ObjectVT.getSimpleVT().SimpleTy) {
3962       default: llvm_unreachable("Unhandled argument type!");
3963       case MVT::i1:
3964       case MVT::i32:
3965       case MVT::f32:
3966         VecArgOffset += 4;
3967         break;
3968       case MVT::i64:  // PPC64
3969       case MVT::f64:
3970         // FIXME: We are guaranteed to be !isPPC64 at this point.
3971         // Does MVT::i64 apply?
3972         VecArgOffset += 8;
3973         break;
3974       case MVT::v4f32:
3975       case MVT::v4i32:
3976       case MVT::v8i16:
3977       case MVT::v16i8:
3978         // Nothing to do, we're only looking at Nonvector args here.
3979         break;
3980       }
3981     }
3982   }
3983   // We've found where the vector parameter area in memory is.  Skip the
3984   // first 12 parameters; these don't use that memory.
3985   VecArgOffset = ((VecArgOffset+15)/16)*16;
3986   VecArgOffset += 12*16;
3987 
3988   // Add DAG nodes to load the arguments or copy them out of registers.  On
3989   // entry to a function on PPC, the arguments start after the linkage area,
3990   // although the first ones are often in registers.
3991 
3992   SmallVector<SDValue, 8> MemOps;
3993   unsigned nAltivecParamsAtEnd = 0;
3994   Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
3995   unsigned CurArgIdx = 0;
3996   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
3997     SDValue ArgVal;
3998     bool needsLoad = false;
3999     EVT ObjectVT = Ins[ArgNo].VT;
4000     unsigned ObjSize = ObjectVT.getSizeInBits()/8;
4001     unsigned ArgSize = ObjSize;
4002     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4003     if (Ins[ArgNo].isOrigArg()) {
4004       std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
4005       CurArgIdx = Ins[ArgNo].getOrigArgIndex();
4006     }
4007     unsigned CurArgOffset = ArgOffset;
4008 
4009     // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary.
4010     if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
4011         ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
4012       if (isVarArg || isPPC64) {
4013         MinReservedArea = ((MinReservedArea+15)/16)*16;
4014         MinReservedArea += CalculateStackSlotSize(ObjectVT,
4015                                                   Flags,
4016                                                   PtrByteSize);
4017       } else  nAltivecParamsAtEnd++;
4018     } else
4019       // Calculate min reserved area.
4020       MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
4021                                                 Flags,
4022                                                 PtrByteSize);
4023 
4024     // FIXME the codegen can be much improved in some cases.
4025     // We do not have to keep everything in memory.
4026     if (Flags.isByVal()) {
4027       assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
4028 
4029       // ObjSize is the true size, ArgSize rounded up to multiple of registers.
4030       ObjSize = Flags.getByValSize();
4031       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4032       // Objects of size 1 and 2 are right justified, everything else is
4033       // left justified.  This means the memory address is adjusted forwards.
4034       if (ObjSize==1 || ObjSize==2) {
4035         CurArgOffset = CurArgOffset + (4 - ObjSize);
4036       }
4037       // The value of the object is its address.
4038       int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, false, true);
4039       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4040       InVals.push_back(FIN);
4041       if (ObjSize==1 || ObjSize==2) {
4042         if (GPR_idx != Num_GPR_Regs) {
4043           unsigned VReg;
4044           if (isPPC64)
4045             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4046           else
4047             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4048           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4049           EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16;
4050           SDValue Store =
4051               DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
4052                                 MachinePointerInfo(&*FuncArg), ObjType);
4053           MemOps.push_back(Store);
4054           ++GPR_idx;
4055         }
4056 
4057         ArgOffset += PtrByteSize;
4058 
4059         continue;
4060       }
4061       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
4062         // Store whatever pieces of the object are in registers
4063         // to memory.  ArgOffset will be the address of the beginning
4064         // of the object.
4065         if (GPR_idx != Num_GPR_Regs) {
4066           unsigned VReg;
4067           if (isPPC64)
4068             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4069           else
4070             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4071           int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
4072           SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4073           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4074           SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
4075                                        MachinePointerInfo(&*FuncArg, j));
4076           MemOps.push_back(Store);
4077           ++GPR_idx;
4078           ArgOffset += PtrByteSize;
4079         } else {
4080           ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
4081           break;
4082         }
4083       }
4084       continue;
4085     }
4086 
4087     switch (ObjectVT.getSimpleVT().SimpleTy) {
4088     default: llvm_unreachable("Unhandled argument type!");
4089     case MVT::i1:
4090     case MVT::i32:
4091       if (!isPPC64) {
4092         if (GPR_idx != Num_GPR_Regs) {
4093           unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4094           ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
4095 
4096           if (ObjectVT == MVT::i1)
4097             ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal);
4098 
4099           ++GPR_idx;
4100         } else {
4101           needsLoad = true;
4102           ArgSize = PtrByteSize;
4103         }
4104         // All int arguments reserve stack space in the Darwin ABI.
4105         ArgOffset += PtrByteSize;
4106         break;
4107       }
4108       LLVM_FALLTHROUGH;
4109     case MVT::i64:  // PPC64
4110       if (GPR_idx != Num_GPR_Regs) {
4111         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4112         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4113 
4114         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4115           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4116           // value to MVT::i64 and then truncate to the correct register size.
4117           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4118 
4119         ++GPR_idx;
4120       } else {
4121         needsLoad = true;
4122         ArgSize = PtrByteSize;
4123       }
4124       // All int arguments reserve stack space in the Darwin ABI.
4125       ArgOffset += 8;
4126       break;
4127 
4128     case MVT::f32:
4129     case MVT::f64:
4130       // Every 4 bytes of argument space consumes one of the GPRs available for
4131       // argument passing.
4132       if (GPR_idx != Num_GPR_Regs) {
4133         ++GPR_idx;
4134         if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
4135           ++GPR_idx;
4136       }
4137       if (FPR_idx != Num_FPR_Regs) {
4138         unsigned VReg;
4139 
4140         if (ObjectVT == MVT::f32)
4141           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
4142         else
4143           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);
4144 
4145         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4146         ++FPR_idx;
4147       } else {
4148         needsLoad = true;
4149       }
4150 
4151       // All FP arguments reserve stack space in the Darwin ABI.
4152       ArgOffset += isPPC64 ? 8 : ObjSize;
4153       break;
4154     case MVT::v4f32:
4155     case MVT::v4i32:
4156     case MVT::v8i16:
4157     case MVT::v16i8:
4158       // Note that vector arguments in registers don't reserve stack space,
4159       // except in varargs functions.
4160       if (VR_idx != Num_VR_Regs) {
4161         unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
4162         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4163         if (isVarArg) {
4164           while ((ArgOffset % 16) != 0) {
4165             ArgOffset += PtrByteSize;
4166             if (GPR_idx != Num_GPR_Regs)
4167               GPR_idx++;
4168           }
4169           ArgOffset += 16;
4170           GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
4171         }
4172         ++VR_idx;
4173       } else {
4174         if (!isVarArg && !isPPC64) {
4175           // Vectors go after all the nonvectors.
4176           CurArgOffset = VecArgOffset;
4177           VecArgOffset += 16;
4178         } else {
4179           // Vectors are aligned.
4180           ArgOffset = ((ArgOffset+15)/16)*16;
4181           CurArgOffset = ArgOffset;
4182           ArgOffset += 16;
4183         }
4184         needsLoad = true;
4185       }
4186       break;
4187     }
4188 
4189     // We need to load the argument to a virtual register if we determined above
4190     // that we ran out of physical registers of the appropriate type.
4191     if (needsLoad) {
4192       int FI = MFI.CreateFixedObject(ObjSize,
4193                                      CurArgOffset + (ArgSize - ObjSize),
4194                                      isImmutable);
4195       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4196       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4197     }
4198 
4199     InVals.push_back(ArgVal);
4200   }
4201 
4202   // Allow for Altivec parameters at the end, if needed.
4203   if (nAltivecParamsAtEnd) {
4204     MinReservedArea = ((MinReservedArea+15)/16)*16;
4205     MinReservedArea += 16*nAltivecParamsAtEnd;
4206   }
4207 
4208   // Area that is at least reserved in the caller of this function.
4209   MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);
4210 
4211   // Set the size that is at least reserved in caller of this function.  Tail
4212   // call optimized functions' reserved stack space needs to be aligned so that
4213   // taking the difference between two stack areas will result in an aligned
4214   // stack.
4215   MinReservedArea =
4216       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4217   FuncInfo->setMinReservedArea(MinReservedArea);
4218 
4219   // If the function takes variable number of arguments, make a frame index for
4220   // the start of the first vararg value... for expansion of llvm.va_start.
4221   if (isVarArg) {
4222     int Depth = ArgOffset;
4223 
4224     FuncInfo->setVarArgsFrameIndex(
4225       MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
4226                             Depth, true));
4227     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4228 
4229     // If this function is vararg, store any remaining integer argument regs
4230     // to their spots on the stack so that they may be loaded by dereferencing
4231     // the result of va_next.
4232     for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
4233       unsigned VReg;
4234 
4235       if (isPPC64)
4236         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4237       else
4238         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4239 
4240       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4241       SDValue Store =
4242           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4243       MemOps.push_back(Store);
4244       // Increment the address by four for the next argument to store
4245       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
4246       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4247     }
4248   }
4249 
4250   if (!MemOps.empty())
4251     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4252 
4253   return Chain;
4254 }
4255 
4256 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
4257 /// adjusted to accommodate the arguments for the tailcall.
4258 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
4259                                    unsigned ParamSize) {
4260 
4261   if (!isTailCall) return 0;
4262 
4263   PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
4264   unsigned CallerMinReservedArea = FI->getMinReservedArea();
4265   int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
4266   // Remember only if the new adjustement is bigger.
4267   if (SPDiff < FI->getTailCallSPDelta())
4268     FI->setTailCallSPDelta(SPDiff);
4269 
4270   return SPDiff;
4271 }
4272 
4273 static bool isFunctionGlobalAddress(SDValue Callee);
4274 
4275 static bool
4276 callsShareTOCBase(const Function *Caller, SDValue Callee,
4277                     const TargetMachine &TM) {
4278   // If !G, Callee can be an external symbol.
4279   GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4280   if (!G)
4281     return false;
4282 
4283   // The medium and large code models are expected to provide a sufficiently
4284   // large TOC to provide all data addressing needs of a module with a
4285   // single TOC. Since each module will be addressed with a single TOC then we
4286   // only need to check that caller and callee don't cross dso boundaries.
4287   if (CodeModel::Medium == TM.getCodeModel() ||
4288       CodeModel::Large == TM.getCodeModel())
4289     return TM.shouldAssumeDSOLocal(*Caller->getParent(), G->getGlobal());
4290 
4291   // Otherwise we need to ensure callee and caller are in the same section,
4292   // since the linker may allocate multiple TOCs, and we don't know which
4293   // sections will belong to the same TOC base.
4294 
4295   const GlobalValue *GV = G->getGlobal();
4296   if (!GV->isStrongDefinitionForLinker())
4297     return false;
4298 
4299   // Any explicitly-specified sections and section prefixes must also match.
4300   // Also, if we're using -ffunction-sections, then each function is always in
4301   // a different section (the same is true for COMDAT functions).
4302   if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
4303       GV->getSection() != Caller->getSection())
4304     return false;
4305   if (const auto *F = dyn_cast<Function>(GV)) {
4306     if (F->getSectionPrefix() != Caller->getSectionPrefix())
4307       return false;
4308   }
4309 
4310   // If the callee might be interposed, then we can't assume the ultimate call
4311   // target will be in the same section. Even in cases where we can assume that
4312   // interposition won't happen, in any case where the linker might insert a
4313   // stub to allow for interposition, we must generate code as though
4314   // interposition might occur. To understand why this matters, consider a
4315   // situation where: a -> b -> c where the arrows indicate calls. b and c are
4316   // in the same section, but a is in a different module (i.e. has a different
4317   // TOC base pointer). If the linker allows for interposition between b and c,
4318   // then it will generate a stub for the call edge between b and c which will
4319   // save the TOC pointer into the designated stack slot allocated by b. If we
4320   // return true here, and therefore allow a tail call between b and c, that
4321   // stack slot won't exist and the b -> c stub will end up saving b'c TOC base
4322   // pointer into the stack slot allocated by a (where the a -> b stub saved
4323   // a's TOC base pointer). If we're not considering a tail call, but rather,
4324   // whether a nop is needed after the call instruction in b, because the linker
4325   // will insert a stub, it might complain about a missing nop if we omit it
4326   // (although many don't complain in this case).
4327   if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
4328     return false;
4329 
4330   return true;
4331 }
4332 
4333 static bool
4334 needStackSlotPassParameters(const PPCSubtarget &Subtarget,
4335                             const SmallVectorImpl<ISD::OutputArg> &Outs) {
4336   assert(Subtarget.isSVR4ABI() && Subtarget.isPPC64());
4337 
4338   const unsigned PtrByteSize = 8;
4339   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4340 
4341   static const MCPhysReg GPR[] = {
4342     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4343     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4344   };
4345   static const MCPhysReg VR[] = {
4346     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4347     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4348   };
4349 
4350   const unsigned NumGPRs = array_lengthof(GPR);
4351   const unsigned NumFPRs = 13;
4352   const unsigned NumVRs = array_lengthof(VR);
4353   const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
4354 
4355   unsigned NumBytes = LinkageSize;
4356   unsigned AvailableFPRs = NumFPRs;
4357   unsigned AvailableVRs = NumVRs;
4358 
4359   for (const ISD::OutputArg& Param : Outs) {
4360     if (Param.Flags.isNest()) continue;
4361 
4362     if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags,
4363                                PtrByteSize, LinkageSize, ParamAreaSize,
4364                                NumBytes, AvailableFPRs, AvailableVRs,
4365                                Subtarget.hasQPX()))
4366       return true;
4367   }
4368   return false;
4369 }
4370 
4371 static bool
4372 hasSameArgumentList(const Function *CallerFn, ImmutableCallSite CS) {
4373   if (CS.arg_size() != CallerFn->arg_size())
4374     return false;
4375 
4376   ImmutableCallSite::arg_iterator CalleeArgIter = CS.arg_begin();
4377   ImmutableCallSite::arg_iterator CalleeArgEnd = CS.arg_end();
4378   Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();
4379 
4380   for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
4381     const Value* CalleeArg = *CalleeArgIter;
4382     const Value* CallerArg = &(*CallerArgIter);
4383     if (CalleeArg == CallerArg)
4384       continue;
4385 
4386     // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
4387     //        tail call @callee([4 x i64] undef, [4 x i64] %b)
4388     //      }
4389     // 1st argument of callee is undef and has the same type as caller.
4390     if (CalleeArg->getType() == CallerArg->getType() &&
4391         isa<UndefValue>(CalleeArg))
4392       continue;
4393 
4394     return false;
4395   }
4396 
4397   return true;
4398 }
4399 
4400 // Returns true if TCO is possible between the callers and callees
4401 // calling conventions.
4402 static bool
4403 areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
4404                                     CallingConv::ID CalleeCC) {
4405   // Tail calls are possible with fastcc and ccc.
4406   auto isTailCallableCC  = [] (CallingConv::ID CC){
4407       return  CC == CallingConv::C || CC == CallingConv::Fast;
4408   };
4409   if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
4410     return false;
4411 
4412   // We can safely tail call both fastcc and ccc callees from a c calling
4413   // convention caller. If the caller is fastcc, we may have less stack space
4414   // than a non-fastcc caller with the same signature so disable tail-calls in
4415   // that case.
4416   return CallerCC == CallingConv::C || CallerCC == CalleeCC;
4417 }
4418 
4419 bool
4420 PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
4421                                     SDValue Callee,
4422                                     CallingConv::ID CalleeCC,
4423                                     ImmutableCallSite CS,
4424                                     bool isVarArg,
4425                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
4426                                     const SmallVectorImpl<ISD::InputArg> &Ins,
4427                                     SelectionDAG& DAG) const {
4428   bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
4429 
4430   if (DisableSCO && !TailCallOpt) return false;
4431 
4432   // Variadic argument functions are not supported.
4433   if (isVarArg) return false;
4434 
4435   auto &Caller = DAG.getMachineFunction().getFunction();
4436   // Check that the calling conventions are compatible for tco.
4437   if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
4438     return false;
4439 
4440   // Caller contains any byval parameter is not supported.
4441   if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
4442     return false;
4443 
4444   // Callee contains any byval parameter is not supported, too.
4445   // Note: This is a quick work around, because in some cases, e.g.
4446   // caller's stack size > callee's stack size, we are still able to apply
4447   // sibling call optimization. For example, gcc is able to do SCO for caller1
4448   // in the following example, but not for caller2.
4449   //   struct test {
4450   //     long int a;
4451   //     char ary[56];
4452   //   } gTest;
4453   //   __attribute__((noinline)) int callee(struct test v, struct test *b) {
4454   //     b->a = v.a;
4455   //     return 0;
4456   //   }
4457   //   void caller1(struct test a, struct test c, struct test *b) {
4458   //     callee(gTest, b); }
4459   //   void caller2(struct test *b) { callee(gTest, b); }
4460   if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
4461     return false;
4462 
4463   // If callee and caller use different calling conventions, we cannot pass
4464   // parameters on stack since offsets for the parameter area may be different.
4465   if (Caller.getCallingConv() != CalleeCC &&
4466       needStackSlotPassParameters(Subtarget, Outs))
4467     return false;
4468 
4469   // No TCO/SCO on indirect call because Caller have to restore its TOC
4470   if (!isFunctionGlobalAddress(Callee) &&
4471       !isa<ExternalSymbolSDNode>(Callee))
4472     return false;
4473 
4474   // If the caller and callee potentially have different TOC bases then we
4475   // cannot tail call since we need to restore the TOC pointer after the call.
4476   // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
4477   if (!callsShareTOCBase(&Caller, Callee, getTargetMachine()))
4478     return false;
4479 
4480   // TCO allows altering callee ABI, so we don't have to check further.
4481   if (CalleeCC == CallingConv::Fast && TailCallOpt)
4482     return true;
4483 
4484   if (DisableSCO) return false;
4485 
4486   // If callee use the same argument list that caller is using, then we can
4487   // apply SCO on this case. If it is not, then we need to check if callee needs
4488   // stack for passing arguments.
4489   if (!hasSameArgumentList(&Caller, CS) &&
4490       needStackSlotPassParameters(Subtarget, Outs)) {
4491     return false;
4492   }
4493 
4494   return true;
4495 }
4496 
4497 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
4498 /// for tail call optimization. Targets which want to do tail call
4499 /// optimization should implement this function.
4500 bool
4501 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
4502                                                      CallingConv::ID CalleeCC,
4503                                                      bool isVarArg,
4504                                       const SmallVectorImpl<ISD::InputArg> &Ins,
4505                                                      SelectionDAG& DAG) const {
4506   if (!getTargetMachine().Options.GuaranteedTailCallOpt)
4507     return false;
4508 
4509   // Variable argument functions are not supported.
4510   if (isVarArg)
4511     return false;
4512 
4513   MachineFunction &MF = DAG.getMachineFunction();
4514   CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
4515   if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
4516     // Functions containing by val parameters are not supported.
4517     for (unsigned i = 0; i != Ins.size(); i++) {
4518        ISD::ArgFlagsTy Flags = Ins[i].Flags;
4519        if (Flags.isByVal()) return false;
4520     }
4521 
4522     // Non-PIC/GOT tail calls are supported.
4523     if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
4524       return true;
4525 
4526     // At the moment we can only do local tail calls (in same module, hidden
4527     // or protected) if we are generating PIC.
4528     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
4529       return G->getGlobal()->hasHiddenVisibility()
4530           || G->getGlobal()->hasProtectedVisibility();
4531   }
4532 
4533   return false;
4534 }
4535 
4536 /// isCallCompatibleAddress - Return the immediate to use if the specified
4537 /// 32-bit value is representable in the immediate field of a BxA instruction.
4538 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
4539   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
4540   if (!C) return nullptr;
4541 
4542   int Addr = C->getZExtValue();
4543   if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
4544       SignExtend32<26>(Addr) != Addr)
4545     return nullptr;  // Top 6 bits have to be sext of immediate.
4546 
4547   return DAG
4548       .getConstant(
4549           (int)C->getZExtValue() >> 2, SDLoc(Op),
4550           DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
4551       .getNode();
4552 }
4553 
4554 namespace {
4555 
4556 struct TailCallArgumentInfo {
4557   SDValue Arg;
4558   SDValue FrameIdxOp;
4559   int FrameIdx = 0;
4560 
4561   TailCallArgumentInfo() = default;
4562 };
4563 
4564 } // end anonymous namespace
4565 
4566 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
4567 static void StoreTailCallArgumentsToStackSlot(
4568     SelectionDAG &DAG, SDValue Chain,
4569     const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
4570     SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
4571   for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
4572     SDValue Arg = TailCallArgs[i].Arg;
4573     SDValue FIN = TailCallArgs[i].FrameIdxOp;
4574     int FI = TailCallArgs[i].FrameIdx;
4575     // Store relative to framepointer.
4576     MemOpChains.push_back(DAG.getStore(
4577         Chain, dl, Arg, FIN,
4578         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
4579   }
4580 }
4581 
4582 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
4583 /// the appropriate stack slot for the tail call optimized function call.
4584 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
4585                                              SDValue OldRetAddr, SDValue OldFP,
4586                                              int SPDiff, const SDLoc &dl) {
4587   if (SPDiff) {
4588     // Calculate the new stack slot for the return address.
4589     MachineFunction &MF = DAG.getMachineFunction();
4590     const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
4591     const PPCFrameLowering *FL = Subtarget.getFrameLowering();
4592     bool isPPC64 = Subtarget.isPPC64();
4593     int SlotSize = isPPC64 ? 8 : 4;
4594     int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
4595     int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize,
4596                                                          NewRetAddrLoc, true);
4597     EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4598     SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
4599     Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
4600                          MachinePointerInfo::getFixedStack(MF, NewRetAddr));
4601 
4602     // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack
4603     // slot as the FP is never overwritten.
4604     if (Subtarget.isDarwinABI()) {
4605       int NewFPLoc = SPDiff + FL->getFramePointerSaveOffset();
4606       int NewFPIdx = MF.getFrameInfo().CreateFixedObject(SlotSize, NewFPLoc,
4607                                                          true);
4608       SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT);
4609       Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx,
4610                            MachinePointerInfo::getFixedStack(
4611                                DAG.getMachineFunction(), NewFPIdx));
4612     }
4613   }
4614   return Chain;
4615 }
4616 
4617 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate
4618 /// the position of the argument.
4619 static void
4620 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
4621                          SDValue Arg, int SPDiff, unsigned ArgOffset,
4622                      SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
4623   int Offset = ArgOffset + SPDiff;
4624   uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
4625   int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
4626   EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4627   SDValue FIN = DAG.getFrameIndex(FI, VT);
4628   TailCallArgumentInfo Info;
4629   Info.Arg = Arg;
4630   Info.FrameIdxOp = FIN;
4631   Info.FrameIdx = FI;
4632   TailCallArguments.push_back(Info);
4633 }
4634 
4635 /// EmitTCFPAndRetAddrLoad - Emit load from frame pointer and return address
4636 /// stack slot. Returns the chain as result and the loaded frame pointers in
4637 /// LROpOut/FPOpout. Used when tail calling.
4638 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
4639     SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
4640     SDValue &FPOpOut, const SDLoc &dl) const {
4641   if (SPDiff) {
4642     // Load the LR and FP stack slot for later adjusting.
4643     EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
4644     LROpOut = getReturnAddrFrameIndex(DAG);
4645     LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
4646     Chain = SDValue(LROpOut.getNode(), 1);
4647 
4648     // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack
4649     // slot as the FP is never overwritten.
4650     if (Subtarget.isDarwinABI()) {
4651       FPOpOut = getFramePointerFrameIndex(DAG);
4652       FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo());
4653       Chain = SDValue(FPOpOut.getNode(), 1);
4654     }
4655   }
4656   return Chain;
4657 }
4658 
4659 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
4660 /// by "Src" to address "Dst" of size "Size".  Alignment information is
4661 /// specified by the specific parameter attribute. The copy will be passed as
4662 /// a byval function parameter.
4663 /// Sometimes what we are copying is the end of a larger object, the part that
4664 /// does not fit in registers.
4665 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
4666                                          SDValue Chain, ISD::ArgFlagsTy Flags,
4667                                          SelectionDAG &DAG, const SDLoc &dl) {
4668   SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
4669   return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
4670                        false, false, false, MachinePointerInfo(),
4671                        MachinePointerInfo());
4672 }
4673 
4674 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
4675 /// tail calls.
4676 static void LowerMemOpCallTo(
4677     SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
4678     SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
4679     bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
4680     SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
4681   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4682   if (!isTailCall) {
4683     if (isVector) {
4684       SDValue StackPtr;
4685       if (isPPC64)
4686         StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
4687       else
4688         StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
4689       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
4690                            DAG.getConstant(ArgOffset, dl, PtrVT));
4691     }
4692     MemOpChains.push_back(
4693         DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
4694     // Calculate and remember argument location.
4695   } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
4696                                   TailCallArguments);
4697 }
4698 
4699 static void
4700 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
4701                 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp,
4702                 SDValue FPOp,
4703                 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
4704   // Emit a sequence of copyto/copyfrom virtual registers for arguments that
4705   // might overwrite each other in case of tail call optimization.
4706   SmallVector<SDValue, 8> MemOpChains2;
4707   // Do not flag preceding copytoreg stuff together with the following stuff.
4708   InFlag = SDValue();
4709   StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
4710                                     MemOpChains2, dl);
4711   if (!MemOpChains2.empty())
4712     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
4713 
4714   // Store the return address to the appropriate stack slot.
4715   Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl);
4716 
4717   // Emit callseq_end just before tailcall node.
4718   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
4719                              DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
4720   InFlag = Chain.getValue(1);
4721 }
4722 
4723 // Is this global address that of a function that can be called by name? (as
4724 // opposed to something that must hold a descriptor for an indirect call).
4725 static bool isFunctionGlobalAddress(SDValue Callee) {
4726   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
4727     if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
4728         Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
4729       return false;
4730 
4731     return G->getGlobal()->getValueType()->isFunctionTy();
4732   }
4733 
4734   return false;
4735 }
4736 
4737 static unsigned
4738 PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, SDValue &Chain,
4739             SDValue CallSeqStart, const SDLoc &dl, int SPDiff, bool isTailCall,
4740             bool isPatchPoint, bool hasNest,
4741             SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
4742             SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys,
4743             ImmutableCallSite CS, const PPCSubtarget &Subtarget) {
4744   bool isPPC64 = Subtarget.isPPC64();
4745   bool isSVR4ABI = Subtarget.isSVR4ABI();
4746   bool isELFv2ABI = Subtarget.isELFv2ABI();
4747 
4748   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4749   NodeTys.push_back(MVT::Other);   // Returns a chain
4750   NodeTys.push_back(MVT::Glue);    // Returns a flag for retval copy to use.
4751 
4752   unsigned CallOpc = PPCISD::CALL;
4753 
4754   bool needIndirectCall = true;
4755   if (!isSVR4ABI || !isPPC64)
4756     if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) {
4757       // If this is an absolute destination address, use the munged value.
4758       Callee = SDValue(Dest, 0);
4759       needIndirectCall = false;
4760     }
4761 
4762   // PC-relative references to external symbols should go through $stub, unless
4763   // we're building with the leopard linker or later, which automatically
4764   // synthesizes these stubs.
4765   const TargetMachine &TM = DAG.getTarget();
4766   const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
4767   const GlobalValue *GV = nullptr;
4768   if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee))
4769     GV = G->getGlobal();
4770   bool Local = TM.shouldAssumeDSOLocal(*Mod, GV);
4771   bool UsePlt = !Local && Subtarget.isTargetELF() && !isPPC64;
4772 
4773   if (isFunctionGlobalAddress(Callee)) {
4774     GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee);
4775     // A call to a TLS address is actually an indirect call to a
4776     // thread-specific pointer.
4777     unsigned OpFlags = 0;
4778     if (UsePlt)
4779       OpFlags = PPCII::MO_PLT;
4780 
4781     // If the callee is a GlobalAddress/ExternalSymbol node (quite common,
4782     // every direct call is) turn it into a TargetGlobalAddress /
4783     // TargetExternalSymbol node so that legalize doesn't hack it.
4784     Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
4785                                         Callee.getValueType(), 0, OpFlags);
4786     needIndirectCall = false;
4787   }
4788 
4789   if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
4790     unsigned char OpFlags = 0;
4791 
4792     if (UsePlt)
4793       OpFlags = PPCII::MO_PLT;
4794 
4795     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(),
4796                                          OpFlags);
4797     needIndirectCall = false;
4798   }
4799 
4800   if (isPatchPoint) {
4801     // We'll form an invalid direct call when lowering a patchpoint; the full
4802     // sequence for an indirect call is complicated, and many of the
4803     // instructions introduced might have side effects (and, thus, can't be
4804     // removed later). The call itself will be removed as soon as the
4805     // argument/return lowering is complete, so the fact that it has the wrong
4806     // kind of operands should not really matter.
4807     needIndirectCall = false;
4808   }
4809 
4810   if (needIndirectCall) {
4811     // Otherwise, this is an indirect call.  We have to use a MTCTR/BCTRL pair
4812     // to do the call, we can't use PPCISD::CALL.
4813     SDValue MTCTROps[] = {Chain, Callee, InFlag};
4814 
4815     if (isSVR4ABI && isPPC64 && !isELFv2ABI) {
4816       // Function pointers in the 64-bit SVR4 ABI do not point to the function
4817       // entry point, but to the function descriptor (the function entry point
4818       // address is part of the function descriptor though).
4819       // The function descriptor is a three doubleword structure with the
4820       // following fields: function entry point, TOC base address and
4821       // environment pointer.
4822       // Thus for a call through a function pointer, the following actions need
4823       // to be performed:
4824       //   1. Save the TOC of the caller in the TOC save area of its stack
4825       //      frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
4826       //   2. Load the address of the function entry point from the function
4827       //      descriptor.
4828       //   3. Load the TOC of the callee from the function descriptor into r2.
4829       //   4. Load the environment pointer from the function descriptor into
4830       //      r11.
4831       //   5. Branch to the function entry point address.
4832       //   6. On return of the callee, the TOC of the caller needs to be
4833       //      restored (this is done in FinishCall()).
4834       //
4835       // The loads are scheduled at the beginning of the call sequence, and the
4836       // register copies are flagged together to ensure that no other
4837       // operations can be scheduled in between. E.g. without flagging the
4838       // copies together, a TOC access in the caller could be scheduled between
4839       // the assignment of the callee TOC and the branch to the callee, which
4840       // results in the TOC access going through the TOC of the callee instead
4841       // of going through the TOC of the caller, which leads to incorrect code.
4842 
4843       // Load the address of the function entry point from the function
4844       // descriptor.
4845       SDValue LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-1);
4846       if (LDChain.getValueType() == MVT::Glue)
4847         LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-2);
4848 
4849       auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
4850                           ? (MachineMemOperand::MODereferenceable |
4851                              MachineMemOperand::MOInvariant)
4852                           : MachineMemOperand::MONone;
4853 
4854       MachinePointerInfo MPI(CS ? CS.getCalledValue() : nullptr);
4855       SDValue LoadFuncPtr = DAG.getLoad(MVT::i64, dl, LDChain, Callee, MPI,
4856                                         /* Alignment = */ 8, MMOFlags);
4857 
4858       // Load environment pointer into r11.
4859       SDValue PtrOff = DAG.getIntPtrConstant(16, dl);
4860       SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff);
4861       SDValue LoadEnvPtr =
4862           DAG.getLoad(MVT::i64, dl, LDChain, AddPtr, MPI.getWithOffset(16),
4863                       /* Alignment = */ 8, MMOFlags);
4864 
4865       SDValue TOCOff = DAG.getIntPtrConstant(8, dl);
4866       SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff);
4867       SDValue TOCPtr =
4868           DAG.getLoad(MVT::i64, dl, LDChain, AddTOC, MPI.getWithOffset(8),
4869                       /* Alignment = */ 8, MMOFlags);
4870 
4871       setUsesTOCBasePtr(DAG);
4872       SDValue TOCVal = DAG.getCopyToReg(Chain, dl, PPC::X2, TOCPtr,
4873                                         InFlag);
4874       Chain = TOCVal.getValue(0);
4875       InFlag = TOCVal.getValue(1);
4876 
4877       // If the function call has an explicit 'nest' parameter, it takes the
4878       // place of the environment pointer.
4879       if (!hasNest) {
4880         SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr,
4881                                           InFlag);
4882 
4883         Chain = EnvVal.getValue(0);
4884         InFlag = EnvVal.getValue(1);
4885       }
4886 
4887       MTCTROps[0] = Chain;
4888       MTCTROps[1] = LoadFuncPtr;
4889       MTCTROps[2] = InFlag;
4890     }
4891 
4892     Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys,
4893                         makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2));
4894     InFlag = Chain.getValue(1);
4895 
4896     NodeTys.clear();
4897     NodeTys.push_back(MVT::Other);
4898     NodeTys.push_back(MVT::Glue);
4899     Ops.push_back(Chain);
4900     CallOpc = PPCISD::BCTRL;
4901     Callee.setNode(nullptr);
4902     // Add use of X11 (holding environment pointer)
4903     if (isSVR4ABI && isPPC64 && !isELFv2ABI && !hasNest)
4904       Ops.push_back(DAG.getRegister(PPC::X11, PtrVT));
4905     // Add CTR register as callee so a bctr can be emitted later.
4906     if (isTailCall)
4907       Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT));
4908   }
4909 
4910   // If this is a direct call, pass the chain and the callee.
4911   if (Callee.getNode()) {
4912     Ops.push_back(Chain);
4913     Ops.push_back(Callee);
4914   }
4915   // If this is a tail call add stack pointer delta.
4916   if (isTailCall)
4917     Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32));
4918 
4919   // Add argument registers to the end of the list so that they are known live
4920   // into the call.
4921   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
4922     Ops.push_back(DAG.getRegister(RegsToPass[i].first,
4923                                   RegsToPass[i].second.getValueType()));
4924 
4925   // All calls, in both the ELF V1 and V2 ABIs, need the TOC register live
4926   // into the call.
4927   if (isSVR4ABI && isPPC64 && !isPatchPoint) {
4928     setUsesTOCBasePtr(DAG);
4929     Ops.push_back(DAG.getRegister(PPC::X2, PtrVT));
4930   }
4931 
4932   return CallOpc;
4933 }
4934 
4935 SDValue PPCTargetLowering::LowerCallResult(
4936     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
4937     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4938     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4939   SmallVector<CCValAssign, 16> RVLocs;
4940   CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
4941                     *DAG.getContext());
4942 
4943   CCRetInfo.AnalyzeCallResult(
4944       Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
4945                ? RetCC_PPC_Cold
4946                : RetCC_PPC);
4947 
4948   // Copy all of the result registers out of their specified physreg.
4949   for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
4950     CCValAssign &VA = RVLocs[i];
4951     assert(VA.isRegLoc() && "Can only return in registers!");
4952 
4953     SDValue Val = DAG.getCopyFromReg(Chain, dl,
4954                                      VA.getLocReg(), VA.getLocVT(), InFlag);
4955     Chain = Val.getValue(1);
4956     InFlag = Val.getValue(2);
4957 
4958     switch (VA.getLocInfo()) {
4959     default: llvm_unreachable("Unknown loc info!");
4960     case CCValAssign::Full: break;
4961     case CCValAssign::AExt:
4962       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
4963       break;
4964     case CCValAssign::ZExt:
4965       Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
4966                         DAG.getValueType(VA.getValVT()));
4967       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
4968       break;
4969     case CCValAssign::SExt:
4970       Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
4971                         DAG.getValueType(VA.getValVT()));
4972       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
4973       break;
4974     }
4975 
4976     InVals.push_back(Val);
4977   }
4978 
4979   return Chain;
4980 }
4981 
4982 SDValue PPCTargetLowering::FinishCall(
4983     CallingConv::ID CallConv, const SDLoc &dl, bool isTailCall, bool isVarArg,
4984     bool isPatchPoint, bool hasNest, SelectionDAG &DAG,
4985     SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue InFlag,
4986     SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
4987     unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
4988     SmallVectorImpl<SDValue> &InVals, ImmutableCallSite CS) const {
4989   std::vector<EVT> NodeTys;
4990   SmallVector<SDValue, 8> Ops;
4991   unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, CallSeqStart, dl,
4992                                  SPDiff, isTailCall, isPatchPoint, hasNest,
4993                                  RegsToPass, Ops, NodeTys, CS, Subtarget);
4994 
4995   // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls
4996   if (isVarArg && Subtarget.isSVR4ABI() && !Subtarget.isPPC64())
4997     Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));
4998 
4999   // When performing tail call optimization the callee pops its arguments off
5000   // the stack. Account for this here so these bytes can be pushed back on in
5001   // PPCFrameLowering::eliminateCallFramePseudoInstr.
5002   int BytesCalleePops =
5003     (CallConv == CallingConv::Fast &&
5004      getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0;
5005 
5006   // Add a register mask operand representing the call-preserved registers.
5007   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
5008   const uint32_t *Mask =
5009       TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
5010   assert(Mask && "Missing call preserved mask for calling convention");
5011   Ops.push_back(DAG.getRegisterMask(Mask));
5012 
5013   if (InFlag.getNode())
5014     Ops.push_back(InFlag);
5015 
5016   // Emit tail call.
5017   if (isTailCall) {
5018     assert(((Callee.getOpcode() == ISD::Register &&
5019              cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
5020             Callee.getOpcode() == ISD::TargetExternalSymbol ||
5021             Callee.getOpcode() == ISD::TargetGlobalAddress ||
5022             isa<ConstantSDNode>(Callee)) &&
5023     "Expecting an global address, external symbol, absolute value or register");
5024 
5025     DAG.getMachineFunction().getFrameInfo().setHasTailCall();
5026     return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, Ops);
5027   }
5028 
5029   // Add a NOP immediately after the branch instruction when using the 64-bit
5030   // SVR4 ABI. At link time, if caller and callee are in a different module and
5031   // thus have a different TOC, the call will be replaced with a call to a stub
5032   // function which saves the current TOC, loads the TOC of the callee and
5033   // branches to the callee. The NOP will be replaced with a load instruction
5034   // which restores the TOC of the caller from the TOC save slot of the current
5035   // stack frame. If caller and callee belong to the same module (and have the
5036   // same TOC), the NOP will remain unchanged.
5037 
5038   MachineFunction &MF = DAG.getMachineFunction();
5039   if (!isTailCall && Subtarget.isSVR4ABI()&& Subtarget.isPPC64() &&
5040       !isPatchPoint) {
5041     if (CallOpc == PPCISD::BCTRL) {
5042       // This is a call through a function pointer.
5043       // Restore the caller TOC from the save area into R2.
5044       // See PrepareCall() for more information about calls through function
5045       // pointers in the 64-bit SVR4 ABI.
5046       // We are using a target-specific load with r2 hard coded, because the
5047       // result of a target-independent load would never go directly into r2,
5048       // since r2 is a reserved register (which prevents the register allocator
5049       // from allocating it), resulting in an additional register being
5050       // allocated and an unnecessary move instruction being generated.
5051       CallOpc = PPCISD::BCTRL_LOAD_TOC;
5052 
5053       EVT PtrVT = getPointerTy(DAG.getDataLayout());
5054       SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT);
5055       unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
5056       SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
5057       SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff);
5058 
5059       // The address needs to go after the chain input but before the flag (or
5060       // any other variadic arguments).
5061       Ops.insert(std::next(Ops.begin()), AddTOC);
5062     } else if (CallOpc == PPCISD::CALL &&
5063       !callsShareTOCBase(&MF.getFunction(), Callee, DAG.getTarget())) {
5064       // Otherwise insert NOP for non-local calls.
5065       CallOpc = PPCISD::CALL_NOP;
5066     }
5067   }
5068 
5069   Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
5070   InFlag = Chain.getValue(1);
5071 
5072   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
5073                              DAG.getIntPtrConstant(BytesCalleePops, dl, true),
5074                              InFlag, dl);
5075   if (!Ins.empty())
5076     InFlag = Chain.getValue(1);
5077 
5078   return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
5079                          Ins, dl, DAG, InVals);
5080 }
5081 
5082 SDValue
5083 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
5084                              SmallVectorImpl<SDValue> &InVals) const {
5085   SelectionDAG &DAG                     = CLI.DAG;
5086   SDLoc &dl                             = CLI.DL;
5087   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
5088   SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
5089   SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
5090   SDValue Chain                         = CLI.Chain;
5091   SDValue Callee                        = CLI.Callee;
5092   bool &isTailCall                      = CLI.IsTailCall;
5093   CallingConv::ID CallConv              = CLI.CallConv;
5094   bool isVarArg                         = CLI.IsVarArg;
5095   bool isPatchPoint                     = CLI.IsPatchPoint;
5096   ImmutableCallSite CS                  = CLI.CS;
5097 
5098   if (isTailCall) {
5099     if (Subtarget.useLongCalls() && !(CS && CS.isMustTailCall()))
5100       isTailCall = false;
5101     else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
5102       isTailCall =
5103         IsEligibleForTailCallOptimization_64SVR4(Callee, CallConv, CS,
5104                                                  isVarArg, Outs, Ins, DAG);
5105     else
5106       isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
5107                                                      Ins, DAG);
5108     if (isTailCall) {
5109       ++NumTailCalls;
5110       if (!getTargetMachine().Options.GuaranteedTailCallOpt)
5111         ++NumSiblingCalls;
5112 
5113       assert(isa<GlobalAddressSDNode>(Callee) &&
5114              "Callee should be an llvm::Function object.");
5115       DEBUG(
5116         const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
5117         const unsigned Width = 80 - strlen("TCO caller: ")
5118                                   - strlen(", callee linkage: 0, 0");
5119         dbgs() << "TCO caller: "
5120                << left_justify(DAG.getMachineFunction().getName(), Width)
5121                << ", callee linkage: "
5122                << GV->getVisibility() << ", " << GV->getLinkage() << "\n"
5123       );
5124     }
5125   }
5126 
5127   if (!isTailCall && CS && CS.isMustTailCall())
5128     report_fatal_error("failed to perform tail call elimination on a call "
5129                        "site marked musttail");
5130 
5131   // When long calls (i.e. indirect calls) are always used, calls are always
5132   // made via function pointer. If we have a function name, first translate it
5133   // into a pointer.
5134   if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
5135       !isTailCall)
5136     Callee = LowerGlobalAddress(Callee, DAG);
5137 
5138   if (Subtarget.isSVR4ABI()) {
5139     if (Subtarget.isPPC64())
5140       return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
5141                               isTailCall, isPatchPoint, Outs, OutVals, Ins,
5142                               dl, DAG, InVals, CS);
5143     else
5144       return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg,
5145                               isTailCall, isPatchPoint, Outs, OutVals, Ins,
5146                               dl, DAG, InVals, CS);
5147   }
5148 
5149   return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
5150                           isTailCall, isPatchPoint, Outs, OutVals, Ins,
5151                           dl, DAG, InVals, CS);
5152 }
5153 
5154 SDValue PPCTargetLowering::LowerCall_32SVR4(
5155     SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
5156     bool isTailCall, bool isPatchPoint,
5157     const SmallVectorImpl<ISD::OutputArg> &Outs,
5158     const SmallVectorImpl<SDValue> &OutVals,
5159     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5160     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5161     ImmutableCallSite CS) const {
5162   // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
5163   // of the 32-bit SVR4 ABI stack frame layout.
5164 
5165   assert((CallConv == CallingConv::C ||
5166           CallConv == CallingConv::Cold ||
5167           CallConv == CallingConv::Fast) && "Unknown calling convention!");
5168 
5169   unsigned PtrByteSize = 4;
5170 
5171   MachineFunction &MF = DAG.getMachineFunction();
5172 
5173   // Mark this function as potentially containing a function that contains a
5174   // tail call. As a consequence the frame pointer will be used for dynamicalloc
5175   // and restoring the callers stack pointer in this functions epilog. This is
5176   // done because by tail calling the called function might overwrite the value
5177   // in this function's (MF) stack pointer stack slot 0(SP).
5178   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5179       CallConv == CallingConv::Fast)
5180     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5181 
5182   // Count how many bytes are to be pushed on the stack, including the linkage
5183   // area, parameter list area and the part of the local variable space which
5184   // contains copies of aggregates which are passed by value.
5185 
5186   // Assign locations to all of the outgoing arguments.
5187   SmallVector<CCValAssign, 16> ArgLocs;
5188   PPCCCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
5189 
5190   // Reserve space for the linkage area on the stack.
5191   CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
5192                        PtrByteSize);
5193   if (useSoftFloat())
5194     CCInfo.PreAnalyzeCallOperands(Outs);
5195 
5196   if (isVarArg) {
5197     // Handle fixed and variable vector arguments differently.
5198     // Fixed vector arguments go into registers as long as registers are
5199     // available. Variable vector arguments always go into memory.
5200     unsigned NumArgs = Outs.size();
5201 
5202     for (unsigned i = 0; i != NumArgs; ++i) {
5203       MVT ArgVT = Outs[i].VT;
5204       ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
5205       bool Result;
5206 
5207       if (Outs[i].IsFixed) {
5208         Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
5209                                CCInfo);
5210       } else {
5211         Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
5212                                       ArgFlags, CCInfo);
5213       }
5214 
5215       if (Result) {
5216 #ifndef NDEBUG
5217         errs() << "Call operand #" << i << " has unhandled type "
5218              << EVT(ArgVT).getEVTString() << "\n";
5219 #endif
5220         llvm_unreachable(nullptr);
5221       }
5222     }
5223   } else {
5224     // All arguments are treated the same.
5225     CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
5226   }
5227   CCInfo.clearWasPPCF128();
5228 
5229   // Assign locations to all of the outgoing aggregate by value arguments.
5230   SmallVector<CCValAssign, 16> ByValArgLocs;
5231   CCState CCByValInfo(CallConv, isVarArg, MF, ByValArgLocs, *DAG.getContext());
5232 
5233   // Reserve stack space for the allocations in CCInfo.
5234   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
5235 
5236   CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
5237 
5238   // Size of the linkage area, parameter list area and the part of the local
5239   // space variable where copies of aggregates which are passed by value are
5240   // stored.
5241   unsigned NumBytes = CCByValInfo.getNextStackOffset();
5242 
5243   // Calculate by how many bytes the stack has to be adjusted in case of tail
5244   // call optimization.
5245   int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
5246 
5247   // Adjust the stack pointer for the new arguments...
5248   // These operations are automatically eliminated by the prolog/epilog pass
5249   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5250   SDValue CallSeqStart = Chain;
5251 
5252   // Load the return address and frame pointer so it can be moved somewhere else
5253   // later.
5254   SDValue LROp, FPOp;
5255   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5256 
5257   // Set up a copy of the stack pointer for use loading and storing any
5258   // arguments that may not fit in the registers available for argument
5259   // passing.
5260   SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5261 
5262   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5263   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5264   SmallVector<SDValue, 8> MemOpChains;
5265 
5266   bool seenFloatArg = false;
5267   // Walk the register/memloc assignments, inserting copies/loads.
5268   for (unsigned i = 0, j = 0, e = ArgLocs.size();
5269        i != e;
5270        ++i) {
5271     CCValAssign &VA = ArgLocs[i];
5272     SDValue Arg = OutVals[i];
5273     ISD::ArgFlagsTy Flags = Outs[i].Flags;
5274 
5275     if (Flags.isByVal()) {
5276       // Argument is an aggregate which is passed by value, thus we need to
5277       // create a copy of it in the local variable space of the current stack
5278       // frame (which is the stack frame of the caller) and pass the address of
5279       // this copy to the callee.
5280       assert((j < ByValArgLocs.size()) && "Index out of bounds!");
5281       CCValAssign &ByValVA = ByValArgLocs[j++];
5282       assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
5283 
5284       // Memory reserved in the local variable space of the callers stack frame.
5285       unsigned LocMemOffset = ByValVA.getLocMemOffset();
5286 
5287       SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5288       PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5289                            StackPtr, PtrOff);
5290 
5291       // Create a copy of the argument in the local area of the current
5292       // stack frame.
5293       SDValue MemcpyCall =
5294         CreateCopyOfByValArgument(Arg, PtrOff,
5295                                   CallSeqStart.getNode()->getOperand(0),
5296                                   Flags, DAG, dl);
5297 
5298       // This must go outside the CALLSEQ_START..END.
5299       SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0,
5300                                                      SDLoc(MemcpyCall));
5301       DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5302                              NewCallSeqStart.getNode());
5303       Chain = CallSeqStart = NewCallSeqStart;
5304 
5305       // Pass the address of the aggregate copy on the stack either in a
5306       // physical register or in the parameter list area of the current stack
5307       // frame to the callee.
5308       Arg = PtrOff;
5309     }
5310 
5311     if (VA.isRegLoc()) {
5312       if (Arg.getValueType() == MVT::i1)
5313         Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Arg);
5314 
5315       seenFloatArg |= VA.getLocVT().isFloatingPoint();
5316       // Put argument in a physical register.
5317       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
5318     } else {
5319       // Put argument in the parameter list area of the current stack frame.
5320       assert(VA.isMemLoc());
5321       unsigned LocMemOffset = VA.getLocMemOffset();
5322 
5323       if (!isTailCall) {
5324         SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5325         PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5326                              StackPtr, PtrOff);
5327 
5328         MemOpChains.push_back(
5329             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
5330       } else {
5331         // Calculate and remember argument location.
5332         CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
5333                                  TailCallArguments);
5334       }
5335     }
5336   }
5337 
5338   if (!MemOpChains.empty())
5339     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
5340 
5341   // Build a sequence of copy-to-reg nodes chained together with token chain
5342   // and flag operands which copy the outgoing args into the appropriate regs.
5343   SDValue InFlag;
5344   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
5345     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
5346                              RegsToPass[i].second, InFlag);
5347     InFlag = Chain.getValue(1);
5348   }
5349 
5350   // Set CR bit 6 to true if this is a vararg call with floating args passed in
5351   // registers.
5352   if (isVarArg) {
5353     SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
5354     SDValue Ops[] = { Chain, InFlag };
5355 
5356     Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
5357                         dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));
5358 
5359     InFlag = Chain.getValue(1);
5360   }
5361 
5362   if (isTailCall)
5363     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
5364                     TailCallArguments);
5365 
5366   return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
5367                     /* unused except on PPC64 ELFv1 */ false, DAG,
5368                     RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
5369                     NumBytes, Ins, InVals, CS);
5370 }
5371 
5372 // Copy an argument into memory, being careful to do this outside the
5373 // call sequence for the call to which the argument belongs.
5374 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
5375     SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
5376     SelectionDAG &DAG, const SDLoc &dl) const {
5377   SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
5378                         CallSeqStart.getNode()->getOperand(0),
5379                         Flags, DAG, dl);
5380   // The MEMCPY must go outside the CALLSEQ_START..END.
5381   int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
5382   SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
5383                                                  SDLoc(MemcpyCall));
5384   DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5385                          NewCallSeqStart.getNode());
5386   return NewCallSeqStart;
5387 }
5388 
5389 SDValue PPCTargetLowering::LowerCall_64SVR4(
5390     SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
5391     bool isTailCall, bool isPatchPoint,
5392     const SmallVectorImpl<ISD::OutputArg> &Outs,
5393     const SmallVectorImpl<SDValue> &OutVals,
5394     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5395     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5396     ImmutableCallSite CS) const {
5397   bool isELFv2ABI = Subtarget.isELFv2ABI();
5398   bool isLittleEndian = Subtarget.isLittleEndian();
5399   unsigned NumOps = Outs.size();
5400   bool hasNest = false;
5401   bool IsSibCall = false;
5402 
5403   EVT PtrVT = getPointerTy(DAG.getDataLayout());
5404   unsigned PtrByteSize = 8;
5405 
5406   MachineFunction &MF = DAG.getMachineFunction();
5407 
5408   if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
5409     IsSibCall = true;
5410 
5411   // Mark this function as potentially containing a function that contains a
5412   // tail call. As a consequence the frame pointer will be used for dynamicalloc
5413   // and restoring the callers stack pointer in this functions epilog. This is
5414   // done because by tail calling the called function might overwrite the value
5415   // in this function's (MF) stack pointer stack slot 0(SP).
5416   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5417       CallConv == CallingConv::Fast)
5418     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5419 
5420   assert(!(CallConv == CallingConv::Fast && isVarArg) &&
5421          "fastcc not supported on varargs functions");
5422 
5423   // Count how many bytes are to be pushed on the stack, including the linkage
5424   // area, and parameter passing area.  On ELFv1, the linkage area is 48 bytes
5425   // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
5426   // area is 32 bytes reserved space for [SP][CR][LR][TOC].
5427   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
5428   unsigned NumBytes = LinkageSize;
5429   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
5430   unsigned &QFPR_idx = FPR_idx;
5431 
5432   static const MCPhysReg GPR[] = {
5433     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
5434     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
5435   };
5436   static const MCPhysReg VR[] = {
5437     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
5438     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
5439   };
5440 
5441   const unsigned NumGPRs = array_lengthof(GPR);
5442   const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
5443   const unsigned NumVRs  = array_lengthof(VR);
5444   const unsigned NumQFPRs = NumFPRs;
5445 
5446   // On ELFv2, we can avoid allocating the parameter area if all the arguments
5447   // can be passed to the callee in registers.
5448   // For the fast calling convention, there is another check below.
5449   // Note: We should keep consistent with LowerFormalArguments_64SVR4()
5450   bool HasParameterArea = !isELFv2ABI || isVarArg || CallConv == CallingConv::Fast;
5451   if (!HasParameterArea) {
5452     unsigned ParamAreaSize = NumGPRs * PtrByteSize;
5453     unsigned AvailableFPRs = NumFPRs;
5454     unsigned AvailableVRs = NumVRs;
5455     unsigned NumBytesTmp = NumBytes;
5456     for (unsigned i = 0; i != NumOps; ++i) {
5457       if (Outs[i].Flags.isNest()) continue;
5458       if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags,
5459                                 PtrByteSize, LinkageSize, ParamAreaSize,
5460                                 NumBytesTmp, AvailableFPRs, AvailableVRs,
5461                                 Subtarget.hasQPX()))
5462         HasParameterArea = true;
5463     }
5464   }
5465 
5466   // When using the fast calling convention, we don't provide backing for
5467   // arguments that will be in registers.
5468   unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
5469 
5470   // Avoid allocating parameter area for fastcc functions if all the arguments
5471   // can be passed in the registers.
5472   if (CallConv == CallingConv::Fast)
5473     HasParameterArea = false;
5474 
5475   // Add up all the space actually used.
5476   for (unsigned i = 0; i != NumOps; ++i) {
5477     ISD::ArgFlagsTy Flags = Outs[i].Flags;
5478     EVT ArgVT = Outs[i].VT;
5479     EVT OrigVT = Outs[i].ArgVT;
5480 
5481     if (Flags.isNest())
5482       continue;
5483 
5484     if (CallConv == CallingConv::Fast) {
5485       if (Flags.isByVal()) {
5486         NumGPRsUsed += (Flags.getByValSize()+7)/8;
5487         if (NumGPRsUsed > NumGPRs)
5488           HasParameterArea = true;
5489       } else {
5490         switch (ArgVT.getSimpleVT().SimpleTy) {
5491         default: llvm_unreachable("Unexpected ValueType for argument!");
5492         case MVT::i1:
5493         case MVT::i32:
5494         case MVT::i64:
5495           if (++NumGPRsUsed <= NumGPRs)
5496             continue;
5497           break;
5498         case MVT::v4i32:
5499         case MVT::v8i16:
5500         case MVT::v16i8:
5501         case MVT::v2f64:
5502         case MVT::v2i64:
5503         case MVT::v1i128:
5504           if (++NumVRsUsed <= NumVRs)
5505             continue;
5506           break;
5507         case MVT::v4f32:
5508           // When using QPX, this is handled like a FP register, otherwise, it
5509           // is an Altivec register.
5510           if (Subtarget.hasQPX()) {
5511             if (++NumFPRsUsed <= NumFPRs)
5512               continue;
5513           } else {
5514             if (++NumVRsUsed <= NumVRs)
5515               continue;
5516           }
5517           break;
5518         case MVT::f32:
5519         case MVT::f64:
5520         case MVT::v4f64: // QPX
5521         case MVT::v4i1:  // QPX
5522           if (++NumFPRsUsed <= NumFPRs)
5523             continue;
5524           break;
5525         }
5526         HasParameterArea = true;
5527       }
5528     }
5529 
5530     /* Respect alignment of argument on the stack.  */
5531     unsigned Align =
5532       CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
5533     NumBytes = ((NumBytes + Align - 1) / Align) * Align;
5534 
5535     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
5536     if (Flags.isInConsecutiveRegsLast())
5537       NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
5538   }
5539 
5540   unsigned NumBytesActuallyUsed = NumBytes;
5541 
5542   // In the old ELFv1 ABI,
5543   // the prolog code of the callee may store up to 8 GPR argument registers to
5544   // the stack, allowing va_start to index over them in memory if its varargs.
5545   // Because we cannot tell if this is needed on the caller side, we have to
5546   // conservatively assume that it is needed.  As such, make sure we have at
5547   // least enough stack space for the caller to store the 8 GPRs.
5548   // In the ELFv2 ABI, we allocate the parameter area iff a callee
5549   // really requires memory operands, e.g. a vararg function.
5550   if (HasParameterArea)
5551     NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
5552   else
5553     NumBytes = LinkageSize;
5554 
5555   // Tail call needs the stack to be aligned.
5556   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5557       CallConv == CallingConv::Fast)
5558     NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
5559 
5560   int SPDiff = 0;
5561 
5562   // Calculate by how many bytes the stack has to be adjusted in case of tail
5563   // call optimization.
5564   if (!IsSibCall)
5565     SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
5566 
5567   // To protect arguments on the stack from being clobbered in a tail call,
5568   // force all the loads to happen before doing any other lowering.
5569   if (isTailCall)
5570     Chain = DAG.getStackArgumentTokenFactor(Chain);
5571 
5572   // Adjust the stack pointer for the new arguments...
5573   // These operations are automatically eliminated by the prolog/epilog pass
5574   if (!IsSibCall)
5575     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5576   SDValue CallSeqStart = Chain;
5577 
5578   // Load the return address and frame pointer so it can be move somewhere else
5579   // later.
5580   SDValue LROp, FPOp;
5581   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5582 
5583   // Set up a copy of the stack pointer for use loading and storing any
5584   // arguments that may not fit in the registers available for argument
5585   // passing.
5586   SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
5587 
5588   // Figure out which arguments are going to go in registers, and which in
5589   // memory.  Also, if this is a vararg function, floating point operations
5590   // must be stored to our stack, and loaded into integer regs as well, if
5591   // any integer regs are available for argument passing.
5592   unsigned ArgOffset = LinkageSize;
5593 
5594   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5595   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5596 
5597   SmallVector<SDValue, 8> MemOpChains;
5598   for (unsigned i = 0; i != NumOps; ++i) {
5599     SDValue Arg = OutVals[i];
5600     ISD::ArgFlagsTy Flags = Outs[i].Flags;
5601     EVT ArgVT = Outs[i].VT;
5602     EVT OrigVT = Outs[i].ArgVT;
5603 
5604     // PtrOff will be used to store the current argument to the stack if a
5605     // register cannot be found for it.
5606     SDValue PtrOff;
5607 
5608     // We re-align the argument offset for each argument, except when using the
5609     // fast calling convention, when we need to make sure we do that only when
5610     // we'll actually use a stack slot.
5611     auto ComputePtrOff = [&]() {
5612       /* Respect alignment of argument on the stack.  */
5613       unsigned Align =
5614         CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
5615       ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
5616 
5617       PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
5618 
5619       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
5620     };
5621 
5622     if (CallConv != CallingConv::Fast) {
5623       ComputePtrOff();
5624 
5625       /* Compute GPR index associated with argument offset.  */
5626       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
5627       GPR_idx = std::min(GPR_idx, NumGPRs);
5628     }
5629 
5630     // Promote integers to 64-bit values.
5631     if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
5632       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
5633       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
5634       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
5635     }
5636 
5637     // FIXME memcpy is used way more than necessary.  Correctness first.
5638     // Note: "by value" is code for passing a structure by value, not
5639     // basic types.
5640     if (Flags.isByVal()) {
5641       // Note: Size includes alignment padding, so
5642       //   struct x { short a; char b; }
5643       // will have Size = 4.  With #pragma pack(1), it will have Size = 3.
5644       // These are the proper values we need for right-justifying the
5645       // aggregate in a parameter register.
5646       unsigned Size = Flags.getByValSize();
5647 
5648       // An empty aggregate parameter takes up no storage and no
5649       // registers.
5650       if (Size == 0)
5651         continue;
5652 
5653       if (CallConv == CallingConv::Fast)
5654         ComputePtrOff();
5655 
5656       // All aggregates smaller than 8 bytes must be passed right-justified.
5657       if (Size==1 || Size==2 || Size==4) {
5658         EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
5659         if (GPR_idx != NumGPRs) {
5660           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
5661                                         MachinePointerInfo(), VT);
5662           MemOpChains.push_back(Load.getValue(1));
5663           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5664 
5665           ArgOffset += PtrByteSize;
5666           continue;
5667         }
5668       }
5669 
5670       if (GPR_idx == NumGPRs && Size < 8) {
5671         SDValue AddPtr = PtrOff;
5672         if (!isLittleEndian) {
5673           SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
5674                                           PtrOff.getValueType());
5675           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
5676         }
5677         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
5678                                                           CallSeqStart,
5679                                                           Flags, DAG, dl);
5680         ArgOffset += PtrByteSize;
5681         continue;
5682       }
5683       // Copy entire object into memory.  There are cases where gcc-generated
5684       // code assumes it is there, even if it could be put entirely into
5685       // registers.  (This is not what the doc says.)
5686 
5687       // FIXME: The above statement is likely due to a misunderstanding of the
5688       // documents.  All arguments must be copied into the parameter area BY
5689       // THE CALLEE in the event that the callee takes the address of any
5690       // formal argument.  That has not yet been implemented.  However, it is
5691       // reasonable to use the stack area as a staging area for the register
5692       // load.
5693 
5694       // Skip this for small aggregates, as we will use the same slot for a
5695       // right-justified copy, below.
5696       if (Size >= 8)
5697         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
5698                                                           CallSeqStart,
5699                                                           Flags, DAG, dl);
5700 
5701       // When a register is available, pass a small aggregate right-justified.
5702       if (Size < 8 && GPR_idx != NumGPRs) {
5703         // The easiest way to get this right-justified in a register
5704         // is to copy the structure into the rightmost portion of a
5705         // local variable slot, then load the whole slot into the
5706         // register.
5707         // FIXME: The memcpy seems to produce pretty awful code for
5708         // small aggregates, particularly for packed ones.
5709         // FIXME: It would be preferable to use the slot in the
5710         // parameter save area instead of a new local variable.
5711         SDValue AddPtr = PtrOff;
5712         if (!isLittleEndian) {
5713           SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType());
5714           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
5715         }
5716         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
5717                                                           CallSeqStart,
5718                                                           Flags, DAG, dl);
5719 
5720         // Load the slot into the register.
5721         SDValue Load =
5722             DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo());
5723         MemOpChains.push_back(Load.getValue(1));
5724         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5725 
5726         // Done with this argument.
5727         ArgOffset += PtrByteSize;
5728         continue;
5729       }
5730 
5731       // For aggregates larger than PtrByteSize, copy the pieces of the
5732       // object that fit into registers from the parameter save area.
5733       for (unsigned j=0; j<Size; j+=PtrByteSize) {
5734         SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
5735         SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
5736         if (GPR_idx != NumGPRs) {
5737           SDValue Load =
5738               DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
5739           MemOpChains.push_back(Load.getValue(1));
5740           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5741           ArgOffset += PtrByteSize;
5742         } else {
5743           ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
5744           break;
5745         }
5746       }
5747       continue;
5748     }
5749 
5750     switch (Arg.getSimpleValueType().SimpleTy) {
5751     default: llvm_unreachable("Unexpected ValueType for argument!");
5752     case MVT::i1:
5753     case MVT::i32:
5754     case MVT::i64:
5755       if (Flags.isNest()) {
5756         // The 'nest' parameter, if any, is passed in R11.
5757         RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
5758         hasNest = true;
5759         break;
5760       }
5761 
5762       // These can be scalar arguments or elements of an integer array type
5763       // passed directly.  Clang may use those instead of "byval" aggregate
5764       // types to avoid forcing arguments to memory unnecessarily.
5765       if (GPR_idx != NumGPRs) {
5766         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
5767       } else {
5768         if (CallConv == CallingConv::Fast)
5769           ComputePtrOff();
5770 
5771         assert(HasParameterArea &&
5772                "Parameter area must exist to pass an argument in memory.");
5773         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
5774                          true, isTailCall, false, MemOpChains,
5775                          TailCallArguments, dl);
5776         if (CallConv == CallingConv::Fast)
5777           ArgOffset += PtrByteSize;
5778       }
5779       if (CallConv != CallingConv::Fast)
5780         ArgOffset += PtrByteSize;
5781       break;
5782     case MVT::f32:
5783     case MVT::f64: {
5784       // These can be scalar arguments or elements of a float array type
5785       // passed directly.  The latter are used to implement ELFv2 homogenous
5786       // float aggregates.
5787 
5788       // Named arguments go into FPRs first, and once they overflow, the
5789       // remaining arguments go into GPRs and then the parameter save area.
5790       // Unnamed arguments for vararg functions always go to GPRs and
5791       // then the parameter save area.  For now, put all arguments to vararg
5792       // routines always in both locations (FPR *and* GPR or stack slot).
5793       bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs;
5794       bool NeededLoad = false;
5795 
5796       // First load the argument into the next available FPR.
5797       if (FPR_idx != NumFPRs)
5798         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
5799 
5800       // Next, load the argument into GPR or stack slot if needed.
5801       if (!NeedGPROrStack)
5802         ;
5803       else if (GPR_idx != NumGPRs && CallConv != CallingConv::Fast) {
5804         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
5805         // once we support fp <-> gpr moves.
5806 
5807         // In the non-vararg case, this can only ever happen in the
5808         // presence of f32 array types, since otherwise we never run
5809         // out of FPRs before running out of GPRs.
5810         SDValue ArgVal;
5811 
5812         // Double values are always passed in a single GPR.
5813         if (Arg.getValueType() != MVT::f32) {
5814           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
5815 
5816         // Non-array float values are extended and passed in a GPR.
5817         } else if (!Flags.isInConsecutiveRegs()) {
5818           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
5819           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
5820 
5821         // If we have an array of floats, we collect every odd element
5822         // together with its predecessor into one GPR.
5823         } else if (ArgOffset % PtrByteSize != 0) {
5824           SDValue Lo, Hi;
5825           Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
5826           Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
5827           if (!isLittleEndian)
5828             std::swap(Lo, Hi);
5829           ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
5830 
5831         // The final element, if even, goes into the first half of a GPR.
5832         } else if (Flags.isInConsecutiveRegsLast()) {
5833           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
5834           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
5835           if (!isLittleEndian)
5836             ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
5837                                  DAG.getConstant(32, dl, MVT::i32));
5838 
5839         // Non-final even elements are skipped; they will be handled
5840         // together the with subsequent argument on the next go-around.
5841         } else
5842           ArgVal = SDValue();
5843 
5844         if (ArgVal.getNode())
5845           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
5846       } else {
5847         if (CallConv == CallingConv::Fast)
5848           ComputePtrOff();
5849 
5850         // Single-precision floating-point values are mapped to the
5851         // second (rightmost) word of the stack doubleword.
5852         if (Arg.getValueType() == MVT::f32 &&
5853             !isLittleEndian && !Flags.isInConsecutiveRegs()) {
5854           SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
5855           PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
5856         }
5857 
5858         assert(HasParameterArea &&
5859                "Parameter area must exist to pass an argument in memory.");
5860         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
5861                          true, isTailCall, false, MemOpChains,
5862                          TailCallArguments, dl);
5863 
5864         NeededLoad = true;
5865       }
5866       // When passing an array of floats, the array occupies consecutive
5867       // space in the argument area; only round up to the next doubleword
5868       // at the end of the array.  Otherwise, each float takes 8 bytes.
5869       if (CallConv != CallingConv::Fast || NeededLoad) {
5870         ArgOffset += (Arg.getValueType() == MVT::f32 &&
5871                       Flags.isInConsecutiveRegs()) ? 4 : 8;
5872         if (Flags.isInConsecutiveRegsLast())
5873           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
5874       }
5875       break;
5876     }
5877     case MVT::v4f32:
5878     case MVT::v4i32:
5879     case MVT::v8i16:
5880     case MVT::v16i8:
5881     case MVT::v2f64:
5882     case MVT::v2i64:
5883     case MVT::v1i128:
5884       if (!Subtarget.hasQPX()) {
5885       // These can be scalar arguments or elements of a vector array type
5886       // passed directly.  The latter are used to implement ELFv2 homogenous
5887       // vector aggregates.
5888 
5889       // For a varargs call, named arguments go into VRs or on the stack as
5890       // usual; unnamed arguments always go to the stack or the corresponding
5891       // GPRs when within range.  For now, we always put the value in both
5892       // locations (or even all three).
5893       if (isVarArg) {
5894         assert(HasParameterArea &&
5895                "Parameter area must exist if we have a varargs call.");
5896         // We could elide this store in the case where the object fits
5897         // entirely in R registers.  Maybe later.
5898         SDValue Store =
5899             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
5900         MemOpChains.push_back(Store);
5901         if (VR_idx != NumVRs) {
5902           SDValue Load =
5903               DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
5904           MemOpChains.push_back(Load.getValue(1));
5905           RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
5906         }
5907         ArgOffset += 16;
5908         for (unsigned i=0; i<16; i+=PtrByteSize) {
5909           if (GPR_idx == NumGPRs)
5910             break;
5911           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
5912                                    DAG.getConstant(i, dl, PtrVT));
5913           SDValue Load =
5914               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
5915           MemOpChains.push_back(Load.getValue(1));
5916           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5917         }
5918         break;
5919       }
5920 
5921       // Non-varargs Altivec params go into VRs or on the stack.
5922       if (VR_idx != NumVRs) {
5923         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
5924       } else {
5925         if (CallConv == CallingConv::Fast)
5926           ComputePtrOff();
5927 
5928         assert(HasParameterArea &&
5929                "Parameter area must exist to pass an argument in memory.");
5930         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
5931                          true, isTailCall, true, MemOpChains,
5932                          TailCallArguments, dl);
5933         if (CallConv == CallingConv::Fast)
5934           ArgOffset += 16;
5935       }
5936 
5937       if (CallConv != CallingConv::Fast)
5938         ArgOffset += 16;
5939       break;
5940       } // not QPX
5941 
5942       assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 &&
5943              "Invalid QPX parameter type");
5944 
5945       /* fall through */
5946     case MVT::v4f64:
5947     case MVT::v4i1: {
5948       bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32;
5949       if (isVarArg) {
5950         assert(HasParameterArea &&
5951                "Parameter area must exist if we have a varargs call.");
5952         // We could elide this store in the case where the object fits
5953         // entirely in R registers.  Maybe later.
5954         SDValue Store =
5955             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
5956         MemOpChains.push_back(Store);
5957         if (QFPR_idx != NumQFPRs) {
5958           SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, Store,
5959                                      PtrOff, MachinePointerInfo());
5960           MemOpChains.push_back(Load.getValue(1));
5961           RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load));
5962         }
5963         ArgOffset += (IsF32 ? 16 : 32);
5964         for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) {
5965           if (GPR_idx == NumGPRs)
5966             break;
5967           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
5968                                    DAG.getConstant(i, dl, PtrVT));
5969           SDValue Load =
5970               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
5971           MemOpChains.push_back(Load.getValue(1));
5972           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5973         }
5974         break;
5975       }
5976 
5977       // Non-varargs QPX params go into registers or on the stack.
5978       if (QFPR_idx != NumQFPRs) {
5979         RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg));
5980       } else {
5981         if (CallConv == CallingConv::Fast)
5982           ComputePtrOff();
5983 
5984         assert(HasParameterArea &&
5985                "Parameter area must exist to pass an argument in memory.");
5986         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
5987                          true, isTailCall, true, MemOpChains,
5988                          TailCallArguments, dl);
5989         if (CallConv == CallingConv::Fast)
5990           ArgOffset += (IsF32 ? 16 : 32);
5991       }
5992 
5993       if (CallConv != CallingConv::Fast)
5994         ArgOffset += (IsF32 ? 16 : 32);
5995       break;
5996       }
5997     }
5998   }
5999 
6000   assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
6001          "mismatch in size of parameter area");
6002   (void)NumBytesActuallyUsed;
6003 
6004   if (!MemOpChains.empty())
6005     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6006 
6007   // Check if this is an indirect call (MTCTR/BCTRL).
6008   // See PrepareCall() for more information about calls through function
6009   // pointers in the 64-bit SVR4 ABI.
6010   if (!isTailCall && !isPatchPoint &&
6011       !isFunctionGlobalAddress(Callee) &&
6012       !isa<ExternalSymbolSDNode>(Callee)) {
6013     // Load r2 into a virtual register and store it to the TOC save area.
6014     setUsesTOCBasePtr(DAG);
6015     SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
6016     // TOC save area offset.
6017     unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
6018     SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
6019     SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6020     Chain = DAG.getStore(
6021         Val.getValue(1), dl, Val, AddPtr,
6022         MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
6023     // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
6024     // This does not mean the MTCTR instruction must use R12; it's easier
6025     // to model this as an extra parameter, so do that.
6026     if (isELFv2ABI && !isPatchPoint)
6027       RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
6028   }
6029 
6030   // Build a sequence of copy-to-reg nodes chained together with token chain
6031   // and flag operands which copy the outgoing args into the appropriate regs.
6032   SDValue InFlag;
6033   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6034     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6035                              RegsToPass[i].second, InFlag);
6036     InFlag = Chain.getValue(1);
6037   }
6038 
6039   if (isTailCall && !IsSibCall)
6040     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6041                     TailCallArguments);
6042 
6043   return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, hasNest,
6044                     DAG, RegsToPass, InFlag, Chain, CallSeqStart, Callee,
6045                     SPDiff, NumBytes, Ins, InVals, CS);
6046 }
6047 
6048 SDValue PPCTargetLowering::LowerCall_Darwin(
6049     SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
6050     bool isTailCall, bool isPatchPoint,
6051     const SmallVectorImpl<ISD::OutputArg> &Outs,
6052     const SmallVectorImpl<SDValue> &OutVals,
6053     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6054     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
6055     ImmutableCallSite CS) const {
6056   unsigned NumOps = Outs.size();
6057 
6058   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6059   bool isPPC64 = PtrVT == MVT::i64;
6060   unsigned PtrByteSize = isPPC64 ? 8 : 4;
6061 
6062   MachineFunction &MF = DAG.getMachineFunction();
6063 
6064   // Mark this function as potentially containing a function that contains a
6065   // tail call. As a consequence the frame pointer will be used for dynamicalloc
6066   // and restoring the callers stack pointer in this functions epilog. This is
6067   // done because by tail calling the called function might overwrite the value
6068   // in this function's (MF) stack pointer stack slot 0(SP).
6069   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6070       CallConv == CallingConv::Fast)
6071     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
6072 
6073   // Count how many bytes are to be pushed on the stack, including the linkage
6074   // area, and parameter passing area.  We start with 24/48 bytes, which is
6075   // prereserved space for [SP][CR][LR][3 x unused].
6076   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6077   unsigned NumBytes = LinkageSize;
6078 
6079   // Add up all the space actually used.
6080   // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
6081   // they all go in registers, but we must reserve stack space for them for
6082   // possible use by the caller.  In varargs or 64-bit calls, parameters are
6083   // assigned stack space in order, with padding so Altivec parameters are
6084   // 16-byte aligned.
6085   unsigned nAltivecParamsAtEnd = 0;
6086   for (unsigned i = 0; i != NumOps; ++i) {
6087     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6088     EVT ArgVT = Outs[i].VT;
6089     // Varargs Altivec parameters are padded to a 16 byte boundary.
6090     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
6091         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
6092         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
6093       if (!isVarArg && !isPPC64) {
6094         // Non-varargs Altivec parameters go after all the non-Altivec
6095         // parameters; handle those later so we know how much padding we need.
6096         nAltivecParamsAtEnd++;
6097         continue;
6098       }
6099       // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary.
6100       NumBytes = ((NumBytes+15)/16)*16;
6101     }
6102     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
6103   }
6104 
6105   // Allow for Altivec parameters at the end, if needed.
6106   if (nAltivecParamsAtEnd) {
6107     NumBytes = ((NumBytes+15)/16)*16;
6108     NumBytes += 16*nAltivecParamsAtEnd;
6109   }
6110 
6111   // The prolog code of the callee may store up to 8 GPR argument registers to
6112   // the stack, allowing va_start to index over them in memory if its varargs.
6113   // Because we cannot tell if this is needed on the caller side, we have to
6114   // conservatively assume that it is needed.  As such, make sure we have at
6115   // least enough stack space for the caller to store the 8 GPRs.
6116   NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
6117 
6118   // Tail call needs the stack to be aligned.
6119   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6120       CallConv == CallingConv::Fast)
6121     NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
6122 
6123   // Calculate by how many bytes the stack has to be adjusted in case of tail
6124   // call optimization.
6125   int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
6126 
6127   // To protect arguments on the stack from being clobbered in a tail call,
6128   // force all the loads to happen before doing any other lowering.
6129   if (isTailCall)
6130     Chain = DAG.getStackArgumentTokenFactor(Chain);
6131 
6132   // Adjust the stack pointer for the new arguments...
6133   // These operations are automatically eliminated by the prolog/epilog pass
6134   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6135   SDValue CallSeqStart = Chain;
6136 
6137   // Load the return address and frame pointer so it can be move somewhere else
6138   // later.
6139   SDValue LROp, FPOp;
6140   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
6141 
6142   // Set up a copy of the stack pointer for use loading and storing any
6143   // arguments that may not fit in the registers available for argument
6144   // passing.
6145   SDValue StackPtr;
6146   if (isPPC64)
6147     StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
6148   else
6149     StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
6150 
6151   // Figure out which arguments are going to go in registers, and which in
6152   // memory.  Also, if this is a vararg function, floating point operations
6153   // must be stored to our stack, and loaded into integer regs as well, if
6154   // any integer regs are available for argument passing.
6155   unsigned ArgOffset = LinkageSize;
6156   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
6157 
6158   static const MCPhysReg GPR_32[] = {           // 32-bit registers.
6159     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6160     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
6161   };
6162   static const MCPhysReg GPR_64[] = {           // 64-bit registers.
6163     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6164     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
6165   };
6166   static const MCPhysReg VR[] = {
6167     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
6168     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
6169   };
6170   const unsigned NumGPRs = array_lengthof(GPR_32);
6171   const unsigned NumFPRs = 13;
6172   const unsigned NumVRs  = array_lengthof(VR);
6173 
6174   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
6175 
6176   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6177   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
6178 
6179   SmallVector<SDValue, 8> MemOpChains;
6180   for (unsigned i = 0; i != NumOps; ++i) {
6181     SDValue Arg = OutVals[i];
6182     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6183 
6184     // PtrOff will be used to store the current argument to the stack if a
6185     // register cannot be found for it.
6186     SDValue PtrOff;
6187 
6188     PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
6189 
6190     PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6191 
6192     // On PPC64, promote integers to 64-bit values.
6193     if (isPPC64 && Arg.getValueType() == MVT::i32) {
6194       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
6195       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6196       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
6197     }
6198 
6199     // FIXME memcpy is used way more than necessary.  Correctness first.
6200     // Note: "by value" is code for passing a structure by value, not
6201     // basic types.
6202     if (Flags.isByVal()) {
6203       unsigned Size = Flags.getByValSize();
6204       // Very small objects are passed right-justified.  Everything else is
6205       // passed left-justified.
6206       if (Size==1 || Size==2) {
6207         EVT VT = (Size==1) ? MVT::i8 : MVT::i16;
6208         if (GPR_idx != NumGPRs) {
6209           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
6210                                         MachinePointerInfo(), VT);
6211           MemOpChains.push_back(Load.getValue(1));
6212           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6213 
6214           ArgOffset += PtrByteSize;
6215         } else {
6216           SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
6217                                           PtrOff.getValueType());
6218           SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6219           Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6220                                                             CallSeqStart,
6221                                                             Flags, DAG, dl);
6222           ArgOffset += PtrByteSize;
6223         }
6224         continue;
6225       }
6226       // Copy entire object into memory.  There are cases where gcc-generated
6227       // code assumes it is there, even if it could be put entirely into
6228       // registers.  (This is not what the doc says.)
6229       Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
6230                                                         CallSeqStart,
6231                                                         Flags, DAG, dl);
6232 
6233       // For small aggregates (Darwin only) and aggregates >= PtrByteSize,
6234       // copy the pieces of the object that fit into registers from the
6235       // parameter save area.
6236       for (unsigned j=0; j<Size; j+=PtrByteSize) {
6237         SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6238         SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6239         if (GPR_idx != NumGPRs) {
6240           SDValue Load =
6241               DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
6242           MemOpChains.push_back(Load.getValue(1));
6243           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6244           ArgOffset += PtrByteSize;
6245         } else {
6246           ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
6247           break;
6248         }
6249       }
6250       continue;
6251     }
6252 
6253     switch (Arg.getSimpleValueType().SimpleTy) {
6254     default: llvm_unreachable("Unexpected ValueType for argument!");
6255     case MVT::i1:
6256     case MVT::i32:
6257     case MVT::i64:
6258       if (GPR_idx != NumGPRs) {
6259         if (Arg.getValueType() == MVT::i1)
6260           Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg);
6261 
6262         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
6263       } else {
6264         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6265                          isPPC64, isTailCall, false, MemOpChains,
6266                          TailCallArguments, dl);
6267       }
6268       ArgOffset += PtrByteSize;
6269       break;
6270     case MVT::f32:
6271     case MVT::f64:
6272       if (FPR_idx != NumFPRs) {
6273         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6274 
6275         if (isVarArg) {
6276           SDValue Store =
6277               DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6278           MemOpChains.push_back(Store);
6279 
6280           // Float varargs are always shadowed in available integer registers
6281           if (GPR_idx != NumGPRs) {
6282             SDValue Load =
6283                 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
6284             MemOpChains.push_back(Load.getValue(1));
6285             RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6286           }
6287           if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
6288             SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6289             PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6290             SDValue Load =
6291                 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
6292             MemOpChains.push_back(Load.getValue(1));
6293             RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6294           }
6295         } else {
6296           // If we have any FPRs remaining, we may also have GPRs remaining.
6297           // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
6298           // GPRs.
6299           if (GPR_idx != NumGPRs)
6300             ++GPR_idx;
6301           if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
6302               !isPPC64)  // PPC64 has 64-bit GPR's obviously :)
6303             ++GPR_idx;
6304         }
6305       } else
6306         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6307                          isPPC64, isTailCall, false, MemOpChains,
6308                          TailCallArguments, dl);
6309       if (isPPC64)
6310         ArgOffset += 8;
6311       else
6312         ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
6313       break;
6314     case MVT::v4f32:
6315     case MVT::v4i32:
6316     case MVT::v8i16:
6317     case MVT::v16i8:
6318       if (isVarArg) {
6319         // These go aligned on the stack, or in the corresponding R registers
6320         // when within range.  The Darwin PPC ABI doc claims they also go in
6321         // V registers; in fact gcc does this only for arguments that are
6322         // prototyped, not for those that match the ...  We do it for all
6323         // arguments, seems to work.
6324         while (ArgOffset % 16 !=0) {
6325           ArgOffset += PtrByteSize;
6326           if (GPR_idx != NumGPRs)
6327             GPR_idx++;
6328         }
6329         // We could elide this store in the case where the object fits
6330         // entirely in R registers.  Maybe later.
6331         PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
6332                              DAG.getConstant(ArgOffset, dl, PtrVT));
6333         SDValue Store =
6334             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6335         MemOpChains.push_back(Store);
6336         if (VR_idx != NumVRs) {
6337           SDValue Load =
6338               DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6339           MemOpChains.push_back(Load.getValue(1));
6340           RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6341         }
6342         ArgOffset += 16;
6343         for (unsigned i=0; i<16; i+=PtrByteSize) {
6344           if (GPR_idx == NumGPRs)
6345             break;
6346           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6347                                    DAG.getConstant(i, dl, PtrVT));
6348           SDValue Load =
6349               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6350           MemOpChains.push_back(Load.getValue(1));
6351           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6352         }
6353         break;
6354       }
6355 
6356       // Non-varargs Altivec params generally go in registers, but have
6357       // stack space allocated at the end.
6358       if (VR_idx != NumVRs) {
6359         // Doesn't have GPR space allocated.
6360         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6361       } else if (nAltivecParamsAtEnd==0) {
6362         // We are emitting Altivec params in order.
6363         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6364                          isPPC64, isTailCall, true, MemOpChains,
6365                          TailCallArguments, dl);
6366         ArgOffset += 16;
6367       }
6368       break;
6369     }
6370   }
6371   // If all Altivec parameters fit in registers, as they usually do,
6372   // they get stack space following the non-Altivec parameters.  We
6373   // don't track this here because nobody below needs it.
6374   // If there are more Altivec parameters than fit in registers emit
6375   // the stores here.
6376   if (!isVarArg && nAltivecParamsAtEnd > NumVRs) {
6377     unsigned j = 0;
6378     // Offset is aligned; skip 1st 12 params which go in V registers.
6379     ArgOffset = ((ArgOffset+15)/16)*16;
6380     ArgOffset += 12*16;
6381     for (unsigned i = 0; i != NumOps; ++i) {
6382       SDValue Arg = OutVals[i];
6383       EVT ArgType = Outs[i].VT;
6384       if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
6385           ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
6386         if (++j > NumVRs) {
6387           SDValue PtrOff;
6388           // We are emitting Altivec params in order.
6389           LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6390                            isPPC64, isTailCall, true, MemOpChains,
6391                            TailCallArguments, dl);
6392           ArgOffset += 16;
6393         }
6394       }
6395     }
6396   }
6397 
6398   if (!MemOpChains.empty())
6399     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6400 
6401   // On Darwin, R12 must contain the address of an indirect callee.  This does
6402   // not mean the MTCTR instruction must use R12; it's easier to model this as
6403   // an extra parameter, so do that.
6404   if (!isTailCall &&
6405       !isFunctionGlobalAddress(Callee) &&
6406       !isa<ExternalSymbolSDNode>(Callee) &&
6407       !isBLACompatibleAddress(Callee, DAG))
6408     RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 :
6409                                                    PPC::R12), Callee));
6410 
6411   // Build a sequence of copy-to-reg nodes chained together with token chain
6412   // and flag operands which copy the outgoing args into the appropriate regs.
6413   SDValue InFlag;
6414   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6415     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6416                              RegsToPass[i].second, InFlag);
6417     InFlag = Chain.getValue(1);
6418   }
6419 
6420   if (isTailCall)
6421     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6422                     TailCallArguments);
6423 
6424   return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
6425                     /* unused except on PPC64 ELFv1 */ false, DAG,
6426                     RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
6427                     NumBytes, Ins, InVals, CS);
6428 }
6429 
6430 bool
6431 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
6432                                   MachineFunction &MF, bool isVarArg,
6433                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
6434                                   LLVMContext &Context) const {
6435   SmallVector<CCValAssign, 16> RVLocs;
6436   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
6437   return CCInfo.CheckReturn(
6438       Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
6439                 ? RetCC_PPC_Cold
6440                 : RetCC_PPC);
6441 }
6442 
6443 SDValue
6444 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
6445                                bool isVarArg,
6446                                const SmallVectorImpl<ISD::OutputArg> &Outs,
6447                                const SmallVectorImpl<SDValue> &OutVals,
6448                                const SDLoc &dl, SelectionDAG &DAG) const {
6449   SmallVector<CCValAssign, 16> RVLocs;
6450   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
6451                  *DAG.getContext());
6452   CCInfo.AnalyzeReturn(Outs,
6453                        (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
6454                            ? RetCC_PPC_Cold
6455                            : RetCC_PPC);
6456 
6457   SDValue Flag;
6458   SmallVector<SDValue, 4> RetOps(1, Chain);
6459 
6460   // Copy the result values into the output registers.
6461   for (unsigned i = 0; i != RVLocs.size(); ++i) {
6462     CCValAssign &VA = RVLocs[i];
6463     assert(VA.isRegLoc() && "Can only return in registers!");
6464 
6465     SDValue Arg = OutVals[i];
6466 
6467     switch (VA.getLocInfo()) {
6468     default: llvm_unreachable("Unknown loc info!");
6469     case CCValAssign::Full: break;
6470     case CCValAssign::AExt:
6471       Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
6472       break;
6473     case CCValAssign::ZExt:
6474       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
6475       break;
6476     case CCValAssign::SExt:
6477       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
6478       break;
6479     }
6480 
6481     Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
6482     Flag = Chain.getValue(1);
6483     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
6484   }
6485 
6486   const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
6487   const MCPhysReg *I =
6488     TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
6489   if (I) {
6490     for (; *I; ++I) {
6491 
6492       if (PPC::G8RCRegClass.contains(*I))
6493         RetOps.push_back(DAG.getRegister(*I, MVT::i64));
6494       else if (PPC::F8RCRegClass.contains(*I))
6495         RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
6496       else if (PPC::CRRCRegClass.contains(*I))
6497         RetOps.push_back(DAG.getRegister(*I, MVT::i1));
6498       else if (PPC::VRRCRegClass.contains(*I))
6499         RetOps.push_back(DAG.getRegister(*I, MVT::Other));
6500       else
6501         llvm_unreachable("Unexpected register class in CSRsViaCopy!");
6502     }
6503   }
6504 
6505   RetOps[0] = Chain;  // Update chain.
6506 
6507   // Add the flag if we have it.
6508   if (Flag.getNode())
6509     RetOps.push_back(Flag);
6510 
6511   return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
6512 }
6513 
6514 SDValue
6515 PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
6516                                                 SelectionDAG &DAG) const {
6517   SDLoc dl(Op);
6518 
6519   // Get the correct type for integers.
6520   EVT IntVT = Op.getValueType();
6521 
6522   // Get the inputs.
6523   SDValue Chain = Op.getOperand(0);
6524   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
6525   // Build a DYNAREAOFFSET node.
6526   SDValue Ops[2] = {Chain, FPSIdx};
6527   SDVTList VTs = DAG.getVTList(IntVT);
6528   return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
6529 }
6530 
6531 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
6532                                              SelectionDAG &DAG) const {
6533   // When we pop the dynamic allocation we need to restore the SP link.
6534   SDLoc dl(Op);
6535 
6536   // Get the correct type for pointers.
6537   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6538 
6539   // Construct the stack pointer operand.
6540   bool isPPC64 = Subtarget.isPPC64();
6541   unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
6542   SDValue StackPtr = DAG.getRegister(SP, PtrVT);
6543 
6544   // Get the operands for the STACKRESTORE.
6545   SDValue Chain = Op.getOperand(0);
6546   SDValue SaveSP = Op.getOperand(1);
6547 
6548   // Load the old link SP.
6549   SDValue LoadLinkSP =
6550       DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());
6551 
6552   // Restore the stack pointer.
6553   Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
6554 
6555   // Store the old link SP.
6556   return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
6557 }
6558 
6559 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
6560   MachineFunction &MF = DAG.getMachineFunction();
6561   bool isPPC64 = Subtarget.isPPC64();
6562   EVT PtrVT = getPointerTy(MF.getDataLayout());
6563 
6564   // Get current frame pointer save index.  The users of this index will be
6565   // primarily DYNALLOC instructions.
6566   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
6567   int RASI = FI->getReturnAddrSaveIndex();
6568 
6569   // If the frame pointer save index hasn't been defined yet.
6570   if (!RASI) {
6571     // Find out what the fix offset of the frame pointer save area.
6572     int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
6573     // Allocate the frame index for frame pointer save area.
6574     RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
6575     // Save the result.
6576     FI->setReturnAddrSaveIndex(RASI);
6577   }
6578   return DAG.getFrameIndex(RASI, PtrVT);
6579 }
6580 
6581 SDValue
6582 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
6583   MachineFunction &MF = DAG.getMachineFunction();
6584   bool isPPC64 = Subtarget.isPPC64();
6585   EVT PtrVT = getPointerTy(MF.getDataLayout());
6586 
6587   // Get current frame pointer save index.  The users of this index will be
6588   // primarily DYNALLOC instructions.
6589   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
6590   int FPSI = FI->getFramePointerSaveIndex();
6591 
6592   // If the frame pointer save index hasn't been defined yet.
6593   if (!FPSI) {
6594     // Find out what the fix offset of the frame pointer save area.
6595     int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
6596     // Allocate the frame index for frame pointer save area.
6597     FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
6598     // Save the result.
6599     FI->setFramePointerSaveIndex(FPSI);
6600   }
6601   return DAG.getFrameIndex(FPSI, PtrVT);
6602 }
6603 
6604 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
6605                                                    SelectionDAG &DAG) const {
6606   // Get the inputs.
6607   SDValue Chain = Op.getOperand(0);
6608   SDValue Size  = Op.getOperand(1);
6609   SDLoc dl(Op);
6610 
6611   // Get the correct type for pointers.
6612   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6613   // Negate the size.
6614   SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
6615                                 DAG.getConstant(0, dl, PtrVT), Size);
6616   // Construct a node for the frame pointer save index.
6617   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
6618   // Build a DYNALLOC node.
6619   SDValue Ops[3] = { Chain, NegSize, FPSIdx };
6620   SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
6621   return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
6622 }
6623 
6624 SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
6625                                                      SelectionDAG &DAG) const {
6626   MachineFunction &MF = DAG.getMachineFunction();
6627 
6628   bool isPPC64 = Subtarget.isPPC64();
6629   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6630 
6631   int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false);
6632   return DAG.getFrameIndex(FI, PtrVT);
6633 }
6634 
6635 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
6636                                                SelectionDAG &DAG) const {
6637   SDLoc DL(Op);
6638   return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
6639                      DAG.getVTList(MVT::i32, MVT::Other),
6640                      Op.getOperand(0), Op.getOperand(1));
6641 }
6642 
6643 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
6644                                                 SelectionDAG &DAG) const {
6645   SDLoc DL(Op);
6646   return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
6647                      Op.getOperand(0), Op.getOperand(1));
6648 }
6649 
6650 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
6651   if (Op.getValueType().isVector())
6652     return LowerVectorLoad(Op, DAG);
6653 
6654   assert(Op.getValueType() == MVT::i1 &&
6655          "Custom lowering only for i1 loads");
6656 
6657   // First, load 8 bits into 32 bits, then truncate to 1 bit.
6658 
6659   SDLoc dl(Op);
6660   LoadSDNode *LD = cast<LoadSDNode>(Op);
6661 
6662   SDValue Chain = LD->getChain();
6663   SDValue BasePtr = LD->getBasePtr();
6664   MachineMemOperand *MMO = LD->getMemOperand();
6665 
6666   SDValue NewLD =
6667       DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain,
6668                      BasePtr, MVT::i8, MMO);
6669   SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);
6670 
6671   SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
6672   return DAG.getMergeValues(Ops, dl);
6673 }
6674 
6675 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
6676   if (Op.getOperand(1).getValueType().isVector())
6677     return LowerVectorStore(Op, DAG);
6678 
6679   assert(Op.getOperand(1).getValueType() == MVT::i1 &&
6680          "Custom lowering only for i1 stores");
6681 
6682   // First, zero extend to 32 bits, then use a truncating store to 8 bits.
6683 
6684   SDLoc dl(Op);
6685   StoreSDNode *ST = cast<StoreSDNode>(Op);
6686 
6687   SDValue Chain = ST->getChain();
6688   SDValue BasePtr = ST->getBasePtr();
6689   SDValue Value = ST->getValue();
6690   MachineMemOperand *MMO = ST->getMemOperand();
6691 
6692   Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()),
6693                       Value);
6694   return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
6695 }
6696 
6697 // FIXME: Remove this once the ANDI glue bug is fixed:
6698 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
6699   assert(Op.getValueType() == MVT::i1 &&
6700          "Custom lowering only for i1 results");
6701 
6702   SDLoc DL(Op);
6703   return DAG.getNode(PPCISD::ANDIo_1_GT_BIT, DL, MVT::i1,
6704                      Op.getOperand(0));
6705 }
6706 
6707 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when
6708 /// possible.
6709 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
6710   // Not FP? Not a fsel.
6711   if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
6712       !Op.getOperand(2).getValueType().isFloatingPoint())
6713     return Op;
6714 
6715   // We might be able to do better than this under some circumstances, but in
6716   // general, fsel-based lowering of select is a finite-math-only optimization.
6717   // For more information, see section F.3 of the 2.06 ISA specification.
6718   if (!DAG.getTarget().Options.NoInfsFPMath ||
6719       !DAG.getTarget().Options.NoNaNsFPMath)
6720     return Op;
6721   // TODO: Propagate flags from the select rather than global settings.
6722   SDNodeFlags Flags;
6723   Flags.setNoInfs(true);
6724   Flags.setNoNaNs(true);
6725 
6726   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
6727 
6728   EVT ResVT = Op.getValueType();
6729   EVT CmpVT = Op.getOperand(0).getValueType();
6730   SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
6731   SDValue TV  = Op.getOperand(2), FV  = Op.getOperand(3);
6732   SDLoc dl(Op);
6733 
6734   // If the RHS of the comparison is a 0.0, we don't need to do the
6735   // subtraction at all.
6736   SDValue Sel1;
6737   if (isFloatingPointZero(RHS))
6738     switch (CC) {
6739     default: break;       // SETUO etc aren't handled by fsel.
6740     case ISD::SETNE:
6741       std::swap(TV, FV);
6742       LLVM_FALLTHROUGH;
6743     case ISD::SETEQ:
6744       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
6745         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
6746       Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
6747       if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
6748         Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
6749       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
6750                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
6751     case ISD::SETULT:
6752     case ISD::SETLT:
6753       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
6754       LLVM_FALLTHROUGH;
6755     case ISD::SETOGE:
6756     case ISD::SETGE:
6757       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
6758         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
6759       return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
6760     case ISD::SETUGT:
6761     case ISD::SETGT:
6762       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
6763       LLVM_FALLTHROUGH;
6764     case ISD::SETOLE:
6765     case ISD::SETLE:
6766       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
6767         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
6768       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
6769                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
6770     }
6771 
6772   SDValue Cmp;
6773   switch (CC) {
6774   default: break;       // SETUO etc aren't handled by fsel.
6775   case ISD::SETNE:
6776     std::swap(TV, FV);
6777     LLVM_FALLTHROUGH;
6778   case ISD::SETEQ:
6779     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
6780     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
6781       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
6782     Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
6783     if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
6784       Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
6785     return DAG.getNode(PPCISD::FSEL, dl, ResVT,
6786                        DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
6787   case ISD::SETULT:
6788   case ISD::SETLT:
6789     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
6790     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
6791       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
6792     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
6793   case ISD::SETOGE:
6794   case ISD::SETGE:
6795     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
6796     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
6797       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
6798     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
6799   case ISD::SETUGT:
6800   case ISD::SETGT:
6801     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
6802     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
6803       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
6804     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
6805   case ISD::SETOLE:
6806   case ISD::SETLE:
6807     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
6808     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
6809       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
6810     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
6811   }
6812   return Op;
6813 }
6814 
6815 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
6816                                                SelectionDAG &DAG,
6817                                                const SDLoc &dl) const {
6818   assert(Op.getOperand(0).getValueType().isFloatingPoint());
6819   SDValue Src = Op.getOperand(0);
6820   if (Src.getValueType() == MVT::f32)
6821     Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
6822 
6823   SDValue Tmp;
6824   switch (Op.getSimpleValueType().SimpleTy) {
6825   default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
6826   case MVT::i32:
6827     Tmp = DAG.getNode(
6828         Op.getOpcode() == ISD::FP_TO_SINT
6829             ? PPCISD::FCTIWZ
6830             : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ),
6831         dl, MVT::f64, Src);
6832     break;
6833   case MVT::i64:
6834     assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) &&
6835            "i64 FP_TO_UINT is supported only with FPCVT");
6836     Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
6837                                                         PPCISD::FCTIDUZ,
6838                       dl, MVT::f64, Src);
6839     break;
6840   }
6841 
6842   // Convert the FP value to an int value through memory.
6843   bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() &&
6844     (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT());
6845   SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64);
6846   int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
6847   MachinePointerInfo MPI =
6848       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
6849 
6850   // Emit a store to the stack slot.
6851   SDValue Chain;
6852   if (i32Stack) {
6853     MachineFunction &MF = DAG.getMachineFunction();
6854     MachineMemOperand *MMO =
6855       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4);
6856     SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr };
6857     Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
6858               DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
6859   } else
6860     Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI);
6861 
6862   // Result is a load from the stack slot.  If loading 4 bytes, make sure to
6863   // add in a bias on big endian.
6864   if (Op.getValueType() == MVT::i32 && !i32Stack) {
6865     FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
6866                         DAG.getConstant(4, dl, FIPtr.getValueType()));
6867     MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4);
6868   }
6869 
6870   RLI.Chain = Chain;
6871   RLI.Ptr = FIPtr;
6872   RLI.MPI = MPI;
6873 }
6874 
6875 /// \brief Custom lowers floating point to integer conversions to use
6876 /// the direct move instructions available in ISA 2.07 to avoid the
6877 /// need for load/store combinations.
6878 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op,
6879                                                     SelectionDAG &DAG,
6880                                                     const SDLoc &dl) const {
6881   assert(Op.getOperand(0).getValueType().isFloatingPoint());
6882   SDValue Src = Op.getOperand(0);
6883 
6884   if (Src.getValueType() == MVT::f32)
6885     Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
6886 
6887   SDValue Tmp;
6888   switch (Op.getSimpleValueType().SimpleTy) {
6889   default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
6890   case MVT::i32:
6891     Tmp = DAG.getNode(
6892         Op.getOpcode() == ISD::FP_TO_SINT
6893             ? PPCISD::FCTIWZ
6894             : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ),
6895         dl, MVT::f64, Src);
6896     Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp);
6897     break;
6898   case MVT::i64:
6899     assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) &&
6900            "i64 FP_TO_UINT is supported only with FPCVT");
6901     Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
6902                                                         PPCISD::FCTIDUZ,
6903                       dl, MVT::f64, Src);
6904     Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp);
6905     break;
6906   }
6907   return Tmp;
6908 }
6909 
6910 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
6911                                           const SDLoc &dl) const {
6912   if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
6913     return LowerFP_TO_INTDirectMove(Op, DAG, dl);
6914 
6915   ReuseLoadInfo RLI;
6916   LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
6917 
6918   return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI,
6919                      RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
6920 }
6921 
6922 // We're trying to insert a regular store, S, and then a load, L. If the
6923 // incoming value, O, is a load, we might just be able to have our load use the
6924 // address used by O. However, we don't know if anything else will store to
6925 // that address before we can load from it. To prevent this situation, we need
6926 // to insert our load, L, into the chain as a peer of O. To do this, we give L
6927 // the same chain operand as O, we create a token factor from the chain results
6928 // of O and L, and we replace all uses of O's chain result with that token
6929 // factor (see spliceIntoChain below for this last part).
6930 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
6931                                             ReuseLoadInfo &RLI,
6932                                             SelectionDAG &DAG,
6933                                             ISD::LoadExtType ET) const {
6934   SDLoc dl(Op);
6935   if (ET == ISD::NON_EXTLOAD &&
6936       (Op.getOpcode() == ISD::FP_TO_UINT ||
6937        Op.getOpcode() == ISD::FP_TO_SINT) &&
6938       isOperationLegalOrCustom(Op.getOpcode(),
6939                                Op.getOperand(0).getValueType())) {
6940 
6941     LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
6942     return true;
6943   }
6944 
6945   LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
6946   if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
6947       LD->isNonTemporal())
6948     return false;
6949   if (LD->getMemoryVT() != MemVT)
6950     return false;
6951 
6952   RLI.Ptr = LD->getBasePtr();
6953   if (LD->isIndexed() && !LD->getOffset().isUndef()) {
6954     assert(LD->getAddressingMode() == ISD::PRE_INC &&
6955            "Non-pre-inc AM on PPC?");
6956     RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
6957                           LD->getOffset());
6958   }
6959 
6960   RLI.Chain = LD->getChain();
6961   RLI.MPI = LD->getPointerInfo();
6962   RLI.IsDereferenceable = LD->isDereferenceable();
6963   RLI.IsInvariant = LD->isInvariant();
6964   RLI.Alignment = LD->getAlignment();
6965   RLI.AAInfo = LD->getAAInfo();
6966   RLI.Ranges = LD->getRanges();
6967 
6968   RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
6969   return true;
6970 }
6971 
6972 // Given the head of the old chain, ResChain, insert a token factor containing
6973 // it and NewResChain, and make users of ResChain now be users of that token
6974 // factor.
6975 // TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead.
6976 void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
6977                                         SDValue NewResChain,
6978                                         SelectionDAG &DAG) const {
6979   if (!ResChain)
6980     return;
6981 
6982   SDLoc dl(NewResChain);
6983 
6984   SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
6985                            NewResChain, DAG.getUNDEF(MVT::Other));
6986   assert(TF.getNode() != NewResChain.getNode() &&
6987          "A new TF really is required here");
6988 
6989   DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
6990   DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
6991 }
6992 
6993 /// \brief Analyze profitability of direct move
6994 /// prefer float load to int load plus direct move
6995 /// when there is no integer use of int load
6996 bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
6997   SDNode *Origin = Op.getOperand(0).getNode();
6998   if (Origin->getOpcode() != ISD::LOAD)
6999     return true;
7000 
7001   // If there is no LXSIBZX/LXSIHZX, like Power8,
7002   // prefer direct move if the memory size is 1 or 2 bytes.
7003   MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand();
7004   if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2)
7005     return true;
7006 
7007   for (SDNode::use_iterator UI = Origin->use_begin(),
7008                             UE = Origin->use_end();
7009        UI != UE; ++UI) {
7010 
7011     // Only look at the users of the loaded value.
7012     if (UI.getUse().get().getResNo() != 0)
7013       continue;
7014 
7015     if (UI->getOpcode() != ISD::SINT_TO_FP &&
7016         UI->getOpcode() != ISD::UINT_TO_FP)
7017       return true;
7018   }
7019 
7020   return false;
7021 }
7022 
7023 /// \brief Custom lowers integer to floating point conversions to use
7024 /// the direct move instructions available in ISA 2.07 to avoid the
7025 /// need for load/store combinations.
7026 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
7027                                                     SelectionDAG &DAG,
7028                                                     const SDLoc &dl) const {
7029   assert((Op.getValueType() == MVT::f32 ||
7030           Op.getValueType() == MVT::f64) &&
7031          "Invalid floating point type as target of conversion");
7032   assert(Subtarget.hasFPCVT() &&
7033          "Int to FP conversions with direct moves require FPCVT");
7034   SDValue FP;
7035   SDValue Src = Op.getOperand(0);
7036   bool SinglePrec = Op.getValueType() == MVT::f32;
7037   bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
7038   bool Signed = Op.getOpcode() == ISD::SINT_TO_FP;
7039   unsigned ConvOp = Signed ? (SinglePrec ? PPCISD::FCFIDS : PPCISD::FCFID) :
7040                              (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU);
7041 
7042   if (WordInt) {
7043     FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ,
7044                      dl, MVT::f64, Src);
7045     FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
7046   }
7047   else {
7048     FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src);
7049     FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
7050   }
7051 
7052   return FP;
7053 }
7054 
7055 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
7056                                           SelectionDAG &DAG) const {
7057   SDLoc dl(Op);
7058 
7059   if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) {
7060     if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64)
7061       return SDValue();
7062 
7063     SDValue Value = Op.getOperand(0);
7064     // The values are now known to be -1 (false) or 1 (true). To convert this
7065     // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
7066     // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
7067     Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
7068 
7069     SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
7070 
7071     Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
7072 
7073     if (Op.getValueType() != MVT::v4f64)
7074       Value = DAG.getNode(ISD::FP_ROUND, dl,
7075                           Op.getValueType(), Value,
7076                           DAG.getIntPtrConstant(1, dl));
7077     return Value;
7078   }
7079 
7080   // Don't handle ppc_fp128 here; let it be lowered to a libcall.
7081   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
7082     return SDValue();
7083 
7084   if (Op.getOperand(0).getValueType() == MVT::i1)
7085     return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0),
7086                        DAG.getConstantFP(1.0, dl, Op.getValueType()),
7087                        DAG.getConstantFP(0.0, dl, Op.getValueType()));
7088 
7089   // If we have direct moves, we can do all the conversion, skip the store/load
7090   // however, without FPCVT we can't do most conversions.
7091   if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
7092       Subtarget.isPPC64() && Subtarget.hasFPCVT())
7093     return LowerINT_TO_FPDirectMove(Op, DAG, dl);
7094 
7095   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
7096          "UINT_TO_FP is supported only with FPCVT");
7097 
7098   // If we have FCFIDS, then use it when converting to single-precision.
7099   // Otherwise, convert to double-precision and then round.
7100   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
7101                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
7102                                                             : PPCISD::FCFIDS)
7103                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
7104                                                             : PPCISD::FCFID);
7105   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
7106                   ? MVT::f32
7107                   : MVT::f64;
7108 
7109   if (Op.getOperand(0).getValueType() == MVT::i64) {
7110     SDValue SINT = Op.getOperand(0);
7111     // When converting to single-precision, we actually need to convert
7112     // to double-precision first and then round to single-precision.
7113     // To avoid double-rounding effects during that operation, we have
7114     // to prepare the input operand.  Bits that might be truncated when
7115     // converting to double-precision are replaced by a bit that won't
7116     // be lost at this stage, but is below the single-precision rounding
7117     // position.
7118     //
7119     // However, if -enable-unsafe-fp-math is in effect, accept double
7120     // rounding to avoid the extra overhead.
7121     if (Op.getValueType() == MVT::f32 &&
7122         !Subtarget.hasFPCVT() &&
7123         !DAG.getTarget().Options.UnsafeFPMath) {
7124 
7125       // Twiddle input to make sure the low 11 bits are zero.  (If this
7126       // is the case, we are guaranteed the value will fit into the 53 bit
7127       // mantissa of an IEEE double-precision value without rounding.)
7128       // If any of those low 11 bits were not zero originally, make sure
7129       // bit 12 (value 2048) is set instead, so that the final rounding
7130       // to single-precision gets the correct result.
7131       SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64,
7132                                   SINT, DAG.getConstant(2047, dl, MVT::i64));
7133       Round = DAG.getNode(ISD::ADD, dl, MVT::i64,
7134                           Round, DAG.getConstant(2047, dl, MVT::i64));
7135       Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT);
7136       Round = DAG.getNode(ISD::AND, dl, MVT::i64,
7137                           Round, DAG.getConstant(-2048, dl, MVT::i64));
7138 
7139       // However, we cannot use that value unconditionally: if the magnitude
7140       // of the input value is small, the bit-twiddling we did above might
7141       // end up visibly changing the output.  Fortunately, in that case, we
7142       // don't need to twiddle bits since the original input will convert
7143       // exactly to double-precision floating-point already.  Therefore,
7144       // construct a conditional to use the original value if the top 11
7145       // bits are all sign-bit copies, and use the rounded value computed
7146       // above otherwise.
7147       SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64,
7148                                  SINT, DAG.getConstant(53, dl, MVT::i32));
7149       Cond = DAG.getNode(ISD::ADD, dl, MVT::i64,
7150                          Cond, DAG.getConstant(1, dl, MVT::i64));
7151       Cond = DAG.getSetCC(dl, MVT::i32,
7152                           Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT);
7153 
7154       SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT);
7155     }
7156 
7157     ReuseLoadInfo RLI;
7158     SDValue Bits;
7159 
7160     MachineFunction &MF = DAG.getMachineFunction();
7161     if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
7162       Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI,
7163                          RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
7164       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
7165     } else if (Subtarget.hasLFIWAX() &&
7166                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) {
7167       MachineMemOperand *MMO =
7168         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
7169                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
7170       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
7171       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl,
7172                                      DAG.getVTList(MVT::f64, MVT::Other),
7173                                      Ops, MVT::i32, MMO);
7174       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
7175     } else if (Subtarget.hasFPCVT() &&
7176                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) {
7177       MachineMemOperand *MMO =
7178         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
7179                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
7180       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
7181       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl,
7182                                      DAG.getVTList(MVT::f64, MVT::Other),
7183                                      Ops, MVT::i32, MMO);
7184       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
7185     } else if (((Subtarget.hasLFIWAX() &&
7186                  SINT.getOpcode() == ISD::SIGN_EXTEND) ||
7187                 (Subtarget.hasFPCVT() &&
7188                  SINT.getOpcode() == ISD::ZERO_EXTEND)) &&
7189                SINT.getOperand(0).getValueType() == MVT::i32) {
7190       MachineFrameInfo &MFI = MF.getFrameInfo();
7191       EVT PtrVT = getPointerTy(DAG.getDataLayout());
7192 
7193       int FrameIdx = MFI.CreateStackObject(4, 4, false);
7194       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
7195 
7196       SDValue Store =
7197           DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx,
7198                        MachinePointerInfo::getFixedStack(
7199                            DAG.getMachineFunction(), FrameIdx));
7200 
7201       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
7202              "Expected an i32 store");
7203 
7204       RLI.Ptr = FIdx;
7205       RLI.Chain = Store;
7206       RLI.MPI =
7207           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
7208       RLI.Alignment = 4;
7209 
7210       MachineMemOperand *MMO =
7211         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
7212                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
7213       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
7214       Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ?
7215                                      PPCISD::LFIWZX : PPCISD::LFIWAX,
7216                                      dl, DAG.getVTList(MVT::f64, MVT::Other),
7217                                      Ops, MVT::i32, MMO);
7218     } else
7219       Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);
7220 
7221     SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits);
7222 
7223     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
7224       FP = DAG.getNode(ISD::FP_ROUND, dl,
7225                        MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
7226     return FP;
7227   }
7228 
7229   assert(Op.getOperand(0).getValueType() == MVT::i32 &&
7230          "Unhandled INT_TO_FP type in custom expander!");
7231   // Since we only generate this in 64-bit mode, we can take advantage of
7232   // 64-bit registers.  In particular, sign extend the input value into the
7233   // 64-bit register with extsw, store the WHOLE 64-bit value into the stack
7234   // then lfd it and fcfid it.
7235   MachineFunction &MF = DAG.getMachineFunction();
7236   MachineFrameInfo &MFI = MF.getFrameInfo();
7237   EVT PtrVT = getPointerTy(MF.getDataLayout());
7238 
7239   SDValue Ld;
7240   if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
7241     ReuseLoadInfo RLI;
7242     bool ReusingLoad;
7243     if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI,
7244                                             DAG))) {
7245       int FrameIdx = MFI.CreateStackObject(4, 4, false);
7246       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
7247 
7248       SDValue Store =
7249           DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
7250                        MachinePointerInfo::getFixedStack(
7251                            DAG.getMachineFunction(), FrameIdx));
7252 
7253       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
7254              "Expected an i32 store");
7255 
7256       RLI.Ptr = FIdx;
7257       RLI.Chain = Store;
7258       RLI.MPI =
7259           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
7260       RLI.Alignment = 4;
7261     }
7262 
7263     MachineMemOperand *MMO =
7264       MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
7265                               RLI.Alignment, RLI.AAInfo, RLI.Ranges);
7266     SDValue Ops[] = { RLI.Chain, RLI.Ptr };
7267     Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ?
7268                                    PPCISD::LFIWZX : PPCISD::LFIWAX,
7269                                  dl, DAG.getVTList(MVT::f64, MVT::Other),
7270                                  Ops, MVT::i32, MMO);
7271     if (ReusingLoad)
7272       spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
7273   } else {
7274     assert(Subtarget.isPPC64() &&
7275            "i32->FP without LFIWAX supported only on PPC64");
7276 
7277     int FrameIdx = MFI.CreateStackObject(8, 8, false);
7278     SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
7279 
7280     SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64,
7281                                 Op.getOperand(0));
7282 
7283     // STD the extended value into the stack slot.
7284     SDValue Store = DAG.getStore(
7285         DAG.getEntryNode(), dl, Ext64, FIdx,
7286         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
7287 
7288     // Load the value as a double.
7289     Ld = DAG.getLoad(
7290         MVT::f64, dl, Store, FIdx,
7291         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
7292   }
7293 
7294   // FCFID it and return it.
7295   SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
7296   if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
7297     FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
7298                      DAG.getIntPtrConstant(0, dl));
7299   return FP;
7300 }
7301 
7302 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
7303                                             SelectionDAG &DAG) const {
7304   SDLoc dl(Op);
7305   /*
7306    The rounding mode is in bits 30:31 of FPSR, and has the following
7307    settings:
7308      00 Round to nearest
7309      01 Round to 0
7310      10 Round to +inf
7311      11 Round to -inf
7312 
7313   FLT_ROUNDS, on the other hand, expects the following:
7314     -1 Undefined
7315      0 Round to 0
7316      1 Round to nearest
7317      2 Round to +inf
7318      3 Round to -inf
7319 
7320   To perform the conversion, we do:
7321     ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
7322   */
7323 
7324   MachineFunction &MF = DAG.getMachineFunction();
7325   EVT VT = Op.getValueType();
7326   EVT PtrVT = getPointerTy(MF.getDataLayout());
7327 
7328   // Save FP Control Word to register
7329   EVT NodeTys[] = {
7330     MVT::f64,    // return register
7331     MVT::Glue    // unused in this context
7332   };
7333   SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None);
7334 
7335   // Save FP register to stack slot
7336   int SSFI = MF.getFrameInfo().CreateStackObject(8, 8, false);
7337   SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
7338   SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain, StackSlot,
7339                                MachinePointerInfo());
7340 
7341   // Load FP Control Word from low 32 bits of stack slot.
7342   SDValue Four = DAG.getConstant(4, dl, PtrVT);
7343   SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
7344   SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo());
7345 
7346   // Transform as necessary
7347   SDValue CWD1 =
7348     DAG.getNode(ISD::AND, dl, MVT::i32,
7349                 CWD, DAG.getConstant(3, dl, MVT::i32));
7350   SDValue CWD2 =
7351     DAG.getNode(ISD::SRL, dl, MVT::i32,
7352                 DAG.getNode(ISD::AND, dl, MVT::i32,
7353                             DAG.getNode(ISD::XOR, dl, MVT::i32,
7354                                         CWD, DAG.getConstant(3, dl, MVT::i32)),
7355                             DAG.getConstant(3, dl, MVT::i32)),
7356                 DAG.getConstant(1, dl, MVT::i32));
7357 
7358   SDValue RetVal =
7359     DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
7360 
7361   return DAG.getNode((VT.getSizeInBits() < 16 ?
7362                       ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
7363 }
7364 
7365 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
7366   EVT VT = Op.getValueType();
7367   unsigned BitWidth = VT.getSizeInBits();
7368   SDLoc dl(Op);
7369   assert(Op.getNumOperands() == 3 &&
7370          VT == Op.getOperand(1).getValueType() &&
7371          "Unexpected SHL!");
7372 
7373   // Expand into a bunch of logical ops.  Note that these ops
7374   // depend on the PPC behavior for oversized shift amounts.
7375   SDValue Lo = Op.getOperand(0);
7376   SDValue Hi = Op.getOperand(1);
7377   SDValue Amt = Op.getOperand(2);
7378   EVT AmtVT = Amt.getValueType();
7379 
7380   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
7381                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
7382   SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
7383   SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
7384   SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3);
7385   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
7386                              DAG.getConstant(-BitWidth, dl, AmtVT));
7387   SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
7388   SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
7389   SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
7390   SDValue OutOps[] = { OutLo, OutHi };
7391   return DAG.getMergeValues(OutOps, dl);
7392 }
7393 
7394 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
7395   EVT VT = Op.getValueType();
7396   SDLoc dl(Op);
7397   unsigned BitWidth = VT.getSizeInBits();
7398   assert(Op.getNumOperands() == 3 &&
7399          VT == Op.getOperand(1).getValueType() &&
7400          "Unexpected SRL!");
7401 
7402   // Expand into a bunch of logical ops.  Note that these ops
7403   // depend on the PPC behavior for oversized shift amounts.
7404   SDValue Lo = Op.getOperand(0);
7405   SDValue Hi = Op.getOperand(1);
7406   SDValue Amt = Op.getOperand(2);
7407   EVT AmtVT = Amt.getValueType();
7408 
7409   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
7410                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
7411   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
7412   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
7413   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
7414   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
7415                              DAG.getConstant(-BitWidth, dl, AmtVT));
7416   SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
7417   SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
7418   SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
7419   SDValue OutOps[] = { OutLo, OutHi };
7420   return DAG.getMergeValues(OutOps, dl);
7421 }
7422 
7423 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
7424   SDLoc dl(Op);
7425   EVT VT = Op.getValueType();
7426   unsigned BitWidth = VT.getSizeInBits();
7427   assert(Op.getNumOperands() == 3 &&
7428          VT == Op.getOperand(1).getValueType() &&
7429          "Unexpected SRA!");
7430 
7431   // Expand into a bunch of logical ops, followed by a select_cc.
7432   SDValue Lo = Op.getOperand(0);
7433   SDValue Hi = Op.getOperand(1);
7434   SDValue Amt = Op.getOperand(2);
7435   EVT AmtVT = Amt.getValueType();
7436 
7437   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
7438                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
7439   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
7440   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
7441   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
7442   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
7443                              DAG.getConstant(-BitWidth, dl, AmtVT));
7444   SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
7445   SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
7446   SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT),
7447                                   Tmp4, Tmp6, ISD::SETLE);
7448   SDValue OutOps[] = { OutLo, OutHi };
7449   return DAG.getMergeValues(OutOps, dl);
7450 }
7451 
7452 //===----------------------------------------------------------------------===//
7453 // Vector related lowering.
7454 //
7455 
7456 /// BuildSplatI - Build a canonical splati of Val with an element size of
7457 /// SplatSize.  Cast the result to VT.
7458 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT,
7459                            SelectionDAG &DAG, const SDLoc &dl) {
7460   assert(Val >= -16 && Val <= 15 && "vsplti is out of range!");
7461 
7462   static const MVT VTys[] = { // canonical VT to use for each size.
7463     MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
7464   };
7465 
7466   EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
7467 
7468   // Force vspltis[hw] -1 to vspltisb -1 to canonicalize.
7469   if (Val == -1)
7470     SplatSize = 1;
7471 
7472   EVT CanonicalVT = VTys[SplatSize-1];
7473 
7474   // Build a canonical splat for this value.
7475   return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT));
7476 }
7477 
7478 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the
7479 /// specified intrinsic ID.
7480 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG,
7481                                 const SDLoc &dl, EVT DestVT = MVT::Other) {
7482   if (DestVT == MVT::Other) DestVT = Op.getValueType();
7483   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
7484                      DAG.getConstant(IID, dl, MVT::i32), Op);
7485 }
7486 
7487 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the
7488 /// specified intrinsic ID.
7489 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
7490                                 SelectionDAG &DAG, const SDLoc &dl,
7491                                 EVT DestVT = MVT::Other) {
7492   if (DestVT == MVT::Other) DestVT = LHS.getValueType();
7493   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
7494                      DAG.getConstant(IID, dl, MVT::i32), LHS, RHS);
7495 }
7496 
7497 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
7498 /// specified intrinsic ID.
7499 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
7500                                 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl,
7501                                 EVT DestVT = MVT::Other) {
7502   if (DestVT == MVT::Other) DestVT = Op0.getValueType();
7503   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
7504                      DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
7505 }
7506 
7507 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
7508 /// amount.  The result has the specified value type.
7509 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT,
7510                            SelectionDAG &DAG, const SDLoc &dl) {
7511   // Force LHS/RHS to be the right type.
7512   LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
7513   RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);
7514 
7515   int Ops[16];
7516   for (unsigned i = 0; i != 16; ++i)
7517     Ops[i] = i + Amt;
7518   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
7519   return DAG.getNode(ISD::BITCAST, dl, VT, T);
7520 }
7521 
7522 /// Do we have an efficient pattern in a .td file for this node?
7523 ///
7524 /// \param V - pointer to the BuildVectorSDNode being matched
7525 /// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves?
7526 ///
7527 /// There are some patterns where it is beneficial to keep a BUILD_VECTOR
7528 /// node as a BUILD_VECTOR node rather than expanding it. The patterns where
7529 /// the opposite is true (expansion is beneficial) are:
7530 /// - The node builds a vector out of integers that are not 32 or 64-bits
7531 /// - The node builds a vector out of constants
7532 /// - The node is a "load-and-splat"
7533 /// In all other cases, we will choose to keep the BUILD_VECTOR.
7534 static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V,
7535                                             bool HasDirectMove,
7536                                             bool HasP8Vector) {
7537   EVT VecVT = V->getValueType(0);
7538   bool RightType = VecVT == MVT::v2f64 ||
7539     (HasP8Vector && VecVT == MVT::v4f32) ||
7540     (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32));
7541   if (!RightType)
7542     return false;
7543 
7544   bool IsSplat = true;
7545   bool IsLoad = false;
7546   SDValue Op0 = V->getOperand(0);
7547 
7548   // This function is called in a block that confirms the node is not a constant
7549   // splat. So a constant BUILD_VECTOR here means the vector is built out of
7550   // different constants.
7551   if (V->isConstant())
7552     return false;
7553   for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
7554     if (V->getOperand(i).isUndef())
7555       return false;
7556     // We want to expand nodes that represent load-and-splat even if the
7557     // loaded value is a floating point truncation or conversion to int.
7558     if (V->getOperand(i).getOpcode() == ISD::LOAD ||
7559         (V->getOperand(i).getOpcode() == ISD::FP_ROUND &&
7560          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
7561         (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT &&
7562          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
7563         (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT &&
7564          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD))
7565       IsLoad = true;
7566     // If the operands are different or the input is not a load and has more
7567     // uses than just this BV node, then it isn't a splat.
7568     if (V->getOperand(i) != Op0 ||
7569         (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode())))
7570       IsSplat = false;
7571   }
7572   return !(IsSplat && IsLoad);
7573 }
7574 
7575 // If this is a case we can't handle, return null and let the default
7576 // expansion code take care of it.  If we CAN select this case, and if it
7577 // selects to a single instruction, return Op.  Otherwise, if we can codegen
7578 // this case more efficiently than a constant pool load, lower it to the
7579 // sequence of ops that should be used.
7580 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
7581                                              SelectionDAG &DAG) const {
7582   SDLoc dl(Op);
7583   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
7584   assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
7585 
7586   if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) {
7587     // We first build an i32 vector, load it into a QPX register,
7588     // then convert it to a floating-point vector and compare it
7589     // to a zero vector to get the boolean result.
7590     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
7591     int FrameIdx = MFI.CreateStackObject(16, 16, false);
7592     MachinePointerInfo PtrInfo =
7593         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
7594     EVT PtrVT = getPointerTy(DAG.getDataLayout());
7595     SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
7596 
7597     assert(BVN->getNumOperands() == 4 &&
7598       "BUILD_VECTOR for v4i1 does not have 4 operands");
7599 
7600     bool IsConst = true;
7601     for (unsigned i = 0; i < 4; ++i) {
7602       if (BVN->getOperand(i).isUndef()) continue;
7603       if (!isa<ConstantSDNode>(BVN->getOperand(i))) {
7604         IsConst = false;
7605         break;
7606       }
7607     }
7608 
7609     if (IsConst) {
7610       Constant *One =
7611         ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0);
7612       Constant *NegOne =
7613         ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0);
7614 
7615       Constant *CV[4];
7616       for (unsigned i = 0; i < 4; ++i) {
7617         if (BVN->getOperand(i).isUndef())
7618           CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext()));
7619         else if (isNullConstant(BVN->getOperand(i)))
7620           CV[i] = NegOne;
7621         else
7622           CV[i] = One;
7623       }
7624 
7625       Constant *CP = ConstantVector::get(CV);
7626       SDValue CPIdx = DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()),
7627                                           16 /* alignment */);
7628 
7629       SDValue Ops[] = {DAG.getEntryNode(), CPIdx};
7630       SDVTList VTs = DAG.getVTList({MVT::v4i1, /*chain*/ MVT::Other});
7631       return DAG.getMemIntrinsicNode(
7632           PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32,
7633           MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
7634     }
7635 
7636     SmallVector<SDValue, 4> Stores;
7637     for (unsigned i = 0; i < 4; ++i) {
7638       if (BVN->getOperand(i).isUndef()) continue;
7639 
7640       unsigned Offset = 4*i;
7641       SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
7642       Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
7643 
7644       unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize();
7645       if (StoreSize > 4) {
7646         Stores.push_back(
7647             DAG.getTruncStore(DAG.getEntryNode(), dl, BVN->getOperand(i), Idx,
7648                               PtrInfo.getWithOffset(Offset), MVT::i32));
7649       } else {
7650         SDValue StoreValue = BVN->getOperand(i);
7651         if (StoreSize < 4)
7652           StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue);
7653 
7654         Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, StoreValue, Idx,
7655                                       PtrInfo.getWithOffset(Offset)));
7656       }
7657     }
7658 
7659     SDValue StoreChain;
7660     if (!Stores.empty())
7661       StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
7662     else
7663       StoreChain = DAG.getEntryNode();
7664 
7665     // Now load from v4i32 into the QPX register; this will extend it to
7666     // v4i64 but not yet convert it to a floating point. Nevertheless, this
7667     // is typed as v4f64 because the QPX register integer states are not
7668     // explicitly represented.
7669 
7670     SDValue Ops[] = {StoreChain,
7671                      DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32),
7672                      FIdx};
7673     SDVTList VTs = DAG.getVTList({MVT::v4f64, /*chain*/ MVT::Other});
7674 
7675     SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN,
7676       dl, VTs, Ops, MVT::v4i32, PtrInfo);
7677     LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
7678       DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32),
7679       LoadedVect);
7680 
7681     SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::v4f64);
7682 
7683     return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ);
7684   }
7685 
7686   // All other QPX vectors are handled by generic code.
7687   if (Subtarget.hasQPX())
7688     return SDValue();
7689 
7690   // Check if this is a splat of a constant value.
7691   APInt APSplatBits, APSplatUndef;
7692   unsigned SplatBitSize;
7693   bool HasAnyUndefs;
7694   if (! BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
7695                              HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
7696       SplatBitSize > 32) {
7697     // BUILD_VECTOR nodes that are not constant splats of up to 32-bits can be
7698     // lowered to VSX instructions under certain conditions.
7699     // Without VSX, there is no pattern more efficient than expanding the node.
7700     if (Subtarget.hasVSX() &&
7701         haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
7702                                         Subtarget.hasP8Vector()))
7703       return Op;
7704     return SDValue();
7705   }
7706 
7707   unsigned SplatBits = APSplatBits.getZExtValue();
7708   unsigned SplatUndef = APSplatUndef.getZExtValue();
7709   unsigned SplatSize = SplatBitSize / 8;
7710 
7711   // First, handle single instruction cases.
7712 
7713   // All zeros?
7714   if (SplatBits == 0) {
7715     // Canonicalize all zero vectors to be v4i32.
7716     if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
7717       SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
7718       Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
7719     }
7720     return Op;
7721   }
7722 
7723   // We have XXSPLTIB for constant splats one byte wide
7724   if (Subtarget.hasP9Vector() && SplatSize == 1) {
7725     // This is a splat of 1-byte elements with some elements potentially undef.
7726     // Rather than trying to match undef in the SDAG patterns, ensure that all
7727     // elements are the same constant.
7728     if (HasAnyUndefs || ISD::isBuildVectorAllOnes(BVN)) {
7729       SmallVector<SDValue, 16> Ops(16, DAG.getConstant(SplatBits,
7730                                                        dl, MVT::i32));
7731       SDValue NewBV = DAG.getBuildVector(MVT::v16i8, dl, Ops);
7732       if (Op.getValueType() != MVT::v16i8)
7733         return DAG.getBitcast(Op.getValueType(), NewBV);
7734       return NewBV;
7735     }
7736 
7737     // BuildVectorSDNode::isConstantSplat() is actually pretty smart. It'll
7738     // detect that constant splats like v8i16: 0xABAB are really just splats
7739     // of a 1-byte constant. In this case, we need to convert the node to a
7740     // splat of v16i8 and a bitcast.
7741     if (Op.getValueType() != MVT::v16i8)
7742       return DAG.getBitcast(Op.getValueType(),
7743                             DAG.getConstant(SplatBits, dl, MVT::v16i8));
7744 
7745     return Op;
7746   }
7747 
7748   // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
7749   int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >>
7750                     (32-SplatBitSize));
7751   if (SextVal >= -16 && SextVal <= 15)
7752     return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl);
7753 
7754   // Two instruction sequences.
7755 
7756   // If this value is in the range [-32,30] and is even, use:
7757   //     VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
7758   // If this value is in the range [17,31] and is odd, use:
7759   //     VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
7760   // If this value is in the range [-31,-17] and is odd, use:
7761   //     VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
7762   // Note the last two are three-instruction sequences.
7763   if (SextVal >= -32 && SextVal <= 31) {
7764     // To avoid having these optimizations undone by constant folding,
7765     // we convert to a pseudo that will be expanded later into one of
7766     // the above forms.
7767     SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
7768     EVT VT = (SplatSize == 1 ? MVT::v16i8 :
7769               (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
7770     SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
7771     SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
7772     if (VT == Op.getValueType())
7773       return RetVal;
7774     else
7775       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
7776   }
7777 
7778   // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
7779   // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
7780   // for fneg/fabs.
7781   if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
7782     // Make -1 and vspltisw -1:
7783     SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);
7784 
7785     // Make the VSLW intrinsic, computing 0x8000_0000.
7786     SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
7787                                    OnesV, DAG, dl);
7788 
7789     // xor by OnesV to invert it.
7790     Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
7791     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
7792   }
7793 
7794   // Check to see if this is a wide variety of vsplti*, binop self cases.
7795   static const signed char SplatCsts[] = {
7796     -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
7797     -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
7798   };
7799 
7800   for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
7801     // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
7802     // cases which are ambiguous (e.g. formation of 0x8000_0000).  'vsplti -1'
7803     int i = SplatCsts[idx];
7804 
7805     // Figure out what shift amount will be used by altivec if shifted by i in
7806     // this splat size.
7807     unsigned TypeShiftAmt = i & (SplatBitSize-1);
7808 
7809     // vsplti + shl self.
7810     if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
7811       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
7812       static const unsigned IIDs[] = { // Intrinsic to use for each size.
7813         Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
7814         Intrinsic::ppc_altivec_vslw
7815       };
7816       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
7817       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
7818     }
7819 
7820     // vsplti + srl self.
7821     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
7822       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
7823       static const unsigned IIDs[] = { // Intrinsic to use for each size.
7824         Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
7825         Intrinsic::ppc_altivec_vsrw
7826       };
7827       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
7828       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
7829     }
7830 
7831     // vsplti + sra self.
7832     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
7833       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
7834       static const unsigned IIDs[] = { // Intrinsic to use for each size.
7835         Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
7836         Intrinsic::ppc_altivec_vsraw
7837       };
7838       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
7839       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
7840     }
7841 
7842     // vsplti + rol self.
7843     if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
7844                          ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
7845       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
7846       static const unsigned IIDs[] = { // Intrinsic to use for each size.
7847         Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
7848         Intrinsic::ppc_altivec_vrlw
7849       };
7850       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
7851       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
7852     }
7853 
7854     // t = vsplti c, result = vsldoi t, t, 1
7855     if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
7856       SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
7857       unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
7858       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
7859     }
7860     // t = vsplti c, result = vsldoi t, t, 2
7861     if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
7862       SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
7863       unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
7864       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
7865     }
7866     // t = vsplti c, result = vsldoi t, t, 3
7867     if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
7868       SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
7869       unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
7870       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
7871     }
7872   }
7873 
7874   return SDValue();
7875 }
7876 
7877 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
7878 /// the specified operations to build the shuffle.
7879 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
7880                                       SDValue RHS, SelectionDAG &DAG,
7881                                       const SDLoc &dl) {
7882   unsigned OpNum = (PFEntry >> 26) & 0x0F;
7883   unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
7884   unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
7885 
7886   enum {
7887     OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
7888     OP_VMRGHW,
7889     OP_VMRGLW,
7890     OP_VSPLTISW0,
7891     OP_VSPLTISW1,
7892     OP_VSPLTISW2,
7893     OP_VSPLTISW3,
7894     OP_VSLDOI4,
7895     OP_VSLDOI8,
7896     OP_VSLDOI12
7897   };
7898 
7899   if (OpNum == OP_COPY) {
7900     if (LHSID == (1*9+2)*9+3) return LHS;
7901     assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
7902     return RHS;
7903   }
7904 
7905   SDValue OpLHS, OpRHS;
7906   OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
7907   OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
7908 
7909   int ShufIdxs[16];
7910   switch (OpNum) {
7911   default: llvm_unreachable("Unknown i32 permute!");
7912   case OP_VMRGHW:
7913     ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
7914     ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
7915     ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
7916     ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
7917     break;
7918   case OP_VMRGLW:
7919     ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
7920     ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
7921     ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
7922     ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
7923     break;
7924   case OP_VSPLTISW0:
7925     for (unsigned i = 0; i != 16; ++i)
7926       ShufIdxs[i] = (i&3)+0;
7927     break;
7928   case OP_VSPLTISW1:
7929     for (unsigned i = 0; i != 16; ++i)
7930       ShufIdxs[i] = (i&3)+4;
7931     break;
7932   case OP_VSPLTISW2:
7933     for (unsigned i = 0; i != 16; ++i)
7934       ShufIdxs[i] = (i&3)+8;
7935     break;
7936   case OP_VSPLTISW3:
7937     for (unsigned i = 0; i != 16; ++i)
7938       ShufIdxs[i] = (i&3)+12;
7939     break;
7940   case OP_VSLDOI4:
7941     return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
7942   case OP_VSLDOI8:
7943     return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
7944   case OP_VSLDOI12:
7945     return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
7946   }
7947   EVT VT = OpLHS.getValueType();
7948   OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
7949   OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
7950   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
7951   return DAG.getNode(ISD::BITCAST, dl, VT, T);
7952 }
7953 
7954 /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
7955 /// by the VINSERTB instruction introduced in ISA 3.0, else just return default
7956 /// SDValue.
7957 SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
7958                                            SelectionDAG &DAG) const {
7959   const unsigned BytesInVector = 16;
7960   bool IsLE = Subtarget.isLittleEndian();
7961   SDLoc dl(N);
7962   SDValue V1 = N->getOperand(0);
7963   SDValue V2 = N->getOperand(1);
7964   unsigned ShiftElts = 0, InsertAtByte = 0;
7965   bool Swap = false;
7966 
7967   // Shifts required to get the byte we want at element 7.
7968   unsigned LittleEndianShifts[] = {8, 7,  6,  5,  4,  3,  2,  1,
7969                                    0, 15, 14, 13, 12, 11, 10, 9};
7970   unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
7971                                 1, 2,  3,  4,  5,  6,  7,  8};
7972 
7973   ArrayRef<int> Mask = N->getMask();
7974   int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
7975 
7976   // For each mask element, find out if we're just inserting something
7977   // from V2 into V1 or vice versa.
7978   // Possible permutations inserting an element from V2 into V1:
7979   //   X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
7980   //   0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
7981   //   ...
7982   //   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
7983   // Inserting from V1 into V2 will be similar, except mask range will be
7984   // [16,31].
7985 
7986   bool FoundCandidate = false;
7987   // If both vector operands for the shuffle are the same vector, the mask
7988   // will contain only elements from the first one and the second one will be
7989   // undef.
7990   unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
7991   // Go through the mask of half-words to find an element that's being moved
7992   // from one vector to the other.
7993   for (unsigned i = 0; i < BytesInVector; ++i) {
7994     unsigned CurrentElement = Mask[i];
7995     // If 2nd operand is undefined, we should only look for element 7 in the
7996     // Mask.
7997     if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
7998       continue;
7999 
8000     bool OtherElementsInOrder = true;
8001     // Examine the other elements in the Mask to see if they're in original
8002     // order.
8003     for (unsigned j = 0; j < BytesInVector; ++j) {
8004       if (j == i)
8005         continue;
8006       // If CurrentElement is from V1 [0,15], then we the rest of the Mask to be
8007       // from V2 [16,31] and vice versa.  Unless the 2nd operand is undefined,
8008       // in which we always assume we're always picking from the 1st operand.
8009       int MaskOffset =
8010           (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
8011       if (Mask[j] != OriginalOrder[j] + MaskOffset) {
8012         OtherElementsInOrder = false;
8013         break;
8014       }
8015     }
8016     // If other elements are in original order, we record the number of shifts
8017     // we need to get the element we want into element 7. Also record which byte
8018     // in the vector we should insert into.
8019     if (OtherElementsInOrder) {
8020       // If 2nd operand is undefined, we assume no shifts and no swapping.
8021       if (V2.isUndef()) {
8022         ShiftElts = 0;
8023         Swap = false;
8024       } else {
8025         // Only need the last 4-bits for shifts because operands will be swapped if CurrentElement is >= 2^4.
8026         ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
8027                          : BigEndianShifts[CurrentElement & 0xF];
8028         Swap = CurrentElement < BytesInVector;
8029       }
8030       InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
8031       FoundCandidate = true;
8032       break;
8033     }
8034   }
8035 
8036   if (!FoundCandidate)
8037     return SDValue();
8038 
8039   // Candidate found, construct the proper SDAG sequence with VINSERTB,
8040   // optionally with VECSHL if shift is required.
8041   if (Swap)
8042     std::swap(V1, V2);
8043   if (V2.isUndef())
8044     V2 = V1;
8045   if (ShiftElts) {
8046     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
8047                               DAG.getConstant(ShiftElts, dl, MVT::i32));
8048     return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl,
8049                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
8050   }
8051   return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2,
8052                      DAG.getConstant(InsertAtByte, dl, MVT::i32));
8053 }
8054 
8055 /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled
8056 /// by the VINSERTH instruction introduced in ISA 3.0, else just return default
8057 /// SDValue.
8058 SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
8059                                            SelectionDAG &DAG) const {
8060   const unsigned NumHalfWords = 8;
8061   const unsigned BytesInVector = NumHalfWords * 2;
8062   // Check that the shuffle is on half-words.
8063   if (!isNByteElemShuffleMask(N, 2, 1))
8064     return SDValue();
8065 
8066   bool IsLE = Subtarget.isLittleEndian();
8067   SDLoc dl(N);
8068   SDValue V1 = N->getOperand(0);
8069   SDValue V2 = N->getOperand(1);
8070   unsigned ShiftElts = 0, InsertAtByte = 0;
8071   bool Swap = false;
8072 
8073   // Shifts required to get the half-word we want at element 3.
8074   unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
8075   unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};
8076 
8077   uint32_t Mask = 0;
8078   uint32_t OriginalOrderLow = 0x1234567;
8079   uint32_t OriginalOrderHigh = 0x89ABCDEF;
8080   // Now we look at mask elements 0,2,4,6,8,10,12,14.  Pack the mask into a
8081   // 32-bit space, only need 4-bit nibbles per element.
8082   for (unsigned i = 0; i < NumHalfWords; ++i) {
8083     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
8084     Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift);
8085   }
8086 
8087   // For each mask element, find out if we're just inserting something
8088   // from V2 into V1 or vice versa.  Possible permutations inserting an element
8089   // from V2 into V1:
8090   //   X, 1, 2, 3, 4, 5, 6, 7
8091   //   0, X, 2, 3, 4, 5, 6, 7
8092   //   0, 1, X, 3, 4, 5, 6, 7
8093   //   0, 1, 2, X, 4, 5, 6, 7
8094   //   0, 1, 2, 3, X, 5, 6, 7
8095   //   0, 1, 2, 3, 4, X, 6, 7
8096   //   0, 1, 2, 3, 4, 5, X, 7
8097   //   0, 1, 2, 3, 4, 5, 6, X
8098   // Inserting from V1 into V2 will be similar, except mask range will be [8,15].
8099 
8100   bool FoundCandidate = false;
8101   // Go through the mask of half-words to find an element that's being moved
8102   // from one vector to the other.
8103   for (unsigned i = 0; i < NumHalfWords; ++i) {
8104     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
8105     uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF;
8106     uint32_t MaskOtherElts = ~(0xF << MaskShift);
8107     uint32_t TargetOrder = 0x0;
8108 
8109     // If both vector operands for the shuffle are the same vector, the mask
8110     // will contain only elements from the first one and the second one will be
8111     // undef.
8112     if (V2.isUndef()) {
8113       ShiftElts = 0;
8114       unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
8115       TargetOrder = OriginalOrderLow;
8116       Swap = false;
8117       // Skip if not the correct element or mask of other elements don't equal
8118       // to our expected order.
8119       if (MaskOneElt == VINSERTHSrcElem &&
8120           (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
8121         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
8122         FoundCandidate = true;
8123         break;
8124       }
8125     } else { // If both operands are defined.
8126       // Target order is [8,15] if the current mask is between [0,7].
8127       TargetOrder =
8128           (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
8129       // Skip if mask of other elements don't equal our expected order.
8130       if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
8131         // We only need the last 3 bits for the number of shifts.
8132         ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
8133                          : BigEndianShifts[MaskOneElt & 0x7];
8134         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
8135         Swap = MaskOneElt < NumHalfWords;
8136         FoundCandidate = true;
8137         break;
8138       }
8139     }
8140   }
8141 
8142   if (!FoundCandidate)
8143     return SDValue();
8144 
8145   // Candidate found, construct the proper SDAG sequence with VINSERTH,
8146   // optionally with VECSHL if shift is required.
8147   if (Swap)
8148     std::swap(V1, V2);
8149   if (V2.isUndef())
8150     V2 = V1;
8151   SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
8152   if (ShiftElts) {
8153     // Double ShiftElts because we're left shifting on v16i8 type.
8154     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
8155                               DAG.getConstant(2 * ShiftElts, dl, MVT::i32));
8156     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl);
8157     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
8158                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
8159     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
8160   }
8161   SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
8162   SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
8163                             DAG.getConstant(InsertAtByte, dl, MVT::i32));
8164   return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
8165 }
8166 
8167 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
8168 /// is a shuffle we can handle in a single instruction, return it.  Otherwise,
8169 /// return the code it can be lowered into.  Worst case, it can always be
8170 /// lowered into a vperm.
8171 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
8172                                                SelectionDAG &DAG) const {
8173   SDLoc dl(Op);
8174   SDValue V1 = Op.getOperand(0);
8175   SDValue V2 = Op.getOperand(1);
8176   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8177   EVT VT = Op.getValueType();
8178   bool isLittleEndian = Subtarget.isLittleEndian();
8179 
8180   unsigned ShiftElts, InsertAtByte;
8181   bool Swap = false;
8182   if (Subtarget.hasP9Vector() &&
8183       PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
8184                            isLittleEndian)) {
8185     if (Swap)
8186       std::swap(V1, V2);
8187     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
8188     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2);
8189     if (ShiftElts) {
8190       SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2,
8191                                 DAG.getConstant(ShiftElts, dl, MVT::i32));
8192       SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl,
8193                                 DAG.getConstant(InsertAtByte, dl, MVT::i32));
8194       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
8195     }
8196     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2,
8197                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
8198     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
8199   }
8200 
8201   if (Subtarget.hasP9Altivec()) {
8202     SDValue NewISDNode;
8203     if ((NewISDNode = lowerToVINSERTH(SVOp, DAG)))
8204       return NewISDNode;
8205 
8206     if ((NewISDNode = lowerToVINSERTB(SVOp, DAG)))
8207       return NewISDNode;
8208   }
8209 
8210   if (Subtarget.hasVSX() &&
8211       PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
8212     if (Swap)
8213       std::swap(V1, V2);
8214     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
8215     SDValue Conv2 =
8216         DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2);
8217 
8218     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2,
8219                               DAG.getConstant(ShiftElts, dl, MVT::i32));
8220     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl);
8221   }
8222 
8223   if (Subtarget.hasVSX() &&
8224     PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
8225     if (Swap)
8226       std::swap(V1, V2);
8227     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
8228     SDValue Conv2 =
8229         DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2);
8230 
8231     SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2,
8232                               DAG.getConstant(ShiftElts, dl, MVT::i32));
8233     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI);
8234   }
8235 
8236   if (Subtarget.hasP9Vector()) {
8237      if (PPC::isXXBRHShuffleMask(SVOp)) {
8238       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
8239       SDValue ReveHWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v8i16, Conv);
8240       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord);
8241     } else if (PPC::isXXBRWShuffleMask(SVOp)) {
8242       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
8243       SDValue ReveWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v4i32, Conv);
8244       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord);
8245     } else if (PPC::isXXBRDShuffleMask(SVOp)) {
8246       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
8247       SDValue ReveDWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v2i64, Conv);
8248       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord);
8249     } else if (PPC::isXXBRQShuffleMask(SVOp)) {
8250       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1);
8251       SDValue ReveQWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v1i128, Conv);
8252       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord);
8253     }
8254   }
8255 
8256   if (Subtarget.hasVSX()) {
8257     if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
8258       int SplatIdx = PPC::getVSPLTImmediate(SVOp, 4, DAG);
8259 
8260       // If the source for the shuffle is a scalar_to_vector that came from a
8261       // 32-bit load, it will have used LXVWSX so we don't need to splat again.
8262       if (Subtarget.hasP9Vector() &&
8263           ((isLittleEndian && SplatIdx == 3) ||
8264            (!isLittleEndian && SplatIdx == 0))) {
8265         SDValue Src = V1.getOperand(0);
8266         if (Src.getOpcode() == ISD::SCALAR_TO_VECTOR &&
8267             Src.getOperand(0).getOpcode() == ISD::LOAD &&
8268             Src.getOperand(0).hasOneUse())
8269           return V1;
8270       }
8271       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
8272       SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
8273                                   DAG.getConstant(SplatIdx, dl, MVT::i32));
8274       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
8275     }
8276 
8277     // Left shifts of 8 bytes are actually swaps. Convert accordingly.
8278     if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
8279       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
8280       SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
8281       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
8282     }
8283   }
8284 
8285   if (Subtarget.hasQPX()) {
8286     if (VT.getVectorNumElements() != 4)
8287       return SDValue();
8288 
8289     if (V2.isUndef()) V2 = V1;
8290 
8291     int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp);
8292     if (AlignIdx != -1) {
8293       return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2,
8294                          DAG.getConstant(AlignIdx, dl, MVT::i32));
8295     } else if (SVOp->isSplat()) {
8296       int SplatIdx = SVOp->getSplatIndex();
8297       if (SplatIdx >= 4) {
8298         std::swap(V1, V2);
8299         SplatIdx -= 4;
8300       }
8301 
8302       return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1,
8303                          DAG.getConstant(SplatIdx, dl, MVT::i32));
8304     }
8305 
8306     // Lower this into a qvgpci/qvfperm pair.
8307 
8308     // Compute the qvgpci literal
8309     unsigned idx = 0;
8310     for (unsigned i = 0; i < 4; ++i) {
8311       int m = SVOp->getMaskElt(i);
8312       unsigned mm = m >= 0 ? (unsigned) m : i;
8313       idx |= mm << (3-i)*3;
8314     }
8315 
8316     SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64,
8317                              DAG.getConstant(idx, dl, MVT::i32));
8318     return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3);
8319   }
8320 
8321   // Cases that are handled by instructions that take permute immediates
8322   // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
8323   // selected by the instruction selector.
8324   if (V2.isUndef()) {
8325     if (PPC::isSplatShuffleMask(SVOp, 1) ||
8326         PPC::isSplatShuffleMask(SVOp, 2) ||
8327         PPC::isSplatShuffleMask(SVOp, 4) ||
8328         PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
8329         PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
8330         PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
8331         PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
8332         PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
8333         PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
8334         PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
8335         PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
8336         PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) ||
8337         (Subtarget.hasP8Altivec() && (
8338          PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) ||
8339          PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) ||
8340          PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) {
8341       return Op;
8342     }
8343   }
8344 
8345   // Altivec has a variety of "shuffle immediates" that take two vector inputs
8346   // and produce a fixed permutation.  If any of these match, do not lower to
8347   // VPERM.
8348   unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
8349   if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
8350       PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
8351       PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
8352       PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
8353       PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
8354       PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
8355       PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
8356       PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
8357       PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
8358       (Subtarget.hasP8Altivec() && (
8359        PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) ||
8360        PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) ||
8361        PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG))))
8362     return Op;
8363 
8364   // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
8365   // perfect shuffle table to emit an optimal matching sequence.
8366   ArrayRef<int> PermMask = SVOp->getMask();
8367 
8368   unsigned PFIndexes[4];
8369   bool isFourElementShuffle = true;
8370   for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
8371     unsigned EltNo = 8;   // Start out undef.
8372     for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
8373       if (PermMask[i*4+j] < 0)
8374         continue;   // Undef, ignore it.
8375 
8376       unsigned ByteSource = PermMask[i*4+j];
8377       if ((ByteSource & 3) != j) {
8378         isFourElementShuffle = false;
8379         break;
8380       }
8381 
8382       if (EltNo == 8) {
8383         EltNo = ByteSource/4;
8384       } else if (EltNo != ByteSource/4) {
8385         isFourElementShuffle = false;
8386         break;
8387       }
8388     }
8389     PFIndexes[i] = EltNo;
8390   }
8391 
8392   // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
8393   // perfect shuffle vector to determine if it is cost effective to do this as
8394   // discrete instructions, or whether we should use a vperm.
8395   // For now, we skip this for little endian until such time as we have a
8396   // little-endian perfect shuffle table.
8397   if (isFourElementShuffle && !isLittleEndian) {
8398     // Compute the index in the perfect shuffle table.
8399     unsigned PFTableIndex =
8400       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
8401 
8402     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
8403     unsigned Cost  = (PFEntry >> 30);
8404 
8405     // Determining when to avoid vperm is tricky.  Many things affect the cost
8406     // of vperm, particularly how many times the perm mask needs to be computed.
8407     // For example, if the perm mask can be hoisted out of a loop or is already
8408     // used (perhaps because there are multiple permutes with the same shuffle
8409     // mask?) the vperm has a cost of 1.  OTOH, hoisting the permute mask out of
8410     // the loop requires an extra register.
8411     //
8412     // As a compromise, we only emit discrete instructions if the shuffle can be
8413     // generated in 3 or fewer operations.  When we have loop information
8414     // available, if this block is within a loop, we should avoid using vperm
8415     // for 3-operation perms and use a constant pool load instead.
8416     if (Cost < 3)
8417       return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
8418   }
8419 
8420   // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
8421   // vector that will get spilled to the constant pool.
8422   if (V2.isUndef()) V2 = V1;
8423 
8424   // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
8425   // that it is in input element units, not in bytes.  Convert now.
8426 
8427   // For little endian, the order of the input vectors is reversed, and
8428   // the permutation mask is complemented with respect to 31.  This is
8429   // necessary to produce proper semantics with the big-endian-biased vperm
8430   // instruction.
8431   EVT EltVT = V1.getValueType().getVectorElementType();
8432   unsigned BytesPerElement = EltVT.getSizeInBits()/8;
8433 
8434   SmallVector<SDValue, 16> ResultMask;
8435   for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
8436     unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
8437 
8438     for (unsigned j = 0; j != BytesPerElement; ++j)
8439       if (isLittleEndian)
8440         ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
8441                                              dl, MVT::i32));
8442       else
8443         ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
8444                                              MVT::i32));
8445   }
8446 
8447   SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
8448   if (isLittleEndian)
8449     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
8450                        V2, V1, VPermMask);
8451   else
8452     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
8453                        V1, V2, VPermMask);
8454 }
8455 
8456 /// getVectorCompareInfo - Given an intrinsic, return false if it is not a
8457 /// vector comparison.  If it is, return true and fill in Opc/isDot with
8458 /// information about the intrinsic.
8459 static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
8460                                  bool &isDot, const PPCSubtarget &Subtarget) {
8461   unsigned IntrinsicID =
8462       cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
8463   CompareOpc = -1;
8464   isDot = false;
8465   switch (IntrinsicID) {
8466   default:
8467     return false;
8468   // Comparison predicates.
8469   case Intrinsic::ppc_altivec_vcmpbfp_p:
8470     CompareOpc = 966;
8471     isDot = true;
8472     break;
8473   case Intrinsic::ppc_altivec_vcmpeqfp_p:
8474     CompareOpc = 198;
8475     isDot = true;
8476     break;
8477   case Intrinsic::ppc_altivec_vcmpequb_p:
8478     CompareOpc = 6;
8479     isDot = true;
8480     break;
8481   case Intrinsic::ppc_altivec_vcmpequh_p:
8482     CompareOpc = 70;
8483     isDot = true;
8484     break;
8485   case Intrinsic::ppc_altivec_vcmpequw_p:
8486     CompareOpc = 134;
8487     isDot = true;
8488     break;
8489   case Intrinsic::ppc_altivec_vcmpequd_p:
8490     if (Subtarget.hasP8Altivec()) {
8491       CompareOpc = 199;
8492       isDot = true;
8493     } else
8494       return false;
8495     break;
8496   case Intrinsic::ppc_altivec_vcmpneb_p:
8497   case Intrinsic::ppc_altivec_vcmpneh_p:
8498   case Intrinsic::ppc_altivec_vcmpnew_p:
8499   case Intrinsic::ppc_altivec_vcmpnezb_p:
8500   case Intrinsic::ppc_altivec_vcmpnezh_p:
8501   case Intrinsic::ppc_altivec_vcmpnezw_p:
8502     if (Subtarget.hasP9Altivec()) {
8503       switch (IntrinsicID) {
8504       default:
8505         llvm_unreachable("Unknown comparison intrinsic.");
8506       case Intrinsic::ppc_altivec_vcmpneb_p:
8507         CompareOpc = 7;
8508         break;
8509       case Intrinsic::ppc_altivec_vcmpneh_p:
8510         CompareOpc = 71;
8511         break;
8512       case Intrinsic::ppc_altivec_vcmpnew_p:
8513         CompareOpc = 135;
8514         break;
8515       case Intrinsic::ppc_altivec_vcmpnezb_p:
8516         CompareOpc = 263;
8517         break;
8518       case Intrinsic::ppc_altivec_vcmpnezh_p:
8519         CompareOpc = 327;
8520         break;
8521       case Intrinsic::ppc_altivec_vcmpnezw_p:
8522         CompareOpc = 391;
8523         break;
8524       }
8525       isDot = true;
8526     } else
8527       return false;
8528     break;
8529   case Intrinsic::ppc_altivec_vcmpgefp_p:
8530     CompareOpc = 454;
8531     isDot = true;
8532     break;
8533   case Intrinsic::ppc_altivec_vcmpgtfp_p:
8534     CompareOpc = 710;
8535     isDot = true;
8536     break;
8537   case Intrinsic::ppc_altivec_vcmpgtsb_p:
8538     CompareOpc = 774;
8539     isDot = true;
8540     break;
8541   case Intrinsic::ppc_altivec_vcmpgtsh_p:
8542     CompareOpc = 838;
8543     isDot = true;
8544     break;
8545   case Intrinsic::ppc_altivec_vcmpgtsw_p:
8546     CompareOpc = 902;
8547     isDot = true;
8548     break;
8549   case Intrinsic::ppc_altivec_vcmpgtsd_p:
8550     if (Subtarget.hasP8Altivec()) {
8551       CompareOpc = 967;
8552       isDot = true;
8553     } else
8554       return false;
8555     break;
8556   case Intrinsic::ppc_altivec_vcmpgtub_p:
8557     CompareOpc = 518;
8558     isDot = true;
8559     break;
8560   case Intrinsic::ppc_altivec_vcmpgtuh_p:
8561     CompareOpc = 582;
8562     isDot = true;
8563     break;
8564   case Intrinsic::ppc_altivec_vcmpgtuw_p:
8565     CompareOpc = 646;
8566     isDot = true;
8567     break;
8568   case Intrinsic::ppc_altivec_vcmpgtud_p:
8569     if (Subtarget.hasP8Altivec()) {
8570       CompareOpc = 711;
8571       isDot = true;
8572     } else
8573       return false;
8574     break;
8575 
8576   // VSX predicate comparisons use the same infrastructure
8577   case Intrinsic::ppc_vsx_xvcmpeqdp_p:
8578   case Intrinsic::ppc_vsx_xvcmpgedp_p:
8579   case Intrinsic::ppc_vsx_xvcmpgtdp_p:
8580   case Intrinsic::ppc_vsx_xvcmpeqsp_p:
8581   case Intrinsic::ppc_vsx_xvcmpgesp_p:
8582   case Intrinsic::ppc_vsx_xvcmpgtsp_p:
8583     if (Subtarget.hasVSX()) {
8584       switch (IntrinsicID) {
8585       case Intrinsic::ppc_vsx_xvcmpeqdp_p:
8586         CompareOpc = 99;
8587         break;
8588       case Intrinsic::ppc_vsx_xvcmpgedp_p:
8589         CompareOpc = 115;
8590         break;
8591       case Intrinsic::ppc_vsx_xvcmpgtdp_p:
8592         CompareOpc = 107;
8593         break;
8594       case Intrinsic::ppc_vsx_xvcmpeqsp_p:
8595         CompareOpc = 67;
8596         break;
8597       case Intrinsic::ppc_vsx_xvcmpgesp_p:
8598         CompareOpc = 83;
8599         break;
8600       case Intrinsic::ppc_vsx_xvcmpgtsp_p:
8601         CompareOpc = 75;
8602         break;
8603       }
8604       isDot = true;
8605     } else
8606       return false;
8607     break;
8608 
8609   // Normal Comparisons.
8610   case Intrinsic::ppc_altivec_vcmpbfp:
8611     CompareOpc = 966;
8612     break;
8613   case Intrinsic::ppc_altivec_vcmpeqfp:
8614     CompareOpc = 198;
8615     break;
8616   case Intrinsic::ppc_altivec_vcmpequb:
8617     CompareOpc = 6;
8618     break;
8619   case Intrinsic::ppc_altivec_vcmpequh:
8620     CompareOpc = 70;
8621     break;
8622   case Intrinsic::ppc_altivec_vcmpequw:
8623     CompareOpc = 134;
8624     break;
8625   case Intrinsic::ppc_altivec_vcmpequd:
8626     if (Subtarget.hasP8Altivec())
8627       CompareOpc = 199;
8628     else
8629       return false;
8630     break;
8631   case Intrinsic::ppc_altivec_vcmpneb:
8632   case Intrinsic::ppc_altivec_vcmpneh:
8633   case Intrinsic::ppc_altivec_vcmpnew:
8634   case Intrinsic::ppc_altivec_vcmpnezb:
8635   case Intrinsic::ppc_altivec_vcmpnezh:
8636   case Intrinsic::ppc_altivec_vcmpnezw:
8637     if (Subtarget.hasP9Altivec())
8638       switch (IntrinsicID) {
8639       default:
8640         llvm_unreachable("Unknown comparison intrinsic.");
8641       case Intrinsic::ppc_altivec_vcmpneb:
8642         CompareOpc = 7;
8643         break;
8644       case Intrinsic::ppc_altivec_vcmpneh:
8645         CompareOpc = 71;
8646         break;
8647       case Intrinsic::ppc_altivec_vcmpnew:
8648         CompareOpc = 135;
8649         break;
8650       case Intrinsic::ppc_altivec_vcmpnezb:
8651         CompareOpc = 263;
8652         break;
8653       case Intrinsic::ppc_altivec_vcmpnezh:
8654         CompareOpc = 327;
8655         break;
8656       case Intrinsic::ppc_altivec_vcmpnezw:
8657         CompareOpc = 391;
8658         break;
8659       }
8660     else
8661       return false;
8662     break;
8663   case Intrinsic::ppc_altivec_vcmpgefp:
8664     CompareOpc = 454;
8665     break;
8666   case Intrinsic::ppc_altivec_vcmpgtfp:
8667     CompareOpc = 710;
8668     break;
8669   case Intrinsic::ppc_altivec_vcmpgtsb:
8670     CompareOpc = 774;
8671     break;
8672   case Intrinsic::ppc_altivec_vcmpgtsh:
8673     CompareOpc = 838;
8674     break;
8675   case Intrinsic::ppc_altivec_vcmpgtsw:
8676     CompareOpc = 902;
8677     break;
8678   case Intrinsic::ppc_altivec_vcmpgtsd:
8679     if (Subtarget.hasP8Altivec())
8680       CompareOpc = 967;
8681     else
8682       return false;
8683     break;
8684   case Intrinsic::ppc_altivec_vcmpgtub:
8685     CompareOpc = 518;
8686     break;
8687   case Intrinsic::ppc_altivec_vcmpgtuh:
8688     CompareOpc = 582;
8689     break;
8690   case Intrinsic::ppc_altivec_vcmpgtuw:
8691     CompareOpc = 646;
8692     break;
8693   case Intrinsic::ppc_altivec_vcmpgtud:
8694     if (Subtarget.hasP8Altivec())
8695       CompareOpc = 711;
8696     else
8697       return false;
8698     break;
8699   }
8700   return true;
8701 }
8702 
8703 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
8704 /// lower, do it, otherwise return null.
8705 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
8706                                                    SelectionDAG &DAG) const {
8707   unsigned IntrinsicID =
8708     cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
8709 
8710   SDLoc dl(Op);
8711 
8712   if (IntrinsicID == Intrinsic::thread_pointer) {
8713     // Reads the thread pointer register, used for __builtin_thread_pointer.
8714     if (Subtarget.isPPC64())
8715       return DAG.getRegister(PPC::X13, MVT::i64);
8716     return DAG.getRegister(PPC::R2, MVT::i32);
8717   }
8718 
8719   // We are looking for absolute values here.
8720   // The idea is to try to fit one of two patterns:
8721   //  max (a, (0-a))  OR  max ((0-a), a)
8722   if (Subtarget.hasP9Vector() &&
8723       (IntrinsicID == Intrinsic::ppc_altivec_vmaxsw ||
8724        IntrinsicID == Intrinsic::ppc_altivec_vmaxsh ||
8725        IntrinsicID == Intrinsic::ppc_altivec_vmaxsb)) {
8726     SDValue V1 = Op.getOperand(1);
8727     SDValue V2 = Op.getOperand(2);
8728     if (V1.getSimpleValueType() == V2.getSimpleValueType() &&
8729         (V1.getSimpleValueType() == MVT::v4i32 ||
8730          V1.getSimpleValueType() == MVT::v8i16 ||
8731          V1.getSimpleValueType() == MVT::v16i8)) {
8732       if ( V1.getOpcode() == ISD::SUB &&
8733            ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
8734            V1.getOperand(1) == V2 ) {
8735         // Generate the abs instruction with the operands
8736         return DAG.getNode(ISD::ABS, dl, V2.getValueType(),V2);
8737       }
8738 
8739       if ( V2.getOpcode() == ISD::SUB &&
8740            ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
8741            V2.getOperand(1) == V1 ) {
8742         // Generate the abs instruction with the operands
8743         return DAG.getNode(ISD::ABS, dl, V1.getValueType(),V1);
8744       }
8745     }
8746   }
8747 
8748   // If this is a lowered altivec predicate compare, CompareOpc is set to the
8749   // opcode number of the comparison.
8750   int CompareOpc;
8751   bool isDot;
8752   if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget))
8753     return SDValue();    // Don't custom lower most intrinsics.
8754 
8755   // If this is a non-dot comparison, make the VCMP node and we are done.
8756   if (!isDot) {
8757     SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
8758                               Op.getOperand(1), Op.getOperand(2),
8759                               DAG.getConstant(CompareOpc, dl, MVT::i32));
8760     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
8761   }
8762 
8763   // Create the PPCISD altivec 'dot' comparison node.
8764   SDValue Ops[] = {
8765     Op.getOperand(2),  // LHS
8766     Op.getOperand(3),  // RHS
8767     DAG.getConstant(CompareOpc, dl, MVT::i32)
8768   };
8769   EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
8770   SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);
8771 
8772   // Now that we have the comparison, emit a copy from the CR to a GPR.
8773   // This is flagged to the above dot comparison.
8774   SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
8775                                 DAG.getRegister(PPC::CR6, MVT::i32),
8776                                 CompNode.getValue(1));
8777 
8778   // Unpack the result based on how the target uses it.
8779   unsigned BitNo;   // Bit # of CR6.
8780   bool InvertBit;   // Invert result?
8781   switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
8782   default:  // Can't happen, don't crash on invalid number though.
8783   case 0:   // Return the value of the EQ bit of CR6.
8784     BitNo = 0; InvertBit = false;
8785     break;
8786   case 1:   // Return the inverted value of the EQ bit of CR6.
8787     BitNo = 0; InvertBit = true;
8788     break;
8789   case 2:   // Return the value of the LT bit of CR6.
8790     BitNo = 2; InvertBit = false;
8791     break;
8792   case 3:   // Return the inverted value of the LT bit of CR6.
8793     BitNo = 2; InvertBit = true;
8794     break;
8795   }
8796 
8797   // Shift the bit into the low position.
8798   Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
8799                       DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
8800   // Isolate the bit.
8801   Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
8802                       DAG.getConstant(1, dl, MVT::i32));
8803 
8804   // If we are supposed to, toggle the bit.
8805   if (InvertBit)
8806     Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
8807                         DAG.getConstant(1, dl, MVT::i32));
8808   return Flags;
8809 }
8810 
8811 SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
8812                                                SelectionDAG &DAG) const {
8813   // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to
8814   // the beginning of the argument list.
8815   int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
8816   SDLoc DL(Op);
8817   switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) {
8818   case Intrinsic::ppc_cfence: {
8819     assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
8820     assert(Subtarget.isPPC64() && "Only 64-bit is supported for now.");
8821     return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other,
8822                                       DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
8823                                                   Op.getOperand(ArgStart + 1)),
8824                                       Op.getOperand(0)),
8825                    0);
8826   }
8827   default:
8828     break;
8829   }
8830   return SDValue();
8831 }
8832 
8833 SDValue PPCTargetLowering::LowerREM(SDValue Op, SelectionDAG &DAG) const {
8834   // Check for a DIV with the same operands as this REM.
8835   for (auto UI : Op.getOperand(1)->uses()) {
8836     if ((Op.getOpcode() == ISD::SREM && UI->getOpcode() == ISD::SDIV) ||
8837         (Op.getOpcode() == ISD::UREM && UI->getOpcode() == ISD::UDIV))
8838       if (UI->getOperand(0) == Op.getOperand(0) &&
8839           UI->getOperand(1) == Op.getOperand(1))
8840         return SDValue();
8841   }
8842   return Op;
8843 }
8844 
8845 // Lower scalar BSWAP64 to xxbrd.
8846 SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const {
8847   SDLoc dl(Op);
8848   // MTVSRDD
8849   Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0),
8850                    Op.getOperand(0));
8851   // XXBRD
8852   Op = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v2i64, Op);
8853   // MFVSRD
8854   int VectorIndex = 0;
8855   if (Subtarget.isLittleEndian())
8856     VectorIndex = 1;
8857   Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op,
8858                    DAG.getTargetConstant(VectorIndex, dl, MVT::i32));
8859   return Op;
8860 }
8861 
8862 // ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be
8863 // compared to a value that is atomically loaded (atomic loads zero-extend).
8864 SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op,
8865                                                 SelectionDAG &DAG) const {
8866   assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP &&
8867          "Expecting an atomic compare-and-swap here.");
8868   SDLoc dl(Op);
8869   auto *AtomicNode = cast<AtomicSDNode>(Op.getNode());
8870   EVT MemVT = AtomicNode->getMemoryVT();
8871   if (MemVT.getSizeInBits() >= 32)
8872     return Op;
8873 
8874   SDValue CmpOp = Op.getOperand(2);
8875   // If this is already correctly zero-extended, leave it alone.
8876   auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits());
8877   if (DAG.MaskedValueIsZero(CmpOp, HighBits))
8878     return Op;
8879 
8880   // Clear the high bits of the compare operand.
8881   unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1;
8882   SDValue NewCmpOp =
8883     DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp,
8884                 DAG.getConstant(MaskVal, dl, MVT::i32));
8885 
8886   // Replace the existing compare operand with the properly zero-extended one.
8887   SmallVector<SDValue, 4> Ops;
8888   for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
8889     Ops.push_back(AtomicNode->getOperand(i));
8890   Ops[2] = NewCmpOp;
8891   MachineMemOperand *MMO = AtomicNode->getMemOperand();
8892   SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other);
8893   auto NodeTy =
8894     (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16;
8895   return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO);
8896 }
8897 
8898 SDValue PPCTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
8899                                                   SelectionDAG &DAG) const {
8900   SDLoc dl(Op);
8901   // For v2i64 (VSX), we can pattern patch the v2i32 case (using fp <-> int
8902   // instructions), but for smaller types, we need to first extend up to v2i32
8903   // before doing going farther.
8904   if (Op.getValueType() == MVT::v2i64) {
8905     EVT ExtVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
8906     if (ExtVT != MVT::v2i32) {
8907       Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0));
8908       Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, Op,
8909                        DAG.getValueType(EVT::getVectorVT(*DAG.getContext(),
8910                                         ExtVT.getVectorElementType(), 4)));
8911       Op = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Op);
8912       Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v2i64, Op,
8913                        DAG.getValueType(MVT::v2i32));
8914     }
8915 
8916     return Op;
8917   }
8918 
8919   return SDValue();
8920 }
8921 
8922 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
8923                                                  SelectionDAG &DAG) const {
8924   SDLoc dl(Op);
8925   // Create a stack slot that is 16-byte aligned.
8926   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
8927   int FrameIdx = MFI.CreateStackObject(16, 16, false);
8928   EVT PtrVT = getPointerTy(DAG.getDataLayout());
8929   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8930 
8931   // Store the input value into Value#0 of the stack slot.
8932   SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
8933                                MachinePointerInfo());
8934   // Load it out.
8935   return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
8936 }
8937 
8938 SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
8939                                                   SelectionDAG &DAG) const {
8940   assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
8941          "Should only be called for ISD::INSERT_VECTOR_ELT");
8942 
8943   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));
8944   // We have legal lowering for constant indices but not for variable ones.
8945   if (!C)
8946     return SDValue();
8947 
8948   EVT VT = Op.getValueType();
8949   SDLoc dl(Op);
8950   SDValue V1 = Op.getOperand(0);
8951   SDValue V2 = Op.getOperand(1);
8952   // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types.
8953   if (VT == MVT::v8i16 || VT == MVT::v16i8) {
8954     SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2);
8955     unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8;
8956     unsigned InsertAtElement = C->getZExtValue();
8957     unsigned InsertAtByte = InsertAtElement * BytesInEachElement;
8958     if (Subtarget.isLittleEndian()) {
8959       InsertAtByte = (16 - BytesInEachElement) - InsertAtByte;
8960     }
8961     return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz,
8962                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
8963   }
8964   return Op;
8965 }
8966 
8967 SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
8968                                                    SelectionDAG &DAG) const {
8969   SDLoc dl(Op);
8970   SDNode *N = Op.getNode();
8971 
8972   assert(N->getOperand(0).getValueType() == MVT::v4i1 &&
8973          "Unknown extract_vector_elt type");
8974 
8975   SDValue Value = N->getOperand(0);
8976 
8977   // The first part of this is like the store lowering except that we don't
8978   // need to track the chain.
8979 
8980   // The values are now known to be -1 (false) or 1 (true). To convert this
8981   // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
8982   // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
8983   Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
8984 
8985   // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
8986   // understand how to form the extending load.
8987   SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
8988 
8989   Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
8990 
8991   // Now convert to an integer and store.
8992   Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
8993     DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
8994     Value);
8995 
8996   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
8997   int FrameIdx = MFI.CreateStackObject(16, 16, false);
8998   MachinePointerInfo PtrInfo =
8999       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
9000   EVT PtrVT = getPointerTy(DAG.getDataLayout());
9001   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
9002 
9003   SDValue StoreChain = DAG.getEntryNode();
9004   SDValue Ops[] = {StoreChain,
9005                    DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
9006                    Value, FIdx};
9007   SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);
9008 
9009   StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
9010     dl, VTs, Ops, MVT::v4i32, PtrInfo);
9011 
9012   // Extract the value requested.
9013   unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
9014   SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
9015   Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
9016 
9017   SDValue IntVal =
9018       DAG.getLoad(MVT::i32, dl, StoreChain, Idx, PtrInfo.getWithOffset(Offset));
9019 
9020   if (!Subtarget.useCRBits())
9021     return IntVal;
9022 
9023   return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal);
9024 }
9025 
9026 /// Lowering for QPX v4i1 loads
9027 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
9028                                            SelectionDAG &DAG) const {
9029   SDLoc dl(Op);
9030   LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
9031   SDValue LoadChain = LN->getChain();
9032   SDValue BasePtr = LN->getBasePtr();
9033 
9034   if (Op.getValueType() == MVT::v4f64 ||
9035       Op.getValueType() == MVT::v4f32) {
9036     EVT MemVT = LN->getMemoryVT();
9037     unsigned Alignment = LN->getAlignment();
9038 
9039     // If this load is properly aligned, then it is legal.
9040     if (Alignment >= MemVT.getStoreSize())
9041       return Op;
9042 
9043     EVT ScalarVT = Op.getValueType().getScalarType(),
9044         ScalarMemVT = MemVT.getScalarType();
9045     unsigned Stride = ScalarMemVT.getStoreSize();
9046 
9047     SDValue Vals[4], LoadChains[4];
9048     for (unsigned Idx = 0; Idx < 4; ++Idx) {
9049       SDValue Load;
9050       if (ScalarVT != ScalarMemVT)
9051         Load = DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain,
9052                               BasePtr,
9053                               LN->getPointerInfo().getWithOffset(Idx * Stride),
9054                               ScalarMemVT, MinAlign(Alignment, Idx * Stride),
9055                               LN->getMemOperand()->getFlags(), LN->getAAInfo());
9056       else
9057         Load = DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr,
9058                            LN->getPointerInfo().getWithOffset(Idx * Stride),
9059                            MinAlign(Alignment, Idx * Stride),
9060                            LN->getMemOperand()->getFlags(), LN->getAAInfo());
9061 
9062       if (Idx == 0 && LN->isIndexed()) {
9063         assert(LN->getAddressingMode() == ISD::PRE_INC &&
9064                "Unknown addressing mode on vector load");
9065         Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(),
9066                                   LN->getAddressingMode());
9067       }
9068 
9069       Vals[Idx] = Load;
9070       LoadChains[Idx] = Load.getValue(1);
9071 
9072       BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
9073                             DAG.getConstant(Stride, dl,
9074                                             BasePtr.getValueType()));
9075     }
9076 
9077     SDValue TF =  DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
9078     SDValue Value = DAG.getBuildVector(Op.getValueType(), dl, Vals);
9079 
9080     if (LN->isIndexed()) {
9081       SDValue RetOps[] = { Value, Vals[0].getValue(1), TF };
9082       return DAG.getMergeValues(RetOps, dl);
9083     }
9084 
9085     SDValue RetOps[] = { Value, TF };
9086     return DAG.getMergeValues(RetOps, dl);
9087   }
9088 
9089   assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower");
9090   assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported");
9091 
9092   // To lower v4i1 from a byte array, we load the byte elements of the
9093   // vector and then reuse the BUILD_VECTOR logic.
9094 
9095   SDValue VectElmts[4], VectElmtChains[4];
9096   for (unsigned i = 0; i < 4; ++i) {
9097     SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
9098     Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);
9099 
9100     VectElmts[i] = DAG.getExtLoad(
9101         ISD::EXTLOAD, dl, MVT::i32, LoadChain, Idx,
9102         LN->getPointerInfo().getWithOffset(i), MVT::i8,
9103         /* Alignment = */ 1, LN->getMemOperand()->getFlags(), LN->getAAInfo());
9104     VectElmtChains[i] = VectElmts[i].getValue(1);
9105   }
9106 
9107   LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains);
9108   SDValue Value = DAG.getBuildVector(MVT::v4i1, dl, VectElmts);
9109 
9110   SDValue RVals[] = { Value, LoadChain };
9111   return DAG.getMergeValues(RVals, dl);
9112 }
9113 
9114 /// Lowering for QPX v4i1 stores
9115 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
9116                                             SelectionDAG &DAG) const {
9117   SDLoc dl(Op);
9118   StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
9119   SDValue StoreChain = SN->getChain();
9120   SDValue BasePtr = SN->getBasePtr();
9121   SDValue Value = SN->getValue();
9122 
9123   if (Value.getValueType() == MVT::v4f64 ||
9124       Value.getValueType() == MVT::v4f32) {
9125     EVT MemVT = SN->getMemoryVT();
9126     unsigned Alignment = SN->getAlignment();
9127 
9128     // If this store is properly aligned, then it is legal.
9129     if (Alignment >= MemVT.getStoreSize())
9130       return Op;
9131 
9132     EVT ScalarVT = Value.getValueType().getScalarType(),
9133         ScalarMemVT = MemVT.getScalarType();
9134     unsigned Stride = ScalarMemVT.getStoreSize();
9135 
9136     SDValue Stores[4];
9137     for (unsigned Idx = 0; Idx < 4; ++Idx) {
9138       SDValue Ex = DAG.getNode(
9139           ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value,
9140           DAG.getConstant(Idx, dl, getVectorIdxTy(DAG.getDataLayout())));
9141       SDValue Store;
9142       if (ScalarVT != ScalarMemVT)
9143         Store =
9144             DAG.getTruncStore(StoreChain, dl, Ex, BasePtr,
9145                               SN->getPointerInfo().getWithOffset(Idx * Stride),
9146                               ScalarMemVT, MinAlign(Alignment, Idx * Stride),
9147                               SN->getMemOperand()->getFlags(), SN->getAAInfo());
9148       else
9149         Store = DAG.getStore(StoreChain, dl, Ex, BasePtr,
9150                              SN->getPointerInfo().getWithOffset(Idx * Stride),
9151                              MinAlign(Alignment, Idx * Stride),
9152                              SN->getMemOperand()->getFlags(), SN->getAAInfo());
9153 
9154       if (Idx == 0 && SN->isIndexed()) {
9155         assert(SN->getAddressingMode() == ISD::PRE_INC &&
9156                "Unknown addressing mode on vector store");
9157         Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(),
9158                                     SN->getAddressingMode());
9159       }
9160 
9161       BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
9162                             DAG.getConstant(Stride, dl,
9163                                             BasePtr.getValueType()));
9164       Stores[Idx] = Store;
9165     }
9166 
9167     SDValue TF =  DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
9168 
9169     if (SN->isIndexed()) {
9170       SDValue RetOps[] = { TF, Stores[0].getValue(1) };
9171       return DAG.getMergeValues(RetOps, dl);
9172     }
9173 
9174     return TF;
9175   }
9176 
9177   assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported");
9178   assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower");
9179 
9180   // The values are now known to be -1 (false) or 1 (true). To convert this
9181   // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
9182   // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
9183   Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
9184 
9185   // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
9186   // understand how to form the extending load.
9187   SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
9188 
9189   Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
9190 
9191   // Now convert to an integer and store.
9192   Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
9193     DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
9194     Value);
9195 
9196   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
9197   int FrameIdx = MFI.CreateStackObject(16, 16, false);
9198   MachinePointerInfo PtrInfo =
9199       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
9200   EVT PtrVT = getPointerTy(DAG.getDataLayout());
9201   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
9202 
9203   SDValue Ops[] = {StoreChain,
9204                    DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
9205                    Value, FIdx};
9206   SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);
9207 
9208   StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
9209     dl, VTs, Ops, MVT::v4i32, PtrInfo);
9210 
9211   // Move data into the byte array.
9212   SDValue Loads[4], LoadChains[4];
9213   for (unsigned i = 0; i < 4; ++i) {
9214     unsigned Offset = 4*i;
9215     SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
9216     Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
9217 
9218     Loads[i] = DAG.getLoad(MVT::i32, dl, StoreChain, Idx,
9219                            PtrInfo.getWithOffset(Offset));
9220     LoadChains[i] = Loads[i].getValue(1);
9221   }
9222 
9223   StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
9224 
9225   SDValue Stores[4];
9226   for (unsigned i = 0; i < 4; ++i) {
9227     SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
9228     Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);
9229 
9230     Stores[i] = DAG.getTruncStore(
9231         StoreChain, dl, Loads[i], Idx, SN->getPointerInfo().getWithOffset(i),
9232         MVT::i8, /* Alignment = */ 1, SN->getMemOperand()->getFlags(),
9233         SN->getAAInfo());
9234   }
9235 
9236   StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
9237 
9238   return StoreChain;
9239 }
9240 
9241 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
9242   SDLoc dl(Op);
9243   if (Op.getValueType() == MVT::v4i32) {
9244     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
9245 
9246     SDValue Zero  = BuildSplatI(  0, 1, MVT::v4i32, DAG, dl);
9247     SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);//+16 as shift amt.
9248 
9249     SDValue RHSSwap =   // = vrlw RHS, 16
9250       BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
9251 
9252     // Shrinkify inputs to v8i16.
9253     LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
9254     RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
9255     RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);
9256 
9257     // Low parts multiplied together, generating 32-bit results (we ignore the
9258     // top parts).
9259     SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
9260                                         LHS, RHS, DAG, dl, MVT::v4i32);
9261 
9262     SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
9263                                       LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
9264     // Shift the high parts up 16 bits.
9265     HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
9266                               Neg16, DAG, dl);
9267     return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
9268   } else if (Op.getValueType() == MVT::v8i16) {
9269     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
9270 
9271     SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl);
9272 
9273     return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
9274                             LHS, RHS, Zero, DAG, dl);
9275   } else if (Op.getValueType() == MVT::v16i8) {
9276     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
9277     bool isLittleEndian = Subtarget.isLittleEndian();
9278 
9279     // Multiply the even 8-bit parts, producing 16-bit sums.
9280     SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
9281                                            LHS, RHS, DAG, dl, MVT::v8i16);
9282     EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);
9283 
9284     // Multiply the odd 8-bit parts, producing 16-bit sums.
9285     SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
9286                                           LHS, RHS, DAG, dl, MVT::v8i16);
9287     OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);
9288 
9289     // Merge the results together.  Because vmuleub and vmuloub are
9290     // instructions with a big-endian bias, we must reverse the
9291     // element numbering and reverse the meaning of "odd" and "even"
9292     // when generating little endian code.
9293     int Ops[16];
9294     for (unsigned i = 0; i != 8; ++i) {
9295       if (isLittleEndian) {
9296         Ops[i*2  ] = 2*i;
9297         Ops[i*2+1] = 2*i+16;
9298       } else {
9299         Ops[i*2  ] = 2*i+1;
9300         Ops[i*2+1] = 2*i+1+16;
9301       }
9302     }
9303     if (isLittleEndian)
9304       return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
9305     else
9306       return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
9307   } else {
9308     llvm_unreachable("Unknown mul to lower!");
9309   }
9310 }
9311 
9312 /// LowerOperation - Provide custom lowering hooks for some operations.
9313 ///
9314 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
9315   switch (Op.getOpcode()) {
9316   default: llvm_unreachable("Wasn't expecting to be able to lower this!");
9317   case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
9318   case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
9319   case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
9320   case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
9321   case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
9322   case ISD::SETCC:              return LowerSETCC(Op, DAG);
9323   case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
9324   case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
9325   case ISD::VASTART:
9326     return LowerVASTART(Op, DAG);
9327 
9328   case ISD::VAARG:
9329     return LowerVAARG(Op, DAG);
9330 
9331   case ISD::VACOPY:
9332     return LowerVACOPY(Op, DAG);
9333 
9334   case ISD::STACKRESTORE:
9335     return LowerSTACKRESTORE(Op, DAG);
9336 
9337   case ISD::DYNAMIC_STACKALLOC:
9338     return LowerDYNAMIC_STACKALLOC(Op, DAG);
9339 
9340   case ISD::GET_DYNAMIC_AREA_OFFSET:
9341     return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
9342 
9343   case ISD::EH_DWARF_CFA:
9344     return LowerEH_DWARF_CFA(Op, DAG);
9345 
9346   case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
9347   case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);
9348 
9349   case ISD::LOAD:               return LowerLOAD(Op, DAG);
9350   case ISD::STORE:              return LowerSTORE(Op, DAG);
9351   case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
9352   case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
9353   case ISD::FP_TO_UINT:
9354   case ISD::FP_TO_SINT:         return LowerFP_TO_INT(Op, DAG,
9355                                                       SDLoc(Op));
9356   case ISD::UINT_TO_FP:
9357   case ISD::SINT_TO_FP:         return LowerINT_TO_FP(Op, DAG);
9358   case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);
9359 
9360   // Lower 64-bit shifts.
9361   case ISD::SHL_PARTS:          return LowerSHL_PARTS(Op, DAG);
9362   case ISD::SRL_PARTS:          return LowerSRL_PARTS(Op, DAG);
9363   case ISD::SRA_PARTS:          return LowerSRA_PARTS(Op, DAG);
9364 
9365   // Vector-related lowering.
9366   case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
9367   case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
9368   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
9369   case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
9370   case ISD::SIGN_EXTEND_INREG:  return LowerSIGN_EXTEND_INREG(Op, DAG);
9371   case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
9372   case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
9373   case ISD::MUL:                return LowerMUL(Op, DAG);
9374 
9375   // For counter-based loop handling.
9376   case ISD::INTRINSIC_W_CHAIN:  return SDValue();
9377 
9378   // Frame & Return address.
9379   case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
9380   case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
9381 
9382   case ISD::INTRINSIC_VOID:
9383     return LowerINTRINSIC_VOID(Op, DAG);
9384   case ISD::SREM:
9385   case ISD::UREM:
9386     return LowerREM(Op, DAG);
9387   case ISD::BSWAP:
9388     return LowerBSWAP(Op, DAG);
9389   case ISD::ATOMIC_CMP_SWAP:
9390     return LowerATOMIC_CMP_SWAP(Op, DAG);
9391   }
9392 }
9393 
9394 void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
9395                                            SmallVectorImpl<SDValue>&Results,
9396                                            SelectionDAG &DAG) const {
9397   SDLoc dl(N);
9398   switch (N->getOpcode()) {
9399   default:
9400     llvm_unreachable("Do not know how to custom type legalize this operation!");
9401   case ISD::READCYCLECOUNTER: {
9402     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
9403     SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0));
9404 
9405     Results.push_back(RTB);
9406     Results.push_back(RTB.getValue(1));
9407     Results.push_back(RTB.getValue(2));
9408     break;
9409   }
9410   case ISD::INTRINSIC_W_CHAIN: {
9411     if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
9412         Intrinsic::ppc_is_decremented_ctr_nonzero)
9413       break;
9414 
9415     assert(N->getValueType(0) == MVT::i1 &&
9416            "Unexpected result type for CTR decrement intrinsic");
9417     EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
9418                                  N->getValueType(0));
9419     SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
9420     SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
9421                                  N->getOperand(1));
9422 
9423     Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt));
9424     Results.push_back(NewInt.getValue(1));
9425     break;
9426   }
9427   case ISD::VAARG: {
9428     if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
9429       return;
9430 
9431     EVT VT = N->getValueType(0);
9432 
9433     if (VT == MVT::i64) {
9434       SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);
9435 
9436       Results.push_back(NewNode);
9437       Results.push_back(NewNode.getValue(1));
9438     }
9439     return;
9440   }
9441   case ISD::FP_ROUND_INREG: {
9442     assert(N->getValueType(0) == MVT::ppcf128);
9443     assert(N->getOperand(0).getValueType() == MVT::ppcf128);
9444     SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
9445                              MVT::f64, N->getOperand(0),
9446                              DAG.getIntPtrConstant(0, dl));
9447     SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
9448                              MVT::f64, N->getOperand(0),
9449                              DAG.getIntPtrConstant(1, dl));
9450 
9451     // Add the two halves of the long double in round-to-zero mode.
9452     SDValue FPreg = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi);
9453 
9454     // We know the low half is about to be thrown away, so just use something
9455     // convenient.
9456     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128,
9457                                 FPreg, FPreg));
9458     return;
9459   }
9460   case ISD::FP_TO_SINT:
9461   case ISD::FP_TO_UINT:
9462     // LowerFP_TO_INT() can only handle f32 and f64.
9463     if (N->getOperand(0).getValueType() == MVT::ppcf128)
9464       return;
9465     Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
9466     return;
9467   }
9468 }
9469 
9470 //===----------------------------------------------------------------------===//
9471 //  Other Lowering Code
9472 //===----------------------------------------------------------------------===//
9473 
9474 static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
9475   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
9476   Function *Func = Intrinsic::getDeclaration(M, Id);
9477   return Builder.CreateCall(Func, {});
9478 }
9479 
9480 // The mappings for emitLeading/TrailingFence is taken from
9481 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
9482 Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
9483                                                  Instruction *Inst,
9484                                                  AtomicOrdering Ord) const {
9485   if (Ord == AtomicOrdering::SequentiallyConsistent)
9486     return callIntrinsic(Builder, Intrinsic::ppc_sync);
9487   if (isReleaseOrStronger(Ord))
9488     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
9489   return nullptr;
9490 }
9491 
9492 Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
9493                                                   Instruction *Inst,
9494                                                   AtomicOrdering Ord) const {
9495   if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
9496     // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
9497     // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
9498     // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
9499     if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
9500       return Builder.CreateCall(
9501           Intrinsic::getDeclaration(
9502               Builder.GetInsertBlock()->getParent()->getParent(),
9503               Intrinsic::ppc_cfence, {Inst->getType()}),
9504           {Inst});
9505     // FIXME: Can use isync for rmw operation.
9506     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
9507   }
9508   return nullptr;
9509 }
9510 
9511 MachineBasicBlock *
9512 PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
9513                                     unsigned AtomicSize,
9514                                     unsigned BinOpcode,
9515                                     unsigned CmpOpcode,
9516                                     unsigned CmpPred) const {
9517   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
9518   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
9519 
9520   auto LoadMnemonic = PPC::LDARX;
9521   auto StoreMnemonic = PPC::STDCX;
9522   switch (AtomicSize) {
9523   default:
9524     llvm_unreachable("Unexpected size of atomic entity");
9525   case 1:
9526     LoadMnemonic = PPC::LBARX;
9527     StoreMnemonic = PPC::STBCX;
9528     assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
9529     break;
9530   case 2:
9531     LoadMnemonic = PPC::LHARX;
9532     StoreMnemonic = PPC::STHCX;
9533     assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
9534     break;
9535   case 4:
9536     LoadMnemonic = PPC::LWARX;
9537     StoreMnemonic = PPC::STWCX;
9538     break;
9539   case 8:
9540     LoadMnemonic = PPC::LDARX;
9541     StoreMnemonic = PPC::STDCX;
9542     break;
9543   }
9544 
9545   const BasicBlock *LLVM_BB = BB->getBasicBlock();
9546   MachineFunction *F = BB->getParent();
9547   MachineFunction::iterator It = ++BB->getIterator();
9548 
9549   unsigned dest = MI.getOperand(0).getReg();
9550   unsigned ptrA = MI.getOperand(1).getReg();
9551   unsigned ptrB = MI.getOperand(2).getReg();
9552   unsigned incr = MI.getOperand(3).getReg();
9553   DebugLoc dl = MI.getDebugLoc();
9554 
9555   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
9556   MachineBasicBlock *loop2MBB =
9557     CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
9558   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
9559   F->insert(It, loopMBB);
9560   if (CmpOpcode)
9561     F->insert(It, loop2MBB);
9562   F->insert(It, exitMBB);
9563   exitMBB->splice(exitMBB->begin(), BB,
9564                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
9565   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
9566 
9567   MachineRegisterInfo &RegInfo = F->getRegInfo();
9568   unsigned TmpReg = (!BinOpcode) ? incr :
9569     RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass
9570                                            : &PPC::GPRCRegClass);
9571 
9572   //  thisMBB:
9573   //   ...
9574   //   fallthrough --> loopMBB
9575   BB->addSuccessor(loopMBB);
9576 
9577   //  loopMBB:
9578   //   l[wd]arx dest, ptr
9579   //   add r0, dest, incr
9580   //   st[wd]cx. r0, ptr
9581   //   bne- loopMBB
9582   //   fallthrough --> exitMBB
9583 
9584   // For max/min...
9585   //  loopMBB:
9586   //   l[wd]arx dest, ptr
9587   //   cmpl?[wd] incr, dest
9588   //   bgt exitMBB
9589   //  loop2MBB:
9590   //   st[wd]cx. dest, ptr
9591   //   bne- loopMBB
9592   //   fallthrough --> exitMBB
9593 
9594   BB = loopMBB;
9595   BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
9596     .addReg(ptrA).addReg(ptrB);
9597   if (BinOpcode)
9598     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
9599   if (CmpOpcode) {
9600     // Signed comparisons of byte or halfword values must be sign-extended.
9601     if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
9602       unsigned ExtReg =  RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
9603       BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
9604               ExtReg).addReg(dest);
9605       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
9606         .addReg(incr).addReg(ExtReg);
9607     } else
9608       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
9609         .addReg(incr).addReg(dest);
9610 
9611     BuildMI(BB, dl, TII->get(PPC::BCC))
9612       .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
9613     BB->addSuccessor(loop2MBB);
9614     BB->addSuccessor(exitMBB);
9615     BB = loop2MBB;
9616   }
9617   BuildMI(BB, dl, TII->get(StoreMnemonic))
9618     .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
9619   BuildMI(BB, dl, TII->get(PPC::BCC))
9620     .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
9621   BB->addSuccessor(loopMBB);
9622   BB->addSuccessor(exitMBB);
9623 
9624   //  exitMBB:
9625   //   ...
9626   BB = exitMBB;
9627   return BB;
9628 }
9629 
9630 MachineBasicBlock *
9631 PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr &MI,
9632                                             MachineBasicBlock *BB,
9633                                             bool is8bit, // operation
9634                                             unsigned BinOpcode,
9635                                             unsigned CmpOpcode,
9636                                             unsigned CmpPred) const {
9637   // If we support part-word atomic mnemonics, just use them
9638   if (Subtarget.hasPartwordAtomics())
9639     return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode,
9640                             CmpOpcode, CmpPred);
9641 
9642   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
9643   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
9644   // In 64 bit mode we have to use 64 bits for addresses, even though the
9645   // lwarx/stwcx are 32 bits.  With the 32-bit atomics we can use address
9646   // registers without caring whether they're 32 or 64, but here we're
9647   // doing actual arithmetic on the addresses.
9648   bool is64bit = Subtarget.isPPC64();
9649   bool isLittleEndian = Subtarget.isLittleEndian();
9650   unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
9651 
9652   const BasicBlock *LLVM_BB = BB->getBasicBlock();
9653   MachineFunction *F = BB->getParent();
9654   MachineFunction::iterator It = ++BB->getIterator();
9655 
9656   unsigned dest = MI.getOperand(0).getReg();
9657   unsigned ptrA = MI.getOperand(1).getReg();
9658   unsigned ptrB = MI.getOperand(2).getReg();
9659   unsigned incr = MI.getOperand(3).getReg();
9660   DebugLoc dl = MI.getDebugLoc();
9661 
9662   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
9663   MachineBasicBlock *loop2MBB =
9664     CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
9665   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
9666   F->insert(It, loopMBB);
9667   if (CmpOpcode)
9668     F->insert(It, loop2MBB);
9669   F->insert(It, exitMBB);
9670   exitMBB->splice(exitMBB->begin(), BB,
9671                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
9672   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
9673 
9674   MachineRegisterInfo &RegInfo = F->getRegInfo();
9675   const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass
9676                                           : &PPC::GPRCRegClass;
9677   unsigned PtrReg = RegInfo.createVirtualRegister(RC);
9678   unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
9679   unsigned ShiftReg =
9680     isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(RC);
9681   unsigned Incr2Reg = RegInfo.createVirtualRegister(RC);
9682   unsigned MaskReg = RegInfo.createVirtualRegister(RC);
9683   unsigned Mask2Reg = RegInfo.createVirtualRegister(RC);
9684   unsigned Mask3Reg = RegInfo.createVirtualRegister(RC);
9685   unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC);
9686   unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC);
9687   unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC);
9688   unsigned TmpDestReg = RegInfo.createVirtualRegister(RC);
9689   unsigned Ptr1Reg;
9690   unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC);
9691 
9692   //  thisMBB:
9693   //   ...
9694   //   fallthrough --> loopMBB
9695   BB->addSuccessor(loopMBB);
9696 
9697   // The 4-byte load must be aligned, while a char or short may be
9698   // anywhere in the word.  Hence all this nasty bookkeeping code.
9699   //   add ptr1, ptrA, ptrB [copy if ptrA==0]
9700   //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
9701   //   xori shift, shift1, 24 [16]
9702   //   rlwinm ptr, ptr1, 0, 0, 29
9703   //   slw incr2, incr, shift
9704   //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
9705   //   slw mask, mask2, shift
9706   //  loopMBB:
9707   //   lwarx tmpDest, ptr
9708   //   add tmp, tmpDest, incr2
9709   //   andc tmp2, tmpDest, mask
9710   //   and tmp3, tmp, mask
9711   //   or tmp4, tmp3, tmp2
9712   //   stwcx. tmp4, ptr
9713   //   bne- loopMBB
9714   //   fallthrough --> exitMBB
9715   //   srw dest, tmpDest, shift
9716   if (ptrA != ZeroReg) {
9717     Ptr1Reg = RegInfo.createVirtualRegister(RC);
9718     BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
9719       .addReg(ptrA).addReg(ptrB);
9720   } else {
9721     Ptr1Reg = ptrB;
9722   }
9723   BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
9724       .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
9725   if (!isLittleEndian)
9726     BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
9727         .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
9728   if (is64bit)
9729     BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
9730       .addReg(Ptr1Reg).addImm(0).addImm(61);
9731   else
9732     BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
9733       .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
9734   BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg)
9735       .addReg(incr).addReg(ShiftReg);
9736   if (is8bit)
9737     BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
9738   else {
9739     BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
9740     BuildMI(BB, dl, TII->get(PPC::ORI),Mask2Reg).addReg(Mask3Reg).addImm(65535);
9741   }
9742   BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
9743       .addReg(Mask2Reg).addReg(ShiftReg);
9744 
9745   BB = loopMBB;
9746   BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
9747     .addReg(ZeroReg).addReg(PtrReg);
9748   if (BinOpcode)
9749     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
9750       .addReg(Incr2Reg).addReg(TmpDestReg);
9751   BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg)
9752     .addReg(TmpDestReg).addReg(MaskReg);
9753   BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg)
9754     .addReg(TmpReg).addReg(MaskReg);
9755   if (CmpOpcode) {
9756     // For unsigned comparisons, we can directly compare the shifted values.
9757     // For signed comparisons we shift and sign extend.
9758     unsigned SReg = RegInfo.createVirtualRegister(RC);
9759     BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), SReg)
9760       .addReg(TmpDestReg).addReg(MaskReg);
9761     unsigned ValueReg = SReg;
9762     unsigned CmpReg = Incr2Reg;
9763     if (CmpOpcode == PPC::CMPW) {
9764       ValueReg = RegInfo.createVirtualRegister(RC);
9765       BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
9766         .addReg(SReg).addReg(ShiftReg);
9767       unsigned ValueSReg = RegInfo.createVirtualRegister(RC);
9768       BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
9769         .addReg(ValueReg);
9770       ValueReg = ValueSReg;
9771       CmpReg = incr;
9772     }
9773     BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
9774       .addReg(CmpReg).addReg(ValueReg);
9775     BuildMI(BB, dl, TII->get(PPC::BCC))
9776       .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
9777     BB->addSuccessor(loop2MBB);
9778     BB->addSuccessor(exitMBB);
9779     BB = loop2MBB;
9780   }
9781   BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg)
9782     .addReg(Tmp3Reg).addReg(Tmp2Reg);
9783   BuildMI(BB, dl, TII->get(PPC::STWCX))
9784     .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg);
9785   BuildMI(BB, dl, TII->get(PPC::BCC))
9786     .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
9787   BB->addSuccessor(loopMBB);
9788   BB->addSuccessor(exitMBB);
9789 
9790   //  exitMBB:
9791   //   ...
9792   BB = exitMBB;
9793   BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg)
9794     .addReg(ShiftReg);
9795   return BB;
9796 }
9797 
9798 llvm::MachineBasicBlock *
9799 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
9800                                     MachineBasicBlock *MBB) const {
9801   DebugLoc DL = MI.getDebugLoc();
9802   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
9803   const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
9804 
9805   MachineFunction *MF = MBB->getParent();
9806   MachineRegisterInfo &MRI = MF->getRegInfo();
9807 
9808   const BasicBlock *BB = MBB->getBasicBlock();
9809   MachineFunction::iterator I = ++MBB->getIterator();
9810 
9811   // Memory Reference
9812   MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
9813   MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
9814 
9815   unsigned DstReg = MI.getOperand(0).getReg();
9816   const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
9817   assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
9818   unsigned mainDstReg = MRI.createVirtualRegister(RC);
9819   unsigned restoreDstReg = MRI.createVirtualRegister(RC);
9820 
9821   MVT PVT = getPointerTy(MF->getDataLayout());
9822   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
9823          "Invalid Pointer Size!");
9824   // For v = setjmp(buf), we generate
9825   //
9826   // thisMBB:
9827   //  SjLjSetup mainMBB
9828   //  bl mainMBB
9829   //  v_restore = 1
9830   //  b sinkMBB
9831   //
9832   // mainMBB:
9833   //  buf[LabelOffset] = LR
9834   //  v_main = 0
9835   //
9836   // sinkMBB:
9837   //  v = phi(main, restore)
9838   //
9839 
9840   MachineBasicBlock *thisMBB = MBB;
9841   MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
9842   MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
9843   MF->insert(I, mainMBB);
9844   MF->insert(I, sinkMBB);
9845 
9846   MachineInstrBuilder MIB;
9847 
9848   // Transfer the remainder of BB and its successor edges to sinkMBB.
9849   sinkMBB->splice(sinkMBB->begin(), MBB,
9850                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
9851   sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
9852 
9853   // Note that the structure of the jmp_buf used here is not compatible
9854   // with that used by libc, and is not designed to be. Specifically, it
9855   // stores only those 'reserved' registers that LLVM does not otherwise
9856   // understand how to spill. Also, by convention, by the time this
9857   // intrinsic is called, Clang has already stored the frame address in the
9858   // first slot of the buffer and stack address in the third. Following the
9859   // X86 target code, we'll store the jump address in the second slot. We also
9860   // need to save the TOC pointer (R2) to handle jumps between shared
9861   // libraries, and that will be stored in the fourth slot. The thread
9862   // identifier (R13) is not affected.
9863 
9864   // thisMBB:
9865   const int64_t LabelOffset = 1 * PVT.getStoreSize();
9866   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
9867   const int64_t BPOffset    = 4 * PVT.getStoreSize();
9868 
9869   // Prepare IP either in reg.
9870   const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
9871   unsigned LabelReg = MRI.createVirtualRegister(PtrRC);
9872   unsigned BufReg = MI.getOperand(1).getReg();
9873 
9874   if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) {
9875     setUsesTOCBasePtr(*MBB->getParent());
9876     MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
9877             .addReg(PPC::X2)
9878             .addImm(TOCOffset)
9879             .addReg(BufReg);
9880     MIB.setMemRefs(MMOBegin, MMOEnd);
9881   }
9882 
9883   // Naked functions never have a base pointer, and so we use r1. For all
9884   // other functions, this decision must be delayed until during PEI.
9885   unsigned BaseReg;
9886   if (MF->getFunction().hasFnAttribute(Attribute::Naked))
9887     BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
9888   else
9889     BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;
9890 
9891   MIB = BuildMI(*thisMBB, MI, DL,
9892                 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
9893             .addReg(BaseReg)
9894             .addImm(BPOffset)
9895             .addReg(BufReg);
9896   MIB.setMemRefs(MMOBegin, MMOEnd);
9897 
9898   // Setup
9899   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
9900   MIB.addRegMask(TRI->getNoPreservedMask());
9901 
9902   BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);
9903 
9904   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
9905           .addMBB(mainMBB);
9906   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);
9907 
9908   thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
9909   thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());
9910 
9911   // mainMBB:
9912   //  mainDstReg = 0
9913   MIB =
9914       BuildMI(mainMBB, DL,
9915               TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
9916 
9917   // Store IP
9918   if (Subtarget.isPPC64()) {
9919     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
9920             .addReg(LabelReg)
9921             .addImm(LabelOffset)
9922             .addReg(BufReg);
9923   } else {
9924     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
9925             .addReg(LabelReg)
9926             .addImm(LabelOffset)
9927             .addReg(BufReg);
9928   }
9929 
9930   MIB.setMemRefs(MMOBegin, MMOEnd);
9931 
9932   BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
9933   mainMBB->addSuccessor(sinkMBB);
9934 
9935   // sinkMBB:
9936   BuildMI(*sinkMBB, sinkMBB->begin(), DL,
9937           TII->get(PPC::PHI), DstReg)
9938     .addReg(mainDstReg).addMBB(mainMBB)
9939     .addReg(restoreDstReg).addMBB(thisMBB);
9940 
9941   MI.eraseFromParent();
9942   return sinkMBB;
9943 }
9944 
9945 MachineBasicBlock *
9946 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
9947                                      MachineBasicBlock *MBB) const {
9948   DebugLoc DL = MI.getDebugLoc();
9949   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
9950 
9951   MachineFunction *MF = MBB->getParent();
9952   MachineRegisterInfo &MRI = MF->getRegInfo();
9953 
9954   // Memory Reference
9955   MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
9956   MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
9957 
9958   MVT PVT = getPointerTy(MF->getDataLayout());
9959   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
9960          "Invalid Pointer Size!");
9961 
9962   const TargetRegisterClass *RC =
9963     (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
9964   unsigned Tmp = MRI.createVirtualRegister(RC);
9965   // Since FP is only updated here but NOT referenced, it's treated as GPR.
9966   unsigned FP  = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
9967   unsigned SP  = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
9968   unsigned BP =
9969       (PVT == MVT::i64)
9970           ? PPC::X30
9971           : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
9972                                                               : PPC::R30);
9973 
9974   MachineInstrBuilder MIB;
9975 
9976   const int64_t LabelOffset = 1 * PVT.getStoreSize();
9977   const int64_t SPOffset    = 2 * PVT.getStoreSize();
9978   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
9979   const int64_t BPOffset    = 4 * PVT.getStoreSize();
9980 
9981   unsigned BufReg = MI.getOperand(0).getReg();
9982 
9983   // Reload FP (the jumped-to function may not have had a
9984   // frame pointer, and if so, then its r31 will be restored
9985   // as necessary).
9986   if (PVT == MVT::i64) {
9987     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
9988             .addImm(0)
9989             .addReg(BufReg);
9990   } else {
9991     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
9992             .addImm(0)
9993             .addReg(BufReg);
9994   }
9995   MIB.setMemRefs(MMOBegin, MMOEnd);
9996 
9997   // Reload IP
9998   if (PVT == MVT::i64) {
9999     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
10000             .addImm(LabelOffset)
10001             .addReg(BufReg);
10002   } else {
10003     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
10004             .addImm(LabelOffset)
10005             .addReg(BufReg);
10006   }
10007   MIB.setMemRefs(MMOBegin, MMOEnd);
10008 
10009   // Reload SP
10010   if (PVT == MVT::i64) {
10011     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
10012             .addImm(SPOffset)
10013             .addReg(BufReg);
10014   } else {
10015     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
10016             .addImm(SPOffset)
10017             .addReg(BufReg);
10018   }
10019   MIB.setMemRefs(MMOBegin, MMOEnd);
10020 
10021   // Reload BP
10022   if (PVT == MVT::i64) {
10023     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
10024             .addImm(BPOffset)
10025             .addReg(BufReg);
10026   } else {
10027     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
10028             .addImm(BPOffset)
10029             .addReg(BufReg);
10030   }
10031   MIB.setMemRefs(MMOBegin, MMOEnd);
10032 
10033   // Reload TOC
10034   if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
10035     setUsesTOCBasePtr(*MBB->getParent());
10036     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
10037             .addImm(TOCOffset)
10038             .addReg(BufReg);
10039 
10040     MIB.setMemRefs(MMOBegin, MMOEnd);
10041   }
10042 
10043   // Jump
10044   BuildMI(*MBB, MI, DL,
10045           TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
10046   BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));
10047 
10048   MI.eraseFromParent();
10049   return MBB;
10050 }
10051 
10052 MachineBasicBlock *
10053 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
10054                                                MachineBasicBlock *BB) const {
10055   if (MI.getOpcode() == TargetOpcode::STACKMAP ||
10056       MI.getOpcode() == TargetOpcode::PATCHPOINT) {
10057     if (Subtarget.isPPC64() && Subtarget.isSVR4ABI() &&
10058         MI.getOpcode() == TargetOpcode::PATCHPOINT) {
10059       // Call lowering should have added an r2 operand to indicate a dependence
10060       // on the TOC base pointer value. It can't however, because there is no
10061       // way to mark the dependence as implicit there, and so the stackmap code
10062       // will confuse it with a regular operand. Instead, add the dependence
10063       // here.
10064       setUsesTOCBasePtr(*BB->getParent());
10065       MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
10066     }
10067 
10068     return emitPatchPoint(MI, BB);
10069   }
10070 
10071   if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
10072       MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
10073     return emitEHSjLjSetJmp(MI, BB);
10074   } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
10075              MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
10076     return emitEHSjLjLongJmp(MI, BB);
10077   }
10078 
10079   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10080 
10081   // To "insert" these instructions we actually have to insert their
10082   // control-flow patterns.
10083   const BasicBlock *LLVM_BB = BB->getBasicBlock();
10084   MachineFunction::iterator It = ++BB->getIterator();
10085 
10086   MachineFunction *F = BB->getParent();
10087 
10088   if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
10089        MI.getOpcode() == PPC::SELECT_CC_I8 ||
10090        MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8) {
10091     SmallVector<MachineOperand, 2> Cond;
10092     if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
10093         MI.getOpcode() == PPC::SELECT_CC_I8)
10094       Cond.push_back(MI.getOperand(4));
10095     else
10096       Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
10097     Cond.push_back(MI.getOperand(1));
10098 
10099     DebugLoc dl = MI.getDebugLoc();
10100     TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
10101                       MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
10102   } else if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
10103              MI.getOpcode() == PPC::SELECT_CC_I8 ||
10104              MI.getOpcode() == PPC::SELECT_CC_F4 ||
10105              MI.getOpcode() == PPC::SELECT_CC_F8 ||
10106              MI.getOpcode() == PPC::SELECT_CC_QFRC ||
10107              MI.getOpcode() == PPC::SELECT_CC_QSRC ||
10108              MI.getOpcode() == PPC::SELECT_CC_QBRC ||
10109              MI.getOpcode() == PPC::SELECT_CC_VRRC ||
10110              MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
10111              MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
10112              MI.getOpcode() == PPC::SELECT_CC_VSRC ||
10113              MI.getOpcode() == PPC::SELECT_I4 ||
10114              MI.getOpcode() == PPC::SELECT_I8 ||
10115              MI.getOpcode() == PPC::SELECT_F4 ||
10116              MI.getOpcode() == PPC::SELECT_F8 ||
10117              MI.getOpcode() == PPC::SELECT_QFRC ||
10118              MI.getOpcode() == PPC::SELECT_QSRC ||
10119              MI.getOpcode() == PPC::SELECT_QBRC ||
10120              MI.getOpcode() == PPC::SELECT_VRRC ||
10121              MI.getOpcode() == PPC::SELECT_VSFRC ||
10122              MI.getOpcode() == PPC::SELECT_VSSRC ||
10123              MI.getOpcode() == PPC::SELECT_VSRC) {
10124     // The incoming instruction knows the destination vreg to set, the
10125     // condition code register to branch on, the true/false values to
10126     // select between, and a branch opcode to use.
10127 
10128     //  thisMBB:
10129     //  ...
10130     //   TrueVal = ...
10131     //   cmpTY ccX, r1, r2
10132     //   bCC copy1MBB
10133     //   fallthrough --> copy0MBB
10134     MachineBasicBlock *thisMBB = BB;
10135     MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
10136     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
10137     DebugLoc dl = MI.getDebugLoc();
10138     F->insert(It, copy0MBB);
10139     F->insert(It, sinkMBB);
10140 
10141     // Transfer the remainder of BB and its successor edges to sinkMBB.
10142     sinkMBB->splice(sinkMBB->begin(), BB,
10143                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
10144     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
10145 
10146     // Next, add the true and fallthrough blocks as its successors.
10147     BB->addSuccessor(copy0MBB);
10148     BB->addSuccessor(sinkMBB);
10149 
10150     if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 ||
10151         MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 ||
10152         MI.getOpcode() == PPC::SELECT_QFRC ||
10153         MI.getOpcode() == PPC::SELECT_QSRC ||
10154         MI.getOpcode() == PPC::SELECT_QBRC ||
10155         MI.getOpcode() == PPC::SELECT_VRRC ||
10156         MI.getOpcode() == PPC::SELECT_VSFRC ||
10157         MI.getOpcode() == PPC::SELECT_VSSRC ||
10158         MI.getOpcode() == PPC::SELECT_VSRC) {
10159       BuildMI(BB, dl, TII->get(PPC::BC))
10160           .addReg(MI.getOperand(1).getReg())
10161           .addMBB(sinkMBB);
10162     } else {
10163       unsigned SelectPred = MI.getOperand(4).getImm();
10164       BuildMI(BB, dl, TII->get(PPC::BCC))
10165           .addImm(SelectPred)
10166           .addReg(MI.getOperand(1).getReg())
10167           .addMBB(sinkMBB);
10168     }
10169 
10170     //  copy0MBB:
10171     //   %FalseValue = ...
10172     //   # fallthrough to sinkMBB
10173     BB = copy0MBB;
10174 
10175     // Update machine-CFG edges
10176     BB->addSuccessor(sinkMBB);
10177 
10178     //  sinkMBB:
10179     //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
10180     //  ...
10181     BB = sinkMBB;
10182     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg())
10183         .addReg(MI.getOperand(3).getReg())
10184         .addMBB(copy0MBB)
10185         .addReg(MI.getOperand(2).getReg())
10186         .addMBB(thisMBB);
10187   } else if (MI.getOpcode() == PPC::ReadTB) {
10188     // To read the 64-bit time-base register on a 32-bit target, we read the
10189     // two halves. Should the counter have wrapped while it was being read, we
10190     // need to try again.
10191     // ...
10192     // readLoop:
10193     // mfspr Rx,TBU # load from TBU
10194     // mfspr Ry,TB  # load from TB
10195     // mfspr Rz,TBU # load from TBU
10196     // cmpw crX,Rx,Rz # check if 'old'='new'
10197     // bne readLoop   # branch if they're not equal
10198     // ...
10199 
10200     MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
10201     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
10202     DebugLoc dl = MI.getDebugLoc();
10203     F->insert(It, readMBB);
10204     F->insert(It, sinkMBB);
10205 
10206     // Transfer the remainder of BB and its successor edges to sinkMBB.
10207     sinkMBB->splice(sinkMBB->begin(), BB,
10208                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
10209     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
10210 
10211     BB->addSuccessor(readMBB);
10212     BB = readMBB;
10213 
10214     MachineRegisterInfo &RegInfo = F->getRegInfo();
10215     unsigned ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
10216     unsigned LoReg = MI.getOperand(0).getReg();
10217     unsigned HiReg = MI.getOperand(1).getReg();
10218 
10219     BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
10220     BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
10221     BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269);
10222 
10223     unsigned CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
10224 
10225     BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
10226       .addReg(HiReg).addReg(ReadAgainReg);
10227     BuildMI(BB, dl, TII->get(PPC::BCC))
10228       .addImm(PPC::PRED_NE).addReg(CmpReg).addMBB(readMBB);
10229 
10230     BB->addSuccessor(readMBB);
10231     BB->addSuccessor(sinkMBB);
10232   } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
10233     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
10234   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
10235     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
10236   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
10237     BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4);
10238   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
10239     BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8);
10240 
10241   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
10242     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
10243   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
10244     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
10245   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
10246     BB = EmitAtomicBinary(MI, BB, 4, PPC::AND);
10247   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
10248     BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8);
10249 
10250   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
10251     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
10252   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
10253     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
10254   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
10255     BB = EmitAtomicBinary(MI, BB, 4, PPC::OR);
10256   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
10257     BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8);
10258 
10259   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
10260     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
10261   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
10262     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
10263   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
10264     BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR);
10265   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
10266     BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8);
10267 
10268   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
10269     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
10270   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
10271     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
10272   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
10273     BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND);
10274   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
10275     BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8);
10276 
10277   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
10278     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
10279   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
10280     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
10281   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
10282     BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
10283   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
10284     BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);
10285 
10286   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
10287     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE);
10288   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
10289     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE);
10290   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
10291     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE);
10292   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
10293     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE);
10294 
10295   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
10296     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE);
10297   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
10298     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE);
10299   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
10300     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE);
10301   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
10302     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE);
10303 
10304   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
10305     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE);
10306   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
10307     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE);
10308   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
10309     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE);
10310   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
10311     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE);
10312 
10313   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
10314     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE);
10315   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
10316     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE);
10317   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
10318     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE);
10319   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
10320     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE);
10321 
10322   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
10323     BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
10324   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
10325     BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
10326   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
10327     BB = EmitAtomicBinary(MI, BB, 4, 0);
10328   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
10329     BB = EmitAtomicBinary(MI, BB, 8, 0);
10330   else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
10331            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
10332            (Subtarget.hasPartwordAtomics() &&
10333             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
10334            (Subtarget.hasPartwordAtomics() &&
10335             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
10336     bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
10337 
10338     auto LoadMnemonic = PPC::LDARX;
10339     auto StoreMnemonic = PPC::STDCX;
10340     switch (MI.getOpcode()) {
10341     default:
10342       llvm_unreachable("Compare and swap of unknown size");
10343     case PPC::ATOMIC_CMP_SWAP_I8:
10344       LoadMnemonic = PPC::LBARX;
10345       StoreMnemonic = PPC::STBCX;
10346       assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
10347       break;
10348     case PPC::ATOMIC_CMP_SWAP_I16:
10349       LoadMnemonic = PPC::LHARX;
10350       StoreMnemonic = PPC::STHCX;
10351       assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
10352       break;
10353     case PPC::ATOMIC_CMP_SWAP_I32:
10354       LoadMnemonic = PPC::LWARX;
10355       StoreMnemonic = PPC::STWCX;
10356       break;
10357     case PPC::ATOMIC_CMP_SWAP_I64:
10358       LoadMnemonic = PPC::LDARX;
10359       StoreMnemonic = PPC::STDCX;
10360       break;
10361     }
10362     unsigned dest = MI.getOperand(0).getReg();
10363     unsigned ptrA = MI.getOperand(1).getReg();
10364     unsigned ptrB = MI.getOperand(2).getReg();
10365     unsigned oldval = MI.getOperand(3).getReg();
10366     unsigned newval = MI.getOperand(4).getReg();
10367     DebugLoc dl = MI.getDebugLoc();
10368 
10369     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
10370     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
10371     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
10372     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
10373     F->insert(It, loop1MBB);
10374     F->insert(It, loop2MBB);
10375     F->insert(It, midMBB);
10376     F->insert(It, exitMBB);
10377     exitMBB->splice(exitMBB->begin(), BB,
10378                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
10379     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
10380 
10381     //  thisMBB:
10382     //   ...
10383     //   fallthrough --> loopMBB
10384     BB->addSuccessor(loop1MBB);
10385 
10386     // loop1MBB:
10387     //   l[bhwd]arx dest, ptr
10388     //   cmp[wd] dest, oldval
10389     //   bne- midMBB
10390     // loop2MBB:
10391     //   st[bhwd]cx. newval, ptr
10392     //   bne- loopMBB
10393     //   b exitBB
10394     // midMBB:
10395     //   st[bhwd]cx. dest, ptr
10396     // exitBB:
10397     BB = loop1MBB;
10398     BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
10399       .addReg(ptrA).addReg(ptrB);
10400     BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
10401       .addReg(oldval).addReg(dest);
10402     BuildMI(BB, dl, TII->get(PPC::BCC))
10403       .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
10404     BB->addSuccessor(loop2MBB);
10405     BB->addSuccessor(midMBB);
10406 
10407     BB = loop2MBB;
10408     BuildMI(BB, dl, TII->get(StoreMnemonic))
10409       .addReg(newval).addReg(ptrA).addReg(ptrB);
10410     BuildMI(BB, dl, TII->get(PPC::BCC))
10411       .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
10412     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
10413     BB->addSuccessor(loop1MBB);
10414     BB->addSuccessor(exitMBB);
10415 
10416     BB = midMBB;
10417     BuildMI(BB, dl, TII->get(StoreMnemonic))
10418       .addReg(dest).addReg(ptrA).addReg(ptrB);
10419     BB->addSuccessor(exitMBB);
10420 
10421     //  exitMBB:
10422     //   ...
10423     BB = exitMBB;
10424   } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
10425              MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
10426     // We must use 64-bit registers for addresses when targeting 64-bit,
10427     // since we're actually doing arithmetic on them.  Other registers
10428     // can be 32-bit.
10429     bool is64bit = Subtarget.isPPC64();
10430     bool isLittleEndian = Subtarget.isLittleEndian();
10431     bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
10432 
10433     unsigned dest = MI.getOperand(0).getReg();
10434     unsigned ptrA = MI.getOperand(1).getReg();
10435     unsigned ptrB = MI.getOperand(2).getReg();
10436     unsigned oldval = MI.getOperand(3).getReg();
10437     unsigned newval = MI.getOperand(4).getReg();
10438     DebugLoc dl = MI.getDebugLoc();
10439 
10440     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
10441     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
10442     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
10443     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
10444     F->insert(It, loop1MBB);
10445     F->insert(It, loop2MBB);
10446     F->insert(It, midMBB);
10447     F->insert(It, exitMBB);
10448     exitMBB->splice(exitMBB->begin(), BB,
10449                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
10450     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
10451 
10452     MachineRegisterInfo &RegInfo = F->getRegInfo();
10453     const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass
10454                                             : &PPC::GPRCRegClass;
10455     unsigned PtrReg = RegInfo.createVirtualRegister(RC);
10456     unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
10457     unsigned ShiftReg =
10458       isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(RC);
10459     unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC);
10460     unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC);
10461     unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC);
10462     unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC);
10463     unsigned MaskReg = RegInfo.createVirtualRegister(RC);
10464     unsigned Mask2Reg = RegInfo.createVirtualRegister(RC);
10465     unsigned Mask3Reg = RegInfo.createVirtualRegister(RC);
10466     unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC);
10467     unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC);
10468     unsigned TmpDestReg = RegInfo.createVirtualRegister(RC);
10469     unsigned Ptr1Reg;
10470     unsigned TmpReg = RegInfo.createVirtualRegister(RC);
10471     unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
10472     //  thisMBB:
10473     //   ...
10474     //   fallthrough --> loopMBB
10475     BB->addSuccessor(loop1MBB);
10476 
10477     // The 4-byte load must be aligned, while a char or short may be
10478     // anywhere in the word.  Hence all this nasty bookkeeping code.
10479     //   add ptr1, ptrA, ptrB [copy if ptrA==0]
10480     //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
10481     //   xori shift, shift1, 24 [16]
10482     //   rlwinm ptr, ptr1, 0, 0, 29
10483     //   slw newval2, newval, shift
10484     //   slw oldval2, oldval,shift
10485     //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
10486     //   slw mask, mask2, shift
10487     //   and newval3, newval2, mask
10488     //   and oldval3, oldval2, mask
10489     // loop1MBB:
10490     //   lwarx tmpDest, ptr
10491     //   and tmp, tmpDest, mask
10492     //   cmpw tmp, oldval3
10493     //   bne- midMBB
10494     // loop2MBB:
10495     //   andc tmp2, tmpDest, mask
10496     //   or tmp4, tmp2, newval3
10497     //   stwcx. tmp4, ptr
10498     //   bne- loop1MBB
10499     //   b exitBB
10500     // midMBB:
10501     //   stwcx. tmpDest, ptr
10502     // exitBB:
10503     //   srw dest, tmpDest, shift
10504     if (ptrA != ZeroReg) {
10505       Ptr1Reg = RegInfo.createVirtualRegister(RC);
10506       BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
10507         .addReg(ptrA).addReg(ptrB);
10508     } else {
10509       Ptr1Reg = ptrB;
10510     }
10511     BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
10512         .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
10513     if (!isLittleEndian)
10514       BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
10515           .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
10516     if (is64bit)
10517       BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
10518         .addReg(Ptr1Reg).addImm(0).addImm(61);
10519     else
10520       BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
10521         .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
10522     BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
10523         .addReg(newval).addReg(ShiftReg);
10524     BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
10525         .addReg(oldval).addReg(ShiftReg);
10526     if (is8bit)
10527       BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
10528     else {
10529       BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
10530       BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
10531         .addReg(Mask3Reg).addImm(65535);
10532     }
10533     BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
10534         .addReg(Mask2Reg).addReg(ShiftReg);
10535     BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
10536         .addReg(NewVal2Reg).addReg(MaskReg);
10537     BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
10538         .addReg(OldVal2Reg).addReg(MaskReg);
10539 
10540     BB = loop1MBB;
10541     BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
10542         .addReg(ZeroReg).addReg(PtrReg);
10543     BuildMI(BB, dl, TII->get(PPC::AND),TmpReg)
10544         .addReg(TmpDestReg).addReg(MaskReg);
10545     BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
10546         .addReg(TmpReg).addReg(OldVal3Reg);
10547     BuildMI(BB, dl, TII->get(PPC::BCC))
10548         .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
10549     BB->addSuccessor(loop2MBB);
10550     BB->addSuccessor(midMBB);
10551 
10552     BB = loop2MBB;
10553     BuildMI(BB, dl, TII->get(PPC::ANDC),Tmp2Reg)
10554         .addReg(TmpDestReg).addReg(MaskReg);
10555     BuildMI(BB, dl, TII->get(PPC::OR),Tmp4Reg)
10556         .addReg(Tmp2Reg).addReg(NewVal3Reg);
10557     BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg)
10558         .addReg(ZeroReg).addReg(PtrReg);
10559     BuildMI(BB, dl, TII->get(PPC::BCC))
10560       .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
10561     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
10562     BB->addSuccessor(loop1MBB);
10563     BB->addSuccessor(exitMBB);
10564 
10565     BB = midMBB;
10566     BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg)
10567       .addReg(ZeroReg).addReg(PtrReg);
10568     BB->addSuccessor(exitMBB);
10569 
10570     //  exitMBB:
10571     //   ...
10572     BB = exitMBB;
10573     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW),dest).addReg(TmpReg)
10574       .addReg(ShiftReg);
10575   } else if (MI.getOpcode() == PPC::FADDrtz) {
10576     // This pseudo performs an FADD with rounding mode temporarily forced
10577     // to round-to-zero.  We emit this via custom inserter since the FPSCR
10578     // is not modeled at the SelectionDAG level.
10579     unsigned Dest = MI.getOperand(0).getReg();
10580     unsigned Src1 = MI.getOperand(1).getReg();
10581     unsigned Src2 = MI.getOperand(2).getReg();
10582     DebugLoc dl = MI.getDebugLoc();
10583 
10584     MachineRegisterInfo &RegInfo = F->getRegInfo();
10585     unsigned MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
10586 
10587     // Save FPSCR value.
10588     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);
10589 
10590     // Set rounding mode to round-to-zero.
10591     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31);
10592     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30);
10593 
10594     // Perform addition.
10595     BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2);
10596 
10597     // Restore FPSCR value.
10598     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
10599   } else if (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT ||
10600              MI.getOpcode() == PPC::ANDIo_1_GT_BIT ||
10601              MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
10602              MI.getOpcode() == PPC::ANDIo_1_GT_BIT8) {
10603     unsigned Opcode = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
10604                        MI.getOpcode() == PPC::ANDIo_1_GT_BIT8)
10605                           ? PPC::ANDIo8
10606                           : PPC::ANDIo;
10607     bool isEQ = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT ||
10608                  MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8);
10609 
10610     MachineRegisterInfo &RegInfo = F->getRegInfo();
10611     unsigned Dest = RegInfo.createVirtualRegister(Opcode == PPC::ANDIo ?
10612                                                   &PPC::GPRCRegClass :
10613                                                   &PPC::G8RCRegClass);
10614 
10615     DebugLoc dl = MI.getDebugLoc();
10616     BuildMI(*BB, MI, dl, TII->get(Opcode), Dest)
10617         .addReg(MI.getOperand(1).getReg())
10618         .addImm(1);
10619     BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY),
10620             MI.getOperand(0).getReg())
10621         .addReg(isEQ ? PPC::CR0EQ : PPC::CR0GT);
10622   } else if (MI.getOpcode() == PPC::TCHECK_RET) {
10623     DebugLoc Dl = MI.getDebugLoc();
10624     MachineRegisterInfo &RegInfo = F->getRegInfo();
10625     unsigned CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
10626     BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
10627     return BB;
10628   } else {
10629     llvm_unreachable("Unexpected instr type to insert");
10630   }
10631 
10632   MI.eraseFromParent(); // The pseudo instruction is gone now.
10633   return BB;
10634 }
10635 
10636 //===----------------------------------------------------------------------===//
10637 // Target Optimization Hooks
10638 //===----------------------------------------------------------------------===//
10639 
10640 static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {
10641   // For the estimates, convergence is quadratic, so we essentially double the
10642   // number of digits correct after every iteration. For both FRE and FRSQRTE,
10643   // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(),
10644   // this is 2^-14. IEEE float has 23 digits and double has 52 digits.
10645   int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
10646   if (VT.getScalarType() == MVT::f64)
10647     RefinementSteps++;
10648   return RefinementSteps;
10649 }
10650 
10651 SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
10652                                            int Enabled, int &RefinementSteps,
10653                                            bool &UseOneConstNR,
10654                                            bool Reciprocal) const {
10655   EVT VT = Operand.getValueType();
10656   if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
10657       (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
10658       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
10659       (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
10660       (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
10661       (VT == MVT::v4f64 && Subtarget.hasQPX())) {
10662     if (RefinementSteps == ReciprocalEstimate::Unspecified)
10663       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
10664 
10665     UseOneConstNR = true;
10666     return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
10667   }
10668   return SDValue();
10669 }
10670 
10671 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
10672                                             int Enabled,
10673                                             int &RefinementSteps) const {
10674   EVT VT = Operand.getValueType();
10675   if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
10676       (VT == MVT::f64 && Subtarget.hasFRE()) ||
10677       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
10678       (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
10679       (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
10680       (VT == MVT::v4f64 && Subtarget.hasQPX())) {
10681     if (RefinementSteps == ReciprocalEstimate::Unspecified)
10682       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
10683     return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
10684   }
10685   return SDValue();
10686 }
10687 
10688 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
10689   // Note: This functionality is used only when unsafe-fp-math is enabled, and
10690   // on cores with reciprocal estimates (which are used when unsafe-fp-math is
10691   // enabled for division), this functionality is redundant with the default
10692   // combiner logic (once the division -> reciprocal/multiply transformation
10693   // has taken place). As a result, this matters more for older cores than for
10694   // newer ones.
10695 
10696   // Combine multiple FDIVs with the same divisor into multiple FMULs by the
10697   // reciprocal if there are two or more FDIVs (for embedded cores with only
10698   // one FP pipeline) for three or more FDIVs (for generic OOO cores).
10699   switch (Subtarget.getDarwinDirective()) {
10700   default:
10701     return 3;
10702   case PPC::DIR_440:
10703   case PPC::DIR_A2:
10704   case PPC::DIR_E500mc:
10705   case PPC::DIR_E5500:
10706     return 2;
10707   }
10708 }
10709 
10710 // isConsecutiveLSLoc needs to work even if all adds have not yet been
10711 // collapsed, and so we need to look through chains of them.
10712 static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
10713                                      int64_t& Offset, SelectionDAG &DAG) {
10714   if (DAG.isBaseWithConstantOffset(Loc)) {
10715     Base = Loc.getOperand(0);
10716     Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
10717 
10718     // The base might itself be a base plus an offset, and if so, accumulate
10719     // that as well.
10720     getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG);
10721   }
10722 }
10723 
10724 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
10725                             unsigned Bytes, int Dist,
10726                             SelectionDAG &DAG) {
10727   if (VT.getSizeInBits() / 8 != Bytes)
10728     return false;
10729 
10730   SDValue BaseLoc = Base->getBasePtr();
10731   if (Loc.getOpcode() == ISD::FrameIndex) {
10732     if (BaseLoc.getOpcode() != ISD::FrameIndex)
10733       return false;
10734     const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
10735     int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
10736     int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
10737     int FS  = MFI.getObjectSize(FI);
10738     int BFS = MFI.getObjectSize(BFI);
10739     if (FS != BFS || FS != (int)Bytes) return false;
10740     return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
10741   }
10742 
10743   SDValue Base1 = Loc, Base2 = BaseLoc;
10744   int64_t Offset1 = 0, Offset2 = 0;
10745   getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);
10746   getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);
10747   if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
10748     return true;
10749 
10750   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10751   const GlobalValue *GV1 = nullptr;
10752   const GlobalValue *GV2 = nullptr;
10753   Offset1 = 0;
10754   Offset2 = 0;
10755   bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
10756   bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
10757   if (isGA1 && isGA2 && GV1 == GV2)
10758     return Offset1 == (Offset2 + Dist*Bytes);
10759   return false;
10760 }
10761 
10762 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
10763 // not enforce equality of the chain operands.
10764 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
10765                             unsigned Bytes, int Dist,
10766                             SelectionDAG &DAG) {
10767   if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
10768     EVT VT = LS->getMemoryVT();
10769     SDValue Loc = LS->getBasePtr();
10770     return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
10771   }
10772 
10773   if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
10774     EVT VT;
10775     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
10776     default: return false;
10777     case Intrinsic::ppc_qpx_qvlfd:
10778     case Intrinsic::ppc_qpx_qvlfda:
10779       VT = MVT::v4f64;
10780       break;
10781     case Intrinsic::ppc_qpx_qvlfs:
10782     case Intrinsic::ppc_qpx_qvlfsa:
10783       VT = MVT::v4f32;
10784       break;
10785     case Intrinsic::ppc_qpx_qvlfcd:
10786     case Intrinsic::ppc_qpx_qvlfcda:
10787       VT = MVT::v2f64;
10788       break;
10789     case Intrinsic::ppc_qpx_qvlfcs:
10790     case Intrinsic::ppc_qpx_qvlfcsa:
10791       VT = MVT::v2f32;
10792       break;
10793     case Intrinsic::ppc_qpx_qvlfiwa:
10794     case Intrinsic::ppc_qpx_qvlfiwz:
10795     case Intrinsic::ppc_altivec_lvx:
10796     case Intrinsic::ppc_altivec_lvxl:
10797     case Intrinsic::ppc_vsx_lxvw4x:
10798     case Intrinsic::ppc_vsx_lxvw4x_be:
10799       VT = MVT::v4i32;
10800       break;
10801     case Intrinsic::ppc_vsx_lxvd2x:
10802     case Intrinsic::ppc_vsx_lxvd2x_be:
10803       VT = MVT::v2f64;
10804       break;
10805     case Intrinsic::ppc_altivec_lvebx:
10806       VT = MVT::i8;
10807       break;
10808     case Intrinsic::ppc_altivec_lvehx:
10809       VT = MVT::i16;
10810       break;
10811     case Intrinsic::ppc_altivec_lvewx:
10812       VT = MVT::i32;
10813       break;
10814     }
10815 
10816     return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
10817   }
10818 
10819   if (N->getOpcode() == ISD::INTRINSIC_VOID) {
10820     EVT VT;
10821     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
10822     default: return false;
10823     case Intrinsic::ppc_qpx_qvstfd:
10824     case Intrinsic::ppc_qpx_qvstfda:
10825       VT = MVT::v4f64;
10826       break;
10827     case Intrinsic::ppc_qpx_qvstfs:
10828     case Intrinsic::ppc_qpx_qvstfsa:
10829       VT = MVT::v4f32;
10830       break;
10831     case Intrinsic::ppc_qpx_qvstfcd:
10832     case Intrinsic::ppc_qpx_qvstfcda:
10833       VT = MVT::v2f64;
10834       break;
10835     case Intrinsic::ppc_qpx_qvstfcs:
10836     case Intrinsic::ppc_qpx_qvstfcsa:
10837       VT = MVT::v2f32;
10838       break;
10839     case Intrinsic::ppc_qpx_qvstfiw:
10840     case Intrinsic::ppc_qpx_qvstfiwa:
10841     case Intrinsic::ppc_altivec_stvx:
10842     case Intrinsic::ppc_altivec_stvxl:
10843     case Intrinsic::ppc_vsx_stxvw4x:
10844       VT = MVT::v4i32;
10845       break;
10846     case Intrinsic::ppc_vsx_stxvd2x:
10847       VT = MVT::v2f64;
10848       break;
10849     case Intrinsic::ppc_vsx_stxvw4x_be:
10850       VT = MVT::v4i32;
10851       break;
10852     case Intrinsic::ppc_vsx_stxvd2x_be:
10853       VT = MVT::v2f64;
10854       break;
10855     case Intrinsic::ppc_altivec_stvebx:
10856       VT = MVT::i8;
10857       break;
10858     case Intrinsic::ppc_altivec_stvehx:
10859       VT = MVT::i16;
10860       break;
10861     case Intrinsic::ppc_altivec_stvewx:
10862       VT = MVT::i32;
10863       break;
10864     }
10865 
10866     return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
10867   }
10868 
10869   return false;
10870 }
10871 
10872 // Return true is there is a nearyby consecutive load to the one provided
10873 // (regardless of alignment). We search up and down the chain, looking though
10874 // token factors and other loads (but nothing else). As a result, a true result
10875 // indicates that it is safe to create a new consecutive load adjacent to the
10876 // load provided.
10877 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
10878   SDValue Chain = LD->getChain();
10879   EVT VT = LD->getMemoryVT();
10880 
10881   SmallSet<SDNode *, 16> LoadRoots;
10882   SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
10883   SmallSet<SDNode *, 16> Visited;
10884 
10885   // First, search up the chain, branching to follow all token-factor operands.
10886   // If we find a consecutive load, then we're done, otherwise, record all
10887   // nodes just above the top-level loads and token factors.
10888   while (!Queue.empty()) {
10889     SDNode *ChainNext = Queue.pop_back_val();
10890     if (!Visited.insert(ChainNext).second)
10891       continue;
10892 
10893     if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
10894       if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
10895         return true;
10896 
10897       if (!Visited.count(ChainLD->getChain().getNode()))
10898         Queue.push_back(ChainLD->getChain().getNode());
10899     } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
10900       for (const SDUse &O : ChainNext->ops())
10901         if (!Visited.count(O.getNode()))
10902           Queue.push_back(O.getNode());
10903     } else
10904       LoadRoots.insert(ChainNext);
10905   }
10906 
10907   // Second, search down the chain, starting from the top-level nodes recorded
10908   // in the first phase. These top-level nodes are the nodes just above all
10909   // loads and token factors. Starting with their uses, recursively look though
10910   // all loads (just the chain uses) and token factors to find a consecutive
10911   // load.
10912   Visited.clear();
10913   Queue.clear();
10914 
10915   for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
10916        IE = LoadRoots.end(); I != IE; ++I) {
10917     Queue.push_back(*I);
10918 
10919     while (!Queue.empty()) {
10920       SDNode *LoadRoot = Queue.pop_back_val();
10921       if (!Visited.insert(LoadRoot).second)
10922         continue;
10923 
10924       if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
10925         if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
10926           return true;
10927 
10928       for (SDNode::use_iterator UI = LoadRoot->use_begin(),
10929            UE = LoadRoot->use_end(); UI != UE; ++UI)
10930         if (((isa<MemSDNode>(*UI) &&
10931             cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
10932             UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
10933           Queue.push_back(*UI);
10934     }
10935   }
10936 
10937   return false;
10938 }
10939 
10940 /// This function is called when we have proved that a SETCC node can be replaced
10941 /// by subtraction (and other supporting instructions) so that the result of
10942 /// comparison is kept in a GPR instead of CR. This function is purely for
10943 /// codegen purposes and has some flags to guide the codegen process.
10944 static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement,
10945                                      bool Swap, SDLoc &DL, SelectionDAG &DAG) {
10946   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
10947 
10948   // Zero extend the operands to the largest legal integer. Originally, they
10949   // must be of a strictly smaller size.
10950   auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0),
10951                          DAG.getConstant(Size, DL, MVT::i32));
10952   auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1),
10953                          DAG.getConstant(Size, DL, MVT::i32));
10954 
10955   // Swap if needed. Depends on the condition code.
10956   if (Swap)
10957     std::swap(Op0, Op1);
10958 
10959   // Subtract extended integers.
10960   auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1);
10961 
10962   // Move the sign bit to the least significant position and zero out the rest.
10963   // Now the least significant bit carries the result of original comparison.
10964   auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode,
10965                              DAG.getConstant(Size - 1, DL, MVT::i32));
10966   auto Final = Shifted;
10967 
10968   // Complement the result if needed. Based on the condition code.
10969   if (Complement)
10970     Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted,
10971                         DAG.getConstant(1, DL, MVT::i64));
10972 
10973   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final);
10974 }
10975 
10976 SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
10977                                                   DAGCombinerInfo &DCI) const {
10978   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
10979 
10980   SelectionDAG &DAG = DCI.DAG;
10981   SDLoc DL(N);
10982 
10983   // Size of integers being compared has a critical role in the following
10984   // analysis, so we prefer to do this when all types are legal.
10985   if (!DCI.isAfterLegalizeDAG())
10986     return SDValue();
10987 
10988   // If all users of SETCC extend its value to a legal integer type
10989   // then we replace SETCC with a subtraction
10990   for (SDNode::use_iterator UI = N->use_begin(),
10991        UE = N->use_end(); UI != UE; ++UI) {
10992     if (UI->getOpcode() != ISD::ZERO_EXTEND)
10993       return SDValue();
10994   }
10995 
10996   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
10997   auto OpSize = N->getOperand(0).getValueSizeInBits();
10998 
10999   unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();
11000 
11001   if (OpSize < Size) {
11002     switch (CC) {
11003     default: break;
11004     case ISD::SETULT:
11005       return generateEquivalentSub(N, Size, false, false, DL, DAG);
11006     case ISD::SETULE:
11007       return generateEquivalentSub(N, Size, true, true, DL, DAG);
11008     case ISD::SETUGT:
11009       return generateEquivalentSub(N, Size, false, true, DL, DAG);
11010     case ISD::SETUGE:
11011       return generateEquivalentSub(N, Size, true, false, DL, DAG);
11012     }
11013   }
11014 
11015   return SDValue();
11016 }
11017 
11018 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
11019                                                   DAGCombinerInfo &DCI) const {
11020   SelectionDAG &DAG = DCI.DAG;
11021   SDLoc dl(N);
11022 
11023   assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
11024   // If we're tracking CR bits, we need to be careful that we don't have:
11025   //   trunc(binary-ops(zext(x), zext(y)))
11026   // or
11027   //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
11028   // such that we're unnecessarily moving things into GPRs when it would be
11029   // better to keep them in CR bits.
11030 
11031   // Note that trunc here can be an actual i1 trunc, or can be the effective
11032   // truncation that comes from a setcc or select_cc.
11033   if (N->getOpcode() == ISD::TRUNCATE &&
11034       N->getValueType(0) != MVT::i1)
11035     return SDValue();
11036 
11037   if (N->getOperand(0).getValueType() != MVT::i32 &&
11038       N->getOperand(0).getValueType() != MVT::i64)
11039     return SDValue();
11040 
11041   if (N->getOpcode() == ISD::SETCC ||
11042       N->getOpcode() == ISD::SELECT_CC) {
11043     // If we're looking at a comparison, then we need to make sure that the
11044     // high bits (all except for the first) don't matter the result.
11045     ISD::CondCode CC =
11046       cast<CondCodeSDNode>(N->getOperand(
11047         N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
11048     unsigned OpBits = N->getOperand(0).getValueSizeInBits();
11049 
11050     if (ISD::isSignedIntSetCC(CC)) {
11051       if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
11052           DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
11053         return SDValue();
11054     } else if (ISD::isUnsignedIntSetCC(CC)) {
11055       if (!DAG.MaskedValueIsZero(N->getOperand(0),
11056                                  APInt::getHighBitsSet(OpBits, OpBits-1)) ||
11057           !DAG.MaskedValueIsZero(N->getOperand(1),
11058                                  APInt::getHighBitsSet(OpBits, OpBits-1)))
11059         return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
11060                                              : SDValue());
11061     } else {
11062       // This is neither a signed nor an unsigned comparison, just make sure
11063       // that the high bits are equal.
11064       KnownBits Op1Known, Op2Known;
11065       DAG.computeKnownBits(N->getOperand(0), Op1Known);
11066       DAG.computeKnownBits(N->getOperand(1), Op2Known);
11067 
11068       // We don't really care about what is known about the first bit (if
11069       // anything), so clear it in all masks prior to comparing them.
11070       Op1Known.Zero.clearBit(0); Op1Known.One.clearBit(0);
11071       Op2Known.Zero.clearBit(0); Op2Known.One.clearBit(0);
11072 
11073       if (Op1Known.Zero != Op2Known.Zero || Op1Known.One != Op2Known.One)
11074         return SDValue();
11075     }
11076   }
11077 
11078   // We now know that the higher-order bits are irrelevant, we just need to
11079   // make sure that all of the intermediate operations are bit operations, and
11080   // all inputs are extensions.
11081   if (N->getOperand(0).getOpcode() != ISD::AND &&
11082       N->getOperand(0).getOpcode() != ISD::OR  &&
11083       N->getOperand(0).getOpcode() != ISD::XOR &&
11084       N->getOperand(0).getOpcode() != ISD::SELECT &&
11085       N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
11086       N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
11087       N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
11088       N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
11089       N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
11090     return SDValue();
11091 
11092   if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
11093       N->getOperand(1).getOpcode() != ISD::AND &&
11094       N->getOperand(1).getOpcode() != ISD::OR  &&
11095       N->getOperand(1).getOpcode() != ISD::XOR &&
11096       N->getOperand(1).getOpcode() != ISD::SELECT &&
11097       N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
11098       N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
11099       N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
11100       N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
11101       N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
11102     return SDValue();
11103 
11104   SmallVector<SDValue, 4> Inputs;
11105   SmallVector<SDValue, 8> BinOps, PromOps;
11106   SmallPtrSet<SDNode *, 16> Visited;
11107 
11108   for (unsigned i = 0; i < 2; ++i) {
11109     if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
11110           N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
11111           N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
11112           N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
11113         isa<ConstantSDNode>(N->getOperand(i)))
11114       Inputs.push_back(N->getOperand(i));
11115     else
11116       BinOps.push_back(N->getOperand(i));
11117 
11118     if (N->getOpcode() == ISD::TRUNCATE)
11119       break;
11120   }
11121 
11122   // Visit all inputs, collect all binary operations (and, or, xor and
11123   // select) that are all fed by extensions.
11124   while (!BinOps.empty()) {
11125     SDValue BinOp = BinOps.back();
11126     BinOps.pop_back();
11127 
11128     if (!Visited.insert(BinOp.getNode()).second)
11129       continue;
11130 
11131     PromOps.push_back(BinOp);
11132 
11133     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
11134       // The condition of the select is not promoted.
11135       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
11136         continue;
11137       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
11138         continue;
11139 
11140       if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
11141             BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
11142             BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
11143            BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
11144           isa<ConstantSDNode>(BinOp.getOperand(i))) {
11145         Inputs.push_back(BinOp.getOperand(i));
11146       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
11147                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
11148                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
11149                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
11150                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
11151                  BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
11152                  BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
11153                  BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
11154                  BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
11155         BinOps.push_back(BinOp.getOperand(i));
11156       } else {
11157         // We have an input that is not an extension or another binary
11158         // operation; we'll abort this transformation.
11159         return SDValue();
11160       }
11161     }
11162   }
11163 
11164   // Make sure that this is a self-contained cluster of operations (which
11165   // is not quite the same thing as saying that everything has only one
11166   // use).
11167   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
11168     if (isa<ConstantSDNode>(Inputs[i]))
11169       continue;
11170 
11171     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
11172                               UE = Inputs[i].getNode()->use_end();
11173          UI != UE; ++UI) {
11174       SDNode *User = *UI;
11175       if (User != N && !Visited.count(User))
11176         return SDValue();
11177 
11178       // Make sure that we're not going to promote the non-output-value
11179       // operand(s) or SELECT or SELECT_CC.
11180       // FIXME: Although we could sometimes handle this, and it does occur in
11181       // practice that one of the condition inputs to the select is also one of
11182       // the outputs, we currently can't deal with this.
11183       if (User->getOpcode() == ISD::SELECT) {
11184         if (User->getOperand(0) == Inputs[i])
11185           return SDValue();
11186       } else if (User->getOpcode() == ISD::SELECT_CC) {
11187         if (User->getOperand(0) == Inputs[i] ||
11188             User->getOperand(1) == Inputs[i])
11189           return SDValue();
11190       }
11191     }
11192   }
11193 
11194   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
11195     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
11196                               UE = PromOps[i].getNode()->use_end();
11197          UI != UE; ++UI) {
11198       SDNode *User = *UI;
11199       if (User != N && !Visited.count(User))
11200         return SDValue();
11201 
11202       // Make sure that we're not going to promote the non-output-value
11203       // operand(s) or SELECT or SELECT_CC.
11204       // FIXME: Although we could sometimes handle this, and it does occur in
11205       // practice that one of the condition inputs to the select is also one of
11206       // the outputs, we currently can't deal with this.
11207       if (User->getOpcode() == ISD::SELECT) {
11208         if (User->getOperand(0) == PromOps[i])
11209           return SDValue();
11210       } else if (User->getOpcode() == ISD::SELECT_CC) {
11211         if (User->getOperand(0) == PromOps[i] ||
11212             User->getOperand(1) == PromOps[i])
11213           return SDValue();
11214       }
11215     }
11216   }
11217 
11218   // Replace all inputs with the extension operand.
11219   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
11220     // Constants may have users outside the cluster of to-be-promoted nodes,
11221     // and so we need to replace those as we do the promotions.
11222     if (isa<ConstantSDNode>(Inputs[i]))
11223       continue;
11224     else
11225       DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
11226   }
11227 
11228   std::list<HandleSDNode> PromOpHandles;
11229   for (auto &PromOp : PromOps)
11230     PromOpHandles.emplace_back(PromOp);
11231 
11232   // Replace all operations (these are all the same, but have a different
11233   // (i1) return type). DAG.getNode will validate that the types of
11234   // a binary operator match, so go through the list in reverse so that
11235   // we've likely promoted both operands first. Any intermediate truncations or
11236   // extensions disappear.
11237   while (!PromOpHandles.empty()) {
11238     SDValue PromOp = PromOpHandles.back().getValue();
11239     PromOpHandles.pop_back();
11240 
11241     if (PromOp.getOpcode() == ISD::TRUNCATE ||
11242         PromOp.getOpcode() == ISD::SIGN_EXTEND ||
11243         PromOp.getOpcode() == ISD::ZERO_EXTEND ||
11244         PromOp.getOpcode() == ISD::ANY_EXTEND) {
11245       if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
11246           PromOp.getOperand(0).getValueType() != MVT::i1) {
11247         // The operand is not yet ready (see comment below).
11248         PromOpHandles.emplace_front(PromOp);
11249         continue;
11250       }
11251 
11252       SDValue RepValue = PromOp.getOperand(0);
11253       if (isa<ConstantSDNode>(RepValue))
11254         RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);
11255 
11256       DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
11257       continue;
11258     }
11259 
11260     unsigned C;
11261     switch (PromOp.getOpcode()) {
11262     default:             C = 0; break;
11263     case ISD::SELECT:    C = 1; break;
11264     case ISD::SELECT_CC: C = 2; break;
11265     }
11266 
11267     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
11268          PromOp.getOperand(C).getValueType() != MVT::i1) ||
11269         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
11270          PromOp.getOperand(C+1).getValueType() != MVT::i1)) {
11271       // The to-be-promoted operands of this node have not yet been
11272       // promoted (this should be rare because we're going through the
11273       // list backward, but if one of the operands has several users in
11274       // this cluster of to-be-promoted nodes, it is possible).
11275       PromOpHandles.emplace_front(PromOp);
11276       continue;
11277     }
11278 
11279     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
11280                                 PromOp.getNode()->op_end());
11281 
11282     // If there are any constant inputs, make sure they're replaced now.
11283     for (unsigned i = 0; i < 2; ++i)
11284       if (isa<ConstantSDNode>(Ops[C+i]))
11285         Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]);
11286 
11287     DAG.ReplaceAllUsesOfValueWith(PromOp,
11288       DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
11289   }
11290 
11291   // Now we're left with the initial truncation itself.
11292   if (N->getOpcode() == ISD::TRUNCATE)
11293     return N->getOperand(0);
11294 
11295   // Otherwise, this is a comparison. The operands to be compared have just
11296   // changed type (to i1), but everything else is the same.
11297   return SDValue(N, 0);
11298 }
11299 
11300 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
11301                                                   DAGCombinerInfo &DCI) const {
11302   SelectionDAG &DAG = DCI.DAG;
11303   SDLoc dl(N);
11304 
11305   // If we're tracking CR bits, we need to be careful that we don't have:
11306   //   zext(binary-ops(trunc(x), trunc(y)))
11307   // or
11308   //   zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
11309   // such that we're unnecessarily moving things into CR bits that can more
11310   // efficiently stay in GPRs. Note that if we're not certain that the high
11311   // bits are set as required by the final extension, we still may need to do
11312   // some masking to get the proper behavior.
11313 
11314   // This same functionality is important on PPC64 when dealing with
11315   // 32-to-64-bit extensions; these occur often when 32-bit values are used as
11316   // the return values of functions. Because it is so similar, it is handled
11317   // here as well.
11318 
11319   if (N->getValueType(0) != MVT::i32 &&
11320       N->getValueType(0) != MVT::i64)
11321     return SDValue();
11322 
11323   if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
11324         (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
11325     return SDValue();
11326 
11327   if (N->getOperand(0).getOpcode() != ISD::AND &&
11328       N->getOperand(0).getOpcode() != ISD::OR  &&
11329       N->getOperand(0).getOpcode() != ISD::XOR &&
11330       N->getOperand(0).getOpcode() != ISD::SELECT &&
11331       N->getOperand(0).getOpcode() != ISD::SELECT_CC)
11332     return SDValue();
11333 
11334   SmallVector<SDValue, 4> Inputs;
11335   SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
11336   SmallPtrSet<SDNode *, 16> Visited;
11337 
11338   // Visit all inputs, collect all binary operations (and, or, xor and
11339   // select) that are all fed by truncations.
11340   while (!BinOps.empty()) {
11341     SDValue BinOp = BinOps.back();
11342     BinOps.pop_back();
11343 
11344     if (!Visited.insert(BinOp.getNode()).second)
11345       continue;
11346 
11347     PromOps.push_back(BinOp);
11348 
11349     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
11350       // The condition of the select is not promoted.
11351       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
11352         continue;
11353       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
11354         continue;
11355 
11356       if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
11357           isa<ConstantSDNode>(BinOp.getOperand(i))) {
11358         Inputs.push_back(BinOp.getOperand(i));
11359       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
11360                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
11361                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
11362                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
11363                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
11364         BinOps.push_back(BinOp.getOperand(i));
11365       } else {
11366         // We have an input that is not a truncation or another binary
11367         // operation; we'll abort this transformation.
11368         return SDValue();
11369       }
11370     }
11371   }
11372 
11373   // The operands of a select that must be truncated when the select is
11374   // promoted because the operand is actually part of the to-be-promoted set.
11375   DenseMap<SDNode *, EVT> SelectTruncOp[2];
11376 
11377   // Make sure that this is a self-contained cluster of operations (which
11378   // is not quite the same thing as saying that everything has only one
11379   // use).
11380   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
11381     if (isa<ConstantSDNode>(Inputs[i]))
11382       continue;
11383 
11384     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
11385                               UE = Inputs[i].getNode()->use_end();
11386          UI != UE; ++UI) {
11387       SDNode *User = *UI;
11388       if (User != N && !Visited.count(User))
11389         return SDValue();
11390 
11391       // If we're going to promote the non-output-value operand(s) or SELECT or
11392       // SELECT_CC, record them for truncation.
11393       if (User->getOpcode() == ISD::SELECT) {
11394         if (User->getOperand(0) == Inputs[i])
11395           SelectTruncOp[0].insert(std::make_pair(User,
11396                                     User->getOperand(0).getValueType()));
11397       } else if (User->getOpcode() == ISD::SELECT_CC) {
11398         if (User->getOperand(0) == Inputs[i])
11399           SelectTruncOp[0].insert(std::make_pair(User,
11400                                     User->getOperand(0).getValueType()));
11401         if (User->getOperand(1) == Inputs[i])
11402           SelectTruncOp[1].insert(std::make_pair(User,
11403                                     User->getOperand(1).getValueType()));
11404       }
11405     }
11406   }
11407 
11408   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
11409     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
11410                               UE = PromOps[i].getNode()->use_end();
11411          UI != UE; ++UI) {
11412       SDNode *User = *UI;
11413       if (User != N && !Visited.count(User))
11414         return SDValue();
11415 
11416       // If we're going to promote the non-output-value operand(s) or SELECT or
11417       // SELECT_CC, record them for truncation.
11418       if (User->getOpcode() == ISD::SELECT) {
11419         if (User->getOperand(0) == PromOps[i])
11420           SelectTruncOp[0].insert(std::make_pair(User,
11421                                     User->getOperand(0).getValueType()));
11422       } else if (User->getOpcode() == ISD::SELECT_CC) {
11423         if (User->getOperand(0) == PromOps[i])
11424           SelectTruncOp[0].insert(std::make_pair(User,
11425                                     User->getOperand(0).getValueType()));
11426         if (User->getOperand(1) == PromOps[i])
11427           SelectTruncOp[1].insert(std::make_pair(User,
11428                                     User->getOperand(1).getValueType()));
11429       }
11430     }
11431   }
11432 
11433   unsigned PromBits = N->getOperand(0).getValueSizeInBits();
11434   bool ReallyNeedsExt = false;
11435   if (N->getOpcode() != ISD::ANY_EXTEND) {
11436     // If all of the inputs are not already sign/zero extended, then
11437     // we'll still need to do that at the end.
11438     for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
11439       if (isa<ConstantSDNode>(Inputs[i]))
11440         continue;
11441 
11442       unsigned OpBits =
11443         Inputs[i].getOperand(0).getValueSizeInBits();
11444       assert(PromBits < OpBits && "Truncation not to a smaller bit count?");
11445 
11446       if ((N->getOpcode() == ISD::ZERO_EXTEND &&
11447            !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
11448                                   APInt::getHighBitsSet(OpBits,
11449                                                         OpBits-PromBits))) ||
11450           (N->getOpcode() == ISD::SIGN_EXTEND &&
11451            DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
11452              (OpBits-(PromBits-1)))) {
11453         ReallyNeedsExt = true;
11454         break;
11455       }
11456     }
11457   }
11458 
11459   // Replace all inputs, either with the truncation operand, or a
11460   // truncation or extension to the final output type.
11461   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
11462     // Constant inputs need to be replaced with the to-be-promoted nodes that
11463     // use them because they might have users outside of the cluster of
11464     // promoted nodes.
11465     if (isa<ConstantSDNode>(Inputs[i]))
11466       continue;
11467 
11468     SDValue InSrc = Inputs[i].getOperand(0);
11469     if (Inputs[i].getValueType() == N->getValueType(0))
11470       DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
11471     else if (N->getOpcode() == ISD::SIGN_EXTEND)
11472       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
11473         DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
11474     else if (N->getOpcode() == ISD::ZERO_EXTEND)
11475       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
11476         DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
11477     else
11478       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
11479         DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
11480   }
11481 
11482   std::list<HandleSDNode> PromOpHandles;
11483   for (auto &PromOp : PromOps)
11484     PromOpHandles.emplace_back(PromOp);
11485 
11486   // Replace all operations (these are all the same, but have a different
11487   // (promoted) return type). DAG.getNode will validate that the types of
11488   // a binary operator match, so go through the list in reverse so that
11489   // we've likely promoted both operands first.
11490   while (!PromOpHandles.empty()) {
11491     SDValue PromOp = PromOpHandles.back().getValue();
11492     PromOpHandles.pop_back();
11493 
11494     unsigned C;
11495     switch (PromOp.getOpcode()) {
11496     default:             C = 0; break;
11497     case ISD::SELECT:    C = 1; break;
11498     case ISD::SELECT_CC: C = 2; break;
11499     }
11500 
11501     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
11502          PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
11503         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
11504          PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) {
11505       // The to-be-promoted operands of this node have not yet been
11506       // promoted (this should be rare because we're going through the
11507       // list backward, but if one of the operands has several users in
11508       // this cluster of to-be-promoted nodes, it is possible).
11509       PromOpHandles.emplace_front(PromOp);
11510       continue;
11511     }
11512 
11513     // For SELECT and SELECT_CC nodes, we do a similar check for any
11514     // to-be-promoted comparison inputs.
11515     if (PromOp.getOpcode() == ISD::SELECT ||
11516         PromOp.getOpcode() == ISD::SELECT_CC) {
11517       if ((SelectTruncOp[0].count(PromOp.getNode()) &&
11518            PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
11519           (SelectTruncOp[1].count(PromOp.getNode()) &&
11520            PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
11521         PromOpHandles.emplace_front(PromOp);
11522         continue;
11523       }
11524     }
11525 
11526     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
11527                                 PromOp.getNode()->op_end());
11528 
11529     // If this node has constant inputs, then they'll need to be promoted here.
11530     for (unsigned i = 0; i < 2; ++i) {
11531       if (!isa<ConstantSDNode>(Ops[C+i]))
11532         continue;
11533       if (Ops[C+i].getValueType() == N->getValueType(0))
11534         continue;
11535 
11536       if (N->getOpcode() == ISD::SIGN_EXTEND)
11537         Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
11538       else if (N->getOpcode() == ISD::ZERO_EXTEND)
11539         Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
11540       else
11541         Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
11542     }
11543 
11544     // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
11545     // truncate them again to the original value type.
11546     if (PromOp.getOpcode() == ISD::SELECT ||
11547         PromOp.getOpcode() == ISD::SELECT_CC) {
11548       auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
11549       if (SI0 != SelectTruncOp[0].end())
11550         Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
11551       auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
11552       if (SI1 != SelectTruncOp[1].end())
11553         Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
11554     }
11555 
11556     DAG.ReplaceAllUsesOfValueWith(PromOp,
11557       DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
11558   }
11559 
11560   // Now we're left with the initial extension itself.
11561   if (!ReallyNeedsExt)
11562     return N->getOperand(0);
11563 
11564   // To zero extend, just mask off everything except for the first bit (in the
11565   // i1 case).
11566   if (N->getOpcode() == ISD::ZERO_EXTEND)
11567     return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
11568                        DAG.getConstant(APInt::getLowBitsSet(
11569                                          N->getValueSizeInBits(0), PromBits),
11570                                        dl, N->getValueType(0)));
11571 
11572   assert(N->getOpcode() == ISD::SIGN_EXTEND &&
11573          "Invalid extension type");
11574   EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
11575   SDValue ShiftCst =
11576       DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
11577   return DAG.getNode(
11578       ISD::SRA, dl, N->getValueType(0),
11579       DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
11580       ShiftCst);
11581 }
11582 
11583 /// \brief Reduces the number of fp-to-int conversion when building a vector.
11584 ///
11585 /// If this vector is built out of floating to integer conversions,
11586 /// transform it to a vector built out of floating point values followed by a
11587 /// single floating to integer conversion of the vector.
11588 /// Namely  (build_vector (fptosi $A), (fptosi $B), ...)
11589 /// becomes (fptosi (build_vector ($A, $B, ...)))
11590 SDValue PPCTargetLowering::
11591 combineElementTruncationToVectorTruncation(SDNode *N,
11592                                            DAGCombinerInfo &DCI) const {
11593   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
11594          "Should be called with a BUILD_VECTOR node");
11595 
11596   SelectionDAG &DAG = DCI.DAG;
11597   SDLoc dl(N);
11598 
11599   SDValue FirstInput = N->getOperand(0);
11600   assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
11601          "The input operand must be an fp-to-int conversion.");
11602 
11603   // This combine happens after legalization so the fp_to_[su]i nodes are
11604   // already converted to PPCSISD nodes.
11605   unsigned FirstConversion = FirstInput.getOperand(0).getOpcode();
11606   if (FirstConversion == PPCISD::FCTIDZ ||
11607       FirstConversion == PPCISD::FCTIDUZ ||
11608       FirstConversion == PPCISD::FCTIWZ ||
11609       FirstConversion == PPCISD::FCTIWUZ) {
11610     bool IsSplat = true;
11611     bool Is32Bit = FirstConversion == PPCISD::FCTIWZ ||
11612       FirstConversion == PPCISD::FCTIWUZ;
11613     EVT SrcVT = FirstInput.getOperand(0).getValueType();
11614     SmallVector<SDValue, 4> Ops;
11615     EVT TargetVT = N->getValueType(0);
11616     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
11617       if (N->getOperand(i).getOpcode() != PPCISD::MFVSR)
11618         return SDValue();
11619       unsigned NextConversion = N->getOperand(i).getOperand(0).getOpcode();
11620       if (NextConversion != FirstConversion)
11621         return SDValue();
11622       if (N->getOperand(i) != FirstInput)
11623         IsSplat = false;
11624     }
11625 
11626     // If this is a splat, we leave it as-is since there will be only a single
11627     // fp-to-int conversion followed by a splat of the integer. This is better
11628     // for 32-bit and smaller ints and neutral for 64-bit ints.
11629     if (IsSplat)
11630       return SDValue();
11631 
11632     // Now that we know we have the right type of node, get its operands
11633     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
11634       SDValue In = N->getOperand(i).getOperand(0);
11635       // For 32-bit values, we need to add an FP_ROUND node.
11636       if (Is32Bit) {
11637         if (In.isUndef())
11638           Ops.push_back(DAG.getUNDEF(SrcVT));
11639         else {
11640           SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl,
11641                                       MVT::f32, In.getOperand(0),
11642                                       DAG.getIntPtrConstant(1, dl));
11643           Ops.push_back(Trunc);
11644         }
11645       } else
11646         Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
11647     }
11648 
11649     unsigned Opcode;
11650     if (FirstConversion == PPCISD::FCTIDZ ||
11651         FirstConversion == PPCISD::FCTIWZ)
11652       Opcode = ISD::FP_TO_SINT;
11653     else
11654       Opcode = ISD::FP_TO_UINT;
11655 
11656     EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;
11657     SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
11658     return DAG.getNode(Opcode, dl, TargetVT, BV);
11659   }
11660   return SDValue();
11661 }
11662 
11663 /// \brief Reduce the number of loads when building a vector.
11664 ///
11665 /// Building a vector out of multiple loads can be converted to a load
11666 /// of the vector type if the loads are consecutive. If the loads are
11667 /// consecutive but in descending order, a shuffle is added at the end
11668 /// to reorder the vector.
11669 static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) {
11670   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
11671          "Should be called with a BUILD_VECTOR node");
11672 
11673   SDLoc dl(N);
11674   bool InputsAreConsecutiveLoads = true;
11675   bool InputsAreReverseConsecutive = true;
11676   unsigned ElemSize = N->getValueType(0).getScalarSizeInBits() / 8;
11677   SDValue FirstInput = N->getOperand(0);
11678   bool IsRoundOfExtLoad = false;
11679 
11680   if (FirstInput.getOpcode() == ISD::FP_ROUND &&
11681       FirstInput.getOperand(0).getOpcode() == ISD::LOAD) {
11682     LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0));
11683     IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD;
11684   }
11685   // Not a build vector of (possibly fp_rounded) loads.
11686   if (!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD)
11687     return SDValue();
11688 
11689   for (int i = 1, e = N->getNumOperands(); i < e; ++i) {
11690     // If any inputs are fp_round(extload), they all must be.
11691     if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND)
11692       return SDValue();
11693 
11694     SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) :
11695       N->getOperand(i);
11696     if (NextInput.getOpcode() != ISD::LOAD)
11697       return SDValue();
11698 
11699     SDValue PreviousInput =
11700       IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1);
11701     LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput);
11702     LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput);
11703 
11704     // If any inputs are fp_round(extload), they all must be.
11705     if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD)
11706       return SDValue();
11707 
11708     if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG))
11709       InputsAreConsecutiveLoads = false;
11710     if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG))
11711       InputsAreReverseConsecutive = false;
11712 
11713     // Exit early if the loads are neither consecutive nor reverse consecutive.
11714     if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
11715       return SDValue();
11716   }
11717 
11718   assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
11719          "The loads cannot be both consecutive and reverse consecutive.");
11720 
11721   SDValue FirstLoadOp =
11722     IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput;
11723   SDValue LastLoadOp =
11724     IsRoundOfExtLoad ? N->getOperand(N->getNumOperands()-1).getOperand(0) :
11725                        N->getOperand(N->getNumOperands()-1);
11726 
11727   LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
11728   LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
11729   if (InputsAreConsecutiveLoads) {
11730     assert(LD1 && "Input needs to be a LoadSDNode.");
11731     return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
11732                        LD1->getBasePtr(), LD1->getPointerInfo(),
11733                        LD1->getAlignment());
11734   }
11735   if (InputsAreReverseConsecutive) {
11736     assert(LDL && "Input needs to be a LoadSDNode.");
11737     SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
11738                                LDL->getBasePtr(), LDL->getPointerInfo(),
11739                                LDL->getAlignment());
11740     SmallVector<int, 16> Ops;
11741     for (int i = N->getNumOperands() - 1; i >= 0; i--)
11742       Ops.push_back(i);
11743 
11744     return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
11745                                 DAG.getUNDEF(N->getValueType(0)), Ops);
11746   }
11747   return SDValue();
11748 }
11749 
11750 // This function adds the required vector_shuffle needed to get
11751 // the elements of the vector extract in the correct position
11752 // as specified by the CorrectElems encoding.
11753 static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
11754                                       SDValue Input, uint64_t Elems,
11755                                       uint64_t CorrectElems) {
11756   SDLoc dl(N);
11757 
11758   unsigned NumElems = Input.getValueType().getVectorNumElements();
11759   SmallVector<int, 16> ShuffleMask(NumElems, -1);
11760 
11761   // Knowing the element indices being extracted from the original
11762   // vector and the order in which they're being inserted, just put
11763   // them at element indices required for the instruction.
11764   for (unsigned i = 0; i < N->getNumOperands(); i++) {
11765     if (DAG.getDataLayout().isLittleEndian())
11766       ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
11767     else
11768       ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
11769     CorrectElems = CorrectElems >> 8;
11770     Elems = Elems >> 8;
11771   }
11772 
11773   SDValue Shuffle =
11774       DAG.getVectorShuffle(Input.getValueType(), dl, Input,
11775                            DAG.getUNDEF(Input.getValueType()), ShuffleMask);
11776 
11777   EVT Ty = N->getValueType(0);
11778   SDValue BV = DAG.getNode(PPCISD::SExtVElems, dl, Ty, Shuffle);
11779   return BV;
11780 }
11781 
11782 // Look for build vector patterns where input operands come from sign
11783 // extended vector_extract elements of specific indices. If the correct indices
11784 // aren't used, add a vector shuffle to fix up the indices and create a new
11785 // PPCISD:SExtVElems node which selects the vector sign extend instructions
11786 // during instruction selection.
11787 static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
11788   // This array encodes the indices that the vector sign extend instructions
11789   // extract from when extending from one type to another for both BE and LE.
11790   // The right nibble of each byte corresponds to the LE incides.
11791   // and the left nibble of each byte corresponds to the BE incides.
11792   // For example: 0x3074B8FC  byte->word
11793   // For LE: the allowed indices are: 0x0,0x4,0x8,0xC
11794   // For BE: the allowed indices are: 0x3,0x7,0xB,0xF
11795   // For example: 0x000070F8  byte->double word
11796   // For LE: the allowed indices are: 0x0,0x8
11797   // For BE: the allowed indices are: 0x7,0xF
11798   uint64_t TargetElems[] = {
11799       0x3074B8FC, // b->w
11800       0x000070F8, // b->d
11801       0x10325476, // h->w
11802       0x00003074, // h->d
11803       0x00001032, // w->d
11804   };
11805 
11806   uint64_t Elems = 0;
11807   int Index;
11808   SDValue Input;
11809 
11810   auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
11811     if (!Op)
11812       return false;
11813     if (Op.getOpcode() != ISD::SIGN_EXTEND)
11814       return false;
11815 
11816     SDValue Extract = Op.getOperand(0);
11817     if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
11818       return false;
11819 
11820     ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
11821     if (!ExtOp)
11822       return false;
11823 
11824     Index = ExtOp->getZExtValue();
11825     if (Input && Input != Extract.getOperand(0))
11826       return false;
11827 
11828     if (!Input)
11829       Input = Extract.getOperand(0);
11830 
11831     Elems = Elems << 8;
11832     Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
11833     Elems |= Index;
11834 
11835     return true;
11836   };
11837 
11838   // If the build vector operands aren't sign extended vector extracts,
11839   // of the same input vector, then return.
11840   for (unsigned i = 0; i < N->getNumOperands(); i++) {
11841     if (!isSExtOfVecExtract(N->getOperand(i))) {
11842       return SDValue();
11843     }
11844   }
11845 
11846   // If the vector extract indicies are not correct, add the appropriate
11847   // vector_shuffle.
11848   int TgtElemArrayIdx;
11849   int InputSize = Input.getValueType().getScalarSizeInBits();
11850   int OutputSize = N->getValueType(0).getScalarSizeInBits();
11851   if (InputSize + OutputSize == 40)
11852     TgtElemArrayIdx = 0;
11853   else if (InputSize + OutputSize == 72)
11854     TgtElemArrayIdx = 1;
11855   else if (InputSize + OutputSize == 48)
11856     TgtElemArrayIdx = 2;
11857   else if (InputSize + OutputSize == 80)
11858     TgtElemArrayIdx = 3;
11859   else if (InputSize + OutputSize == 96)
11860     TgtElemArrayIdx = 4;
11861   else
11862     return SDValue();
11863 
11864   uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
11865   CorrectElems = DAG.getDataLayout().isLittleEndian()
11866                      ? CorrectElems & 0x0F0F0F0F0F0F0F0F
11867                      : CorrectElems & 0xF0F0F0F0F0F0F0F0;
11868   if (Elems != CorrectElems) {
11869     return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
11870   }
11871 
11872   // Regular lowering will catch cases where a shuffle is not needed.
11873   return SDValue();
11874 }
11875 
11876 SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
11877                                                  DAGCombinerInfo &DCI) const {
11878   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
11879          "Should be called with a BUILD_VECTOR node");
11880 
11881   SelectionDAG &DAG = DCI.DAG;
11882   SDLoc dl(N);
11883 
11884   if (!Subtarget.hasVSX())
11885     return SDValue();
11886 
11887   // The target independent DAG combiner will leave a build_vector of
11888   // float-to-int conversions intact. We can generate MUCH better code for
11889   // a float-to-int conversion of a vector of floats.
11890   SDValue FirstInput = N->getOperand(0);
11891   if (FirstInput.getOpcode() == PPCISD::MFVSR) {
11892     SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
11893     if (Reduced)
11894       return Reduced;
11895   }
11896 
11897   // If we're building a vector out of consecutive loads, just load that
11898   // vector type.
11899   SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG);
11900   if (Reduced)
11901     return Reduced;
11902 
11903   // If we're building a vector out of extended elements from another vector
11904   // we have P9 vector integer extend instructions.
11905   if (Subtarget.hasP9Altivec()) {
11906     Reduced = combineBVOfVecSExt(N, DAG);
11907     if (Reduced)
11908       return Reduced;
11909   }
11910 
11911 
11912   if (N->getValueType(0) != MVT::v2f64)
11913     return SDValue();
11914 
11915   // Looking for:
11916   // (build_vector ([su]int_to_fp (extractelt 0)), [su]int_to_fp (extractelt 1))
11917   if (FirstInput.getOpcode() != ISD::SINT_TO_FP &&
11918       FirstInput.getOpcode() != ISD::UINT_TO_FP)
11919     return SDValue();
11920   if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
11921       N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
11922     return SDValue();
11923   if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
11924     return SDValue();
11925 
11926   SDValue Ext1 = FirstInput.getOperand(0);
11927   SDValue Ext2 = N->getOperand(1).getOperand(0);
11928   if(Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
11929      Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
11930     return SDValue();
11931 
11932   ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
11933   ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
11934   if (!Ext1Op || !Ext2Op)
11935     return SDValue();
11936   if (Ext1.getValueType() != MVT::i32 ||
11937       Ext2.getValueType() != MVT::i32)
11938   if (Ext1.getOperand(0) != Ext2.getOperand(0))
11939     return SDValue();
11940 
11941   int FirstElem = Ext1Op->getZExtValue();
11942   int SecondElem = Ext2Op->getZExtValue();
11943   int SubvecIdx;
11944   if (FirstElem == 0 && SecondElem == 1)
11945     SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
11946   else if (FirstElem == 2 && SecondElem == 3)
11947     SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
11948   else
11949     return SDValue();
11950 
11951   SDValue SrcVec = Ext1.getOperand(0);
11952   auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
11953     PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
11954   return DAG.getNode(NodeType, dl, MVT::v2f64,
11955                      SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
11956 }
11957 
11958 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
11959                                               DAGCombinerInfo &DCI) const {
11960   assert((N->getOpcode() == ISD::SINT_TO_FP ||
11961           N->getOpcode() == ISD::UINT_TO_FP) &&
11962          "Need an int -> FP conversion node here");
11963 
11964   if (useSoftFloat() || !Subtarget.has64BitSupport())
11965     return SDValue();
11966 
11967   SelectionDAG &DAG = DCI.DAG;
11968   SDLoc dl(N);
11969   SDValue Op(N, 0);
11970 
11971   // Don't handle ppc_fp128 here or conversions that are out-of-range capable
11972   // from the hardware.
11973   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
11974     return SDValue();
11975   if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
11976       Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
11977     return SDValue();
11978 
11979   SDValue FirstOperand(Op.getOperand(0));
11980   bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
11981     (FirstOperand.getValueType() == MVT::i8 ||
11982      FirstOperand.getValueType() == MVT::i16);
11983   if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
11984     bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
11985     bool DstDouble = Op.getValueType() == MVT::f64;
11986     unsigned ConvOp = Signed ?
11987       (DstDouble ? PPCISD::FCFID  : PPCISD::FCFIDS) :
11988       (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
11989     SDValue WidthConst =
11990       DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
11991                             dl, false);
11992     LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
11993     SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
11994     SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
11995                                          DAG.getVTList(MVT::f64, MVT::Other),
11996                                          Ops, MVT::i8, LDN->getMemOperand());
11997 
11998     // For signed conversion, we need to sign-extend the value in the VSR
11999     if (Signed) {
12000       SDValue ExtOps[] = { Ld, WidthConst };
12001       SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
12002       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
12003     } else
12004       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
12005   }
12006 
12007 
12008   // For i32 intermediate values, unfortunately, the conversion functions
12009   // leave the upper 32 bits of the value are undefined. Within the set of
12010   // scalar instructions, we have no method for zero- or sign-extending the
12011   // value. Thus, we cannot handle i32 intermediate values here.
12012   if (Op.getOperand(0).getValueType() == MVT::i32)
12013     return SDValue();
12014 
12015   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
12016          "UINT_TO_FP is supported only with FPCVT");
12017 
12018   // If we have FCFIDS, then use it when converting to single-precision.
12019   // Otherwise, convert to double-precision and then round.
12020   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
12021                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
12022                                                             : PPCISD::FCFIDS)
12023                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
12024                                                             : PPCISD::FCFID);
12025   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
12026                   ? MVT::f32
12027                   : MVT::f64;
12028 
12029   // If we're converting from a float, to an int, and back to a float again,
12030   // then we don't need the store/load pair at all.
12031   if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
12032        Subtarget.hasFPCVT()) ||
12033       (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
12034     SDValue Src = Op.getOperand(0).getOperand(0);
12035     if (Src.getValueType() == MVT::f32) {
12036       Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
12037       DCI.AddToWorklist(Src.getNode());
12038     } else if (Src.getValueType() != MVT::f64) {
12039       // Make sure that we don't pick up a ppc_fp128 source value.
12040       return SDValue();
12041     }
12042 
12043     unsigned FCTOp =
12044       Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
12045                                                         PPCISD::FCTIDUZ;
12046 
12047     SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
12048     SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);
12049 
12050     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
12051       FP = DAG.getNode(ISD::FP_ROUND, dl,
12052                        MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
12053       DCI.AddToWorklist(FP.getNode());
12054     }
12055 
12056     return FP;
12057   }
12058 
12059   return SDValue();
12060 }
12061 
12062 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
12063 // builtins) into loads with swaps.
12064 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
12065                                               DAGCombinerInfo &DCI) const {
12066   SelectionDAG &DAG = DCI.DAG;
12067   SDLoc dl(N);
12068   SDValue Chain;
12069   SDValue Base;
12070   MachineMemOperand *MMO;
12071 
12072   switch (N->getOpcode()) {
12073   default:
12074     llvm_unreachable("Unexpected opcode for little endian VSX load");
12075   case ISD::LOAD: {
12076     LoadSDNode *LD = cast<LoadSDNode>(N);
12077     Chain = LD->getChain();
12078     Base = LD->getBasePtr();
12079     MMO = LD->getMemOperand();
12080     // If the MMO suggests this isn't a load of a full vector, leave
12081     // things alone.  For a built-in, we have to make the change for
12082     // correctness, so if there is a size problem that will be a bug.
12083     if (MMO->getSize() < 16)
12084       return SDValue();
12085     break;
12086   }
12087   case ISD::INTRINSIC_W_CHAIN: {
12088     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
12089     Chain = Intrin->getChain();
12090     // Similarly to the store case below, Intrin->getBasePtr() doesn't get
12091     // us what we want. Get operand 2 instead.
12092     Base = Intrin->getOperand(2);
12093     MMO = Intrin->getMemOperand();
12094     break;
12095   }
12096   }
12097 
12098   MVT VecTy = N->getValueType(0).getSimpleVT();
12099 
12100   // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is
12101   // aligned and the type is a vector with elements up to 4 bytes
12102   if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment()%16)
12103       && VecTy.getScalarSizeInBits() <= 32 ) {
12104     return SDValue();
12105   }
12106 
12107   SDValue LoadOps[] = { Chain, Base };
12108   SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
12109                                          DAG.getVTList(MVT::v2f64, MVT::Other),
12110                                          LoadOps, MVT::v2f64, MMO);
12111 
12112   DCI.AddToWorklist(Load.getNode());
12113   Chain = Load.getValue(1);
12114   SDValue Swap = DAG.getNode(
12115       PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load);
12116   DCI.AddToWorklist(Swap.getNode());
12117 
12118   // Add a bitcast if the resulting load type doesn't match v2f64.
12119   if (VecTy != MVT::v2f64) {
12120     SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap);
12121     DCI.AddToWorklist(N.getNode());
12122     // Package {bitcast value, swap's chain} to match Load's shape.
12123     return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other),
12124                        N, Swap.getValue(1));
12125   }
12126 
12127   return Swap;
12128 }
12129 
12130 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
12131 // builtins) into stores with swaps.
12132 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
12133                                                DAGCombinerInfo &DCI) const {
12134   SelectionDAG &DAG = DCI.DAG;
12135   SDLoc dl(N);
12136   SDValue Chain;
12137   SDValue Base;
12138   unsigned SrcOpnd;
12139   MachineMemOperand *MMO;
12140 
12141   switch (N->getOpcode()) {
12142   default:
12143     llvm_unreachable("Unexpected opcode for little endian VSX store");
12144   case ISD::STORE: {
12145     StoreSDNode *ST = cast<StoreSDNode>(N);
12146     Chain = ST->getChain();
12147     Base = ST->getBasePtr();
12148     MMO = ST->getMemOperand();
12149     SrcOpnd = 1;
12150     // If the MMO suggests this isn't a store of a full vector, leave
12151     // things alone.  For a built-in, we have to make the change for
12152     // correctness, so if there is a size problem that will be a bug.
12153     if (MMO->getSize() < 16)
12154       return SDValue();
12155     break;
12156   }
12157   case ISD::INTRINSIC_VOID: {
12158     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
12159     Chain = Intrin->getChain();
12160     // Intrin->getBasePtr() oddly does not get what we want.
12161     Base = Intrin->getOperand(3);
12162     MMO = Intrin->getMemOperand();
12163     SrcOpnd = 2;
12164     break;
12165   }
12166   }
12167 
12168   SDValue Src = N->getOperand(SrcOpnd);
12169   MVT VecTy = Src.getValueType().getSimpleVT();
12170 
12171   // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the load is
12172   // aligned and the type is a vector with elements up to 4 bytes
12173   if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment()%16)
12174       && VecTy.getScalarSizeInBits() <= 32 ) {
12175     return SDValue();
12176   }
12177 
12178   // All stores are done as v2f64 and possible bit cast.
12179   if (VecTy != MVT::v2f64) {
12180     Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
12181     DCI.AddToWorklist(Src.getNode());
12182   }
12183 
12184   SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
12185                              DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
12186   DCI.AddToWorklist(Swap.getNode());
12187   Chain = Swap.getValue(1);
12188   SDValue StoreOps[] = { Chain, Swap, Base };
12189   SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
12190                                           DAG.getVTList(MVT::Other),
12191                                           StoreOps, VecTy, MMO);
12192   DCI.AddToWorklist(Store.getNode());
12193   return Store;
12194 }
12195 
12196 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
12197                                              DAGCombinerInfo &DCI) const {
12198   SelectionDAG &DAG = DCI.DAG;
12199   SDLoc dl(N);
12200   switch (N->getOpcode()) {
12201   default: break;
12202   case ISD::SHL:
12203     return combineSHL(N, DCI);
12204   case ISD::SRA:
12205     return combineSRA(N, DCI);
12206   case ISD::SRL:
12207     return combineSRL(N, DCI);
12208   case PPCISD::SHL:
12209     if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
12210         return N->getOperand(0);
12211     break;
12212   case PPCISD::SRL:
12213     if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
12214         return N->getOperand(0);
12215     break;
12216   case PPCISD::SRA:
12217     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
12218       if (C->isNullValue() ||   //  0 >>s V -> 0.
12219           C->isAllOnesValue())    // -1 >>s V -> -1.
12220         return N->getOperand(0);
12221     }
12222     break;
12223   case ISD::SIGN_EXTEND:
12224   case ISD::ZERO_EXTEND:
12225   case ISD::ANY_EXTEND:
12226     return DAGCombineExtBoolTrunc(N, DCI);
12227   case ISD::TRUNCATE:
12228   case ISD::SETCC:
12229   case ISD::SELECT_CC:
12230     return DAGCombineTruncBoolExt(N, DCI);
12231   case ISD::SINT_TO_FP:
12232   case ISD::UINT_TO_FP:
12233     return combineFPToIntToFP(N, DCI);
12234   case ISD::STORE: {
12235     EVT Op1VT = N->getOperand(1).getValueType();
12236     bool ValidTypeForStoreFltAsInt = (Op1VT == MVT::i32) ||
12237       (Subtarget.hasP9Vector() && (Op1VT == MVT::i8 || Op1VT == MVT::i16));
12238 
12239     // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)).
12240     if (Subtarget.hasSTFIWX() && !cast<StoreSDNode>(N)->isTruncatingStore() &&
12241         N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
12242         ValidTypeForStoreFltAsInt &&
12243         N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) {
12244       SDValue Val = N->getOperand(1).getOperand(0);
12245       if (Val.getValueType() == MVT::f32) {
12246         Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
12247         DCI.AddToWorklist(Val.getNode());
12248       }
12249       Val = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Val);
12250       DCI.AddToWorklist(Val.getNode());
12251 
12252       if (Op1VT == MVT::i32) {
12253         SDValue Ops[] = {
12254           N->getOperand(0), Val, N->getOperand(2),
12255           DAG.getValueType(N->getOperand(1).getValueType())
12256         };
12257 
12258         Val = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
12259                 DAG.getVTList(MVT::Other), Ops,
12260                 cast<StoreSDNode>(N)->getMemoryVT(),
12261                 cast<StoreSDNode>(N)->getMemOperand());
12262       } else {
12263         unsigned WidthInBytes =
12264           N->getOperand(1).getValueType() == MVT::i8 ? 1 : 2;
12265         SDValue WidthConst = DAG.getIntPtrConstant(WidthInBytes, dl, false);
12266 
12267         SDValue Ops[] = {
12268           N->getOperand(0), Val, N->getOperand(2), WidthConst,
12269           DAG.getValueType(N->getOperand(1).getValueType())
12270         };
12271         Val = DAG.getMemIntrinsicNode(PPCISD::STXSIX, dl,
12272                                       DAG.getVTList(MVT::Other), Ops,
12273                                       cast<StoreSDNode>(N)->getMemoryVT(),
12274                                       cast<StoreSDNode>(N)->getMemOperand());
12275       }
12276 
12277       DCI.AddToWorklist(Val.getNode());
12278       return Val;
12279     }
12280 
12281     // Turn STORE (BSWAP) -> sthbrx/stwbrx.
12282     if (cast<StoreSDNode>(N)->isUnindexed() &&
12283         N->getOperand(1).getOpcode() == ISD::BSWAP &&
12284         N->getOperand(1).getNode()->hasOneUse() &&
12285         (N->getOperand(1).getValueType() == MVT::i32 ||
12286          N->getOperand(1).getValueType() == MVT::i16 ||
12287          (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
12288           N->getOperand(1).getValueType() == MVT::i64))) {
12289       SDValue BSwapOp = N->getOperand(1).getOperand(0);
12290       // Do an any-extend to 32-bits if this is a half-word input.
12291       if (BSwapOp.getValueType() == MVT::i16)
12292         BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
12293 
12294       // If the type of BSWAP operand is wider than stored memory width
12295       // it need to be shifted to the right side before STBRX.
12296       EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
12297       if (Op1VT.bitsGT(mVT)) {
12298         int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
12299         BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
12300                               DAG.getConstant(Shift, dl, MVT::i32));
12301         // Need to truncate if this is a bswap of i64 stored as i32/i16.
12302         if (Op1VT == MVT::i64)
12303           BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
12304       }
12305 
12306       SDValue Ops[] = {
12307         N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
12308       };
12309       return
12310         DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
12311                                 Ops, cast<StoreSDNode>(N)->getMemoryVT(),
12312                                 cast<StoreSDNode>(N)->getMemOperand());
12313     }
12314 
12315     // STORE Constant:i32<0>  ->  STORE<trunc to i32> Constant:i64<0>
12316     // So it can increase the chance of CSE constant construction.
12317     EVT VT = N->getOperand(1).getValueType();
12318     if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
12319         isa<ConstantSDNode>(N->getOperand(1)) && VT == MVT::i32) {
12320       // Need to sign-extended to 64-bits to handle negative values.
12321       EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
12322       uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
12323                                     MemVT.getSizeInBits());
12324       SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);
12325 
12326       // DAG.getTruncStore() can't be used here because it doesn't accept
12327       // the general (base + offset) addressing mode.
12328       // So we use UpdateNodeOperands and setTruncatingStore instead.
12329       DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
12330                              N->getOperand(3));
12331       cast<StoreSDNode>(N)->setTruncatingStore(true);
12332       return SDValue(N, 0);
12333     }
12334 
12335     // For little endian, VSX stores require generating xxswapd/lxvd2x.
12336     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
12337     if (VT.isSimple()) {
12338       MVT StoreVT = VT.getSimpleVT();
12339       if (Subtarget.needsSwapsForVSXMemOps() &&
12340           (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
12341            StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
12342         return expandVSXStoreForLE(N, DCI);
12343     }
12344     break;
12345   }
12346   case ISD::LOAD: {
12347     LoadSDNode *LD = cast<LoadSDNode>(N);
12348     EVT VT = LD->getValueType(0);
12349 
12350     // For little endian, VSX loads require generating lxvd2x/xxswapd.
12351     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
12352     if (VT.isSimple()) {
12353       MVT LoadVT = VT.getSimpleVT();
12354       if (Subtarget.needsSwapsForVSXMemOps() &&
12355           (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
12356            LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
12357         return expandVSXLoadForLE(N, DCI);
12358     }
12359 
12360     // We sometimes end up with a 64-bit integer load, from which we extract
12361     // two single-precision floating-point numbers. This happens with
12362     // std::complex<float>, and other similar structures, because of the way we
12363     // canonicalize structure copies. However, if we lack direct moves,
12364     // then the final bitcasts from the extracted integer values to the
12365     // floating-point numbers turn into store/load pairs. Even with direct moves,
12366     // just loading the two floating-point numbers is likely better.
12367     auto ReplaceTwoFloatLoad = [&]() {
12368       if (VT != MVT::i64)
12369         return false;
12370 
12371       if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
12372           LD->isVolatile())
12373         return false;
12374 
12375       //  We're looking for a sequence like this:
12376       //  t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64
12377       //      t16: i64 = srl t13, Constant:i32<32>
12378       //    t17: i32 = truncate t16
12379       //  t18: f32 = bitcast t17
12380       //    t19: i32 = truncate t13
12381       //  t20: f32 = bitcast t19
12382 
12383       if (!LD->hasNUsesOfValue(2, 0))
12384         return false;
12385 
12386       auto UI = LD->use_begin();
12387       while (UI.getUse().getResNo() != 0) ++UI;
12388       SDNode *Trunc = *UI++;
12389       while (UI.getUse().getResNo() != 0) ++UI;
12390       SDNode *RightShift = *UI;
12391       if (Trunc->getOpcode() != ISD::TRUNCATE)
12392         std::swap(Trunc, RightShift);
12393 
12394       if (Trunc->getOpcode() != ISD::TRUNCATE ||
12395           Trunc->getValueType(0) != MVT::i32 ||
12396           !Trunc->hasOneUse())
12397         return false;
12398       if (RightShift->getOpcode() != ISD::SRL ||
12399           !isa<ConstantSDNode>(RightShift->getOperand(1)) ||
12400           RightShift->getConstantOperandVal(1) != 32 ||
12401           !RightShift->hasOneUse())
12402         return false;
12403 
12404       SDNode *Trunc2 = *RightShift->use_begin();
12405       if (Trunc2->getOpcode() != ISD::TRUNCATE ||
12406           Trunc2->getValueType(0) != MVT::i32 ||
12407           !Trunc2->hasOneUse())
12408         return false;
12409 
12410       SDNode *Bitcast = *Trunc->use_begin();
12411       SDNode *Bitcast2 = *Trunc2->use_begin();
12412 
12413       if (Bitcast->getOpcode() != ISD::BITCAST ||
12414           Bitcast->getValueType(0) != MVT::f32)
12415         return false;
12416       if (Bitcast2->getOpcode() != ISD::BITCAST ||
12417           Bitcast2->getValueType(0) != MVT::f32)
12418         return false;
12419 
12420       if (Subtarget.isLittleEndian())
12421         std::swap(Bitcast, Bitcast2);
12422 
12423       // Bitcast has the second float (in memory-layout order) and Bitcast2
12424       // has the first one.
12425 
12426       SDValue BasePtr = LD->getBasePtr();
12427       if (LD->isIndexed()) {
12428         assert(LD->getAddressingMode() == ISD::PRE_INC &&
12429                "Non-pre-inc AM on PPC?");
12430         BasePtr =
12431           DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
12432                       LD->getOffset());
12433       }
12434 
12435       auto MMOFlags =
12436           LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
12437       SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
12438                                       LD->getPointerInfo(), LD->getAlignment(),
12439                                       MMOFlags, LD->getAAInfo());
12440       SDValue AddPtr =
12441         DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
12442                     BasePtr, DAG.getIntPtrConstant(4, dl));
12443       SDValue FloatLoad2 = DAG.getLoad(
12444           MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
12445           LD->getPointerInfo().getWithOffset(4),
12446           MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo());
12447 
12448       if (LD->isIndexed()) {
12449         // Note that DAGCombine should re-form any pre-increment load(s) from
12450         // what is produced here if that makes sense.
12451         DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr);
12452       }
12453 
12454       DCI.CombineTo(Bitcast2, FloatLoad);
12455       DCI.CombineTo(Bitcast, FloatLoad2);
12456 
12457       DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 2 : 1),
12458                                     SDValue(FloatLoad2.getNode(), 1));
12459       return true;
12460     };
12461 
12462     if (ReplaceTwoFloatLoad())
12463       return SDValue(N, 0);
12464 
12465     EVT MemVT = LD->getMemoryVT();
12466     Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
12467     unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
12468     Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext());
12469     unsigned ScalarABIAlignment = DAG.getDataLayout().getABITypeAlignment(STy);
12470     if (LD->isUnindexed() && VT.isVector() &&
12471         ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
12472           // P8 and later hardware should just use LOAD.
12473           !Subtarget.hasP8Vector() && (VT == MVT::v16i8 || VT == MVT::v8i16 ||
12474                                        VT == MVT::v4i32 || VT == MVT::v4f32)) ||
12475          (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) &&
12476           LD->getAlignment() >= ScalarABIAlignment)) &&
12477         LD->getAlignment() < ABIAlignment) {
12478       // This is a type-legal unaligned Altivec or QPX load.
12479       SDValue Chain = LD->getChain();
12480       SDValue Ptr = LD->getBasePtr();
12481       bool isLittleEndian = Subtarget.isLittleEndian();
12482 
12483       // This implements the loading of unaligned vectors as described in
12484       // the venerable Apple Velocity Engine overview. Specifically:
12485       // https://developer.apple.com/hardwaredrivers/ve/alignment.html
12486       // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
12487       //
12488       // The general idea is to expand a sequence of one or more unaligned
12489       // loads into an alignment-based permutation-control instruction (lvsl
12490       // or lvsr), a series of regular vector loads (which always truncate
12491       // their input address to an aligned address), and a series of
12492       // permutations.  The results of these permutations are the requested
12493       // loaded values.  The trick is that the last "extra" load is not taken
12494       // from the address you might suspect (sizeof(vector) bytes after the
12495       // last requested load), but rather sizeof(vector) - 1 bytes after the
12496       // last requested vector. The point of this is to avoid a page fault if
12497       // the base address happened to be aligned. This works because if the
12498       // base address is aligned, then adding less than a full vector length
12499       // will cause the last vector in the sequence to be (re)loaded.
12500       // Otherwise, the next vector will be fetched as you might suspect was
12501       // necessary.
12502 
12503       // We might be able to reuse the permutation generation from
12504       // a different base address offset from this one by an aligned amount.
12505       // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
12506       // optimization later.
12507       Intrinsic::ID Intr, IntrLD, IntrPerm;
12508       MVT PermCntlTy, PermTy, LDTy;
12509       if (Subtarget.hasAltivec()) {
12510         Intr = isLittleEndian ?  Intrinsic::ppc_altivec_lvsr :
12511                                  Intrinsic::ppc_altivec_lvsl;
12512         IntrLD = Intrinsic::ppc_altivec_lvx;
12513         IntrPerm = Intrinsic::ppc_altivec_vperm;
12514         PermCntlTy = MVT::v16i8;
12515         PermTy = MVT::v4i32;
12516         LDTy = MVT::v4i32;
12517       } else {
12518         Intr =   MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld :
12519                                        Intrinsic::ppc_qpx_qvlpcls;
12520         IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd :
12521                                        Intrinsic::ppc_qpx_qvlfs;
12522         IntrPerm = Intrinsic::ppc_qpx_qvfperm;
12523         PermCntlTy = MVT::v4f64;
12524         PermTy = MVT::v4f64;
12525         LDTy = MemVT.getSimpleVT();
12526       }
12527 
12528       SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);
12529 
12530       // Create the new MMO for the new base load. It is like the original MMO,
12531       // but represents an area in memory almost twice the vector size centered
12532       // on the original address. If the address is unaligned, we might start
12533       // reading up to (sizeof(vector)-1) bytes below the address of the
12534       // original unaligned load.
12535       MachineFunction &MF = DAG.getMachineFunction();
12536       MachineMemOperand *BaseMMO =
12537         MF.getMachineMemOperand(LD->getMemOperand(),
12538                                 -(long)MemVT.getStoreSize()+1,
12539                                 2*MemVT.getStoreSize()-1);
12540 
12541       // Create the new base load.
12542       SDValue LDXIntID =
12543           DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout()));
12544       SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
12545       SDValue BaseLoad =
12546         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
12547                                 DAG.getVTList(PermTy, MVT::Other),
12548                                 BaseLoadOps, LDTy, BaseMMO);
12549 
12550       // Note that the value of IncOffset (which is provided to the next
12551       // load's pointer info offset value, and thus used to calculate the
12552       // alignment), and the value of IncValue (which is actually used to
12553       // increment the pointer value) are different! This is because we
12554       // require the next load to appear to be aligned, even though it
12555       // is actually offset from the base pointer by a lesser amount.
12556       int IncOffset = VT.getSizeInBits() / 8;
12557       int IncValue = IncOffset;
12558 
12559       // Walk (both up and down) the chain looking for another load at the real
12560       // (aligned) offset (the alignment of the other load does not matter in
12561       // this case). If found, then do not use the offset reduction trick, as
12562       // that will prevent the loads from being later combined (as they would
12563       // otherwise be duplicates).
12564       if (!findConsecutiveLoad(LD, DAG))
12565         --IncValue;
12566 
12567       SDValue Increment =
12568           DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout()));
12569       Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
12570 
12571       MachineMemOperand *ExtraMMO =
12572         MF.getMachineMemOperand(LD->getMemOperand(),
12573                                 1, 2*MemVT.getStoreSize()-1);
12574       SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
12575       SDValue ExtraLoad =
12576         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
12577                                 DAG.getVTList(PermTy, MVT::Other),
12578                                 ExtraLoadOps, LDTy, ExtraMMO);
12579 
12580       SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
12581         BaseLoad.getValue(1), ExtraLoad.getValue(1));
12582 
12583       // Because vperm has a big-endian bias, we must reverse the order
12584       // of the input vectors and complement the permute control vector
12585       // when generating little endian code.  We have already handled the
12586       // latter by using lvsr instead of lvsl, so just reverse BaseLoad
12587       // and ExtraLoad here.
12588       SDValue Perm;
12589       if (isLittleEndian)
12590         Perm = BuildIntrinsicOp(IntrPerm,
12591                                 ExtraLoad, BaseLoad, PermCntl, DAG, dl);
12592       else
12593         Perm = BuildIntrinsicOp(IntrPerm,
12594                                 BaseLoad, ExtraLoad, PermCntl, DAG, dl);
12595 
12596       if (VT != PermTy)
12597         Perm = Subtarget.hasAltivec() ?
12598                  DAG.getNode(ISD::BITCAST, dl, VT, Perm) :
12599                  DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX
12600                                DAG.getTargetConstant(1, dl, MVT::i64));
12601                                // second argument is 1 because this rounding
12602                                // is always exact.
12603 
12604       // The output of the permutation is our loaded result, the TokenFactor is
12605       // our new chain.
12606       DCI.CombineTo(N, Perm, TF);
12607       return SDValue(N, 0);
12608     }
12609     }
12610     break;
12611     case ISD::INTRINSIC_WO_CHAIN: {
12612       bool isLittleEndian = Subtarget.isLittleEndian();
12613       unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
12614       Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
12615                                            : Intrinsic::ppc_altivec_lvsl);
12616       if ((IID == Intr ||
12617            IID == Intrinsic::ppc_qpx_qvlpcld  ||
12618            IID == Intrinsic::ppc_qpx_qvlpcls) &&
12619         N->getOperand(1)->getOpcode() == ISD::ADD) {
12620         SDValue Add = N->getOperand(1);
12621 
12622         int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ?
12623                    5 /* 32 byte alignment */ : 4 /* 16 byte alignment */;
12624 
12625         if (DAG.MaskedValueIsZero(Add->getOperand(1),
12626                                   APInt::getAllOnesValue(Bits /* alignment */)
12627                                       .zext(Add.getScalarValueSizeInBits()))) {
12628           SDNode *BasePtr = Add->getOperand(0).getNode();
12629           for (SDNode::use_iterator UI = BasePtr->use_begin(),
12630                                     UE = BasePtr->use_end();
12631                UI != UE; ++UI) {
12632             if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
12633                 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) {
12634               // We've found another LVSL/LVSR, and this address is an aligned
12635               // multiple of that one. The results will be the same, so use the
12636               // one we've just found instead.
12637 
12638               return SDValue(*UI, 0);
12639             }
12640           }
12641         }
12642 
12643         if (isa<ConstantSDNode>(Add->getOperand(1))) {
12644           SDNode *BasePtr = Add->getOperand(0).getNode();
12645           for (SDNode::use_iterator UI = BasePtr->use_begin(),
12646                UE = BasePtr->use_end(); UI != UE; ++UI) {
12647             if (UI->getOpcode() == ISD::ADD &&
12648                 isa<ConstantSDNode>(UI->getOperand(1)) &&
12649                 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
12650                  cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
12651                 (1ULL << Bits) == 0) {
12652               SDNode *OtherAdd = *UI;
12653               for (SDNode::use_iterator VI = OtherAdd->use_begin(),
12654                    VE = OtherAdd->use_end(); VI != VE; ++VI) {
12655                 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
12656                     cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) {
12657                   return SDValue(*VI, 0);
12658                 }
12659               }
12660             }
12661           }
12662         }
12663       }
12664     }
12665 
12666     break;
12667   case ISD::INTRINSIC_W_CHAIN:
12668     // For little endian, VSX loads require generating lxvd2x/xxswapd.
12669     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
12670     if (Subtarget.needsSwapsForVSXMemOps()) {
12671       switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12672       default:
12673         break;
12674       case Intrinsic::ppc_vsx_lxvw4x:
12675       case Intrinsic::ppc_vsx_lxvd2x:
12676         return expandVSXLoadForLE(N, DCI);
12677       }
12678     }
12679     break;
12680   case ISD::INTRINSIC_VOID:
12681     // For little endian, VSX stores require generating xxswapd/stxvd2x.
12682     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
12683     if (Subtarget.needsSwapsForVSXMemOps()) {
12684       switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12685       default:
12686         break;
12687       case Intrinsic::ppc_vsx_stxvw4x:
12688       case Intrinsic::ppc_vsx_stxvd2x:
12689         return expandVSXStoreForLE(N, DCI);
12690       }
12691     }
12692     break;
12693   case ISD::BSWAP:
12694     // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
12695     if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
12696         N->getOperand(0).hasOneUse() &&
12697         (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
12698          (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
12699           N->getValueType(0) == MVT::i64))) {
12700       SDValue Load = N->getOperand(0);
12701       LoadSDNode *LD = cast<LoadSDNode>(Load);
12702       // Create the byte-swapping load.
12703       SDValue Ops[] = {
12704         LD->getChain(),    // Chain
12705         LD->getBasePtr(),  // Ptr
12706         DAG.getValueType(N->getValueType(0)) // VT
12707       };
12708       SDValue BSLoad =
12709         DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
12710                                 DAG.getVTList(N->getValueType(0) == MVT::i64 ?
12711                                               MVT::i64 : MVT::i32, MVT::Other),
12712                                 Ops, LD->getMemoryVT(), LD->getMemOperand());
12713 
12714       // If this is an i16 load, insert the truncate.
12715       SDValue ResVal = BSLoad;
12716       if (N->getValueType(0) == MVT::i16)
12717         ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
12718 
12719       // First, combine the bswap away.  This makes the value produced by the
12720       // load dead.
12721       DCI.CombineTo(N, ResVal);
12722 
12723       // Next, combine the load away, we give it a bogus result value but a real
12724       // chain result.  The result value is dead because the bswap is dead.
12725       DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
12726 
12727       // Return N so it doesn't get rechecked!
12728       return SDValue(N, 0);
12729     }
12730     break;
12731   case PPCISD::VCMP:
12732     // If a VCMPo node already exists with exactly the same operands as this
12733     // node, use its result instead of this node (VCMPo computes both a CR6 and
12734     // a normal output).
12735     //
12736     if (!N->getOperand(0).hasOneUse() &&
12737         !N->getOperand(1).hasOneUse() &&
12738         !N->getOperand(2).hasOneUse()) {
12739 
12740       // Scan all of the users of the LHS, looking for VCMPo's that match.
12741       SDNode *VCMPoNode = nullptr;
12742 
12743       SDNode *LHSN = N->getOperand(0).getNode();
12744       for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
12745            UI != E; ++UI)
12746         if (UI->getOpcode() == PPCISD::VCMPo &&
12747             UI->getOperand(1) == N->getOperand(1) &&
12748             UI->getOperand(2) == N->getOperand(2) &&
12749             UI->getOperand(0) == N->getOperand(0)) {
12750           VCMPoNode = *UI;
12751           break;
12752         }
12753 
12754       // If there is no VCMPo node, or if the flag value has a single use, don't
12755       // transform this.
12756       if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
12757         break;
12758 
12759       // Look at the (necessarily single) use of the flag value.  If it has a
12760       // chain, this transformation is more complex.  Note that multiple things
12761       // could use the value result, which we should ignore.
12762       SDNode *FlagUser = nullptr;
12763       for (SDNode::use_iterator UI = VCMPoNode->use_begin();
12764            FlagUser == nullptr; ++UI) {
12765         assert(UI != VCMPoNode->use_end() && "Didn't find user!");
12766         SDNode *User = *UI;
12767         for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
12768           if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {
12769             FlagUser = User;
12770             break;
12771           }
12772         }
12773       }
12774 
12775       // If the user is a MFOCRF instruction, we know this is safe.
12776       // Otherwise we give up for right now.
12777       if (FlagUser->getOpcode() == PPCISD::MFOCRF)
12778         return SDValue(VCMPoNode, 0);
12779     }
12780     break;
12781   case ISD::BRCOND: {
12782     SDValue Cond = N->getOperand(1);
12783     SDValue Target = N->getOperand(2);
12784 
12785     if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
12786         cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
12787           Intrinsic::ppc_is_decremented_ctr_nonzero) {
12788 
12789       // We now need to make the intrinsic dead (it cannot be instruction
12790       // selected).
12791       DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0));
12792       assert(Cond.getNode()->hasOneUse() &&
12793              "Counter decrement has more than one use");
12794 
12795       return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other,
12796                          N->getOperand(0), Target);
12797     }
12798   }
12799   break;
12800   case ISD::BR_CC: {
12801     // If this is a branch on an altivec predicate comparison, lower this so
12802     // that we don't have to do a MFOCRF: instead, branch directly on CR6.  This
12803     // lowering is done pre-legalize, because the legalizer lowers the predicate
12804     // compare down to code that is difficult to reassemble.
12805     ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
12806     SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);
12807 
12808     // Sometimes the promoted value of the intrinsic is ANDed by some non-zero
12809     // value. If so, pass-through the AND to get to the intrinsic.
12810     if (LHS.getOpcode() == ISD::AND &&
12811         LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
12812         cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() ==
12813           Intrinsic::ppc_is_decremented_ctr_nonzero &&
12814         isa<ConstantSDNode>(LHS.getOperand(1)) &&
12815         !isNullConstant(LHS.getOperand(1)))
12816       LHS = LHS.getOperand(0);
12817 
12818     if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
12819         cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
12820           Intrinsic::ppc_is_decremented_ctr_nonzero &&
12821         isa<ConstantSDNode>(RHS)) {
12822       assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
12823              "Counter decrement comparison is not EQ or NE");
12824 
12825       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
12826       bool isBDNZ = (CC == ISD::SETEQ && Val) ||
12827                     (CC == ISD::SETNE && !Val);
12828 
12829       // We now need to make the intrinsic dead (it cannot be instruction
12830       // selected).
12831       DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
12832       assert(LHS.getNode()->hasOneUse() &&
12833              "Counter decrement has more than one use");
12834 
12835       return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
12836                          N->getOperand(0), N->getOperand(4));
12837     }
12838 
12839     int CompareOpc;
12840     bool isDot;
12841 
12842     if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
12843         isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
12844         getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
12845       assert(isDot && "Can't compare against a vector result!");
12846 
12847       // If this is a comparison against something other than 0/1, then we know
12848       // that the condition is never/always true.
12849       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
12850       if (Val != 0 && Val != 1) {
12851         if (CC == ISD::SETEQ)      // Cond never true, remove branch.
12852           return N->getOperand(0);
12853         // Always !=, turn it into an unconditional branch.
12854         return DAG.getNode(ISD::BR, dl, MVT::Other,
12855                            N->getOperand(0), N->getOperand(4));
12856       }
12857 
12858       bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
12859 
12860       // Create the PPCISD altivec 'dot' comparison node.
12861       SDValue Ops[] = {
12862         LHS.getOperand(2),  // LHS of compare
12863         LHS.getOperand(3),  // RHS of compare
12864         DAG.getConstant(CompareOpc, dl, MVT::i32)
12865       };
12866       EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
12867       SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);
12868 
12869       // Unpack the result based on how the target uses it.
12870       PPC::Predicate CompOpc;
12871       switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
12872       default:  // Can't happen, don't crash on invalid number though.
12873       case 0:   // Branch on the value of the EQ bit of CR6.
12874         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
12875         break;
12876       case 1:   // Branch on the inverted value of the EQ bit of CR6.
12877         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
12878         break;
12879       case 2:   // Branch on the value of the LT bit of CR6.
12880         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
12881         break;
12882       case 3:   // Branch on the inverted value of the LT bit of CR6.
12883         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
12884         break;
12885       }
12886 
12887       return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
12888                          DAG.getConstant(CompOpc, dl, MVT::i32),
12889                          DAG.getRegister(PPC::CR6, MVT::i32),
12890                          N->getOperand(4), CompNode.getValue(1));
12891     }
12892     break;
12893   }
12894   case ISD::BUILD_VECTOR:
12895     return DAGCombineBuildVector(N, DCI);
12896   }
12897 
12898   return SDValue();
12899 }
12900 
12901 SDValue
12902 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
12903                                   SelectionDAG &DAG,
12904                                   std::vector<SDNode *> *Created) const {
12905   // fold (sdiv X, pow2)
12906   EVT VT = N->getValueType(0);
12907   if (VT == MVT::i64 && !Subtarget.isPPC64())
12908     return SDValue();
12909   if ((VT != MVT::i32 && VT != MVT::i64) ||
12910       !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
12911     return SDValue();
12912 
12913   SDLoc DL(N);
12914   SDValue N0 = N->getOperand(0);
12915 
12916   bool IsNegPow2 = (-Divisor).isPowerOf2();
12917   unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
12918   SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);
12919 
12920   SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
12921   if (Created)
12922     Created->push_back(Op.getNode());
12923 
12924   if (IsNegPow2) {
12925     Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
12926     if (Created)
12927       Created->push_back(Op.getNode());
12928   }
12929 
12930   return Op;
12931 }
12932 
12933 //===----------------------------------------------------------------------===//
12934 // Inline Assembly Support
12935 //===----------------------------------------------------------------------===//
12936 
12937 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
12938                                                       KnownBits &Known,
12939                                                       const APInt &DemandedElts,
12940                                                       const SelectionDAG &DAG,
12941                                                       unsigned Depth) const {
12942   Known.resetAll();
12943   switch (Op.getOpcode()) {
12944   default: break;
12945   case PPCISD::LBRX: {
12946     // lhbrx is known to have the top bits cleared out.
12947     if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
12948       Known.Zero = 0xFFFF0000;
12949     break;
12950   }
12951   case ISD::INTRINSIC_WO_CHAIN: {
12952     switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
12953     default: break;
12954     case Intrinsic::ppc_altivec_vcmpbfp_p:
12955     case Intrinsic::ppc_altivec_vcmpeqfp_p:
12956     case Intrinsic::ppc_altivec_vcmpequb_p:
12957     case Intrinsic::ppc_altivec_vcmpequh_p:
12958     case Intrinsic::ppc_altivec_vcmpequw_p:
12959     case Intrinsic::ppc_altivec_vcmpequd_p:
12960     case Intrinsic::ppc_altivec_vcmpgefp_p:
12961     case Intrinsic::ppc_altivec_vcmpgtfp_p:
12962     case Intrinsic::ppc_altivec_vcmpgtsb_p:
12963     case Intrinsic::ppc_altivec_vcmpgtsh_p:
12964     case Intrinsic::ppc_altivec_vcmpgtsw_p:
12965     case Intrinsic::ppc_altivec_vcmpgtsd_p:
12966     case Intrinsic::ppc_altivec_vcmpgtub_p:
12967     case Intrinsic::ppc_altivec_vcmpgtuh_p:
12968     case Intrinsic::ppc_altivec_vcmpgtuw_p:
12969     case Intrinsic::ppc_altivec_vcmpgtud_p:
12970       Known.Zero = ~1U;  // All bits but the low one are known to be zero.
12971       break;
12972     }
12973   }
12974   }
12975 }
12976 
12977 unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
12978   switch (Subtarget.getDarwinDirective()) {
12979   default: break;
12980   case PPC::DIR_970:
12981   case PPC::DIR_PWR4:
12982   case PPC::DIR_PWR5:
12983   case PPC::DIR_PWR5X:
12984   case PPC::DIR_PWR6:
12985   case PPC::DIR_PWR6X:
12986   case PPC::DIR_PWR7:
12987   case PPC::DIR_PWR8:
12988   case PPC::DIR_PWR9: {
12989     if (!ML)
12990       break;
12991 
12992     const PPCInstrInfo *TII = Subtarget.getInstrInfo();
12993 
12994     // For small loops (between 5 and 8 instructions), align to a 32-byte
12995     // boundary so that the entire loop fits in one instruction-cache line.
12996     uint64_t LoopSize = 0;
12997     for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
12998       for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
12999         LoopSize += TII->getInstSizeInBytes(*J);
13000         if (LoopSize > 32)
13001           break;
13002       }
13003 
13004     if (LoopSize > 16 && LoopSize <= 32)
13005       return 5;
13006 
13007     break;
13008   }
13009   }
13010 
13011   return TargetLowering::getPrefLoopAlignment(ML);
13012 }
13013 
13014 /// getConstraintType - Given a constraint, return the type of
13015 /// constraint it is for this target.
13016 PPCTargetLowering::ConstraintType
13017 PPCTargetLowering::getConstraintType(StringRef Constraint) const {
13018   if (Constraint.size() == 1) {
13019     switch (Constraint[0]) {
13020     default: break;
13021     case 'b':
13022     case 'r':
13023     case 'f':
13024     case 'd':
13025     case 'v':
13026     case 'y':
13027       return C_RegisterClass;
13028     case 'Z':
13029       // FIXME: While Z does indicate a memory constraint, it specifically
13030       // indicates an r+r address (used in conjunction with the 'y' modifier
13031       // in the replacement string). Currently, we're forcing the base
13032       // register to be r0 in the asm printer (which is interpreted as zero)
13033       // and forming the complete address in the second register. This is
13034       // suboptimal.
13035       return C_Memory;
13036     }
13037   } else if (Constraint == "wc") { // individual CR bits.
13038     return C_RegisterClass;
13039   } else if (Constraint == "wa" || Constraint == "wd" ||
13040              Constraint == "wf" || Constraint == "ws") {
13041     return C_RegisterClass; // VSX registers.
13042   }
13043   return TargetLowering::getConstraintType(Constraint);
13044 }
13045 
13046 /// Examine constraint type and operand type and determine a weight value.
13047 /// This object must already have been set up with the operand type
13048 /// and the current alternative constraint selected.
13049 TargetLowering::ConstraintWeight
13050 PPCTargetLowering::getSingleConstraintMatchWeight(
13051     AsmOperandInfo &info, const char *constraint) const {
13052   ConstraintWeight weight = CW_Invalid;
13053   Value *CallOperandVal = info.CallOperandVal;
13054     // If we don't have a value, we can't do a match,
13055     // but allow it at the lowest weight.
13056   if (!CallOperandVal)
13057     return CW_Default;
13058   Type *type = CallOperandVal->getType();
13059 
13060   // Look at the constraint type.
13061   if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
13062     return CW_Register; // an individual CR bit.
13063   else if ((StringRef(constraint) == "wa" ||
13064             StringRef(constraint) == "wd" ||
13065             StringRef(constraint) == "wf") &&
13066            type->isVectorTy())
13067     return CW_Register;
13068   else if (StringRef(constraint) == "ws" && type->isDoubleTy())
13069     return CW_Register;
13070 
13071   switch (*constraint) {
13072   default:
13073     weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
13074     break;
13075   case 'b':
13076     if (type->isIntegerTy())
13077       weight = CW_Register;
13078     break;
13079   case 'f':
13080     if (type->isFloatTy())
13081       weight = CW_Register;
13082     break;
13083   case 'd':
13084     if (type->isDoubleTy())
13085       weight = CW_Register;
13086     break;
13087   case 'v':
13088     if (type->isVectorTy())
13089       weight = CW_Register;
13090     break;
13091   case 'y':
13092     weight = CW_Register;
13093     break;
13094   case 'Z':
13095     weight = CW_Memory;
13096     break;
13097   }
13098   return weight;
13099 }
13100 
13101 std::pair<unsigned, const TargetRegisterClass *>
13102 PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
13103                                                 StringRef Constraint,
13104                                                 MVT VT) const {
13105   if (Constraint.size() == 1) {
13106     // GCC RS6000 Constraint Letters
13107     switch (Constraint[0]) {
13108     case 'b':   // R1-R31
13109       if (VT == MVT::i64 && Subtarget.isPPC64())
13110         return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
13111       return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
13112     case 'r':   // R0-R31
13113       if (VT == MVT::i64 && Subtarget.isPPC64())
13114         return std::make_pair(0U, &PPC::G8RCRegClass);
13115       return std::make_pair(0U, &PPC::GPRCRegClass);
13116     // 'd' and 'f' constraints are both defined to be "the floating point
13117     // registers", where one is for 32-bit and the other for 64-bit. We don't
13118     // really care overly much here so just give them all the same reg classes.
13119     case 'd':
13120     case 'f':
13121       if (VT == MVT::f32 || VT == MVT::i32)
13122         return std::make_pair(0U, &PPC::F4RCRegClass);
13123       if (VT == MVT::f64 || VT == MVT::i64)
13124         return std::make_pair(0U, &PPC::F8RCRegClass);
13125       if (VT == MVT::v4f64 && Subtarget.hasQPX())
13126         return std::make_pair(0U, &PPC::QFRCRegClass);
13127       if (VT == MVT::v4f32 && Subtarget.hasQPX())
13128         return std::make_pair(0U, &PPC::QSRCRegClass);
13129       break;
13130     case 'v':
13131       if (VT == MVT::v4f64 && Subtarget.hasQPX())
13132         return std::make_pair(0U, &PPC::QFRCRegClass);
13133       if (VT == MVT::v4f32 && Subtarget.hasQPX())
13134         return std::make_pair(0U, &PPC::QSRCRegClass);
13135       if (Subtarget.hasAltivec())
13136         return std::make_pair(0U, &PPC::VRRCRegClass);
13137       break;
13138     case 'y':   // crrc
13139       return std::make_pair(0U, &PPC::CRRCRegClass);
13140     }
13141   } else if (Constraint == "wc" && Subtarget.useCRBits()) {
13142     // An individual CR bit.
13143     return std::make_pair(0U, &PPC::CRBITRCRegClass);
13144   } else if ((Constraint == "wa" || Constraint == "wd" ||
13145              Constraint == "wf") && Subtarget.hasVSX()) {
13146     return std::make_pair(0U, &PPC::VSRCRegClass);
13147   } else if (Constraint == "ws" && Subtarget.hasVSX()) {
13148     if (VT == MVT::f32 && Subtarget.hasP8Vector())
13149       return std::make_pair(0U, &PPC::VSSRCRegClass);
13150     else
13151       return std::make_pair(0U, &PPC::VSFRCRegClass);
13152   }
13153 
13154   std::pair<unsigned, const TargetRegisterClass *> R =
13155       TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
13156 
13157   // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
13158   // (which we call X[0-9]+). If a 64-bit value has been requested, and a
13159   // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent
13160   // register.
13161   // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
13162   // the AsmName field from *RegisterInfo.td, then this would not be necessary.
13163   if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
13164       PPC::GPRCRegClass.contains(R.first))
13165     return std::make_pair(TRI->getMatchingSuperReg(R.first,
13166                             PPC::sub_32, &PPC::G8RCRegClass),
13167                           &PPC::G8RCRegClass);
13168 
13169   // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
13170   if (!R.second && StringRef("{cc}").equals_lower(Constraint)) {
13171     R.first = PPC::CR0;
13172     R.second = &PPC::CRRCRegClass;
13173   }
13174 
13175   return R;
13176 }
13177 
13178 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
13179 /// vector.  If it is invalid, don't add anything to Ops.
13180 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
13181                                                      std::string &Constraint,
13182                                                      std::vector<SDValue>&Ops,
13183                                                      SelectionDAG &DAG) const {
13184   SDValue Result;
13185 
13186   // Only support length 1 constraints.
13187   if (Constraint.length() > 1) return;
13188 
13189   char Letter = Constraint[0];
13190   switch (Letter) {
13191   default: break;
13192   case 'I':
13193   case 'J':
13194   case 'K':
13195   case 'L':
13196   case 'M':
13197   case 'N':
13198   case 'O':
13199   case 'P': {
13200     ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
13201     if (!CST) return; // Must be an immediate to match.
13202     SDLoc dl(Op);
13203     int64_t Value = CST->getSExtValue();
13204     EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
13205                          // numbers are printed as such.
13206     switch (Letter) {
13207     default: llvm_unreachable("Unknown constraint letter!");
13208     case 'I':  // "I" is a signed 16-bit constant.
13209       if (isInt<16>(Value))
13210         Result = DAG.getTargetConstant(Value, dl, TCVT);
13211       break;
13212     case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
13213       if (isShiftedUInt<16, 16>(Value))
13214         Result = DAG.getTargetConstant(Value, dl, TCVT);
13215       break;
13216     case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
13217       if (isShiftedInt<16, 16>(Value))
13218         Result = DAG.getTargetConstant(Value, dl, TCVT);
13219       break;
13220     case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
13221       if (isUInt<16>(Value))
13222         Result = DAG.getTargetConstant(Value, dl, TCVT);
13223       break;
13224     case 'M':  // "M" is a constant that is greater than 31.
13225       if (Value > 31)
13226         Result = DAG.getTargetConstant(Value, dl, TCVT);
13227       break;
13228     case 'N':  // "N" is a positive constant that is an exact power of two.
13229       if (Value > 0 && isPowerOf2_64(Value))
13230         Result = DAG.getTargetConstant(Value, dl, TCVT);
13231       break;
13232     case 'O':  // "O" is the constant zero.
13233       if (Value == 0)
13234         Result = DAG.getTargetConstant(Value, dl, TCVT);
13235       break;
13236     case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
13237       if (isInt<16>(-Value))
13238         Result = DAG.getTargetConstant(Value, dl, TCVT);
13239       break;
13240     }
13241     break;
13242   }
13243   }
13244 
13245   if (Result.getNode()) {
13246     Ops.push_back(Result);
13247     return;
13248   }
13249 
13250   // Handle standard constraint letters.
13251   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
13252 }
13253 
13254 // isLegalAddressingMode - Return true if the addressing mode represented
13255 // by AM is legal for this target, for a load/store of the specified type.
13256 bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
13257                                               const AddrMode &AM, Type *Ty,
13258                                               unsigned AS, Instruction *I) const {
13259   // PPC does not allow r+i addressing modes for vectors!
13260   if (Ty->isVectorTy() && AM.BaseOffs != 0)
13261     return false;
13262 
13263   // PPC allows a sign-extended 16-bit immediate field.
13264   if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
13265     return false;
13266 
13267   // No global is ever allowed as a base.
13268   if (AM.BaseGV)
13269     return false;
13270 
13271   // PPC only support r+r,
13272   switch (AM.Scale) {
13273   case 0:  // "r+i" or just "i", depending on HasBaseReg.
13274     break;
13275   case 1:
13276     if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
13277       return false;
13278     // Otherwise we have r+r or r+i.
13279     break;
13280   case 2:
13281     if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r  or  2*r+i is not allowed.
13282       return false;
13283     // Allow 2*r as r+r.
13284     break;
13285   default:
13286     // No other scales are supported.
13287     return false;
13288   }
13289 
13290   return true;
13291 }
13292 
13293 SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
13294                                            SelectionDAG &DAG) const {
13295   MachineFunction &MF = DAG.getMachineFunction();
13296   MachineFrameInfo &MFI = MF.getFrameInfo();
13297   MFI.setReturnAddressIsTaken(true);
13298 
13299   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
13300     return SDValue();
13301 
13302   SDLoc dl(Op);
13303   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
13304 
13305   // Make sure the function does not optimize away the store of the RA to
13306   // the stack.
13307   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
13308   FuncInfo->setLRStoreRequired();
13309   bool isPPC64 = Subtarget.isPPC64();
13310   auto PtrVT = getPointerTy(MF.getDataLayout());
13311 
13312   if (Depth > 0) {
13313     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
13314     SDValue Offset =
13315         DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
13316                         isPPC64 ? MVT::i64 : MVT::i32);
13317     return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
13318                        DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
13319                        MachinePointerInfo());
13320   }
13321 
13322   // Just load the return address off the stack.
13323   SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
13324   return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
13325                      MachinePointerInfo());
13326 }
13327 
13328 SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
13329                                           SelectionDAG &DAG) const {
13330   SDLoc dl(Op);
13331   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
13332 
13333   MachineFunction &MF = DAG.getMachineFunction();
13334   MachineFrameInfo &MFI = MF.getFrameInfo();
13335   MFI.setFrameAddressIsTaken(true);
13336 
13337   EVT PtrVT = getPointerTy(MF.getDataLayout());
13338   bool isPPC64 = PtrVT == MVT::i64;
13339 
13340   // Naked functions never have a frame pointer, and so we use r1. For all
13341   // other functions, this decision must be delayed until during PEI.
13342   unsigned FrameReg;
13343   if (MF.getFunction().hasFnAttribute(Attribute::Naked))
13344     FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
13345   else
13346     FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;
13347 
13348   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
13349                                          PtrVT);
13350   while (Depth--)
13351     FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
13352                             FrameAddr, MachinePointerInfo());
13353   return FrameAddr;
13354 }
13355 
13356 // FIXME? Maybe this could be a TableGen attribute on some registers and
13357 // this table could be generated automatically from RegInfo.
13358 unsigned PPCTargetLowering::getRegisterByName(const char* RegName, EVT VT,
13359                                               SelectionDAG &DAG) const {
13360   bool isPPC64 = Subtarget.isPPC64();
13361   bool isDarwinABI = Subtarget.isDarwinABI();
13362 
13363   if ((isPPC64 && VT != MVT::i64 && VT != MVT::i32) ||
13364       (!isPPC64 && VT != MVT::i32))
13365     report_fatal_error("Invalid register global variable type");
13366 
13367   bool is64Bit = isPPC64 && VT == MVT::i64;
13368   unsigned Reg = StringSwitch<unsigned>(RegName)
13369                    .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
13370                    .Case("r2", (isDarwinABI || isPPC64) ? 0 : PPC::R2)
13371                    .Case("r13", (!isPPC64 && isDarwinABI) ? 0 :
13372                                   (is64Bit ? PPC::X13 : PPC::R13))
13373                    .Default(0);
13374 
13375   if (Reg)
13376     return Reg;
13377   report_fatal_error("Invalid register name global variable");
13378 }
13379 
13380 bool
13381 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
13382   // The PowerPC target isn't yet aware of offsets.
13383   return false;
13384 }
13385 
13386 bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
13387                                            const CallInst &I,
13388                                            MachineFunction &MF,
13389                                            unsigned Intrinsic) const {
13390   switch (Intrinsic) {
13391   case Intrinsic::ppc_qpx_qvlfd:
13392   case Intrinsic::ppc_qpx_qvlfs:
13393   case Intrinsic::ppc_qpx_qvlfcd:
13394   case Intrinsic::ppc_qpx_qvlfcs:
13395   case Intrinsic::ppc_qpx_qvlfiwa:
13396   case Intrinsic::ppc_qpx_qvlfiwz:
13397   case Intrinsic::ppc_altivec_lvx:
13398   case Intrinsic::ppc_altivec_lvxl:
13399   case Intrinsic::ppc_altivec_lvebx:
13400   case Intrinsic::ppc_altivec_lvehx:
13401   case Intrinsic::ppc_altivec_lvewx:
13402   case Intrinsic::ppc_vsx_lxvd2x:
13403   case Intrinsic::ppc_vsx_lxvw4x: {
13404     EVT VT;
13405     switch (Intrinsic) {
13406     case Intrinsic::ppc_altivec_lvebx:
13407       VT = MVT::i8;
13408       break;
13409     case Intrinsic::ppc_altivec_lvehx:
13410       VT = MVT::i16;
13411       break;
13412     case Intrinsic::ppc_altivec_lvewx:
13413       VT = MVT::i32;
13414       break;
13415     case Intrinsic::ppc_vsx_lxvd2x:
13416       VT = MVT::v2f64;
13417       break;
13418     case Intrinsic::ppc_qpx_qvlfd:
13419       VT = MVT::v4f64;
13420       break;
13421     case Intrinsic::ppc_qpx_qvlfs:
13422       VT = MVT::v4f32;
13423       break;
13424     case Intrinsic::ppc_qpx_qvlfcd:
13425       VT = MVT::v2f64;
13426       break;
13427     case Intrinsic::ppc_qpx_qvlfcs:
13428       VT = MVT::v2f32;
13429       break;
13430     default:
13431       VT = MVT::v4i32;
13432       break;
13433     }
13434 
13435     Info.opc = ISD::INTRINSIC_W_CHAIN;
13436     Info.memVT = VT;
13437     Info.ptrVal = I.getArgOperand(0);
13438     Info.offset = -VT.getStoreSize()+1;
13439     Info.size = 2*VT.getStoreSize()-1;
13440     Info.align = 1;
13441     Info.flags = MachineMemOperand::MOLoad;
13442     return true;
13443   }
13444   case Intrinsic::ppc_qpx_qvlfda:
13445   case Intrinsic::ppc_qpx_qvlfsa:
13446   case Intrinsic::ppc_qpx_qvlfcda:
13447   case Intrinsic::ppc_qpx_qvlfcsa:
13448   case Intrinsic::ppc_qpx_qvlfiwaa:
13449   case Intrinsic::ppc_qpx_qvlfiwza: {
13450     EVT VT;
13451     switch (Intrinsic) {
13452     case Intrinsic::ppc_qpx_qvlfda:
13453       VT = MVT::v4f64;
13454       break;
13455     case Intrinsic::ppc_qpx_qvlfsa:
13456       VT = MVT::v4f32;
13457       break;
13458     case Intrinsic::ppc_qpx_qvlfcda:
13459       VT = MVT::v2f64;
13460       break;
13461     case Intrinsic::ppc_qpx_qvlfcsa:
13462       VT = MVT::v2f32;
13463       break;
13464     default:
13465       VT = MVT::v4i32;
13466       break;
13467     }
13468 
13469     Info.opc = ISD::INTRINSIC_W_CHAIN;
13470     Info.memVT = VT;
13471     Info.ptrVal = I.getArgOperand(0);
13472     Info.offset = 0;
13473     Info.size = VT.getStoreSize();
13474     Info.align = 1;
13475     Info.flags = MachineMemOperand::MOLoad;
13476     return true;
13477   }
13478   case Intrinsic::ppc_qpx_qvstfd:
13479   case Intrinsic::ppc_qpx_qvstfs:
13480   case Intrinsic::ppc_qpx_qvstfcd:
13481   case Intrinsic::ppc_qpx_qvstfcs:
13482   case Intrinsic::ppc_qpx_qvstfiw:
13483   case Intrinsic::ppc_altivec_stvx:
13484   case Intrinsic::ppc_altivec_stvxl:
13485   case Intrinsic::ppc_altivec_stvebx:
13486   case Intrinsic::ppc_altivec_stvehx:
13487   case Intrinsic::ppc_altivec_stvewx:
13488   case Intrinsic::ppc_vsx_stxvd2x:
13489   case Intrinsic::ppc_vsx_stxvw4x: {
13490     EVT VT;
13491     switch (Intrinsic) {
13492     case Intrinsic::ppc_altivec_stvebx:
13493       VT = MVT::i8;
13494       break;
13495     case Intrinsic::ppc_altivec_stvehx:
13496       VT = MVT::i16;
13497       break;
13498     case Intrinsic::ppc_altivec_stvewx:
13499       VT = MVT::i32;
13500       break;
13501     case Intrinsic::ppc_vsx_stxvd2x:
13502       VT = MVT::v2f64;
13503       break;
13504     case Intrinsic::ppc_qpx_qvstfd:
13505       VT = MVT::v4f64;
13506       break;
13507     case Intrinsic::ppc_qpx_qvstfs:
13508       VT = MVT::v4f32;
13509       break;
13510     case Intrinsic::ppc_qpx_qvstfcd:
13511       VT = MVT::v2f64;
13512       break;
13513     case Intrinsic::ppc_qpx_qvstfcs:
13514       VT = MVT::v2f32;
13515       break;
13516     default:
13517       VT = MVT::v4i32;
13518       break;
13519     }
13520 
13521     Info.opc = ISD::INTRINSIC_VOID;
13522     Info.memVT = VT;
13523     Info.ptrVal = I.getArgOperand(1);
13524     Info.offset = -VT.getStoreSize()+1;
13525     Info.size = 2*VT.getStoreSize()-1;
13526     Info.align = 1;
13527     Info.flags = MachineMemOperand::MOStore;
13528     return true;
13529   }
13530   case Intrinsic::ppc_qpx_qvstfda:
13531   case Intrinsic::ppc_qpx_qvstfsa:
13532   case Intrinsic::ppc_qpx_qvstfcda:
13533   case Intrinsic::ppc_qpx_qvstfcsa:
13534   case Intrinsic::ppc_qpx_qvstfiwa: {
13535     EVT VT;
13536     switch (Intrinsic) {
13537     case Intrinsic::ppc_qpx_qvstfda:
13538       VT = MVT::v4f64;
13539       break;
13540     case Intrinsic::ppc_qpx_qvstfsa:
13541       VT = MVT::v4f32;
13542       break;
13543     case Intrinsic::ppc_qpx_qvstfcda:
13544       VT = MVT::v2f64;
13545       break;
13546     case Intrinsic::ppc_qpx_qvstfcsa:
13547       VT = MVT::v2f32;
13548       break;
13549     default:
13550       VT = MVT::v4i32;
13551       break;
13552     }
13553 
13554     Info.opc = ISD::INTRINSIC_VOID;
13555     Info.memVT = VT;
13556     Info.ptrVal = I.getArgOperand(1);
13557     Info.offset = 0;
13558     Info.size = VT.getStoreSize();
13559     Info.align = 1;
13560     Info.flags = MachineMemOperand::MOStore;
13561     return true;
13562   }
13563   default:
13564     break;
13565   }
13566 
13567   return false;
13568 }
13569 
13570 /// getOptimalMemOpType - Returns the target specific optimal type for load
13571 /// and store operations as a result of memset, memcpy, and memmove
13572 /// lowering. If DstAlign is zero that means it's safe to destination
13573 /// alignment can satisfy any constraint. Similarly if SrcAlign is zero it
13574 /// means there isn't a need to check it against alignment requirement,
13575 /// probably because the source does not need to be loaded. If 'IsMemset' is
13576 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
13577 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
13578 /// source is constant so it does not need to be loaded.
13579 /// It returns EVT::Other if the type should be determined using generic
13580 /// target-independent logic.
13581 EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
13582                                            unsigned DstAlign, unsigned SrcAlign,
13583                                            bool IsMemset, bool ZeroMemset,
13584                                            bool MemcpyStrSrc,
13585                                            MachineFunction &MF) const {
13586   if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
13587     const Function &F = MF.getFunction();
13588     // When expanding a memset, require at least two QPX instructions to cover
13589     // the cost of loading the value to be stored from the constant pool.
13590     if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) &&
13591        (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) &&
13592         !F.hasFnAttribute(Attribute::NoImplicitFloat)) {
13593       return MVT::v4f64;
13594     }
13595 
13596     // We should use Altivec/VSX loads and stores when available. For unaligned
13597     // addresses, unaligned VSX loads are only fast starting with the P8.
13598     if (Subtarget.hasAltivec() && Size >= 16 &&
13599         (((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) ||
13600          ((IsMemset && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
13601       return MVT::v4i32;
13602   }
13603 
13604   if (Subtarget.isPPC64()) {
13605     return MVT::i64;
13606   }
13607 
13608   return MVT::i32;
13609 }
13610 
13611 /// \brief Returns true if it is beneficial to convert a load of a constant
13612 /// to just the constant itself.
13613 bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
13614                                                           Type *Ty) const {
13615   assert(Ty->isIntegerTy());
13616 
13617   unsigned BitSize = Ty->getPrimitiveSizeInBits();
13618   return !(BitSize == 0 || BitSize > 64);
13619 }
13620 
13621 bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
13622   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
13623     return false;
13624   unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
13625   unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
13626   return NumBits1 == 64 && NumBits2 == 32;
13627 }
13628 
13629 bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
13630   if (!VT1.isInteger() || !VT2.isInteger())
13631     return false;
13632   unsigned NumBits1 = VT1.getSizeInBits();
13633   unsigned NumBits2 = VT2.getSizeInBits();
13634   return NumBits1 == 64 && NumBits2 == 32;
13635 }
13636 
13637 bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
13638   // Generally speaking, zexts are not free, but they are free when they can be
13639   // folded with other operations.
13640   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
13641     EVT MemVT = LD->getMemoryVT();
13642     if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
13643          (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
13644         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
13645          LD->getExtensionType() == ISD::ZEXTLOAD))
13646       return true;
13647   }
13648 
13649   // FIXME: Add other cases...
13650   //  - 32-bit shifts with a zext to i64
13651   //  - zext after ctlz, bswap, etc.
13652   //  - zext after and by a constant mask
13653 
13654   return TargetLowering::isZExtFree(Val, VT2);
13655 }
13656 
13657 bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
13658   assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
13659          "invalid fpext types");
13660   return true;
13661 }
13662 
13663 bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
13664   return isInt<16>(Imm) || isUInt<16>(Imm);
13665 }
13666 
13667 bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
13668   return isInt<16>(Imm) || isUInt<16>(Imm);
13669 }
13670 
13671 bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
13672                                                        unsigned,
13673                                                        unsigned,
13674                                                        bool *Fast) const {
13675   if (DisablePPCUnaligned)
13676     return false;
13677 
13678   // PowerPC supports unaligned memory access for simple non-vector types.
13679   // Although accessing unaligned addresses is not as efficient as accessing
13680   // aligned addresses, it is generally more efficient than manual expansion,
13681   // and generally only traps for software emulation when crossing page
13682   // boundaries.
13683 
13684   if (!VT.isSimple())
13685     return false;
13686 
13687   if (VT.getSimpleVT().isVector()) {
13688     if (Subtarget.hasVSX()) {
13689       if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
13690           VT != MVT::v4f32 && VT != MVT::v4i32)
13691         return false;
13692     } else {
13693       return false;
13694     }
13695   }
13696 
13697   if (VT == MVT::ppcf128)
13698     return false;
13699 
13700   if (Fast)
13701     *Fast = true;
13702 
13703   return true;
13704 }
13705 
13706 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
13707   VT = VT.getScalarType();
13708 
13709   if (!VT.isSimple())
13710     return false;
13711 
13712   switch (VT.getSimpleVT().SimpleTy) {
13713   case MVT::f32:
13714   case MVT::f64:
13715     return true;
13716   default:
13717     break;
13718   }
13719 
13720   return false;
13721 }
13722 
13723 const MCPhysReg *
13724 PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
13725   // LR is a callee-save register, but we must treat it as clobbered by any call
13726   // site. Hence we include LR in the scratch registers, which are in turn added
13727   // as implicit-defs for stackmaps and patchpoints. The same reasoning applies
13728   // to CTR, which is used by any indirect call.
13729   static const MCPhysReg ScratchRegs[] = {
13730     PPC::X12, PPC::LR8, PPC::CTR8, 0
13731   };
13732 
13733   return ScratchRegs;
13734 }
13735 
13736 unsigned PPCTargetLowering::getExceptionPointerRegister(
13737     const Constant *PersonalityFn) const {
13738   return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
13739 }
13740 
13741 unsigned PPCTargetLowering::getExceptionSelectorRegister(
13742     const Constant *PersonalityFn) const {
13743   return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
13744 }
13745 
13746 bool
13747 PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
13748                      EVT VT , unsigned DefinedValues) const {
13749   if (VT == MVT::v2i64)
13750     return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves
13751 
13752   if (Subtarget.hasVSX() || Subtarget.hasQPX())
13753     return true;
13754 
13755   return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
13756 }
13757 
13758 Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
13759   if (DisableILPPref || Subtarget.enableMachineScheduler())
13760     return TargetLowering::getSchedulingPreference(N);
13761 
13762   return Sched::ILP;
13763 }
13764 
13765 // Create a fast isel object.
13766 FastISel *
13767 PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
13768                                   const TargetLibraryInfo *LibInfo) const {
13769   return PPC::createFastISel(FuncInfo, LibInfo);
13770 }
13771 
13772 void PPCTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
13773   if (Subtarget.isDarwinABI()) return;
13774   if (!Subtarget.isPPC64()) return;
13775 
13776   // Update IsSplitCSR in PPCFunctionInfo
13777   PPCFunctionInfo *PFI = Entry->getParent()->getInfo<PPCFunctionInfo>();
13778   PFI->setIsSplitCSR(true);
13779 }
13780 
13781 void PPCTargetLowering::insertCopiesSplitCSR(
13782   MachineBasicBlock *Entry,
13783   const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
13784   const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
13785   const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
13786   if (!IStart)
13787     return;
13788 
13789   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
13790   MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
13791   MachineBasicBlock::iterator MBBI = Entry->begin();
13792   for (const MCPhysReg *I = IStart; *I; ++I) {
13793     const TargetRegisterClass *RC = nullptr;
13794     if (PPC::G8RCRegClass.contains(*I))
13795       RC = &PPC::G8RCRegClass;
13796     else if (PPC::F8RCRegClass.contains(*I))
13797       RC = &PPC::F8RCRegClass;
13798     else if (PPC::CRRCRegClass.contains(*I))
13799       RC = &PPC::CRRCRegClass;
13800     else if (PPC::VRRCRegClass.contains(*I))
13801       RC = &PPC::VRRCRegClass;
13802     else
13803       llvm_unreachable("Unexpected register class in CSRsViaCopy!");
13804 
13805     unsigned NewVR = MRI->createVirtualRegister(RC);
13806     // Create copy from CSR to a virtual register.
13807     // FIXME: this currently does not emit CFI pseudo-instructions, it works
13808     // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
13809     // nounwind. If we want to generalize this later, we may need to emit
13810     // CFI pseudo-instructions.
13811     assert(Entry->getParent()->getFunction().hasFnAttribute(
13812              Attribute::NoUnwind) &&
13813            "Function should be nounwind in insertCopiesSplitCSR!");
13814     Entry->addLiveIn(*I);
13815     BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
13816       .addReg(*I);
13817 
13818     // Insert the copy-back instructions right before the terminator
13819     for (auto *Exit : Exits)
13820       BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
13821               TII->get(TargetOpcode::COPY), *I)
13822         .addReg(NewVR);
13823   }
13824 }
13825 
13826 // Override to enable LOAD_STACK_GUARD lowering on Linux.
13827 bool PPCTargetLowering::useLoadStackGuardNode() const {
13828   if (!Subtarget.isTargetLinux())
13829     return TargetLowering::useLoadStackGuardNode();
13830   return true;
13831 }
13832 
13833 // Override to disable global variable loading on Linux.
13834 void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
13835   if (!Subtarget.isTargetLinux())
13836     return TargetLowering::insertSSPDeclarations(M);
13837 }
13838 
13839 bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
13840   if (!VT.isSimple() || !Subtarget.hasVSX())
13841     return false;
13842 
13843   switch(VT.getSimpleVT().SimpleTy) {
13844   default:
13845     // For FP types that are currently not supported by PPC backend, return
13846     // false. Examples: f16, f80.
13847     return false;
13848   case MVT::f32:
13849   case MVT::f64:
13850   case MVT::ppcf128:
13851     return Imm.isPosZero();
13852   }
13853 }
13854 
13855 // For vector shift operation op, fold
13856 // (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
13857 static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
13858                                   SelectionDAG &DAG) {
13859   SDValue N0 = N->getOperand(0);
13860   SDValue N1 = N->getOperand(1);
13861   EVT VT = N0.getValueType();
13862   unsigned OpSizeInBits = VT.getScalarSizeInBits();
13863   unsigned Opcode = N->getOpcode();
13864   unsigned TargetOpcode;
13865 
13866   switch (Opcode) {
13867   default:
13868     llvm_unreachable("Unexpected shift operation");
13869   case ISD::SHL:
13870     TargetOpcode = PPCISD::SHL;
13871     break;
13872   case ISD::SRL:
13873     TargetOpcode = PPCISD::SRL;
13874     break;
13875   case ISD::SRA:
13876     TargetOpcode = PPCISD::SRA;
13877     break;
13878   }
13879 
13880   if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
13881       N1->getOpcode() == ISD::AND)
13882     if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
13883       if (Mask->getZExtValue() == OpSizeInBits - 1)
13884         return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));
13885 
13886   return SDValue();
13887 }
13888 
13889 SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
13890   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
13891     return Value;
13892 
13893   return SDValue();
13894 }
13895 
13896 SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
13897   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
13898     return Value;
13899 
13900   return SDValue();
13901 }
13902 
13903 SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
13904   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
13905     return Value;
13906 
13907   return SDValue();
13908 }
13909 
13910 bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
13911   // Only duplicate to increase tail-calls for the 64bit SysV ABIs.
13912   if (!Subtarget.isSVR4ABI() || !Subtarget.isPPC64())
13913     return false;
13914 
13915   // If not a tail call then no need to proceed.
13916   if (!CI->isTailCall())
13917     return false;
13918 
13919   // If tail calls are disabled for the caller then we are done.
13920   const Function *Caller = CI->getParent()->getParent();
13921   auto Attr = Caller->getFnAttribute("disable-tail-calls");
13922   if (Attr.getValueAsString() == "true")
13923     return false;
13924 
13925   // If sibling calls have been disabled and tail-calls aren't guaranteed
13926   // there is no reason to duplicate.
13927   auto &TM = getTargetMachine();
13928   if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
13929     return false;
13930 
13931   // Can't tail call a function called indirectly, or if it has variadic args.
13932   const Function *Callee = CI->getCalledFunction();
13933   if (!Callee || Callee->isVarArg())
13934     return false;
13935 
13936   // Make sure the callee and caller calling conventions are eligible for tco.
13937   if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
13938                                            CI->getCallingConv()))
13939       return false;
13940 
13941   // If the function is local then we have a good chance at tail-calling it
13942   return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
13943 }
13944