1 //===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the PPCISelLowering class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "PPCISelLowering.h"
14 #include "MCTargetDesc/PPCPredicates.h"
15 #include "PPC.h"
16 #include "PPCCCState.h"
17 #include "PPCCallingConv.h"
18 #include "PPCFrameLowering.h"
19 #include "PPCInstrInfo.h"
20 #include "PPCMachineFunctionInfo.h"
21 #include "PPCPerfectShuffle.h"
22 #include "PPCRegisterInfo.h"
23 #include "PPCSubtarget.h"
24 #include "PPCTargetMachine.h"
25 #include "llvm/ADT/APFloat.h"
26 #include "llvm/ADT/APInt.h"
27 #include "llvm/ADT/ArrayRef.h"
28 #include "llvm/ADT/DenseMap.h"
29 #include "llvm/ADT/None.h"
30 #include "llvm/ADT/STLExtras.h"
31 #include "llvm/ADT/SmallPtrSet.h"
32 #include "llvm/ADT/SmallSet.h"
33 #include "llvm/ADT/SmallVector.h"
34 #include "llvm/ADT/Statistic.h"
35 #include "llvm/ADT/StringRef.h"
36 #include "llvm/ADT/StringSwitch.h"
37 #include "llvm/CodeGen/CallingConvLower.h"
38 #include "llvm/CodeGen/ISDOpcodes.h"
39 #include "llvm/CodeGen/MachineBasicBlock.h"
40 #include "llvm/CodeGen/MachineFrameInfo.h"
41 #include "llvm/CodeGen/MachineFunction.h"
42 #include "llvm/CodeGen/MachineInstr.h"
43 #include "llvm/CodeGen/MachineInstrBuilder.h"
44 #include "llvm/CodeGen/MachineJumpTableInfo.h"
45 #include "llvm/CodeGen/MachineLoopInfo.h"
46 #include "llvm/CodeGen/MachineMemOperand.h"
47 #include "llvm/CodeGen/MachineModuleInfo.h"
48 #include "llvm/CodeGen/MachineOperand.h"
49 #include "llvm/CodeGen/MachineRegisterInfo.h"
50 #include "llvm/CodeGen/RuntimeLibcalls.h"
51 #include "llvm/CodeGen/SelectionDAG.h"
52 #include "llvm/CodeGen/SelectionDAGNodes.h"
53 #include "llvm/CodeGen/TargetInstrInfo.h"
54 #include "llvm/CodeGen/TargetLowering.h"
55 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
56 #include "llvm/CodeGen/TargetRegisterInfo.h"
57 #include "llvm/CodeGen/ValueTypes.h"
58 #include "llvm/IR/CallSite.h"
59 #include "llvm/IR/CallingConv.h"
60 #include "llvm/IR/Constant.h"
61 #include "llvm/IR/Constants.h"
62 #include "llvm/IR/DataLayout.h"
63 #include "llvm/IR/DebugLoc.h"
64 #include "llvm/IR/DerivedTypes.h"
65 #include "llvm/IR/Function.h"
66 #include "llvm/IR/GlobalValue.h"
67 #include "llvm/IR/IRBuilder.h"
68 #include "llvm/IR/Instructions.h"
69 #include "llvm/IR/Intrinsics.h"
70 #include "llvm/IR/IntrinsicsPowerPC.h"
71 #include "llvm/IR/Module.h"
72 #include "llvm/IR/Type.h"
73 #include "llvm/IR/Use.h"
74 #include "llvm/IR/Value.h"
75 #include "llvm/MC/MCContext.h"
76 #include "llvm/MC/MCExpr.h"
77 #include "llvm/MC/MCRegisterInfo.h"
78 #include "llvm/MC/MCSymbolXCOFF.h"
79 #include "llvm/Support/AtomicOrdering.h"
80 #include "llvm/Support/BranchProbability.h"
81 #include "llvm/Support/Casting.h"
82 #include "llvm/Support/CodeGen.h"
83 #include "llvm/Support/CommandLine.h"
84 #include "llvm/Support/Compiler.h"
85 #include "llvm/Support/Debug.h"
86 #include "llvm/Support/ErrorHandling.h"
87 #include "llvm/Support/Format.h"
88 #include "llvm/Support/KnownBits.h"
89 #include "llvm/Support/MachineValueType.h"
90 #include "llvm/Support/MathExtras.h"
91 #include "llvm/Support/raw_ostream.h"
92 #include "llvm/Target/TargetMachine.h"
93 #include "llvm/Target/TargetOptions.h"
94 #include <algorithm>
95 #include <cassert>
96 #include <cstdint>
97 #include <iterator>
98 #include <list>
99 #include <utility>
100 #include <vector>
101 
102 using namespace llvm;
103 
104 #define DEBUG_TYPE "ppc-lowering"
105 
106 static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
107 cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);
108 
109 static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
110 cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden);
111 
112 static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
113 cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);
114 
115 static cl::opt<bool> DisableSCO("disable-ppc-sco",
116 cl::desc("disable sibling call optimization on ppc"), cl::Hidden);
117 
118 static cl::opt<bool> DisableInnermostLoopAlign32("disable-ppc-innermost-loop-align32",
119 cl::desc("don't always align innermost loop to 32 bytes on ppc"), cl::Hidden);
120 
121 static cl::opt<bool> EnableQuadPrecision("enable-ppc-quad-precision",
122 cl::desc("enable quad precision float support on ppc"), cl::Hidden);
123 
124 static cl::opt<bool> UseAbsoluteJumpTables("ppc-use-absolute-jumptables",
125 cl::desc("use absolute jump tables on ppc"), cl::Hidden);
126 
127 STATISTIC(NumTailCalls, "Number of tail calls");
128 STATISTIC(NumSiblingCalls, "Number of sibling calls");
129 
130 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int);
131 
132 static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl);
133 
134 // FIXME: Remove this once the bug has been fixed!
135 extern cl::opt<bool> ANDIGlueBug;
136 
137 PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
138                                      const PPCSubtarget &STI)
139     : TargetLowering(TM), Subtarget(STI) {
140   // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
141   // arguments are at least 4/8 bytes aligned.
142   bool isPPC64 = Subtarget.isPPC64();
143   setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4));
144 
145   // Set up the register classes.
146   addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
147   if (!useSoftFloat()) {
148     if (hasSPE()) {
149       addRegisterClass(MVT::f32, &PPC::GPRCRegClass);
150       addRegisterClass(MVT::f64, &PPC::SPERCRegClass);
151     } else {
152       addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
153       addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
154     }
155   }
156 
157   // Match BITREVERSE to customized fast code sequence in the td file.
158   setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
159   setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
160 
161   // Sub-word ATOMIC_CMP_SWAP need to ensure that the input is zero-extended.
162   setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
163 
164   // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
165   for (MVT VT : MVT::integer_valuetypes()) {
166     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
167     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
168   }
169 
170   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
171 
172   // PowerPC has pre-inc load and store's.
173   setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
174   setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
175   setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
176   setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
177   setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
178   setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
179   setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
180   setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
181   setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
182   setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
183   if (!Subtarget.hasSPE()) {
184     setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
185     setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
186     setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
187     setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
188   }
189 
190   // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
191   const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
192   for (MVT VT : ScalarIntVTs) {
193     setOperationAction(ISD::ADDC, VT, Legal);
194     setOperationAction(ISD::ADDE, VT, Legal);
195     setOperationAction(ISD::SUBC, VT, Legal);
196     setOperationAction(ISD::SUBE, VT, Legal);
197   }
198 
199   if (Subtarget.useCRBits()) {
200     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
201 
202     if (isPPC64 || Subtarget.hasFPCVT()) {
203       setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
204       AddPromotedToType (ISD::SINT_TO_FP, MVT::i1,
205                          isPPC64 ? MVT::i64 : MVT::i32);
206       setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
207       AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
208                         isPPC64 ? MVT::i64 : MVT::i32);
209     } else {
210       setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
211       setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
212     }
213 
214     // PowerPC does not support direct load/store of condition registers.
215     setOperationAction(ISD::LOAD, MVT::i1, Custom);
216     setOperationAction(ISD::STORE, MVT::i1, Custom);
217 
218     // FIXME: Remove this once the ANDI glue bug is fixed:
219     if (ANDIGlueBug)
220       setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
221 
222     for (MVT VT : MVT::integer_valuetypes()) {
223       setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
224       setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
225       setTruncStoreAction(VT, MVT::i1, Expand);
226     }
227 
228     addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
229   }
230 
231   // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
232   // PPC (the libcall is not available).
233   setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
234   setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);
235 
236   // We do not currently implement these libm ops for PowerPC.
237   setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
238   setOperationAction(ISD::FCEIL,  MVT::ppcf128, Expand);
239   setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
240   setOperationAction(ISD::FRINT,  MVT::ppcf128, Expand);
241   setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
242   setOperationAction(ISD::FREM, MVT::ppcf128, Expand);
243 
244   // PowerPC has no SREM/UREM instructions unless we are on P9
245   // On P9 we may use a hardware instruction to compute the remainder.
246   // The instructions are not legalized directly because in the cases where the
247   // result of both the remainder and the division is required it is more
248   // efficient to compute the remainder from the result of the division rather
249   // than use the remainder instruction.
250   if (Subtarget.isISA3_0()) {
251     setOperationAction(ISD::SREM, MVT::i32, Custom);
252     setOperationAction(ISD::UREM, MVT::i32, Custom);
253     setOperationAction(ISD::SREM, MVT::i64, Custom);
254     setOperationAction(ISD::UREM, MVT::i64, Custom);
255   } else {
256     setOperationAction(ISD::SREM, MVT::i32, Expand);
257     setOperationAction(ISD::UREM, MVT::i32, Expand);
258     setOperationAction(ISD::SREM, MVT::i64, Expand);
259     setOperationAction(ISD::UREM, MVT::i64, Expand);
260   }
261 
262   // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
263   setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
264   setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
265   setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
266   setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
267   setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
268   setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
269   setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
270   setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
271 
272   // Handle constrained floating-point operations of scalar.
273   // TODO: Handle SPE specific operation.
274   setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
275   setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
276   setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
277   setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
278 
279   setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
280   setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
281   setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
282   setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);
283 
284   // We don't support sin/cos/sqrt/fmod/pow
285   setOperationAction(ISD::FSIN , MVT::f64, Expand);
286   setOperationAction(ISD::FCOS , MVT::f64, Expand);
287   setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
288   setOperationAction(ISD::FREM , MVT::f64, Expand);
289   setOperationAction(ISD::FPOW , MVT::f64, Expand);
290   setOperationAction(ISD::FSIN , MVT::f32, Expand);
291   setOperationAction(ISD::FCOS , MVT::f32, Expand);
292   setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
293   setOperationAction(ISD::FREM , MVT::f32, Expand);
294   setOperationAction(ISD::FPOW , MVT::f32, Expand);
295   if (Subtarget.hasSPE()) {
296     setOperationAction(ISD::FMA  , MVT::f64, Expand);
297     setOperationAction(ISD::FMA  , MVT::f32, Expand);
298   } else {
299     setOperationAction(ISD::FMA  , MVT::f64, Legal);
300     setOperationAction(ISD::FMA  , MVT::f32, Legal);
301   }
302 
303   setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
304 
305   // If we're enabling GP optimizations, use hardware square root
306   if (!Subtarget.hasFSQRT() &&
307       !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
308         Subtarget.hasFRE()))
309     setOperationAction(ISD::FSQRT, MVT::f64, Expand);
310 
311   if (!Subtarget.hasFSQRT() &&
312       !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
313         Subtarget.hasFRES()))
314     setOperationAction(ISD::FSQRT, MVT::f32, Expand);
315 
316   if (Subtarget.hasFCPSGN()) {
317     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
318     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
319   } else {
320     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
321     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
322   }
323 
324   if (Subtarget.hasFPRND()) {
325     setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
326     setOperationAction(ISD::FCEIL,  MVT::f64, Legal);
327     setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
328     setOperationAction(ISD::FROUND, MVT::f64, Legal);
329 
330     setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
331     setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
332     setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
333     setOperationAction(ISD::FROUND, MVT::f32, Legal);
334   }
335 
336   // PowerPC does not have BSWAP, but we can use vector BSWAP instruction xxbrd
337   // to speed up scalar BSWAP64.
338   // CTPOP or CTTZ were introduced in P8/P9 respectively
339   setOperationAction(ISD::BSWAP, MVT::i32  , Expand);
340   if (Subtarget.hasP9Vector())
341     setOperationAction(ISD::BSWAP, MVT::i64  , Custom);
342   else
343     setOperationAction(ISD::BSWAP, MVT::i64  , Expand);
344   if (Subtarget.isISA3_0()) {
345     setOperationAction(ISD::CTTZ , MVT::i32  , Legal);
346     setOperationAction(ISD::CTTZ , MVT::i64  , Legal);
347   } else {
348     setOperationAction(ISD::CTTZ , MVT::i32  , Expand);
349     setOperationAction(ISD::CTTZ , MVT::i64  , Expand);
350   }
351 
352   if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
353     setOperationAction(ISD::CTPOP, MVT::i32  , Legal);
354     setOperationAction(ISD::CTPOP, MVT::i64  , Legal);
355   } else {
356     setOperationAction(ISD::CTPOP, MVT::i32  , Expand);
357     setOperationAction(ISD::CTPOP, MVT::i64  , Expand);
358   }
359 
360   // PowerPC does not have ROTR
361   setOperationAction(ISD::ROTR, MVT::i32   , Expand);
362   setOperationAction(ISD::ROTR, MVT::i64   , Expand);
363 
364   if (!Subtarget.useCRBits()) {
365     // PowerPC does not have Select
366     setOperationAction(ISD::SELECT, MVT::i32, Expand);
367     setOperationAction(ISD::SELECT, MVT::i64, Expand);
368     setOperationAction(ISD::SELECT, MVT::f32, Expand);
369     setOperationAction(ISD::SELECT, MVT::f64, Expand);
370   }
371 
372   // PowerPC wants to turn select_cc of FP into fsel when possible.
373   setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
374   setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
375 
376   // PowerPC wants to optimize integer setcc a bit
377   if (!Subtarget.useCRBits())
378     setOperationAction(ISD::SETCC, MVT::i32, Custom);
379 
380   // PowerPC does not have BRCOND which requires SetCC
381   if (!Subtarget.useCRBits())
382     setOperationAction(ISD::BRCOND, MVT::Other, Expand);
383 
384   setOperationAction(ISD::BR_JT,  MVT::Other, Expand);
385 
386   if (Subtarget.hasSPE()) {
387     // SPE has built-in conversions
388     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
389     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
390     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
391   } else {
392     // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
393     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
394 
395     // PowerPC does not have [U|S]INT_TO_FP
396     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
397     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
398   }
399 
400   if (Subtarget.hasDirectMove() && isPPC64) {
401     setOperationAction(ISD::BITCAST, MVT::f32, Legal);
402     setOperationAction(ISD::BITCAST, MVT::i32, Legal);
403     setOperationAction(ISD::BITCAST, MVT::i64, Legal);
404     setOperationAction(ISD::BITCAST, MVT::f64, Legal);
405     if (TM.Options.UnsafeFPMath) {
406       setOperationAction(ISD::LRINT, MVT::f64, Legal);
407       setOperationAction(ISD::LRINT, MVT::f32, Legal);
408       setOperationAction(ISD::LLRINT, MVT::f64, Legal);
409       setOperationAction(ISD::LLRINT, MVT::f32, Legal);
410       setOperationAction(ISD::LROUND, MVT::f64, Legal);
411       setOperationAction(ISD::LROUND, MVT::f32, Legal);
412       setOperationAction(ISD::LLROUND, MVT::f64, Legal);
413       setOperationAction(ISD::LLROUND, MVT::f32, Legal);
414     }
415   } else {
416     setOperationAction(ISD::BITCAST, MVT::f32, Expand);
417     setOperationAction(ISD::BITCAST, MVT::i32, Expand);
418     setOperationAction(ISD::BITCAST, MVT::i64, Expand);
419     setOperationAction(ISD::BITCAST, MVT::f64, Expand);
420   }
421 
422   // We cannot sextinreg(i1).  Expand to shifts.
423   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
424 
425   // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
426   // SjLj exception handling but a light-weight setjmp/longjmp replacement to
427   // support continuation, user-level threading, and etc.. As a result, no
428   // other SjLj exception interfaces are implemented and please don't build
429   // your own exception handling based on them.
430   // LLVM/Clang supports zero-cost DWARF exception handling.
431   setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
432   setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
433 
434   // We want to legalize GlobalAddress and ConstantPool nodes into the
435   // appropriate instructions to materialize the address.
436   setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
437   setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
438   setOperationAction(ISD::BlockAddress,  MVT::i32, Custom);
439   setOperationAction(ISD::ConstantPool,  MVT::i32, Custom);
440   setOperationAction(ISD::JumpTable,     MVT::i32, Custom);
441   setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
442   setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
443   setOperationAction(ISD::BlockAddress,  MVT::i64, Custom);
444   setOperationAction(ISD::ConstantPool,  MVT::i64, Custom);
445   setOperationAction(ISD::JumpTable,     MVT::i64, Custom);
446 
447   // TRAP is legal.
448   setOperationAction(ISD::TRAP, MVT::Other, Legal);
449 
450   // TRAMPOLINE is custom lowered.
451   setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
452   setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
453 
454   // VASTART needs to be custom lowered to use the VarArgsFrameIndex
455   setOperationAction(ISD::VASTART           , MVT::Other, Custom);
456 
457   if (Subtarget.is64BitELFABI()) {
458     // VAARG always uses double-word chunks, so promote anything smaller.
459     setOperationAction(ISD::VAARG, MVT::i1, Promote);
460     AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
461     setOperationAction(ISD::VAARG, MVT::i8, Promote);
462     AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
463     setOperationAction(ISD::VAARG, MVT::i16, Promote);
464     AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
465     setOperationAction(ISD::VAARG, MVT::i32, Promote);
466     AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
467     setOperationAction(ISD::VAARG, MVT::Other, Expand);
468   } else if (Subtarget.is32BitELFABI()) {
469     // VAARG is custom lowered with the 32-bit SVR4 ABI.
470     setOperationAction(ISD::VAARG, MVT::Other, Custom);
471     setOperationAction(ISD::VAARG, MVT::i64, Custom);
472   } else
473     setOperationAction(ISD::VAARG, MVT::Other, Expand);
474 
475   // VACOPY is custom lowered with the 32-bit SVR4 ABI.
476   if (Subtarget.is32BitELFABI())
477     setOperationAction(ISD::VACOPY            , MVT::Other, Custom);
478   else
479     setOperationAction(ISD::VACOPY            , MVT::Other, Expand);
480 
481   // Use the default implementation.
482   setOperationAction(ISD::VAEND             , MVT::Other, Expand);
483   setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
484   setOperationAction(ISD::STACKRESTORE      , MVT::Other, Custom);
485   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
486   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64  , Custom);
487   setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
488   setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
489   setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
490   setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);
491 
492   // We want to custom lower some of our intrinsics.
493   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
494 
495   // To handle counter-based loop conditions.
496   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);
497 
498   setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
499   setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
500   setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
501   setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
502 
503   // Comparisons that require checking two conditions.
504   if (Subtarget.hasSPE()) {
505     setCondCodeAction(ISD::SETO, MVT::f32, Expand);
506     setCondCodeAction(ISD::SETO, MVT::f64, Expand);
507     setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
508     setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
509   }
510   setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
511   setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
512   setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
513   setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
514   setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
515   setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
516   setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
517   setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
518   setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
519   setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
520   setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
521   setCondCodeAction(ISD::SETONE, MVT::f64, Expand);
522 
523   if (Subtarget.has64BitSupport()) {
524     // They also have instructions for converting between i64 and fp.
525     setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
526     setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
527     setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
528     setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
529     // This is just the low 32 bits of a (signed) fp->i64 conversion.
530     // We cannot do this with Promote because i64 is not a legal type.
531     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
532 
533     if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
534       setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
535   } else {
536     // PowerPC does not have FP_TO_UINT on 32-bit implementations.
537     if (Subtarget.hasSPE())
538       setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
539     else
540       setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
541   }
542 
543   // With the instructions enabled under FPCVT, we can do everything.
544   if (Subtarget.hasFPCVT()) {
545     if (Subtarget.has64BitSupport()) {
546       setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
547       setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
548       setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
549       setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
550     }
551 
552     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
553     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
554     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
555     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
556   }
557 
558   if (Subtarget.use64BitRegs()) {
559     // 64-bit PowerPC implementations can support i64 types directly
560     addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
561     // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
562     setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
563     // 64-bit PowerPC wants to expand i128 shifts itself.
564     setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
565     setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
566     setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
567   } else {
568     // 32-bit PowerPC wants to expand i64 shifts itself.
569     setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
570     setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
571     setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
572   }
573 
574   if (Subtarget.hasVSX()) {
575     setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
576     setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
577     setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
578     setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
579   }
580 
581   if (Subtarget.hasAltivec()) {
582     for (MVT VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
583       setOperationAction(ISD::SADDSAT, VT, Legal);
584       setOperationAction(ISD::SSUBSAT, VT, Legal);
585       setOperationAction(ISD::UADDSAT, VT, Legal);
586       setOperationAction(ISD::USUBSAT, VT, Legal);
587     }
588     // First set operation action for all vector types to expand. Then we
589     // will selectively turn on ones that can be effectively codegen'd.
590     for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
591       // add/sub are legal for all supported vector VT's.
592       setOperationAction(ISD::ADD, VT, Legal);
593       setOperationAction(ISD::SUB, VT, Legal);
594 
595       // For v2i64, these are only valid with P8Vector. This is corrected after
596       // the loop.
597       if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) {
598         setOperationAction(ISD::SMAX, VT, Legal);
599         setOperationAction(ISD::SMIN, VT, Legal);
600         setOperationAction(ISD::UMAX, VT, Legal);
601         setOperationAction(ISD::UMIN, VT, Legal);
602       }
603       else {
604         setOperationAction(ISD::SMAX, VT, Expand);
605         setOperationAction(ISD::SMIN, VT, Expand);
606         setOperationAction(ISD::UMAX, VT, Expand);
607         setOperationAction(ISD::UMIN, VT, Expand);
608       }
609 
610       if (Subtarget.hasVSX()) {
611         setOperationAction(ISD::FMAXNUM, VT, Legal);
612         setOperationAction(ISD::FMINNUM, VT, Legal);
613       }
614 
615       // Vector instructions introduced in P8
616       if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
617         setOperationAction(ISD::CTPOP, VT, Legal);
618         setOperationAction(ISD::CTLZ, VT, Legal);
619       }
620       else {
621         setOperationAction(ISD::CTPOP, VT, Expand);
622         setOperationAction(ISD::CTLZ, VT, Expand);
623       }
624 
625       // Vector instructions introduced in P9
626       if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
627         setOperationAction(ISD::CTTZ, VT, Legal);
628       else
629         setOperationAction(ISD::CTTZ, VT, Expand);
630 
631       // We promote all shuffles to v16i8.
632       setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
633       AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);
634 
635       // We promote all non-typed operations to v4i32.
636       setOperationAction(ISD::AND   , VT, Promote);
637       AddPromotedToType (ISD::AND   , VT, MVT::v4i32);
638       setOperationAction(ISD::OR    , VT, Promote);
639       AddPromotedToType (ISD::OR    , VT, MVT::v4i32);
640       setOperationAction(ISD::XOR   , VT, Promote);
641       AddPromotedToType (ISD::XOR   , VT, MVT::v4i32);
642       setOperationAction(ISD::LOAD  , VT, Promote);
643       AddPromotedToType (ISD::LOAD  , VT, MVT::v4i32);
644       setOperationAction(ISD::SELECT, VT, Promote);
645       AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
646       setOperationAction(ISD::VSELECT, VT, Legal);
647       setOperationAction(ISD::SELECT_CC, VT, Promote);
648       AddPromotedToType (ISD::SELECT_CC, VT, MVT::v4i32);
649       setOperationAction(ISD::STORE, VT, Promote);
650       AddPromotedToType (ISD::STORE, VT, MVT::v4i32);
651 
652       // No other operations are legal.
653       setOperationAction(ISD::MUL , VT, Expand);
654       setOperationAction(ISD::SDIV, VT, Expand);
655       setOperationAction(ISD::SREM, VT, Expand);
656       setOperationAction(ISD::UDIV, VT, Expand);
657       setOperationAction(ISD::UREM, VT, Expand);
658       setOperationAction(ISD::FDIV, VT, Expand);
659       setOperationAction(ISD::FREM, VT, Expand);
660       setOperationAction(ISD::FNEG, VT, Expand);
661       setOperationAction(ISD::FSQRT, VT, Expand);
662       setOperationAction(ISD::FLOG, VT, Expand);
663       setOperationAction(ISD::FLOG10, VT, Expand);
664       setOperationAction(ISD::FLOG2, VT, Expand);
665       setOperationAction(ISD::FEXP, VT, Expand);
666       setOperationAction(ISD::FEXP2, VT, Expand);
667       setOperationAction(ISD::FSIN, VT, Expand);
668       setOperationAction(ISD::FCOS, VT, Expand);
669       setOperationAction(ISD::FABS, VT, Expand);
670       setOperationAction(ISD::FFLOOR, VT, Expand);
671       setOperationAction(ISD::FCEIL,  VT, Expand);
672       setOperationAction(ISD::FTRUNC, VT, Expand);
673       setOperationAction(ISD::FRINT,  VT, Expand);
674       setOperationAction(ISD::FNEARBYINT, VT, Expand);
675       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
676       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
677       setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
678       setOperationAction(ISD::MULHU, VT, Expand);
679       setOperationAction(ISD::MULHS, VT, Expand);
680       setOperationAction(ISD::UMUL_LOHI, VT, Expand);
681       setOperationAction(ISD::SMUL_LOHI, VT, Expand);
682       setOperationAction(ISD::UDIVREM, VT, Expand);
683       setOperationAction(ISD::SDIVREM, VT, Expand);
684       setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
685       setOperationAction(ISD::FPOW, VT, Expand);
686       setOperationAction(ISD::BSWAP, VT, Expand);
687       setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
688       setOperationAction(ISD::ROTL, VT, Expand);
689       setOperationAction(ISD::ROTR, VT, Expand);
690 
691       for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
692         setTruncStoreAction(VT, InnerVT, Expand);
693         setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
694         setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
695         setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
696       }
697     }
698     if (!Subtarget.hasP8Vector()) {
699       setOperationAction(ISD::SMAX, MVT::v2i64, Expand);
700       setOperationAction(ISD::SMIN, MVT::v2i64, Expand);
701       setOperationAction(ISD::UMAX, MVT::v2i64, Expand);
702       setOperationAction(ISD::UMIN, MVT::v2i64, Expand);
703     }
704 
705     for (auto VT : {MVT::v2i64, MVT::v4i32, MVT::v8i16, MVT::v16i8})
706       setOperationAction(ISD::ABS, VT, Custom);
707 
708     // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
709     // with merges, splats, etc.
710     setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
711 
712     // Vector truncates to sub-word integer that fit in an Altivec/VSX register
713     // are cheap, so handle them before they get expanded to scalar.
714     setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
715     setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
716     setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
717     setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
718     setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);
719 
720     setOperationAction(ISD::AND   , MVT::v4i32, Legal);
721     setOperationAction(ISD::OR    , MVT::v4i32, Legal);
722     setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
723     setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
724     setOperationAction(ISD::SELECT, MVT::v4i32,
725                        Subtarget.useCRBits() ? Legal : Expand);
726     setOperationAction(ISD::STORE , MVT::v4i32, Legal);
727     setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
728     setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
729     setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
730     setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
731     setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
732     setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
733     setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
734     setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
735 
736     // Without hasP8Altivec set, v2i64 SMAX isn't available.
737     // But ABS custom lowering requires SMAX support.
738     if (!Subtarget.hasP8Altivec())
739       setOperationAction(ISD::ABS, MVT::v2i64, Expand);
740 
741     // With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w).
742     if (Subtarget.hasAltivec())
743       for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})
744         setOperationAction(ISD::ROTL, VT, Legal);
745     // With hasP8Altivec set, we can lower ISD::ROTL to vrld.
746     if (Subtarget.hasP8Altivec())
747       setOperationAction(ISD::ROTL, MVT::v2i64, Legal);
748 
749     addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
750     addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
751     addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
752     addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);
753 
754     setOperationAction(ISD::MUL, MVT::v4f32, Legal);
755     setOperationAction(ISD::FMA, MVT::v4f32, Legal);
756 
757     if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
758       setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
759       setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
760     }
761 
762     if (Subtarget.hasP8Altivec())
763       setOperationAction(ISD::MUL, MVT::v4i32, Legal);
764     else
765       setOperationAction(ISD::MUL, MVT::v4i32, Custom);
766 
767     setOperationAction(ISD::MUL, MVT::v8i16, Legal);
768     setOperationAction(ISD::MUL, MVT::v16i8, Custom);
769 
770     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
771     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);
772 
773     setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
774     setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
775     setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
776     setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
777 
778     // Altivec does not contain unordered floating-point compare instructions
779     setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
780     setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
781     setCondCodeAction(ISD::SETO,   MVT::v4f32, Expand);
782     setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);
783 
784     if (Subtarget.hasVSX()) {
785       setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
786       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
787       if (Subtarget.hasP8Vector()) {
788         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
789         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
790       }
791       if (Subtarget.hasDirectMove() && isPPC64) {
792         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
793         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
794         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
795         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
796         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
797         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
798         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
799         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
800       }
801       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
802 
803       // The nearbyint variants are not allowed to raise the inexact exception
804       // so we can only code-gen them with unsafe math.
805       if (TM.Options.UnsafeFPMath) {
806         setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
807         setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
808       }
809 
810       setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
811       setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
812       setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
813       setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
814       setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
815       setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
816       setOperationAction(ISD::FROUND, MVT::f64, Legal);
817       setOperationAction(ISD::FRINT, MVT::f64, Legal);
818 
819       setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
820       setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
821       setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
822       setOperationAction(ISD::FROUND, MVT::f32, Legal);
823       setOperationAction(ISD::FRINT, MVT::f32, Legal);
824 
825       setOperationAction(ISD::MUL, MVT::v2f64, Legal);
826       setOperationAction(ISD::FMA, MVT::v2f64, Legal);
827 
828       setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
829       setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
830 
831       // Share the Altivec comparison restrictions.
832       setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
833       setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
834       setCondCodeAction(ISD::SETO,   MVT::v2f64, Expand);
835       setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);
836 
837       setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
838       setOperationAction(ISD::STORE, MVT::v2f64, Legal);
839 
840       setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);
841 
842       if (Subtarget.hasP8Vector())
843         addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);
844 
845       addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);
846 
847       addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
848       addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
849       addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);
850 
851       if (Subtarget.hasP8Altivec()) {
852         setOperationAction(ISD::SHL, MVT::v2i64, Legal);
853         setOperationAction(ISD::SRA, MVT::v2i64, Legal);
854         setOperationAction(ISD::SRL, MVT::v2i64, Legal);
855 
856         // 128 bit shifts can be accomplished via 3 instructions for SHL and
857         // SRL, but not for SRA because of the instructions available:
858         // VS{RL} and VS{RL}O. However due to direct move costs, it's not worth
859         // doing
860         setOperationAction(ISD::SHL, MVT::v1i128, Expand);
861         setOperationAction(ISD::SRL, MVT::v1i128, Expand);
862         setOperationAction(ISD::SRA, MVT::v1i128, Expand);
863 
864         setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
865       }
866       else {
867         setOperationAction(ISD::SHL, MVT::v2i64, Expand);
868         setOperationAction(ISD::SRA, MVT::v2i64, Expand);
869         setOperationAction(ISD::SRL, MVT::v2i64, Expand);
870 
871         setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
872 
873         // VSX v2i64 only supports non-arithmetic operations.
874         setOperationAction(ISD::ADD, MVT::v2i64, Expand);
875         setOperationAction(ISD::SUB, MVT::v2i64, Expand);
876       }
877 
878       setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
879       AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64);
880       setOperationAction(ISD::STORE, MVT::v2i64, Promote);
881       AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64);
882 
883       setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);
884 
885       setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
886       setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
887       setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
888       setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
889 
890       // Custom handling for partial vectors of integers converted to
891       // floating point. We already have optimal handling for v2i32 through
892       // the DAG combine, so those aren't necessary.
893       setOperationAction(ISD::UINT_TO_FP, MVT::v2i8, Custom);
894       setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
895       setOperationAction(ISD::UINT_TO_FP, MVT::v2i16, Custom);
896       setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
897       setOperationAction(ISD::SINT_TO_FP, MVT::v2i8, Custom);
898       setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Custom);
899       setOperationAction(ISD::SINT_TO_FP, MVT::v2i16, Custom);
900       setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
901 
902       setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
903       setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
904       setOperationAction(ISD::FABS, MVT::v4f32, Legal);
905       setOperationAction(ISD::FABS, MVT::v2f64, Legal);
906       setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
907       setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Legal);
908 
909       if (Subtarget.hasDirectMove())
910         setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
911       setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
912 
913       // Handle constrained floating-point operations of vector.
914       // The predictor is `hasVSX` because altivec instruction has
915       // no exception but VSX vector instruction has.
916       setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
917       setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
918       setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
919       setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
920 
921       setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
922       setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
923       setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
924       setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
925 
926       addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
927     }
928 
929     if (Subtarget.hasP8Altivec()) {
930       addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
931       addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
932     }
933 
934     if (Subtarget.hasP9Vector()) {
935       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
936       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
937 
938       // 128 bit shifts can be accomplished via 3 instructions for SHL and
939       // SRL, but not for SRA because of the instructions available:
940       // VS{RL} and VS{RL}O.
941       setOperationAction(ISD::SHL, MVT::v1i128, Legal);
942       setOperationAction(ISD::SRL, MVT::v1i128, Legal);
943       setOperationAction(ISD::SRA, MVT::v1i128, Expand);
944 
945       if (EnableQuadPrecision) {
946         addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
947         setOperationAction(ISD::FADD, MVT::f128, Legal);
948         setOperationAction(ISD::FSUB, MVT::f128, Legal);
949         setOperationAction(ISD::FDIV, MVT::f128, Legal);
950         setOperationAction(ISD::FMUL, MVT::f128, Legal);
951         setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
952         // No extending loads to f128 on PPC.
953         for (MVT FPT : MVT::fp_valuetypes())
954           setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
955         setOperationAction(ISD::FMA, MVT::f128, Legal);
956         setCondCodeAction(ISD::SETULT, MVT::f128, Expand);
957         setCondCodeAction(ISD::SETUGT, MVT::f128, Expand);
958         setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand);
959         setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
960         setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
961         setCondCodeAction(ISD::SETONE, MVT::f128, Expand);
962 
963         setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
964         setOperationAction(ISD::FRINT, MVT::f128, Legal);
965         setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
966         setOperationAction(ISD::FCEIL, MVT::f128, Legal);
967         setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
968         setOperationAction(ISD::FROUND, MVT::f128, Legal);
969 
970         setOperationAction(ISD::SELECT, MVT::f128, Expand);
971         setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
972         setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
973         setTruncStoreAction(MVT::f128, MVT::f64, Expand);
974         setTruncStoreAction(MVT::f128, MVT::f32, Expand);
975         setOperationAction(ISD::BITCAST, MVT::i128, Custom);
976         // No implementation for these ops for PowerPC.
977         setOperationAction(ISD::FSIN , MVT::f128, Expand);
978         setOperationAction(ISD::FCOS , MVT::f128, Expand);
979         setOperationAction(ISD::FPOW, MVT::f128, Expand);
980         setOperationAction(ISD::FPOWI, MVT::f128, Expand);
981         setOperationAction(ISD::FREM, MVT::f128, Expand);
982 
983         // Handle constrained floating-point operations of fp128
984         setOperationAction(ISD::STRICT_FADD, MVT::f128, Legal);
985         setOperationAction(ISD::STRICT_FSUB, MVT::f128, Legal);
986         setOperationAction(ISD::STRICT_FMUL, MVT::f128, Legal);
987         setOperationAction(ISD::STRICT_FDIV, MVT::f128, Legal);
988       }
989       setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
990       setOperationAction(ISD::BSWAP, MVT::v8i16, Legal);
991       setOperationAction(ISD::BSWAP, MVT::v4i32, Legal);
992       setOperationAction(ISD::BSWAP, MVT::v2i64, Legal);
993       setOperationAction(ISD::BSWAP, MVT::v1i128, Legal);
994     }
995 
996     if (Subtarget.hasP9Altivec()) {
997       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
998       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
999 
1000       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8,  Legal);
1001       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
1002       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
1003       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8,  Legal);
1004       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal);
1005       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
1006       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
1007     }
1008   }
1009 
1010   if (Subtarget.hasQPX()) {
1011     setOperationAction(ISD::FADD, MVT::v4f64, Legal);
1012     setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
1013     setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
1014     setOperationAction(ISD::FREM, MVT::v4f64, Expand);
1015 
1016     setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
1017     setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);
1018 
1019     setOperationAction(ISD::LOAD  , MVT::v4f64, Custom);
1020     setOperationAction(ISD::STORE , MVT::v4f64, Custom);
1021 
1022     setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
1023     setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);
1024 
1025     if (!Subtarget.useCRBits())
1026       setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
1027     setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);
1028 
1029     setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f64, Legal);
1030     setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f64, Expand);
1031     setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f64, Expand);
1032     setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f64, Expand);
1033     setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f64, Custom);
1034     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
1035     setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);
1036 
1037     setOperationAction(ISD::FP_TO_SINT , MVT::v4f64, Legal);
1038     setOperationAction(ISD::FP_TO_UINT , MVT::v4f64, Expand);
1039 
1040     setOperationAction(ISD::FP_ROUND , MVT::v4f32, Legal);
1041     setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);
1042 
1043     setOperationAction(ISD::FNEG , MVT::v4f64, Legal);
1044     setOperationAction(ISD::FABS , MVT::v4f64, Legal);
1045     setOperationAction(ISD::FSIN , MVT::v4f64, Expand);
1046     setOperationAction(ISD::FCOS , MVT::v4f64, Expand);
1047     setOperationAction(ISD::FPOW , MVT::v4f64, Expand);
1048     setOperationAction(ISD::FLOG , MVT::v4f64, Expand);
1049     setOperationAction(ISD::FLOG2 , MVT::v4f64, Expand);
1050     setOperationAction(ISD::FLOG10 , MVT::v4f64, Expand);
1051     setOperationAction(ISD::FEXP , MVT::v4f64, Expand);
1052     setOperationAction(ISD::FEXP2 , MVT::v4f64, Expand);
1053 
1054     setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
1055     setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);
1056 
1057     setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
1058     setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);
1059 
1060     addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);
1061 
1062     setOperationAction(ISD::FADD, MVT::v4f32, Legal);
1063     setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
1064     setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
1065     setOperationAction(ISD::FREM, MVT::v4f32, Expand);
1066 
1067     setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
1068     setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);
1069 
1070     setOperationAction(ISD::LOAD  , MVT::v4f32, Custom);
1071     setOperationAction(ISD::STORE , MVT::v4f32, Custom);
1072 
1073     if (!Subtarget.useCRBits())
1074       setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
1075     setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
1076 
1077     setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f32, Legal);
1078     setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f32, Expand);
1079     setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f32, Expand);
1080     setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f32, Expand);
1081     setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f32, Custom);
1082     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
1083     setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
1084 
1085     setOperationAction(ISD::FP_TO_SINT , MVT::v4f32, Legal);
1086     setOperationAction(ISD::FP_TO_UINT , MVT::v4f32, Expand);
1087 
1088     setOperationAction(ISD::FNEG , MVT::v4f32, Legal);
1089     setOperationAction(ISD::FABS , MVT::v4f32, Legal);
1090     setOperationAction(ISD::FSIN , MVT::v4f32, Expand);
1091     setOperationAction(ISD::FCOS , MVT::v4f32, Expand);
1092     setOperationAction(ISD::FPOW , MVT::v4f32, Expand);
1093     setOperationAction(ISD::FLOG , MVT::v4f32, Expand);
1094     setOperationAction(ISD::FLOG2 , MVT::v4f32, Expand);
1095     setOperationAction(ISD::FLOG10 , MVT::v4f32, Expand);
1096     setOperationAction(ISD::FEXP , MVT::v4f32, Expand);
1097     setOperationAction(ISD::FEXP2 , MVT::v4f32, Expand);
1098 
1099     setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
1100     setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
1101 
1102     setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
1103     setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);
1104 
1105     addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);
1106 
1107     setOperationAction(ISD::AND , MVT::v4i1, Legal);
1108     setOperationAction(ISD::OR , MVT::v4i1, Legal);
1109     setOperationAction(ISD::XOR , MVT::v4i1, Legal);
1110 
1111     if (!Subtarget.useCRBits())
1112       setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
1113     setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);
1114 
1115     setOperationAction(ISD::LOAD  , MVT::v4i1, Custom);
1116     setOperationAction(ISD::STORE , MVT::v4i1, Custom);
1117 
1118     setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4i1, Custom);
1119     setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4i1, Expand);
1120     setOperationAction(ISD::CONCAT_VECTORS , MVT::v4i1, Expand);
1121     setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4i1, Expand);
1122     setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4i1, Custom);
1123     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
1124     setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);
1125 
1126     setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
1127     setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);
1128 
1129     addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);
1130 
1131     setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
1132     setOperationAction(ISD::FCEIL,  MVT::v4f64, Legal);
1133     setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
1134     setOperationAction(ISD::FROUND, MVT::v4f64, Legal);
1135 
1136     setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
1137     setOperationAction(ISD::FCEIL,  MVT::v4f32, Legal);
1138     setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
1139     setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
1140 
1141     setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
1142     setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
1143 
1144     // These need to set FE_INEXACT, and so cannot be vectorized here.
1145     setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
1146     setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
1147 
1148     if (TM.Options.UnsafeFPMath) {
1149       setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
1150       setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
1151 
1152       setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
1153       setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
1154     } else {
1155       setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
1156       setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);
1157 
1158       setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
1159       setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
1160     }
1161 
1162     // TODO: Handle constrained floating-point operations of v4f64
1163   }
1164 
1165   if (Subtarget.has64BitSupport())
1166     setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
1167 
1168   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);
1169 
1170   if (!isPPC64) {
1171     setOperationAction(ISD::ATOMIC_LOAD,  MVT::i64, Expand);
1172     setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
1173   }
1174 
1175   setBooleanContents(ZeroOrOneBooleanContent);
1176 
1177   if (Subtarget.hasAltivec()) {
1178     // Altivec instructions set fields to all zeros or all ones.
1179     setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
1180   }
1181 
1182   if (!isPPC64) {
1183     // These libcalls are not available in 32-bit.
1184     setLibcallName(RTLIB::SHL_I128, nullptr);
1185     setLibcallName(RTLIB::SRL_I128, nullptr);
1186     setLibcallName(RTLIB::SRA_I128, nullptr);
1187   }
1188 
1189   setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);
1190 
1191   // We have target-specific dag combine patterns for the following nodes:
1192   setTargetDAGCombine(ISD::ADD);
1193   setTargetDAGCombine(ISD::SHL);
1194   setTargetDAGCombine(ISD::SRA);
1195   setTargetDAGCombine(ISD::SRL);
1196   setTargetDAGCombine(ISD::MUL);
1197   setTargetDAGCombine(ISD::SINT_TO_FP);
1198   setTargetDAGCombine(ISD::BUILD_VECTOR);
1199   if (Subtarget.hasFPCVT())
1200     setTargetDAGCombine(ISD::UINT_TO_FP);
1201   setTargetDAGCombine(ISD::LOAD);
1202   setTargetDAGCombine(ISD::STORE);
1203   setTargetDAGCombine(ISD::BR_CC);
1204   if (Subtarget.useCRBits())
1205     setTargetDAGCombine(ISD::BRCOND);
1206   setTargetDAGCombine(ISD::BSWAP);
1207   setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1208   setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
1209   setTargetDAGCombine(ISD::INTRINSIC_VOID);
1210 
1211   setTargetDAGCombine(ISD::SIGN_EXTEND);
1212   setTargetDAGCombine(ISD::ZERO_EXTEND);
1213   setTargetDAGCombine(ISD::ANY_EXTEND);
1214 
1215   setTargetDAGCombine(ISD::TRUNCATE);
1216   setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1217 
1218 
1219   if (Subtarget.useCRBits()) {
1220     setTargetDAGCombine(ISD::TRUNCATE);
1221     setTargetDAGCombine(ISD::SETCC);
1222     setTargetDAGCombine(ISD::SELECT_CC);
1223   }
1224 
1225   // Use reciprocal estimates.
1226   if (TM.Options.UnsafeFPMath) {
1227     setTargetDAGCombine(ISD::FDIV);
1228     setTargetDAGCombine(ISD::FSQRT);
1229   }
1230 
1231   if (Subtarget.hasP9Altivec()) {
1232     setTargetDAGCombine(ISD::ABS);
1233     setTargetDAGCombine(ISD::VSELECT);
1234   }
1235 
1236   if (EnableQuadPrecision) {
1237     setLibcallName(RTLIB::LOG_F128, "logf128");
1238     setLibcallName(RTLIB::LOG2_F128, "log2f128");
1239     setLibcallName(RTLIB::LOG10_F128, "log10f128");
1240     setLibcallName(RTLIB::EXP_F128, "expf128");
1241     setLibcallName(RTLIB::EXP2_F128, "exp2f128");
1242     setLibcallName(RTLIB::SIN_F128, "sinf128");
1243     setLibcallName(RTLIB::COS_F128, "cosf128");
1244     setLibcallName(RTLIB::POW_F128, "powf128");
1245     setLibcallName(RTLIB::FMIN_F128, "fminf128");
1246     setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
1247     setLibcallName(RTLIB::POWI_F128, "__powikf2");
1248     setLibcallName(RTLIB::REM_F128, "fmodf128");
1249   }
1250 
1251   // With 32 condition bits, we don't need to sink (and duplicate) compares
1252   // aggressively in CodeGenPrep.
1253   if (Subtarget.useCRBits()) {
1254     setHasMultipleConditionRegisters();
1255     setJumpIsExpensive();
1256   }
1257 
1258   setMinFunctionAlignment(Align(4));
1259 
1260   switch (Subtarget.getCPUDirective()) {
1261   default: break;
1262   case PPC::DIR_970:
1263   case PPC::DIR_A2:
1264   case PPC::DIR_E500:
1265   case PPC::DIR_E500mc:
1266   case PPC::DIR_E5500:
1267   case PPC::DIR_PWR4:
1268   case PPC::DIR_PWR5:
1269   case PPC::DIR_PWR5X:
1270   case PPC::DIR_PWR6:
1271   case PPC::DIR_PWR6X:
1272   case PPC::DIR_PWR7:
1273   case PPC::DIR_PWR8:
1274   case PPC::DIR_PWR9:
1275   case PPC::DIR_PWR_FUTURE:
1276     setPrefLoopAlignment(Align(16));
1277     setPrefFunctionAlignment(Align(16));
1278     break;
1279   }
1280 
1281   if (Subtarget.enableMachineScheduler())
1282     setSchedulingPreference(Sched::Source);
1283   else
1284     setSchedulingPreference(Sched::Hybrid);
1285 
1286   computeRegisterProperties(STI.getRegisterInfo());
1287 
1288   // The Freescale cores do better with aggressive inlining of memcpy and
1289   // friends. GCC uses same threshold of 128 bytes (= 32 word stores).
1290   if (Subtarget.getCPUDirective() == PPC::DIR_E500mc ||
1291       Subtarget.getCPUDirective() == PPC::DIR_E5500) {
1292     MaxStoresPerMemset = 32;
1293     MaxStoresPerMemsetOptSize = 16;
1294     MaxStoresPerMemcpy = 32;
1295     MaxStoresPerMemcpyOptSize = 8;
1296     MaxStoresPerMemmove = 32;
1297     MaxStoresPerMemmoveOptSize = 8;
1298   } else if (Subtarget.getCPUDirective() == PPC::DIR_A2) {
1299     // The A2 also benefits from (very) aggressive inlining of memcpy and
1300     // friends. The overhead of a the function call, even when warm, can be
1301     // over one hundred cycles.
1302     MaxStoresPerMemset = 128;
1303     MaxStoresPerMemcpy = 128;
1304     MaxStoresPerMemmove = 128;
1305     MaxLoadsPerMemcmp = 128;
1306   } else {
1307     MaxLoadsPerMemcmp = 8;
1308     MaxLoadsPerMemcmpOptSize = 4;
1309   }
1310 }
1311 
1312 /// getMaxByValAlign - Helper for getByValTypeAlignment to determine
1313 /// the desired ByVal argument alignment.
1314 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
1315                              unsigned MaxMaxAlign) {
1316   if (MaxAlign == MaxMaxAlign)
1317     return;
1318   if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1319     if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
1320       MaxAlign = 32;
1321     else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
1322       MaxAlign = 16;
1323   } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1324     unsigned EltAlign = 0;
1325     getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
1326     if (EltAlign > MaxAlign)
1327       MaxAlign = EltAlign;
1328   } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1329     for (auto *EltTy : STy->elements()) {
1330       unsigned EltAlign = 0;
1331       getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
1332       if (EltAlign > MaxAlign)
1333         MaxAlign = EltAlign;
1334       if (MaxAlign == MaxMaxAlign)
1335         break;
1336     }
1337   }
1338 }
1339 
1340 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1341 /// function arguments in the caller parameter area.
1342 unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
1343                                                   const DataLayout &DL) const {
1344   // 16byte and wider vectors are passed on 16byte boundary.
1345   // The rest is 8 on PPC64 and 4 on PPC32 boundary.
1346   unsigned Align = Subtarget.isPPC64() ? 8 : 4;
1347   if (Subtarget.hasAltivec() || Subtarget.hasQPX())
1348     getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ? 32 : 16);
1349   return Align;
1350 }
1351 
1352 bool PPCTargetLowering::useSoftFloat() const {
1353   return Subtarget.useSoftFloat();
1354 }
1355 
1356 bool PPCTargetLowering::hasSPE() const {
1357   return Subtarget.hasSPE();
1358 }
1359 
1360 bool PPCTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
1361   return VT.isScalarInteger();
1362 }
1363 
1364 const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
1365   switch ((PPCISD::NodeType)Opcode) {
1366   case PPCISD::FIRST_NUMBER:    break;
1367   case PPCISD::FSEL:            return "PPCISD::FSEL";
1368   case PPCISD::XSMAXCDP:        return "PPCISD::XSMAXCDP";
1369   case PPCISD::XSMINCDP:        return "PPCISD::XSMINCDP";
1370   case PPCISD::FCFID:           return "PPCISD::FCFID";
1371   case PPCISD::FCFIDU:          return "PPCISD::FCFIDU";
1372   case PPCISD::FCFIDS:          return "PPCISD::FCFIDS";
1373   case PPCISD::FCFIDUS:         return "PPCISD::FCFIDUS";
1374   case PPCISD::FCTIDZ:          return "PPCISD::FCTIDZ";
1375   case PPCISD::FCTIWZ:          return "PPCISD::FCTIWZ";
1376   case PPCISD::FCTIDUZ:         return "PPCISD::FCTIDUZ";
1377   case PPCISD::FCTIWUZ:         return "PPCISD::FCTIWUZ";
1378   case PPCISD::FP_TO_UINT_IN_VSR:
1379                                 return "PPCISD::FP_TO_UINT_IN_VSR,";
1380   case PPCISD::FP_TO_SINT_IN_VSR:
1381                                 return "PPCISD::FP_TO_SINT_IN_VSR";
1382   case PPCISD::FRE:             return "PPCISD::FRE";
1383   case PPCISD::FRSQRTE:         return "PPCISD::FRSQRTE";
1384   case PPCISD::STFIWX:          return "PPCISD::STFIWX";
1385   case PPCISD::VMADDFP:         return "PPCISD::VMADDFP";
1386   case PPCISD::VNMSUBFP:        return "PPCISD::VNMSUBFP";
1387   case PPCISD::VPERM:           return "PPCISD::VPERM";
1388   case PPCISD::XXSPLT:          return "PPCISD::XXSPLT";
1389   case PPCISD::VECINSERT:       return "PPCISD::VECINSERT";
1390   case PPCISD::XXPERMDI:        return "PPCISD::XXPERMDI";
1391   case PPCISD::VECSHL:          return "PPCISD::VECSHL";
1392   case PPCISD::CMPB:            return "PPCISD::CMPB";
1393   case PPCISD::Hi:              return "PPCISD::Hi";
1394   case PPCISD::Lo:              return "PPCISD::Lo";
1395   case PPCISD::TOC_ENTRY:       return "PPCISD::TOC_ENTRY";
1396   case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
1397   case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
1398   case PPCISD::DYNALLOC:        return "PPCISD::DYNALLOC";
1399   case PPCISD::DYNAREAOFFSET:   return "PPCISD::DYNAREAOFFSET";
1400   case PPCISD::GlobalBaseReg:   return "PPCISD::GlobalBaseReg";
1401   case PPCISD::SRL:             return "PPCISD::SRL";
1402   case PPCISD::SRA:             return "PPCISD::SRA";
1403   case PPCISD::SHL:             return "PPCISD::SHL";
1404   case PPCISD::SRA_ADDZE:       return "PPCISD::SRA_ADDZE";
1405   case PPCISD::CALL:            return "PPCISD::CALL";
1406   case PPCISD::CALL_NOP:        return "PPCISD::CALL_NOP";
1407   case PPCISD::MTCTR:           return "PPCISD::MTCTR";
1408   case PPCISD::BCTRL:           return "PPCISD::BCTRL";
1409   case PPCISD::BCTRL_LOAD_TOC:  return "PPCISD::BCTRL_LOAD_TOC";
1410   case PPCISD::RET_FLAG:        return "PPCISD::RET_FLAG";
1411   case PPCISD::READ_TIME_BASE:  return "PPCISD::READ_TIME_BASE";
1412   case PPCISD::EH_SJLJ_SETJMP:  return "PPCISD::EH_SJLJ_SETJMP";
1413   case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
1414   case PPCISD::MFOCRF:          return "PPCISD::MFOCRF";
1415   case PPCISD::MFVSR:           return "PPCISD::MFVSR";
1416   case PPCISD::MTVSRA:          return "PPCISD::MTVSRA";
1417   case PPCISD::MTVSRZ:          return "PPCISD::MTVSRZ";
1418   case PPCISD::SINT_VEC_TO_FP:  return "PPCISD::SINT_VEC_TO_FP";
1419   case PPCISD::UINT_VEC_TO_FP:  return "PPCISD::UINT_VEC_TO_FP";
1420   case PPCISD::ANDI_rec_1_EQ_BIT:
1421     return "PPCISD::ANDI_rec_1_EQ_BIT";
1422   case PPCISD::ANDI_rec_1_GT_BIT:
1423     return "PPCISD::ANDI_rec_1_GT_BIT";
1424   case PPCISD::VCMP:            return "PPCISD::VCMP";
1425   case PPCISD::VCMPo:           return "PPCISD::VCMPo";
1426   case PPCISD::LBRX:            return "PPCISD::LBRX";
1427   case PPCISD::STBRX:           return "PPCISD::STBRX";
1428   case PPCISD::LFIWAX:          return "PPCISD::LFIWAX";
1429   case PPCISD::LFIWZX:          return "PPCISD::LFIWZX";
1430   case PPCISD::LXSIZX:          return "PPCISD::LXSIZX";
1431   case PPCISD::STXSIX:          return "PPCISD::STXSIX";
1432   case PPCISD::VEXTS:           return "PPCISD::VEXTS";
1433   case PPCISD::LXVD2X:          return "PPCISD::LXVD2X";
1434   case PPCISD::STXVD2X:         return "PPCISD::STXVD2X";
1435   case PPCISD::LOAD_VEC_BE:     return "PPCISD::LOAD_VEC_BE";
1436   case PPCISD::STORE_VEC_BE:    return "PPCISD::STORE_VEC_BE";
1437   case PPCISD::ST_VSR_SCAL_INT:
1438                                 return "PPCISD::ST_VSR_SCAL_INT";
1439   case PPCISD::COND_BRANCH:     return "PPCISD::COND_BRANCH";
1440   case PPCISD::BDNZ:            return "PPCISD::BDNZ";
1441   case PPCISD::BDZ:             return "PPCISD::BDZ";
1442   case PPCISD::MFFS:            return "PPCISD::MFFS";
1443   case PPCISD::FADDRTZ:         return "PPCISD::FADDRTZ";
1444   case PPCISD::TC_RETURN:       return "PPCISD::TC_RETURN";
1445   case PPCISD::CR6SET:          return "PPCISD::CR6SET";
1446   case PPCISD::CR6UNSET:        return "PPCISD::CR6UNSET";
1447   case PPCISD::PPC32_GOT:       return "PPCISD::PPC32_GOT";
1448   case PPCISD::PPC32_PICGOT:    return "PPCISD::PPC32_PICGOT";
1449   case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
1450   case PPCISD::LD_GOT_TPREL_L:  return "PPCISD::LD_GOT_TPREL_L";
1451   case PPCISD::ADD_TLS:         return "PPCISD::ADD_TLS";
1452   case PPCISD::ADDIS_TLSGD_HA:  return "PPCISD::ADDIS_TLSGD_HA";
1453   case PPCISD::ADDI_TLSGD_L:    return "PPCISD::ADDI_TLSGD_L";
1454   case PPCISD::GET_TLS_ADDR:    return "PPCISD::GET_TLS_ADDR";
1455   case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
1456   case PPCISD::ADDIS_TLSLD_HA:  return "PPCISD::ADDIS_TLSLD_HA";
1457   case PPCISD::ADDI_TLSLD_L:    return "PPCISD::ADDI_TLSLD_L";
1458   case PPCISD::GET_TLSLD_ADDR:  return "PPCISD::GET_TLSLD_ADDR";
1459   case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
1460   case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
1461   case PPCISD::ADDI_DTPREL_L:   return "PPCISD::ADDI_DTPREL_L";
1462   case PPCISD::VADD_SPLAT:      return "PPCISD::VADD_SPLAT";
1463   case PPCISD::SC:              return "PPCISD::SC";
1464   case PPCISD::CLRBHRB:         return "PPCISD::CLRBHRB";
1465   case PPCISD::MFBHRBE:         return "PPCISD::MFBHRBE";
1466   case PPCISD::RFEBB:           return "PPCISD::RFEBB";
1467   case PPCISD::XXSWAPD:         return "PPCISD::XXSWAPD";
1468   case PPCISD::SWAP_NO_CHAIN:   return "PPCISD::SWAP_NO_CHAIN";
1469   case PPCISD::VABSD:           return "PPCISD::VABSD";
1470   case PPCISD::QVFPERM:         return "PPCISD::QVFPERM";
1471   case PPCISD::QVGPCI:          return "PPCISD::QVGPCI";
1472   case PPCISD::QVALIGNI:        return "PPCISD::QVALIGNI";
1473   case PPCISD::QVESPLATI:       return "PPCISD::QVESPLATI";
1474   case PPCISD::QBFLT:           return "PPCISD::QBFLT";
1475   case PPCISD::QVLFSb:          return "PPCISD::QVLFSb";
1476   case PPCISD::BUILD_FP128:     return "PPCISD::BUILD_FP128";
1477   case PPCISD::BUILD_SPE64:     return "PPCISD::BUILD_SPE64";
1478   case PPCISD::EXTRACT_SPE:     return "PPCISD::EXTRACT_SPE";
1479   case PPCISD::EXTSWSLI:        return "PPCISD::EXTSWSLI";
1480   case PPCISD::LD_VSX_LH:       return "PPCISD::LD_VSX_LH";
1481   case PPCISD::FP_EXTEND_HALF:  return "PPCISD::FP_EXTEND_HALF";
1482   case PPCISD::LD_SPLAT:        return "PPCISD::LD_SPLAT";
1483   }
1484   return nullptr;
1485 }
1486 
1487 EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
1488                                           EVT VT) const {
1489   if (!VT.isVector())
1490     return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;
1491 
1492   if (Subtarget.hasQPX())
1493     return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());
1494 
1495   return VT.changeVectorElementTypeToInteger();
1496 }
1497 
1498 bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
1499   assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
1500   return true;
1501 }
1502 
1503 //===----------------------------------------------------------------------===//
1504 // Node matching predicates, for use by the tblgen matching code.
1505 //===----------------------------------------------------------------------===//
1506 
1507 /// isFloatingPointZero - Return true if this is 0.0 or -0.0.
1508 static bool isFloatingPointZero(SDValue Op) {
1509   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
1510     return CFP->getValueAPF().isZero();
1511   else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
1512     // Maybe this has already been legalized into the constant pool?
1513     if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
1514       if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
1515         return CFP->getValueAPF().isZero();
1516   }
1517   return false;
1518 }
1519 
1520 /// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
1521 /// true if Op is undef or if it matches the specified value.
1522 static bool isConstantOrUndef(int Op, int Val) {
1523   return Op < 0 || Op == Val;
1524 }
1525 
1526 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
1527 /// VPKUHUM instruction.
1528 /// The ShuffleKind distinguishes between big-endian operations with
1529 /// two different inputs (0), either-endian operations with two identical
1530 /// inputs (1), and little-endian operations with two different inputs (2).
1531 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1532 bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1533                                SelectionDAG &DAG) {
1534   bool IsLE = DAG.getDataLayout().isLittleEndian();
1535   if (ShuffleKind == 0) {
1536     if (IsLE)
1537       return false;
1538     for (unsigned i = 0; i != 16; ++i)
1539       if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
1540         return false;
1541   } else if (ShuffleKind == 2) {
1542     if (!IsLE)
1543       return false;
1544     for (unsigned i = 0; i != 16; ++i)
1545       if (!isConstantOrUndef(N->getMaskElt(i), i*2))
1546         return false;
1547   } else if (ShuffleKind == 1) {
1548     unsigned j = IsLE ? 0 : 1;
1549     for (unsigned i = 0; i != 8; ++i)
1550       if (!isConstantOrUndef(N->getMaskElt(i),    i*2+j) ||
1551           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j))
1552         return false;
1553   }
1554   return true;
1555 }
1556 
1557 /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
1558 /// VPKUWUM instruction.
1559 /// The ShuffleKind distinguishes between big-endian operations with
1560 /// two different inputs (0), either-endian operations with two identical
1561 /// inputs (1), and little-endian operations with two different inputs (2).
1562 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1563 bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1564                                SelectionDAG &DAG) {
1565   bool IsLE = DAG.getDataLayout().isLittleEndian();
1566   if (ShuffleKind == 0) {
1567     if (IsLE)
1568       return false;
1569     for (unsigned i = 0; i != 16; i += 2)
1570       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+2) ||
1571           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+3))
1572         return false;
1573   } else if (ShuffleKind == 2) {
1574     if (!IsLE)
1575       return false;
1576     for (unsigned i = 0; i != 16; i += 2)
1577       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
1578           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1))
1579         return false;
1580   } else if (ShuffleKind == 1) {
1581     unsigned j = IsLE ? 0 : 2;
1582     for (unsigned i = 0; i != 8; i += 2)
1583       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
1584           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
1585           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
1586           !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1))
1587         return false;
1588   }
1589   return true;
1590 }
1591 
1592 /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
1593 /// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
1594 /// current subtarget.
1595 ///
1596 /// The ShuffleKind distinguishes between big-endian operations with
1597 /// two different inputs (0), either-endian operations with two identical
1598 /// inputs (1), and little-endian operations with two different inputs (2).
1599 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1600 bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1601                                SelectionDAG &DAG) {
1602   const PPCSubtarget& Subtarget =
1603       static_cast<const PPCSubtarget&>(DAG.getSubtarget());
1604   if (!Subtarget.hasP8Vector())
1605     return false;
1606 
1607   bool IsLE = DAG.getDataLayout().isLittleEndian();
1608   if (ShuffleKind == 0) {
1609     if (IsLE)
1610       return false;
1611     for (unsigned i = 0; i != 16; i += 4)
1612       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+4) ||
1613           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+5) ||
1614           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+6) ||
1615           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+7))
1616         return false;
1617   } else if (ShuffleKind == 2) {
1618     if (!IsLE)
1619       return false;
1620     for (unsigned i = 0; i != 16; i += 4)
1621       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
1622           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1) ||
1623           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+2) ||
1624           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+3))
1625         return false;
1626   } else if (ShuffleKind == 1) {
1627     unsigned j = IsLE ? 0 : 4;
1628     for (unsigned i = 0; i != 8; i += 4)
1629       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
1630           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
1631           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+j+2) ||
1632           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+j+3) ||
1633           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
1634           !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1) ||
1635           !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
1636           !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
1637         return false;
1638   }
1639   return true;
1640 }
1641 
1642 /// isVMerge - Common function, used to match vmrg* shuffles.
1643 ///
1644 static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
1645                      unsigned LHSStart, unsigned RHSStart) {
1646   if (N->getValueType(0) != MVT::v16i8)
1647     return false;
1648   assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
1649          "Unsupported merge size!");
1650 
1651   for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
1652     for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
1653       if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
1654                              LHSStart+j+i*UnitSize) ||
1655           !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
1656                              RHSStart+j+i*UnitSize))
1657         return false;
1658     }
1659   return true;
1660 }
1661 
1662 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
1663 /// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
1664 /// The ShuffleKind distinguishes between big-endian merges with two
1665 /// different inputs (0), either-endian merges with two identical inputs (1),
1666 /// and little-endian merges with two different inputs (2).  For the latter,
1667 /// the input operands are swapped (see PPCInstrAltivec.td).
1668 bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1669                              unsigned ShuffleKind, SelectionDAG &DAG) {
1670   if (DAG.getDataLayout().isLittleEndian()) {
1671     if (ShuffleKind == 1) // unary
1672       return isVMerge(N, UnitSize, 0, 0);
1673     else if (ShuffleKind == 2) // swapped
1674       return isVMerge(N, UnitSize, 0, 16);
1675     else
1676       return false;
1677   } else {
1678     if (ShuffleKind == 1) // unary
1679       return isVMerge(N, UnitSize, 8, 8);
1680     else if (ShuffleKind == 0) // normal
1681       return isVMerge(N, UnitSize, 8, 24);
1682     else
1683       return false;
1684   }
1685 }
1686 
1687 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
1688 /// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
1689 /// The ShuffleKind distinguishes between big-endian merges with two
1690 /// different inputs (0), either-endian merges with two identical inputs (1),
1691 /// and little-endian merges with two different inputs (2).  For the latter,
1692 /// the input operands are swapped (see PPCInstrAltivec.td).
1693 bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1694                              unsigned ShuffleKind, SelectionDAG &DAG) {
1695   if (DAG.getDataLayout().isLittleEndian()) {
1696     if (ShuffleKind == 1) // unary
1697       return isVMerge(N, UnitSize, 8, 8);
1698     else if (ShuffleKind == 2) // swapped
1699       return isVMerge(N, UnitSize, 8, 24);
1700     else
1701       return false;
1702   } else {
1703     if (ShuffleKind == 1) // unary
1704       return isVMerge(N, UnitSize, 0, 0);
1705     else if (ShuffleKind == 0) // normal
1706       return isVMerge(N, UnitSize, 0, 16);
1707     else
1708       return false;
1709   }
1710 }
1711 
1712 /**
1713  * Common function used to match vmrgew and vmrgow shuffles
1714  *
1715  * The indexOffset determines whether to look for even or odd words in
1716  * the shuffle mask. This is based on the of the endianness of the target
1717  * machine.
1718  *   - Little Endian:
1719  *     - Use offset of 0 to check for odd elements
1720  *     - Use offset of 4 to check for even elements
1721  *   - Big Endian:
1722  *     - Use offset of 0 to check for even elements
1723  *     - Use offset of 4 to check for odd elements
1724  * A detailed description of the vector element ordering for little endian and
1725  * big endian can be found at
1726  * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
1727  * Targeting your applications - what little endian and big endian IBM XL C/C++
1728  * compiler differences mean to you
1729  *
1730  * The mask to the shuffle vector instruction specifies the indices of the
1731  * elements from the two input vectors to place in the result. The elements are
1732  * numbered in array-access order, starting with the first vector. These vectors
1733  * are always of type v16i8, thus each vector will contain 16 elements of size
1734  * 8. More info on the shuffle vector can be found in the
1735  * http://llvm.org/docs/LangRef.html#shufflevector-instruction
1736  * Language Reference.
1737  *
1738  * The RHSStartValue indicates whether the same input vectors are used (unary)
1739  * or two different input vectors are used, based on the following:
1740  *   - If the instruction uses the same vector for both inputs, the range of the
1741  *     indices will be 0 to 15. In this case, the RHSStart value passed should
1742  *     be 0.
1743  *   - If the instruction has two different vectors then the range of the
1744  *     indices will be 0 to 31. In this case, the RHSStart value passed should
1745  *     be 16 (indices 0-15 specify elements in the first vector while indices 16
1746  *     to 31 specify elements in the second vector).
1747  *
1748  * \param[in] N The shuffle vector SD Node to analyze
1749  * \param[in] IndexOffset Specifies whether to look for even or odd elements
1750  * \param[in] RHSStartValue Specifies the starting index for the righthand input
1751  * vector to the shuffle_vector instruction
1752  * \return true iff this shuffle vector represents an even or odd word merge
1753  */
1754 static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
1755                      unsigned RHSStartValue) {
1756   if (N->getValueType(0) != MVT::v16i8)
1757     return false;
1758 
1759   for (unsigned i = 0; i < 2; ++i)
1760     for (unsigned j = 0; j < 4; ++j)
1761       if (!isConstantOrUndef(N->getMaskElt(i*4+j),
1762                              i*RHSStartValue+j+IndexOffset) ||
1763           !isConstantOrUndef(N->getMaskElt(i*4+j+8),
1764                              i*RHSStartValue+j+IndexOffset+8))
1765         return false;
1766   return true;
1767 }
1768 
1769 /**
1770  * Determine if the specified shuffle mask is suitable for the vmrgew or
1771  * vmrgow instructions.
1772  *
1773  * \param[in] N The shuffle vector SD Node to analyze
1774  * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
1775  * \param[in] ShuffleKind Identify the type of merge:
1776  *   - 0 = big-endian merge with two different inputs;
1777  *   - 1 = either-endian merge with two identical inputs;
1778  *   - 2 = little-endian merge with two different inputs (inputs are swapped for
1779  *     little-endian merges).
1780  * \param[in] DAG The current SelectionDAG
1781  * \return true iff this shuffle mask
1782  */
1783 bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
1784                               unsigned ShuffleKind, SelectionDAG &DAG) {
1785   if (DAG.getDataLayout().isLittleEndian()) {
1786     unsigned indexOffset = CheckEven ? 4 : 0;
1787     if (ShuffleKind == 1) // Unary
1788       return isVMerge(N, indexOffset, 0);
1789     else if (ShuffleKind == 2) // swapped
1790       return isVMerge(N, indexOffset, 16);
1791     else
1792       return false;
1793   }
1794   else {
1795     unsigned indexOffset = CheckEven ? 0 : 4;
1796     if (ShuffleKind == 1) // Unary
1797       return isVMerge(N, indexOffset, 0);
1798     else if (ShuffleKind == 0) // Normal
1799       return isVMerge(N, indexOffset, 16);
1800     else
1801       return false;
1802   }
1803   return false;
1804 }
1805 
1806 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
1807 /// amount, otherwise return -1.
1808 /// The ShuffleKind distinguishes between big-endian operations with two
1809 /// different inputs (0), either-endian operations with two identical inputs
1810 /// (1), and little-endian operations with two different inputs (2).  For the
1811 /// latter, the input operands are swapped (see PPCInstrAltivec.td).
1812 int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
1813                              SelectionDAG &DAG) {
1814   if (N->getValueType(0) != MVT::v16i8)
1815     return -1;
1816 
1817   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1818 
1819   // Find the first non-undef value in the shuffle mask.
1820   unsigned i;
1821   for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
1822     /*search*/;
1823 
1824   if (i == 16) return -1;  // all undef.
1825 
1826   // Otherwise, check to see if the rest of the elements are consecutively
1827   // numbered from this value.
1828   unsigned ShiftAmt = SVOp->getMaskElt(i);
1829   if (ShiftAmt < i) return -1;
1830 
1831   ShiftAmt -= i;
1832   bool isLE = DAG.getDataLayout().isLittleEndian();
1833 
1834   if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
1835     // Check the rest of the elements to see if they are consecutive.
1836     for (++i; i != 16; ++i)
1837       if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
1838         return -1;
1839   } else if (ShuffleKind == 1) {
1840     // Check the rest of the elements to see if they are consecutive.
1841     for (++i; i != 16; ++i)
1842       if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
1843         return -1;
1844   } else
1845     return -1;
1846 
1847   if (isLE)
1848     ShiftAmt = 16 - ShiftAmt;
1849 
1850   return ShiftAmt;
1851 }
1852 
1853 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
1854 /// specifies a splat of a single element that is suitable for input to
1855 /// one of the splat operations (VSPLTB/VSPLTH/VSPLTW/XXSPLTW/LXVDSX/etc.).
1856 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
1857   assert(N->getValueType(0) == MVT::v16i8 && isPowerOf2_32(EltSize) &&
1858          EltSize <= 8 && "Can only handle 1,2,4,8 byte element sizes");
1859 
1860   // The consecutive indices need to specify an element, not part of two
1861   // different elements.  So abandon ship early if this isn't the case.
1862   if (N->getMaskElt(0) % EltSize != 0)
1863     return false;
1864 
1865   // This is a splat operation if each element of the permute is the same, and
1866   // if the value doesn't reference the second vector.
1867   unsigned ElementBase = N->getMaskElt(0);
1868 
1869   // FIXME: Handle UNDEF elements too!
1870   if (ElementBase >= 16)
1871     return false;
1872 
1873   // Check that the indices are consecutive, in the case of a multi-byte element
1874   // splatted with a v16i8 mask.
1875   for (unsigned i = 1; i != EltSize; ++i)
1876     if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
1877       return false;
1878 
1879   for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
1880     if (N->getMaskElt(i) < 0) continue;
1881     for (unsigned j = 0; j != EltSize; ++j)
1882       if (N->getMaskElt(i+j) != N->getMaskElt(j))
1883         return false;
1884   }
1885   return true;
1886 }
1887 
1888 /// Check that the mask is shuffling N byte elements. Within each N byte
1889 /// element of the mask, the indices could be either in increasing or
1890 /// decreasing order as long as they are consecutive.
1891 /// \param[in] N the shuffle vector SD Node to analyze
1892 /// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/
1893 /// Word/DoubleWord/QuadWord).
1894 /// \param[in] StepLen the delta indices number among the N byte element, if
1895 /// the mask is in increasing/decreasing order then it is 1/-1.
1896 /// \return true iff the mask is shuffling N byte elements.
1897 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
1898                                    int StepLen) {
1899   assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
1900          "Unexpected element width.");
1901   assert((StepLen == 1 || StepLen == -1) && "Unexpected element width.");
1902 
1903   unsigned NumOfElem = 16 / Width;
1904   unsigned MaskVal[16]; //  Width is never greater than 16
1905   for (unsigned i = 0; i < NumOfElem; ++i) {
1906     MaskVal[0] = N->getMaskElt(i * Width);
1907     if ((StepLen == 1) && (MaskVal[0] % Width)) {
1908       return false;
1909     } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
1910       return false;
1911     }
1912 
1913     for (unsigned int j = 1; j < Width; ++j) {
1914       MaskVal[j] = N->getMaskElt(i * Width + j);
1915       if (MaskVal[j] != MaskVal[j-1] + StepLen) {
1916         return false;
1917       }
1918     }
1919   }
1920 
1921   return true;
1922 }
1923 
1924 bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
1925                           unsigned &InsertAtByte, bool &Swap, bool IsLE) {
1926   if (!isNByteElemShuffleMask(N, 4, 1))
1927     return false;
1928 
1929   // Now we look at mask elements 0,4,8,12
1930   unsigned M0 = N->getMaskElt(0) / 4;
1931   unsigned M1 = N->getMaskElt(4) / 4;
1932   unsigned M2 = N->getMaskElt(8) / 4;
1933   unsigned M3 = N->getMaskElt(12) / 4;
1934   unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
1935   unsigned BigEndianShifts[] = { 3, 0, 1, 2 };
1936 
1937   // Below, let H and L be arbitrary elements of the shuffle mask
1938   // where H is in the range [4,7] and L is in the range [0,3].
1939   // H, 1, 2, 3 or L, 5, 6, 7
1940   if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
1941       (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
1942     ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
1943     InsertAtByte = IsLE ? 12 : 0;
1944     Swap = M0 < 4;
1945     return true;
1946   }
1947   // 0, H, 2, 3 or 4, L, 6, 7
1948   if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
1949       (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
1950     ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
1951     InsertAtByte = IsLE ? 8 : 4;
1952     Swap = M1 < 4;
1953     return true;
1954   }
1955   // 0, 1, H, 3 or 4, 5, L, 7
1956   if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
1957       (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
1958     ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
1959     InsertAtByte = IsLE ? 4 : 8;
1960     Swap = M2 < 4;
1961     return true;
1962   }
1963   // 0, 1, 2, H or 4, 5, 6, L
1964   if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
1965       (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
1966     ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
1967     InsertAtByte = IsLE ? 0 : 12;
1968     Swap = M3 < 4;
1969     return true;
1970   }
1971 
1972   // If both vector operands for the shuffle are the same vector, the mask will
1973   // contain only elements from the first one and the second one will be undef.
1974   if (N->getOperand(1).isUndef()) {
1975     ShiftElts = 0;
1976     Swap = true;
1977     unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
1978     if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
1979       InsertAtByte = IsLE ? 12 : 0;
1980       return true;
1981     }
1982     if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
1983       InsertAtByte = IsLE ? 8 : 4;
1984       return true;
1985     }
1986     if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
1987       InsertAtByte = IsLE ? 4 : 8;
1988       return true;
1989     }
1990     if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
1991       InsertAtByte = IsLE ? 0 : 12;
1992       return true;
1993     }
1994   }
1995 
1996   return false;
1997 }
1998 
1999 bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
2000                                bool &Swap, bool IsLE) {
2001   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2002   // Ensure each byte index of the word is consecutive.
2003   if (!isNByteElemShuffleMask(N, 4, 1))
2004     return false;
2005 
2006   // Now we look at mask elements 0,4,8,12, which are the beginning of words.
2007   unsigned M0 = N->getMaskElt(0) / 4;
2008   unsigned M1 = N->getMaskElt(4) / 4;
2009   unsigned M2 = N->getMaskElt(8) / 4;
2010   unsigned M3 = N->getMaskElt(12) / 4;
2011 
2012   // If both vector operands for the shuffle are the same vector, the mask will
2013   // contain only elements from the first one and the second one will be undef.
2014   if (N->getOperand(1).isUndef()) {
2015     assert(M0 < 4 && "Indexing into an undef vector?");
2016     if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
2017       return false;
2018 
2019     ShiftElts = IsLE ? (4 - M0) % 4 : M0;
2020     Swap = false;
2021     return true;
2022   }
2023 
2024   // Ensure each word index of the ShuffleVector Mask is consecutive.
2025   if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)
2026     return false;
2027 
2028   if (IsLE) {
2029     if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
2030       // Input vectors don't need to be swapped if the leading element
2031       // of the result is one of the 3 left elements of the second vector
2032       // (or if there is no shift to be done at all).
2033       Swap = false;
2034       ShiftElts = (8 - M0) % 8;
2035     } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
2036       // Input vectors need to be swapped if the leading element
2037       // of the result is one of the 3 left elements of the first vector
2038       // (or if we're shifting by 4 - thereby simply swapping the vectors).
2039       Swap = true;
2040       ShiftElts = (4 - M0) % 4;
2041     }
2042 
2043     return true;
2044   } else {                                          // BE
2045     if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
2046       // Input vectors don't need to be swapped if the leading element
2047       // of the result is one of the 4 elements of the first vector.
2048       Swap = false;
2049       ShiftElts = M0;
2050     } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
2051       // Input vectors need to be swapped if the leading element
2052       // of the result is one of the 4 elements of the right vector.
2053       Swap = true;
2054       ShiftElts = M0 - 4;
2055     }
2056 
2057     return true;
2058   }
2059 }
2060 
2061 bool static isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) {
2062   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2063 
2064   if (!isNByteElemShuffleMask(N, Width, -1))
2065     return false;
2066 
2067   for (int i = 0; i < 16; i += Width)
2068     if (N->getMaskElt(i) != i + Width - 1)
2069       return false;
2070 
2071   return true;
2072 }
2073 
2074 bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) {
2075   return isXXBRShuffleMaskHelper(N, 2);
2076 }
2077 
2078 bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) {
2079   return isXXBRShuffleMaskHelper(N, 4);
2080 }
2081 
2082 bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) {
2083   return isXXBRShuffleMaskHelper(N, 8);
2084 }
2085 
2086 bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) {
2087   return isXXBRShuffleMaskHelper(N, 16);
2088 }
2089 
2090 /// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap
2091 /// if the inputs to the instruction should be swapped and set \p DM to the
2092 /// value for the immediate.
2093 /// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI
2094 /// AND element 0 of the result comes from the first input (LE) or second input
2095 /// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered.
2096 /// \return true iff the given mask of shuffle node \p N is a XXPERMDI shuffle
2097 /// mask.
2098 bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
2099                                bool &Swap, bool IsLE) {
2100   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2101 
2102   // Ensure each byte index of the double word is consecutive.
2103   if (!isNByteElemShuffleMask(N, 8, 1))
2104     return false;
2105 
2106   unsigned M0 = N->getMaskElt(0) / 8;
2107   unsigned M1 = N->getMaskElt(8) / 8;
2108   assert(((M0 | M1) < 4) && "A mask element out of bounds?");
2109 
2110   // If both vector operands for the shuffle are the same vector, the mask will
2111   // contain only elements from the first one and the second one will be undef.
2112   if (N->getOperand(1).isUndef()) {
2113     if ((M0 | M1) < 2) {
2114       DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
2115       Swap = false;
2116       return true;
2117     } else
2118       return false;
2119   }
2120 
2121   if (IsLE) {
2122     if (M0 > 1 && M1 < 2) {
2123       Swap = false;
2124     } else if (M0 < 2 && M1 > 1) {
2125       M0 = (M0 + 2) % 4;
2126       M1 = (M1 + 2) % 4;
2127       Swap = true;
2128     } else
2129       return false;
2130 
2131     // Note: if control flow comes here that means Swap is already set above
2132     DM = (((~M1) & 1) << 1) + ((~M0) & 1);
2133     return true;
2134   } else { // BE
2135     if (M0 < 2 && M1 > 1) {
2136       Swap = false;
2137     } else if (M0 > 1 && M1 < 2) {
2138       M0 = (M0 + 2) % 4;
2139       M1 = (M1 + 2) % 4;
2140       Swap = true;
2141     } else
2142       return false;
2143 
2144     // Note: if control flow comes here that means Swap is already set above
2145     DM = (M0 << 1) + (M1 & 1);
2146     return true;
2147   }
2148 }
2149 
2150 
2151 /// getSplatIdxForPPCMnemonics - Return the splat index as a value that is
2152 /// appropriate for PPC mnemonics (which have a big endian bias - namely
2153 /// elements are counted from the left of the vector register).
2154 unsigned PPC::getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
2155                                          SelectionDAG &DAG) {
2156   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2157   assert(isSplatShuffleMask(SVOp, EltSize));
2158   if (DAG.getDataLayout().isLittleEndian())
2159     return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
2160   else
2161     return SVOp->getMaskElt(0) / EltSize;
2162 }
2163 
2164 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
2165 /// by using a vspltis[bhw] instruction of the specified element size, return
2166 /// the constant being splatted.  The ByteSize field indicates the number of
2167 /// bytes of each element [124] -> [bhw].
2168 SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
2169   SDValue OpVal(nullptr, 0);
2170 
2171   // If ByteSize of the splat is bigger than the element size of the
2172   // build_vector, then we have a case where we are checking for a splat where
2173   // multiple elements of the buildvector are folded together into a single
2174   // logical element of the splat (e.g. "vsplish 1" to splat {0,1}*8).
2175   unsigned EltSize = 16/N->getNumOperands();
2176   if (EltSize < ByteSize) {
2177     unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
2178     SDValue UniquedVals[4];
2179     assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
2180 
2181     // See if all of the elements in the buildvector agree across.
2182     for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2183       if (N->getOperand(i).isUndef()) continue;
2184       // If the element isn't a constant, bail fully out.
2185       if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();
2186 
2187       if (!UniquedVals[i&(Multiple-1)].getNode())
2188         UniquedVals[i&(Multiple-1)] = N->getOperand(i);
2189       else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
2190         return SDValue();  // no match.
2191     }
2192 
2193     // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
2194     // either constant or undef values that are identical for each chunk.  See
2195     // if these chunks can form into a larger vspltis*.
2196 
2197     // Check to see if all of the leading entries are either 0 or -1.  If
2198     // neither, then this won't fit into the immediate field.
2199     bool LeadingZero = true;
2200     bool LeadingOnes = true;
2201     for (unsigned i = 0; i != Multiple-1; ++i) {
2202       if (!UniquedVals[i].getNode()) continue;  // Must have been undefs.
2203 
2204       LeadingZero &= isNullConstant(UniquedVals[i]);
2205       LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
2206     }
2207     // Finally, check the least significant entry.
2208     if (LeadingZero) {
2209       if (!UniquedVals[Multiple-1].getNode())
2210         return DAG.getTargetConstant(0, SDLoc(N), MVT::i32);  // 0,0,0,undef
2211       int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
2212       if (Val < 16)                                   // 0,0,0,4 -> vspltisw(4)
2213         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2214     }
2215     if (LeadingOnes) {
2216       if (!UniquedVals[Multiple-1].getNode())
2217         return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
2218       int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
2219       if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
2220         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2221     }
2222 
2223     return SDValue();
2224   }
2225 
2226   // Check to see if this buildvec has a single non-undef value in its elements.
2227   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2228     if (N->getOperand(i).isUndef()) continue;
2229     if (!OpVal.getNode())
2230       OpVal = N->getOperand(i);
2231     else if (OpVal != N->getOperand(i))
2232       return SDValue();
2233   }
2234 
2235   if (!OpVal.getNode()) return SDValue();  // All UNDEF: use implicit def.
2236 
2237   unsigned ValSizeInBytes = EltSize;
2238   uint64_t Value = 0;
2239   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
2240     Value = CN->getZExtValue();
2241   } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
2242     assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
2243     Value = FloatToBits(CN->getValueAPF().convertToFloat());
2244   }
2245 
2246   // If the splat value is larger than the element value, then we can never do
2247   // this splat.  The only case that we could fit the replicated bits into our
2248   // immediate field for would be zero, and we prefer to use vxor for it.
2249   if (ValSizeInBytes < ByteSize) return SDValue();
2250 
2251   // If the element value is larger than the splat value, check if it consists
2252   // of a repeated bit pattern of size ByteSize.
2253   if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
2254     return SDValue();
2255 
2256   // Properly sign extend the value.
2257   int MaskVal = SignExtend32(Value, ByteSize * 8);
2258 
2259   // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
2260   if (MaskVal == 0) return SDValue();
2261 
2262   // Finally, if this value fits in a 5 bit sext field, return it
2263   if (SignExtend32<5>(MaskVal) == MaskVal)
2264     return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
2265   return SDValue();
2266 }
2267 
2268 /// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift
2269 /// amount, otherwise return -1.
2270 int PPC::isQVALIGNIShuffleMask(SDNode *N) {
2271   EVT VT = N->getValueType(0);
2272   if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1)
2273     return -1;
2274 
2275   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2276 
2277   // Find the first non-undef value in the shuffle mask.
2278   unsigned i;
2279   for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i)
2280     /*search*/;
2281 
2282   if (i == 4) return -1;  // all undef.
2283 
2284   // Otherwise, check to see if the rest of the elements are consecutively
2285   // numbered from this value.
2286   unsigned ShiftAmt = SVOp->getMaskElt(i);
2287   if (ShiftAmt < i) return -1;
2288   ShiftAmt -= i;
2289 
2290   // Check the rest of the elements to see if they are consecutive.
2291   for (++i; i != 4; ++i)
2292     if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
2293       return -1;
2294 
2295   return ShiftAmt;
2296 }
2297 
2298 //===----------------------------------------------------------------------===//
2299 //  Addressing Mode Selection
2300 //===----------------------------------------------------------------------===//
2301 
2302 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit
2303 /// or 64-bit immediate, and if the value can be accurately represented as a
2304 /// sign extension from a 16-bit value.  If so, this returns true and the
2305 /// immediate.
2306 bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
2307   if (!isa<ConstantSDNode>(N))
2308     return false;
2309 
2310   Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
2311   if (N->getValueType(0) == MVT::i32)
2312     return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
2313   else
2314     return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
2315 }
2316 bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
2317   return isIntS16Immediate(Op.getNode(), Imm);
2318 }
2319 
2320 
2321 /// SelectAddressEVXRegReg - Given the specified address, check to see if it can
2322 /// be represented as an indexed [r+r] operation.
2323 bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base,
2324                                                SDValue &Index,
2325                                                SelectionDAG &DAG) const {
2326   for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
2327       UI != E; ++UI) {
2328     if (MemSDNode *Memop = dyn_cast<MemSDNode>(*UI)) {
2329       if (Memop->getMemoryVT() == MVT::f64) {
2330           Base = N.getOperand(0);
2331           Index = N.getOperand(1);
2332           return true;
2333       }
2334     }
2335   }
2336   return false;
2337 }
2338 
2339 /// SelectAddressRegReg - Given the specified addressed, check to see if it
2340 /// can be represented as an indexed [r+r] operation.  Returns false if it
2341 /// can be more efficiently represented as [r+imm]. If \p EncodingAlignment is
2342 /// non-zero and N can be represented by a base register plus a signed 16-bit
2343 /// displacement, make a more precise judgement by checking (displacement % \p
2344 /// EncodingAlignment).
2345 bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
2346                                             SDValue &Index, SelectionDAG &DAG,
2347                                             unsigned EncodingAlignment) const {
2348   int16_t imm = 0;
2349   if (N.getOpcode() == ISD::ADD) {
2350     // Is there any SPE load/store (f64), which can't handle 16bit offset?
2351     // SPE load/store can only handle 8-bit offsets.
2352     if (hasSPE() && SelectAddressEVXRegReg(N, Base, Index, DAG))
2353         return true;
2354     if (isIntS16Immediate(N.getOperand(1), imm) &&
2355         (!EncodingAlignment || !(imm % EncodingAlignment)))
2356       return false; // r+i
2357     if (N.getOperand(1).getOpcode() == PPCISD::Lo)
2358       return false;    // r+i
2359 
2360     Base = N.getOperand(0);
2361     Index = N.getOperand(1);
2362     return true;
2363   } else if (N.getOpcode() == ISD::OR) {
2364     if (isIntS16Immediate(N.getOperand(1), imm) &&
2365         (!EncodingAlignment || !(imm % EncodingAlignment)))
2366       return false; // r+i can fold it if we can.
2367 
2368     // If this is an or of disjoint bitfields, we can codegen this as an add
2369     // (for better address arithmetic) if the LHS and RHS of the OR are provably
2370     // disjoint.
2371     KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2372 
2373     if (LHSKnown.Zero.getBoolValue()) {
2374       KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
2375       // If all of the bits are known zero on the LHS or RHS, the add won't
2376       // carry.
2377       if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
2378         Base = N.getOperand(0);
2379         Index = N.getOperand(1);
2380         return true;
2381       }
2382     }
2383   }
2384 
2385   return false;
2386 }
2387 
2388 // If we happen to be doing an i64 load or store into a stack slot that has
2389 // less than a 4-byte alignment, then the frame-index elimination may need to
2390 // use an indexed load or store instruction (because the offset may not be a
2391 // multiple of 4). The extra register needed to hold the offset comes from the
2392 // register scavenger, and it is possible that the scavenger will need to use
2393 // an emergency spill slot. As a result, we need to make sure that a spill slot
2394 // is allocated when doing an i64 load/store into a less-than-4-byte-aligned
2395 // stack slot.
2396 static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
2397   // FIXME: This does not handle the LWA case.
2398   if (VT != MVT::i64)
2399     return;
2400 
2401   // NOTE: We'll exclude negative FIs here, which come from argument
2402   // lowering, because there are no known test cases triggering this problem
2403   // using packed structures (or similar). We can remove this exclusion if
2404   // we find such a test case. The reason why this is so test-case driven is
2405   // because this entire 'fixup' is only to prevent crashes (from the
2406   // register scavenger) on not-really-valid inputs. For example, if we have:
2407   //   %a = alloca i1
2408   //   %b = bitcast i1* %a to i64*
2409   //   store i64* a, i64 b
2410   // then the store should really be marked as 'align 1', but is not. If it
2411   // were marked as 'align 1' then the indexed form would have been
2412   // instruction-selected initially, and the problem this 'fixup' is preventing
2413   // won't happen regardless.
2414   if (FrameIdx < 0)
2415     return;
2416 
2417   MachineFunction &MF = DAG.getMachineFunction();
2418   MachineFrameInfo &MFI = MF.getFrameInfo();
2419 
2420   if (MFI.getObjectAlign(FrameIdx) >= Align(4))
2421     return;
2422 
2423   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2424   FuncInfo->setHasNonRISpills();
2425 }
2426 
2427 /// Returns true if the address N can be represented by a base register plus
2428 /// a signed 16-bit displacement [r+imm], and if it is not better
2429 /// represented as reg+reg.  If \p EncodingAlignment is non-zero, only accept
2430 /// displacements that are multiples of that value.
2431 bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
2432                                             SDValue &Base,
2433                                             SelectionDAG &DAG,
2434                                             unsigned EncodingAlignment) const {
2435   // FIXME dl should come from parent load or store, not from address
2436   SDLoc dl(N);
2437   // If this can be more profitably realized as r+r, fail.
2438   if (SelectAddressRegReg(N, Disp, Base, DAG, EncodingAlignment))
2439     return false;
2440 
2441   if (N.getOpcode() == ISD::ADD) {
2442     int16_t imm = 0;
2443     if (isIntS16Immediate(N.getOperand(1), imm) &&
2444         (!EncodingAlignment || (imm % EncodingAlignment) == 0)) {
2445       Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2446       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2447         Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2448         fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2449       } else {
2450         Base = N.getOperand(0);
2451       }
2452       return true; // [r+i]
2453     } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
2454       // Match LOAD (ADD (X, Lo(G))).
2455       assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
2456              && "Cannot handle constant offsets yet!");
2457       Disp = N.getOperand(1).getOperand(0);  // The global address.
2458       assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
2459              Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
2460              Disp.getOpcode() == ISD::TargetConstantPool ||
2461              Disp.getOpcode() == ISD::TargetJumpTable);
2462       Base = N.getOperand(0);
2463       return true;  // [&g+r]
2464     }
2465   } else if (N.getOpcode() == ISD::OR) {
2466     int16_t imm = 0;
2467     if (isIntS16Immediate(N.getOperand(1), imm) &&
2468         (!EncodingAlignment || (imm % EncodingAlignment) == 0)) {
2469       // If this is an or of disjoint bitfields, we can codegen this as an add
2470       // (for better address arithmetic) if the LHS and RHS of the OR are
2471       // provably disjoint.
2472       KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2473 
2474       if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
2475         // If all of the bits are known zero on the LHS or RHS, the add won't
2476         // carry.
2477         if (FrameIndexSDNode *FI =
2478               dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2479           Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2480           fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2481         } else {
2482           Base = N.getOperand(0);
2483         }
2484         Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2485         return true;
2486       }
2487     }
2488   } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
2489     // Loading from a constant address.
2490 
2491     // If this address fits entirely in a 16-bit sext immediate field, codegen
2492     // this as "d, 0"
2493     int16_t Imm;
2494     if (isIntS16Immediate(CN, Imm) &&
2495         (!EncodingAlignment || (Imm % EncodingAlignment) == 0)) {
2496       Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
2497       Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2498                              CN->getValueType(0));
2499       return true;
2500     }
2501 
2502     // Handle 32-bit sext immediates with LIS + addr mode.
2503     if ((CN->getValueType(0) == MVT::i32 ||
2504          (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
2505         (!EncodingAlignment || (CN->getZExtValue() % EncodingAlignment) == 0)) {
2506       int Addr = (int)CN->getZExtValue();
2507 
2508       // Otherwise, break this down into an LIS + disp.
2509       Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);
2510 
2511       Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
2512                                    MVT::i32);
2513       unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
2514       Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
2515       return true;
2516     }
2517   }
2518 
2519   Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
2520   if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
2521     Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2522     fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2523   } else
2524     Base = N;
2525   return true;      // [r+0]
2526 }
2527 
2528 /// SelectAddressRegRegOnly - Given the specified addressed, force it to be
2529 /// represented as an indexed [r+r] operation.
2530 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
2531                                                 SDValue &Index,
2532                                                 SelectionDAG &DAG) const {
2533   // Check to see if we can easily represent this as an [r+r] address.  This
2534   // will fail if it thinks that the address is more profitably represented as
2535   // reg+imm, e.g. where imm = 0.
2536   if (SelectAddressRegReg(N, Base, Index, DAG))
2537     return true;
2538 
2539   // If the address is the result of an add, we will utilize the fact that the
2540   // address calculation includes an implicit add.  However, we can reduce
2541   // register pressure if we do not materialize a constant just for use as the
2542   // index register.  We only get rid of the add if it is not an add of a
2543   // value and a 16-bit signed constant and both have a single use.
2544   int16_t imm = 0;
2545   if (N.getOpcode() == ISD::ADD &&
2546       (!isIntS16Immediate(N.getOperand(1), imm) ||
2547        !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
2548     Base = N.getOperand(0);
2549     Index = N.getOperand(1);
2550     return true;
2551   }
2552 
2553   // Otherwise, do it the hard way, using R0 as the base register.
2554   Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2555                          N.getValueType());
2556   Index = N;
2557   return true;
2558 }
2559 
2560 /// Returns true if we should use a direct load into vector instruction
2561 /// (such as lxsd or lfd), instead of a load into gpr + direct move sequence.
2562 static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget& ST) {
2563 
2564   // If there are any other uses other than scalar to vector, then we should
2565   // keep it as a scalar load -> direct move pattern to prevent multiple
2566   // loads.
2567   LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
2568   if (!LD)
2569     return false;
2570 
2571   EVT MemVT = LD->getMemoryVT();
2572   if (!MemVT.isSimple())
2573     return false;
2574   switch(MemVT.getSimpleVT().SimpleTy) {
2575   case MVT::i64:
2576     break;
2577   case MVT::i32:
2578     if (!ST.hasP8Vector())
2579       return false;
2580     break;
2581   case MVT::i16:
2582   case MVT::i8:
2583     if (!ST.hasP9Vector())
2584       return false;
2585     break;
2586   default:
2587     return false;
2588   }
2589 
2590   SDValue LoadedVal(N, 0);
2591   if (!LoadedVal.hasOneUse())
2592     return false;
2593 
2594   for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end();
2595        UI != UE; ++UI)
2596     if (UI.getUse().get().getResNo() == 0 &&
2597         UI->getOpcode() != ISD::SCALAR_TO_VECTOR)
2598       return false;
2599 
2600   return true;
2601 }
2602 
2603 /// getPreIndexedAddressParts - returns true by value, base pointer and
2604 /// offset pointer and addressing mode by reference if the node's address
2605 /// can be legally represented as pre-indexed load / store address.
2606 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
2607                                                   SDValue &Offset,
2608                                                   ISD::MemIndexedMode &AM,
2609                                                   SelectionDAG &DAG) const {
2610   if (DisablePPCPreinc) return false;
2611 
2612   bool isLoad = true;
2613   SDValue Ptr;
2614   EVT VT;
2615   unsigned Alignment;
2616   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2617     Ptr = LD->getBasePtr();
2618     VT = LD->getMemoryVT();
2619     Alignment = LD->getAlignment();
2620   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
2621     Ptr = ST->getBasePtr();
2622     VT  = ST->getMemoryVT();
2623     Alignment = ST->getAlignment();
2624     isLoad = false;
2625   } else
2626     return false;
2627 
2628   // Do not generate pre-inc forms for specific loads that feed scalar_to_vector
2629   // instructions because we can fold these into a more efficient instruction
2630   // instead, (such as LXSD).
2631   if (isLoad && usePartialVectorLoads(N, Subtarget)) {
2632     return false;
2633   }
2634 
2635   // PowerPC doesn't have preinc load/store instructions for vectors (except
2636   // for QPX, which does have preinc r+r forms).
2637   if (VT.isVector()) {
2638     if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) {
2639       return false;
2640     } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) {
2641       AM = ISD::PRE_INC;
2642       return true;
2643     }
2644   }
2645 
2646   if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
2647     // Common code will reject creating a pre-inc form if the base pointer
2648     // is a frame index, or if N is a store and the base pointer is either
2649     // the same as or a predecessor of the value being stored.  Check for
2650     // those situations here, and try with swapped Base/Offset instead.
2651     bool Swap = false;
2652 
2653     if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
2654       Swap = true;
2655     else if (!isLoad) {
2656       SDValue Val = cast<StoreSDNode>(N)->getValue();
2657       if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
2658         Swap = true;
2659     }
2660 
2661     if (Swap)
2662       std::swap(Base, Offset);
2663 
2664     AM = ISD::PRE_INC;
2665     return true;
2666   }
2667 
2668   // LDU/STU can only handle immediates that are a multiple of 4.
2669   if (VT != MVT::i64) {
2670     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 0))
2671       return false;
2672   } else {
2673     // LDU/STU need an address with at least 4-byte alignment.
2674     if (Alignment < 4)
2675       return false;
2676 
2677     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 4))
2678       return false;
2679   }
2680 
2681   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2682     // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
2683     // sext i32 to i64 when addr mode is r+i.
2684     if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
2685         LD->getExtensionType() == ISD::SEXTLOAD &&
2686         isa<ConstantSDNode>(Offset))
2687       return false;
2688   }
2689 
2690   AM = ISD::PRE_INC;
2691   return true;
2692 }
2693 
2694 //===----------------------------------------------------------------------===//
2695 //  LowerOperation implementation
2696 //===----------------------------------------------------------------------===//
2697 
2698 /// Return true if we should reference labels using a PICBase, set the HiOpFlags
2699 /// and LoOpFlags to the target MO flags.
2700 static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
2701                                unsigned &HiOpFlags, unsigned &LoOpFlags,
2702                                const GlobalValue *GV = nullptr) {
2703   HiOpFlags = PPCII::MO_HA;
2704   LoOpFlags = PPCII::MO_LO;
2705 
2706   // Don't use the pic base if not in PIC relocation model.
2707   if (IsPIC) {
2708     HiOpFlags |= PPCII::MO_PIC_FLAG;
2709     LoOpFlags |= PPCII::MO_PIC_FLAG;
2710   }
2711 }
2712 
2713 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
2714                              SelectionDAG &DAG) {
2715   SDLoc DL(HiPart);
2716   EVT PtrVT = HiPart.getValueType();
2717   SDValue Zero = DAG.getConstant(0, DL, PtrVT);
2718 
2719   SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
2720   SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);
2721 
2722   // With PIC, the first instruction is actually "GR+hi(&G)".
2723   if (isPIC)
2724     Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
2725                      DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);
2726 
2727   // Generate non-pic code that has direct accesses to the constant pool.
2728   // The address of the global is just (hi(&g)+lo(&g)).
2729   return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
2730 }
2731 
2732 static void setUsesTOCBasePtr(MachineFunction &MF) {
2733   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2734   FuncInfo->setUsesTOCBasePtr();
2735 }
2736 
2737 static void setUsesTOCBasePtr(SelectionDAG &DAG) {
2738   setUsesTOCBasePtr(DAG.getMachineFunction());
2739 }
2740 
2741 SDValue PPCTargetLowering::getTOCEntry(SelectionDAG &DAG, const SDLoc &dl,
2742                                        SDValue GA) const {
2743   const bool Is64Bit = Subtarget.isPPC64();
2744   EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
2745   SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT)
2746                         : Subtarget.isAIXABI()
2747                               ? DAG.getRegister(PPC::R2, VT)
2748                               : DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);
2749   SDValue Ops[] = { GA, Reg };
2750   return DAG.getMemIntrinsicNode(
2751       PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
2752       MachinePointerInfo::getGOT(DAG.getMachineFunction()), None,
2753       MachineMemOperand::MOLoad);
2754 }
2755 
2756 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
2757                                              SelectionDAG &DAG) const {
2758   EVT PtrVT = Op.getValueType();
2759   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
2760   const Constant *C = CP->getConstVal();
2761 
2762   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
2763   // The actual address of the GlobalValue is stored in the TOC.
2764   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
2765     setUsesTOCBasePtr(DAG);
2766     SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0);
2767     return getTOCEntry(DAG, SDLoc(CP), GA);
2768   }
2769 
2770   unsigned MOHiFlag, MOLoFlag;
2771   bool IsPIC = isPositionIndependent();
2772   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2773 
2774   if (IsPIC && Subtarget.isSVR4ABI()) {
2775     SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(),
2776                                            PPCII::MO_PIC_FLAG);
2777     return getTOCEntry(DAG, SDLoc(CP), GA);
2778   }
2779 
2780   SDValue CPIHi =
2781     DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag);
2782   SDValue CPILo =
2783     DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag);
2784   return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG);
2785 }
2786 
2787 // For 64-bit PowerPC, prefer the more compact relative encodings.
2788 // This trades 32 bits per jump table entry for one or two instructions
2789 // on the jump site.
2790 unsigned PPCTargetLowering::getJumpTableEncoding() const {
2791   if (isJumpTableRelative())
2792     return MachineJumpTableInfo::EK_LabelDifference32;
2793 
2794   return TargetLowering::getJumpTableEncoding();
2795 }
2796 
2797 bool PPCTargetLowering::isJumpTableRelative() const {
2798   if (UseAbsoluteJumpTables)
2799     return false;
2800   if (Subtarget.isPPC64() || Subtarget.isAIXABI())
2801     return true;
2802   return TargetLowering::isJumpTableRelative();
2803 }
2804 
2805 SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table,
2806                                                     SelectionDAG &DAG) const {
2807   if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
2808     return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
2809 
2810   switch (getTargetMachine().getCodeModel()) {
2811   case CodeModel::Small:
2812   case CodeModel::Medium:
2813     return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
2814   default:
2815     return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(),
2816                        getPointerTy(DAG.getDataLayout()));
2817   }
2818 }
2819 
2820 const MCExpr *
2821 PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
2822                                                 unsigned JTI,
2823                                                 MCContext &Ctx) const {
2824   if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
2825     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2826 
2827   switch (getTargetMachine().getCodeModel()) {
2828   case CodeModel::Small:
2829   case CodeModel::Medium:
2830     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2831   default:
2832     return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
2833   }
2834 }
2835 
2836 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
2837   EVT PtrVT = Op.getValueType();
2838   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
2839 
2840   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
2841   // The actual address of the GlobalValue is stored in the TOC.
2842   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
2843     setUsesTOCBasePtr(DAG);
2844     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
2845     return getTOCEntry(DAG, SDLoc(JT), GA);
2846   }
2847 
2848   unsigned MOHiFlag, MOLoFlag;
2849   bool IsPIC = isPositionIndependent();
2850   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2851 
2852   if (IsPIC && Subtarget.isSVR4ABI()) {
2853     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
2854                                         PPCII::MO_PIC_FLAG);
2855     return getTOCEntry(DAG, SDLoc(GA), GA);
2856   }
2857 
2858   SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
2859   SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
2860   return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG);
2861 }
2862 
2863 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
2864                                              SelectionDAG &DAG) const {
2865   EVT PtrVT = Op.getValueType();
2866   BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
2867   const BlockAddress *BA = BASDN->getBlockAddress();
2868 
2869   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
2870   // The actual BlockAddress is stored in the TOC.
2871   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
2872     setUsesTOCBasePtr(DAG);
2873     SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
2874     return getTOCEntry(DAG, SDLoc(BASDN), GA);
2875   }
2876 
2877   // 32-bit position-independent ELF stores the BlockAddress in the .got.
2878   if (Subtarget.is32BitELFABI() && isPositionIndependent())
2879     return getTOCEntry(
2880         DAG, SDLoc(BASDN),
2881         DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()));
2882 
2883   unsigned MOHiFlag, MOLoFlag;
2884   bool IsPIC = isPositionIndependent();
2885   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2886   SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
2887   SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
2888   return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG);
2889 }
2890 
2891 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
2892                                               SelectionDAG &DAG) const {
2893   // FIXME: TLS addresses currently use medium model code sequences,
2894   // which is the most useful form.  Eventually support for small and
2895   // large models could be added if users need it, at the cost of
2896   // additional complexity.
2897   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2898   if (DAG.getTarget().useEmulatedTLS())
2899     return LowerToTLSEmulatedModel(GA, DAG);
2900 
2901   SDLoc dl(GA);
2902   const GlobalValue *GV = GA->getGlobal();
2903   EVT PtrVT = getPointerTy(DAG.getDataLayout());
2904   bool is64bit = Subtarget.isPPC64();
2905   const Module *M = DAG.getMachineFunction().getFunction().getParent();
2906   PICLevel::Level picLevel = M->getPICLevel();
2907 
2908   const TargetMachine &TM = getTargetMachine();
2909   TLSModel::Model Model = TM.getTLSModel(GV);
2910 
2911   if (Model == TLSModel::LocalExec) {
2912     SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
2913                                                PPCII::MO_TPREL_HA);
2914     SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
2915                                                PPCII::MO_TPREL_LO);
2916     SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64)
2917                              : DAG.getRegister(PPC::R2, MVT::i32);
2918 
2919     SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
2920     return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
2921   }
2922 
2923   if (Model == TLSModel::InitialExec) {
2924     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
2925     SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
2926                                                 PPCII::MO_TLS);
2927     SDValue GOTPtr;
2928     if (is64bit) {
2929       setUsesTOCBasePtr(DAG);
2930       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
2931       GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl,
2932                            PtrVT, GOTReg, TGA);
2933     } else {
2934       if (!TM.isPositionIndependent())
2935         GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT);
2936       else if (picLevel == PICLevel::SmallPIC)
2937         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
2938       else
2939         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
2940     }
2941     SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl,
2942                                    PtrVT, TGA, GOTPtr);
2943     return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
2944   }
2945 
2946   if (Model == TLSModel::GeneralDynamic) {
2947     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
2948     SDValue GOTPtr;
2949     if (is64bit) {
2950       setUsesTOCBasePtr(DAG);
2951       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
2952       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
2953                                    GOTReg, TGA);
2954     } else {
2955       if (picLevel == PICLevel::SmallPIC)
2956         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
2957       else
2958         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
2959     }
2960     return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT,
2961                        GOTPtr, TGA, TGA);
2962   }
2963 
2964   if (Model == TLSModel::LocalDynamic) {
2965     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
2966     SDValue GOTPtr;
2967     if (is64bit) {
2968       setUsesTOCBasePtr(DAG);
2969       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
2970       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
2971                            GOTReg, TGA);
2972     } else {
2973       if (picLevel == PICLevel::SmallPIC)
2974         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
2975       else
2976         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
2977     }
2978     SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
2979                                   PtrVT, GOTPtr, TGA, TGA);
2980     SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
2981                                       PtrVT, TLSAddr, TGA);
2982     return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
2983   }
2984 
2985   llvm_unreachable("Unknown TLS model!");
2986 }
2987 
2988 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
2989                                               SelectionDAG &DAG) const {
2990   EVT PtrVT = Op.getValueType();
2991   GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
2992   SDLoc DL(GSDN);
2993   const GlobalValue *GV = GSDN->getGlobal();
2994 
2995   // 64-bit SVR4 ABI & AIX ABI code is always position-independent.
2996   // The actual address of the GlobalValue is stored in the TOC.
2997   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
2998     setUsesTOCBasePtr(DAG);
2999     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
3000     return getTOCEntry(DAG, DL, GA);
3001   }
3002 
3003   unsigned MOHiFlag, MOLoFlag;
3004   bool IsPIC = isPositionIndependent();
3005   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV);
3006 
3007   if (IsPIC && Subtarget.isSVR4ABI()) {
3008     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
3009                                             GSDN->getOffset(),
3010                                             PPCII::MO_PIC_FLAG);
3011     return getTOCEntry(DAG, DL, GA);
3012   }
3013 
3014   SDValue GAHi =
3015     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
3016   SDValue GALo =
3017     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);
3018 
3019   return LowerLabelRef(GAHi, GALo, IsPIC, DAG);
3020 }
3021 
3022 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
3023   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
3024   SDLoc dl(Op);
3025 
3026   if (Op.getValueType() == MVT::v2i64) {
3027     // When the operands themselves are v2i64 values, we need to do something
3028     // special because VSX has no underlying comparison operations for these.
3029     if (Op.getOperand(0).getValueType() == MVT::v2i64) {
3030       // Equality can be handled by casting to the legal type for Altivec
3031       // comparisons, everything else needs to be expanded.
3032       if (CC == ISD::SETEQ || CC == ISD::SETNE) {
3033         return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
3034                  DAG.getSetCC(dl, MVT::v4i32,
3035                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)),
3036                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)),
3037                    CC));
3038       }
3039 
3040       return SDValue();
3041     }
3042 
3043     // We handle most of these in the usual way.
3044     return Op;
3045   }
3046 
3047   // If we're comparing for equality to zero, expose the fact that this is
3048   // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
3049   // fold the new nodes.
3050   if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG))
3051     return V;
3052 
3053   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
3054     // Leave comparisons against 0 and -1 alone for now, since they're usually
3055     // optimized.  FIXME: revisit this when we can custom lower all setcc
3056     // optimizations.
3057     if (C->isAllOnesValue() || C->isNullValue())
3058       return SDValue();
3059   }
3060 
3061   // If we have an integer seteq/setne, turn it into a compare against zero
3062   // by xor'ing the rhs with the lhs, which is faster than setting a
3063   // condition register, reading it back out, and masking the correct bit.  The
3064   // normal approach here uses sub to do this instead of xor.  Using xor exposes
3065   // the result to other bit-twiddling opportunities.
3066   EVT LHSVT = Op.getOperand(0).getValueType();
3067   if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
3068     EVT VT = Op.getValueType();
3069     SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
3070                                 Op.getOperand(1));
3071     return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
3072   }
3073   return SDValue();
3074 }
3075 
3076 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
3077   SDNode *Node = Op.getNode();
3078   EVT VT = Node->getValueType(0);
3079   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3080   SDValue InChain = Node->getOperand(0);
3081   SDValue VAListPtr = Node->getOperand(1);
3082   const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
3083   SDLoc dl(Node);
3084 
3085   assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
3086 
3087   // gpr_index
3088   SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3089                                     VAListPtr, MachinePointerInfo(SV), MVT::i8);
3090   InChain = GprIndex.getValue(1);
3091 
3092   if (VT == MVT::i64) {
3093     // Check if GprIndex is even
3094     SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
3095                                  DAG.getConstant(1, dl, MVT::i32));
3096     SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
3097                                 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
3098     SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
3099                                           DAG.getConstant(1, dl, MVT::i32));
3100     // Align GprIndex to be even if it isn't
3101     GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
3102                            GprIndex);
3103   }
3104 
3105   // fpr index is 1 byte after gpr
3106   SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3107                                DAG.getConstant(1, dl, MVT::i32));
3108 
3109   // fpr
3110   SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3111                                     FprPtr, MachinePointerInfo(SV), MVT::i8);
3112   InChain = FprIndex.getValue(1);
3113 
3114   SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3115                                        DAG.getConstant(8, dl, MVT::i32));
3116 
3117   SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3118                                         DAG.getConstant(4, dl, MVT::i32));
3119 
3120   // areas
3121   SDValue OverflowArea =
3122       DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
3123   InChain = OverflowArea.getValue(1);
3124 
3125   SDValue RegSaveArea =
3126       DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
3127   InChain = RegSaveArea.getValue(1);
3128 
3129   // select overflow_area if index > 8
3130   SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
3131                             DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);
3132 
3133   // adjustment constant gpr_index * 4/8
3134   SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
3135                                     VT.isInteger() ? GprIndex : FprIndex,
3136                                     DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
3137                                                     MVT::i32));
3138 
3139   // OurReg = RegSaveArea + RegConstant
3140   SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
3141                                RegConstant);
3142 
3143   // Floating types are 32 bytes into RegSaveArea
3144   if (VT.isFloatingPoint())
3145     OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
3146                          DAG.getConstant(32, dl, MVT::i32));
3147 
3148   // increase {f,g}pr_index by 1 (or 2 if VT is i64)
3149   SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3150                                    VT.isInteger() ? GprIndex : FprIndex,
3151                                    DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
3152                                                    MVT::i32));
3153 
3154   InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
3155                               VT.isInteger() ? VAListPtr : FprPtr,
3156                               MachinePointerInfo(SV), MVT::i8);
3157 
3158   // determine if we should load from reg_save_area or overflow_area
3159   SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
3160 
3161   // increase overflow_area by 4/8 if gpr/fpr > 8
3162   SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
3163                                           DAG.getConstant(VT.isInteger() ? 4 : 8,
3164                                           dl, MVT::i32));
3165 
3166   OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
3167                              OverflowAreaPlusN);
3168 
3169   InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
3170                               MachinePointerInfo(), MVT::i32);
3171 
3172   return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
3173 }
3174 
3175 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
3176   assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
3177 
3178   // We have to copy the entire va_list struct:
3179   // 2*sizeof(char) + 2 Byte alignment + 2*sizeof(char*) = 12 Byte
3180   return DAG.getMemcpy(Op.getOperand(0), Op, Op.getOperand(1), Op.getOperand(2),
3181                        DAG.getConstant(12, SDLoc(Op), MVT::i32), Align(8),
3182                        false, true, false, MachinePointerInfo(),
3183                        MachinePointerInfo());
3184 }
3185 
3186 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
3187                                                   SelectionDAG &DAG) const {
3188   if (Subtarget.isAIXABI())
3189     report_fatal_error("ADJUST_TRAMPOLINE operation is not supported on AIX.");
3190 
3191   return Op.getOperand(0);
3192 }
3193 
3194 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
3195                                                 SelectionDAG &DAG) const {
3196   if (Subtarget.isAIXABI())
3197     report_fatal_error("INIT_TRAMPOLINE operation is not supported on AIX.");
3198 
3199   SDValue Chain = Op.getOperand(0);
3200   SDValue Trmp = Op.getOperand(1); // trampoline
3201   SDValue FPtr = Op.getOperand(2); // nested function
3202   SDValue Nest = Op.getOperand(3); // 'nest' parameter value
3203   SDLoc dl(Op);
3204 
3205   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3206   bool isPPC64 = (PtrVT == MVT::i64);
3207   Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
3208 
3209   TargetLowering::ArgListTy Args;
3210   TargetLowering::ArgListEntry Entry;
3211 
3212   Entry.Ty = IntPtrTy;
3213   Entry.Node = Trmp; Args.push_back(Entry);
3214 
3215   // TrampSize == (isPPC64 ? 48 : 40);
3216   Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
3217                                isPPC64 ? MVT::i64 : MVT::i32);
3218   Args.push_back(Entry);
3219 
3220   Entry.Node = FPtr; Args.push_back(Entry);
3221   Entry.Node = Nest; Args.push_back(Entry);
3222 
3223   // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
3224   TargetLowering::CallLoweringInfo CLI(DAG);
3225   CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
3226       CallingConv::C, Type::getVoidTy(*DAG.getContext()),
3227       DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args));
3228 
3229   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
3230   return CallResult.second;
3231 }
3232 
3233 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3234   MachineFunction &MF = DAG.getMachineFunction();
3235   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3236   EVT PtrVT = getPointerTy(MF.getDataLayout());
3237 
3238   SDLoc dl(Op);
3239 
3240   if (Subtarget.isPPC64()) {
3241     // vastart just stores the address of the VarArgsFrameIndex slot into the
3242     // memory location argument.
3243     SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3244     const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3245     return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
3246                         MachinePointerInfo(SV));
3247   }
3248 
3249   // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
3250   // We suppose the given va_list is already allocated.
3251   //
3252   // typedef struct {
3253   //  char gpr;     /* index into the array of 8 GPRs
3254   //                 * stored in the register save area
3255   //                 * gpr=0 corresponds to r3,
3256   //                 * gpr=1 to r4, etc.
3257   //                 */
3258   //  char fpr;     /* index into the array of 8 FPRs
3259   //                 * stored in the register save area
3260   //                 * fpr=0 corresponds to f1,
3261   //                 * fpr=1 to f2, etc.
3262   //                 */
3263   //  char *overflow_arg_area;
3264   //                /* location on stack that holds
3265   //                 * the next overflow argument
3266   //                 */
3267   //  char *reg_save_area;
3268   //               /* where r3:r10 and f1:f8 (if saved)
3269   //                * are stored
3270   //                */
3271   // } va_list[1];
3272 
3273   SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
3274   SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);
3275   SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
3276                                             PtrVT);
3277   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3278                                  PtrVT);
3279 
3280   uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
3281   SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT);
3282 
3283   uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
3284   SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT);
3285 
3286   uint64_t FPROffset = 1;
3287   SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT);
3288 
3289   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3290 
3291   // Store first byte : number of int regs
3292   SDValue firstStore =
3293       DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1),
3294                         MachinePointerInfo(SV), MVT::i8);
3295   uint64_t nextOffset = FPROffset;
3296   SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
3297                                   ConstFPROffset);
3298 
3299   // Store second byte : number of float regs
3300   SDValue secondStore =
3301       DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
3302                         MachinePointerInfo(SV, nextOffset), MVT::i8);
3303   nextOffset += StackOffset;
3304   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
3305 
3306   // Store second word : arguments given on stack
3307   SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
3308                                     MachinePointerInfo(SV, nextOffset));
3309   nextOffset += FrameOffset;
3310   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
3311 
3312   // Store third word : arguments given in registers
3313   return DAG.getStore(thirdStore, dl, FR, nextPtr,
3314                       MachinePointerInfo(SV, nextOffset));
3315 }
3316 
3317 /// FPR - The set of FP registers that should be allocated for arguments
3318 /// on Darwin and AIX.
3319 static const MCPhysReg FPR[] = {PPC::F1,  PPC::F2,  PPC::F3, PPC::F4, PPC::F5,
3320                                 PPC::F6,  PPC::F7,  PPC::F8, PPC::F9, PPC::F10,
3321                                 PPC::F11, PPC::F12, PPC::F13};
3322 
3323 /// QFPR - The set of QPX registers that should be allocated for arguments.
3324 static const MCPhysReg QFPR[] = {
3325     PPC::QF1, PPC::QF2, PPC::QF3,  PPC::QF4,  PPC::QF5,  PPC::QF6, PPC::QF7,
3326     PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13};
3327 
3328 /// CalculateStackSlotSize - Calculates the size reserved for this argument on
3329 /// the stack.
3330 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
3331                                        unsigned PtrByteSize) {
3332   unsigned ArgSize = ArgVT.getStoreSize();
3333   if (Flags.isByVal())
3334     ArgSize = Flags.getByValSize();
3335 
3336   // Round up to multiples of the pointer size, except for array members,
3337   // which are always packed.
3338   if (!Flags.isInConsecutiveRegs())
3339     ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3340 
3341   return ArgSize;
3342 }
3343 
3344 /// CalculateStackSlotAlignment - Calculates the alignment of this argument
3345 /// on the stack.
3346 static Align CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
3347                                          ISD::ArgFlagsTy Flags,
3348                                          unsigned PtrByteSize) {
3349   Align Alignment(PtrByteSize);
3350 
3351   // Altivec parameters are padded to a 16 byte boundary.
3352   if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3353       ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3354       ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3355       ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3356     Alignment = Align(16);
3357   // QPX vector types stored in double-precision are padded to a 32 byte
3358   // boundary.
3359   else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1)
3360     Alignment = Align(32);
3361 
3362   // ByVal parameters are aligned as requested.
3363   if (Flags.isByVal()) {
3364     auto BVAlign = Flags.getNonZeroByValAlign();
3365     if (BVAlign > PtrByteSize) {
3366       if (BVAlign.value() % PtrByteSize != 0)
3367         llvm_unreachable(
3368             "ByVal alignment is not a multiple of the pointer size");
3369 
3370       Alignment = BVAlign;
3371     }
3372   }
3373 
3374   // Array members are always packed to their original alignment.
3375   if (Flags.isInConsecutiveRegs()) {
3376     // If the array member was split into multiple registers, the first
3377     // needs to be aligned to the size of the full type.  (Except for
3378     // ppcf128, which is only aligned as its f64 components.)
3379     if (Flags.isSplit() && OrigVT != MVT::ppcf128)
3380       Alignment = Align(OrigVT.getStoreSize());
3381     else
3382       Alignment = Align(ArgVT.getStoreSize());
3383   }
3384 
3385   return Alignment;
3386 }
3387 
3388 /// CalculateStackSlotUsed - Return whether this argument will use its
3389 /// stack slot (instead of being passed in registers).  ArgOffset,
3390 /// AvailableFPRs, and AvailableVRs must hold the current argument
3391 /// position, and will be updated to account for this argument.
3392 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT,
3393                                    ISD::ArgFlagsTy Flags,
3394                                    unsigned PtrByteSize,
3395                                    unsigned LinkageSize,
3396                                    unsigned ParamAreaSize,
3397                                    unsigned &ArgOffset,
3398                                    unsigned &AvailableFPRs,
3399                                    unsigned &AvailableVRs, bool HasQPX) {
3400   bool UseMemory = false;
3401 
3402   // Respect alignment of argument on the stack.
3403   Align Alignment =
3404       CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
3405   ArgOffset = alignTo(ArgOffset, Alignment);
3406   // If there's no space left in the argument save area, we must
3407   // use memory (this check also catches zero-sized arguments).
3408   if (ArgOffset >= LinkageSize + ParamAreaSize)
3409     UseMemory = true;
3410 
3411   // Allocate argument on the stack.
3412   ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
3413   if (Flags.isInConsecutiveRegsLast())
3414     ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3415   // If we overran the argument save area, we must use memory
3416   // (this check catches arguments passed partially in memory)
3417   if (ArgOffset > LinkageSize + ParamAreaSize)
3418     UseMemory = true;
3419 
3420   // However, if the argument is actually passed in an FPR or a VR,
3421   // we don't use memory after all.
3422   if (!Flags.isByVal()) {
3423     if (ArgVT == MVT::f32 || ArgVT == MVT::f64 ||
3424         // QPX registers overlap with the scalar FP registers.
3425         (HasQPX && (ArgVT == MVT::v4f32 ||
3426                     ArgVT == MVT::v4f64 ||
3427                     ArgVT == MVT::v4i1)))
3428       if (AvailableFPRs > 0) {
3429         --AvailableFPRs;
3430         return false;
3431       }
3432     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3433         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3434         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3435         ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3436       if (AvailableVRs > 0) {
3437         --AvailableVRs;
3438         return false;
3439       }
3440   }
3441 
3442   return UseMemory;
3443 }
3444 
3445 /// EnsureStackAlignment - Round stack frame size up from NumBytes to
3446 /// ensure minimum alignment required for target.
3447 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
3448                                      unsigned NumBytes) {
3449   return alignTo(NumBytes, Lowering->getStackAlign());
3450 }
3451 
3452 SDValue PPCTargetLowering::LowerFormalArguments(
3453     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3454     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3455     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3456   if (Subtarget.isAIXABI())
3457     return LowerFormalArguments_AIX(Chain, CallConv, isVarArg, Ins, dl, DAG,
3458                                     InVals);
3459   if (Subtarget.is64BitELFABI())
3460     return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3461                                        InVals);
3462   if (Subtarget.is32BitELFABI())
3463     return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3464                                        InVals);
3465 
3466   return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, dl, DAG,
3467                                      InVals);
3468 }
3469 
3470 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
3471     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3472     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3473     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3474 
3475   // 32-bit SVR4 ABI Stack Frame Layout:
3476   //              +-----------------------------------+
3477   //        +-->  |            Back chain             |
3478   //        |     +-----------------------------------+
3479   //        |     | Floating-point register save area |
3480   //        |     +-----------------------------------+
3481   //        |     |    General register save area     |
3482   //        |     +-----------------------------------+
3483   //        |     |          CR save word             |
3484   //        |     +-----------------------------------+
3485   //        |     |         VRSAVE save word          |
3486   //        |     +-----------------------------------+
3487   //        |     |         Alignment padding         |
3488   //        |     +-----------------------------------+
3489   //        |     |     Vector register save area     |
3490   //        |     +-----------------------------------+
3491   //        |     |       Local variable space        |
3492   //        |     +-----------------------------------+
3493   //        |     |        Parameter list area        |
3494   //        |     +-----------------------------------+
3495   //        |     |           LR save word            |
3496   //        |     +-----------------------------------+
3497   // SP-->  +---  |            Back chain             |
3498   //              +-----------------------------------+
3499   //
3500   // Specifications:
3501   //   System V Application Binary Interface PowerPC Processor Supplement
3502   //   AltiVec Technology Programming Interface Manual
3503 
3504   MachineFunction &MF = DAG.getMachineFunction();
3505   MachineFrameInfo &MFI = MF.getFrameInfo();
3506   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3507 
3508   EVT PtrVT = getPointerTy(MF.getDataLayout());
3509   // Potential tail calls could cause overwriting of argument stack slots.
3510   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3511                        (CallConv == CallingConv::Fast));
3512   unsigned PtrByteSize = 4;
3513 
3514   // Assign locations to all of the incoming arguments.
3515   SmallVector<CCValAssign, 16> ArgLocs;
3516   PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
3517                  *DAG.getContext());
3518 
3519   // Reserve space for the linkage area on the stack.
3520   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3521   CCInfo.AllocateStack(LinkageSize, PtrByteSize);
3522   if (useSoftFloat())
3523     CCInfo.PreAnalyzeFormalArguments(Ins);
3524 
3525   CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
3526   CCInfo.clearWasPPCF128();
3527 
3528   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3529     CCValAssign &VA = ArgLocs[i];
3530 
3531     // Arguments stored in registers.
3532     if (VA.isRegLoc()) {
3533       const TargetRegisterClass *RC;
3534       EVT ValVT = VA.getValVT();
3535 
3536       switch (ValVT.getSimpleVT().SimpleTy) {
3537         default:
3538           llvm_unreachable("ValVT not supported by formal arguments Lowering");
3539         case MVT::i1:
3540         case MVT::i32:
3541           RC = &PPC::GPRCRegClass;
3542           break;
3543         case MVT::f32:
3544           if (Subtarget.hasP8Vector())
3545             RC = &PPC::VSSRCRegClass;
3546           else if (Subtarget.hasSPE())
3547             RC = &PPC::GPRCRegClass;
3548           else
3549             RC = &PPC::F4RCRegClass;
3550           break;
3551         case MVT::f64:
3552           if (Subtarget.hasVSX())
3553             RC = &PPC::VSFRCRegClass;
3554           else if (Subtarget.hasSPE())
3555             // SPE passes doubles in GPR pairs.
3556             RC = &PPC::GPRCRegClass;
3557           else
3558             RC = &PPC::F8RCRegClass;
3559           break;
3560         case MVT::v16i8:
3561         case MVT::v8i16:
3562         case MVT::v4i32:
3563           RC = &PPC::VRRCRegClass;
3564           break;
3565         case MVT::v4f32:
3566           RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass;
3567           break;
3568         case MVT::v2f64:
3569         case MVT::v2i64:
3570           RC = &PPC::VRRCRegClass;
3571           break;
3572         case MVT::v4f64:
3573           RC = &PPC::QFRCRegClass;
3574           break;
3575         case MVT::v4i1:
3576           RC = &PPC::QBRCRegClass;
3577           break;
3578       }
3579 
3580       SDValue ArgValue;
3581       // Transform the arguments stored in physical registers into
3582       // virtual ones.
3583       if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) {
3584         assert(i + 1 < e && "No second half of double precision argument");
3585         unsigned RegLo = MF.addLiveIn(VA.getLocReg(), RC);
3586         unsigned RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC);
3587         SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32);
3588         SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32);
3589         if (!Subtarget.isLittleEndian())
3590           std::swap (ArgValueLo, ArgValueHi);
3591         ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo,
3592                                ArgValueHi);
3593       } else {
3594         unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3595         ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
3596                                       ValVT == MVT::i1 ? MVT::i32 : ValVT);
3597         if (ValVT == MVT::i1)
3598           ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);
3599       }
3600 
3601       InVals.push_back(ArgValue);
3602     } else {
3603       // Argument stored in memory.
3604       assert(VA.isMemLoc());
3605 
3606       // Get the extended size of the argument type in stack
3607       unsigned ArgSize = VA.getLocVT().getStoreSize();
3608       // Get the actual size of the argument type
3609       unsigned ObjSize = VA.getValVT().getStoreSize();
3610       unsigned ArgOffset = VA.getLocMemOffset();
3611       // Stack objects in PPC32 are right justified.
3612       ArgOffset += ArgSize - ObjSize;
3613       int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, isImmutable);
3614 
3615       // Create load nodes to retrieve arguments from the stack.
3616       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3617       InVals.push_back(
3618           DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo()));
3619     }
3620   }
3621 
3622   // Assign locations to all of the incoming aggregate by value arguments.
3623   // Aggregates passed by value are stored in the local variable space of the
3624   // caller's stack frame, right above the parameter list area.
3625   SmallVector<CCValAssign, 16> ByValArgLocs;
3626   CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
3627                       ByValArgLocs, *DAG.getContext());
3628 
3629   // Reserve stack space for the allocations in CCInfo.
3630   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
3631 
3632   CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);
3633 
3634   // Area that is at least reserved in the caller of this function.
3635   unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
3636   MinReservedArea = std::max(MinReservedArea, LinkageSize);
3637 
3638   // Set the size that is at least reserved in caller of this function.  Tail
3639   // call optimized function's reserved stack space needs to be aligned so that
3640   // taking the difference between two stack areas will result in an aligned
3641   // stack.
3642   MinReservedArea =
3643       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
3644   FuncInfo->setMinReservedArea(MinReservedArea);
3645 
3646   SmallVector<SDValue, 8> MemOps;
3647 
3648   // If the function takes variable number of arguments, make a frame index for
3649   // the start of the first vararg value... for expansion of llvm.va_start.
3650   if (isVarArg) {
3651     static const MCPhysReg GPArgRegs[] = {
3652       PPC::R3, PPC::R4, PPC::R5, PPC::R6,
3653       PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3654     };
3655     const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
3656 
3657     static const MCPhysReg FPArgRegs[] = {
3658       PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
3659       PPC::F8
3660     };
3661     unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
3662 
3663     if (useSoftFloat() || hasSPE())
3664        NumFPArgRegs = 0;
3665 
3666     FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
3667     FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));
3668 
3669     // Make room for NumGPArgRegs and NumFPArgRegs.
3670     int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
3671                 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;
3672 
3673     FuncInfo->setVarArgsStackOffset(
3674       MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
3675                             CCInfo.getNextStackOffset(), true));
3676 
3677     FuncInfo->setVarArgsFrameIndex(MFI.CreateStackObject(Depth, 8, false));
3678     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3679 
3680     // The fixed integer arguments of a variadic function are stored to the
3681     // VarArgsFrameIndex on the stack so that they may be loaded by
3682     // dereferencing the result of va_next.
3683     for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
3684       // Get an existing live-in vreg, or add a new one.
3685       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
3686       if (!VReg)
3687         VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
3688 
3689       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3690       SDValue Store =
3691           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3692       MemOps.push_back(Store);
3693       // Increment the address by four for the next argument to store
3694       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
3695       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3696     }
3697 
3698     // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
3699     // is set.
3700     // The double arguments are stored to the VarArgsFrameIndex
3701     // on the stack.
3702     for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
3703       // Get an existing live-in vreg, or add a new one.
3704       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
3705       if (!VReg)
3706         VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
3707 
3708       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
3709       SDValue Store =
3710           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3711       MemOps.push_back(Store);
3712       // Increment the address by eight for the next argument to store
3713       SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
3714                                          PtrVT);
3715       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3716     }
3717   }
3718 
3719   if (!MemOps.empty())
3720     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3721 
3722   return Chain;
3723 }
3724 
3725 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
3726 // value to MVT::i64 and then truncate to the correct register size.
3727 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
3728                                              EVT ObjectVT, SelectionDAG &DAG,
3729                                              SDValue ArgVal,
3730                                              const SDLoc &dl) const {
3731   if (Flags.isSExt())
3732     ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
3733                          DAG.getValueType(ObjectVT));
3734   else if (Flags.isZExt())
3735     ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
3736                          DAG.getValueType(ObjectVT));
3737 
3738   return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
3739 }
3740 
3741 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
3742     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3743     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3744     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3745   // TODO: add description of PPC stack frame format, or at least some docs.
3746   //
3747   bool isELFv2ABI = Subtarget.isELFv2ABI();
3748   bool isLittleEndian = Subtarget.isLittleEndian();
3749   MachineFunction &MF = DAG.getMachineFunction();
3750   MachineFrameInfo &MFI = MF.getFrameInfo();
3751   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3752 
3753   assert(!(CallConv == CallingConv::Fast && isVarArg) &&
3754          "fastcc not supported on varargs functions");
3755 
3756   EVT PtrVT = getPointerTy(MF.getDataLayout());
3757   // Potential tail calls could cause overwriting of argument stack slots.
3758   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3759                        (CallConv == CallingConv::Fast));
3760   unsigned PtrByteSize = 8;
3761   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3762 
3763   static const MCPhysReg GPR[] = {
3764     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
3765     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
3766   };
3767   static const MCPhysReg VR[] = {
3768     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
3769     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
3770   };
3771 
3772   const unsigned Num_GPR_Regs = array_lengthof(GPR);
3773   const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
3774   const unsigned Num_VR_Regs  = array_lengthof(VR);
3775   const unsigned Num_QFPR_Regs = Num_FPR_Regs;
3776 
3777   // Do a first pass over the arguments to determine whether the ABI
3778   // guarantees that our caller has allocated the parameter save area
3779   // on its stack frame.  In the ELFv1 ABI, this is always the case;
3780   // in the ELFv2 ABI, it is true if this is a vararg function or if
3781   // any parameter is located in a stack slot.
3782 
3783   bool HasParameterArea = !isELFv2ABI || isVarArg;
3784   unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
3785   unsigned NumBytes = LinkageSize;
3786   unsigned AvailableFPRs = Num_FPR_Regs;
3787   unsigned AvailableVRs = Num_VR_Regs;
3788   for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3789     if (Ins[i].Flags.isNest())
3790       continue;
3791 
3792     if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
3793                                PtrByteSize, LinkageSize, ParamAreaSize,
3794                                NumBytes, AvailableFPRs, AvailableVRs,
3795                                Subtarget.hasQPX()))
3796       HasParameterArea = true;
3797   }
3798 
3799   // Add DAG nodes to load the arguments or copy them out of registers.  On
3800   // entry to a function on PPC, the arguments start after the linkage area,
3801   // although the first ones are often in registers.
3802 
3803   unsigned ArgOffset = LinkageSize;
3804   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
3805   unsigned &QFPR_idx = FPR_idx;
3806   SmallVector<SDValue, 8> MemOps;
3807   Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
3808   unsigned CurArgIdx = 0;
3809   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
3810     SDValue ArgVal;
3811     bool needsLoad = false;
3812     EVT ObjectVT = Ins[ArgNo].VT;
3813     EVT OrigVT = Ins[ArgNo].ArgVT;
3814     unsigned ObjSize = ObjectVT.getStoreSize();
3815     unsigned ArgSize = ObjSize;
3816     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
3817     if (Ins[ArgNo].isOrigArg()) {
3818       std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
3819       CurArgIdx = Ins[ArgNo].getOrigArgIndex();
3820     }
3821     // We re-align the argument offset for each argument, except when using the
3822     // fast calling convention, when we need to make sure we do that only when
3823     // we'll actually use a stack slot.
3824     unsigned CurArgOffset;
3825     Align Alignment;
3826     auto ComputeArgOffset = [&]() {
3827       /* Respect alignment of argument on the stack.  */
3828       Alignment =
3829           CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
3830       ArgOffset = alignTo(ArgOffset, Alignment);
3831       CurArgOffset = ArgOffset;
3832     };
3833 
3834     if (CallConv != CallingConv::Fast) {
3835       ComputeArgOffset();
3836 
3837       /* Compute GPR index associated with argument offset.  */
3838       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
3839       GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
3840     }
3841 
3842     // FIXME the codegen can be much improved in some cases.
3843     // We do not have to keep everything in memory.
3844     if (Flags.isByVal()) {
3845       assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
3846 
3847       if (CallConv == CallingConv::Fast)
3848         ComputeArgOffset();
3849 
3850       // ObjSize is the true size, ArgSize rounded up to multiple of registers.
3851       ObjSize = Flags.getByValSize();
3852       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3853       // Empty aggregate parameters do not take up registers.  Examples:
3854       //   struct { } a;
3855       //   union  { } b;
3856       //   int c[0];
3857       // etc.  However, we have to provide a place-holder in InVals, so
3858       // pretend we have an 8-byte item at the current address for that
3859       // purpose.
3860       if (!ObjSize) {
3861         int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
3862         SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3863         InVals.push_back(FIN);
3864         continue;
3865       }
3866 
3867       // Create a stack object covering all stack doublewords occupied
3868       // by the argument.  If the argument is (fully or partially) on
3869       // the stack, or if the argument is fully in registers but the
3870       // caller has allocated the parameter save anyway, we can refer
3871       // directly to the caller's stack frame.  Otherwise, create a
3872       // local copy in our own frame.
3873       int FI;
3874       if (HasParameterArea ||
3875           ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
3876         FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true);
3877       else
3878         FI = MFI.CreateStackObject(ArgSize, Alignment, false);
3879       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3880 
3881       // Handle aggregates smaller than 8 bytes.
3882       if (ObjSize < PtrByteSize) {
3883         // The value of the object is its address, which differs from the
3884         // address of the enclosing doubleword on big-endian systems.
3885         SDValue Arg = FIN;
3886         if (!isLittleEndian) {
3887           SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
3888           Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
3889         }
3890         InVals.push_back(Arg);
3891 
3892         if (GPR_idx != Num_GPR_Regs) {
3893           unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3894           FuncInfo->addLiveInAttr(VReg, Flags);
3895           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3896           SDValue Store;
3897 
3898           if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
3899             EVT ObjType = (ObjSize == 1 ? MVT::i8 :
3900                            (ObjSize == 2 ? MVT::i16 : MVT::i32));
3901             Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
3902                                       MachinePointerInfo(&*FuncArg), ObjType);
3903           } else {
3904             // For sizes that don't fit a truncating store (3, 5, 6, 7),
3905             // store the whole register as-is to the parameter save area
3906             // slot.
3907             Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
3908                                  MachinePointerInfo(&*FuncArg));
3909           }
3910 
3911           MemOps.push_back(Store);
3912         }
3913         // Whether we copied from a register or not, advance the offset
3914         // into the parameter save area by a full doubleword.
3915         ArgOffset += PtrByteSize;
3916         continue;
3917       }
3918 
3919       // The value of the object is its address, which is the address of
3920       // its first stack doubleword.
3921       InVals.push_back(FIN);
3922 
3923       // Store whatever pieces of the object are in registers to memory.
3924       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
3925         if (GPR_idx == Num_GPR_Regs)
3926           break;
3927 
3928         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3929         FuncInfo->addLiveInAttr(VReg, Flags);
3930         SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3931         SDValue Addr = FIN;
3932         if (j) {
3933           SDValue Off = DAG.getConstant(j, dl, PtrVT);
3934           Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
3935         }
3936         SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
3937                                      MachinePointerInfo(&*FuncArg, j));
3938         MemOps.push_back(Store);
3939         ++GPR_idx;
3940       }
3941       ArgOffset += ArgSize;
3942       continue;
3943     }
3944 
3945     switch (ObjectVT.getSimpleVT().SimpleTy) {
3946     default: llvm_unreachable("Unhandled argument type!");
3947     case MVT::i1:
3948     case MVT::i32:
3949     case MVT::i64:
3950       if (Flags.isNest()) {
3951         // The 'nest' parameter, if any, is passed in R11.
3952         unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
3953         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
3954 
3955         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
3956           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
3957 
3958         break;
3959       }
3960 
3961       // These can be scalar arguments or elements of an integer array type
3962       // passed directly.  Clang may use those instead of "byval" aggregate
3963       // types to avoid forcing arguments to memory unnecessarily.
3964       if (GPR_idx != Num_GPR_Regs) {
3965         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3966         FuncInfo->addLiveInAttr(VReg, Flags);
3967         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
3968 
3969         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
3970           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
3971           // value to MVT::i64 and then truncate to the correct register size.
3972           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
3973       } else {
3974         if (CallConv == CallingConv::Fast)
3975           ComputeArgOffset();
3976 
3977         needsLoad = true;
3978         ArgSize = PtrByteSize;
3979       }
3980       if (CallConv != CallingConv::Fast || needsLoad)
3981         ArgOffset += 8;
3982       break;
3983 
3984     case MVT::f32:
3985     case MVT::f64:
3986       // These can be scalar arguments or elements of a float array type
3987       // passed directly.  The latter are used to implement ELFv2 homogenous
3988       // float aggregates.
3989       if (FPR_idx != Num_FPR_Regs) {
3990         unsigned VReg;
3991 
3992         if (ObjectVT == MVT::f32)
3993           VReg = MF.addLiveIn(FPR[FPR_idx],
3994                               Subtarget.hasP8Vector()
3995                                   ? &PPC::VSSRCRegClass
3996                                   : &PPC::F4RCRegClass);
3997         else
3998           VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
3999                                                 ? &PPC::VSFRCRegClass
4000                                                 : &PPC::F8RCRegClass);
4001 
4002         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4003         ++FPR_idx;
4004       } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
4005         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
4006         // once we support fp <-> gpr moves.
4007 
4008         // This can only ever happen in the presence of f32 array types,
4009         // since otherwise we never run out of FPRs before running out
4010         // of GPRs.
4011         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4012         FuncInfo->addLiveInAttr(VReg, Flags);
4013         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4014 
4015         if (ObjectVT == MVT::f32) {
4016           if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
4017             ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
4018                                  DAG.getConstant(32, dl, MVT::i32));
4019           ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
4020         }
4021 
4022         ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
4023       } else {
4024         if (CallConv == CallingConv::Fast)
4025           ComputeArgOffset();
4026 
4027         needsLoad = true;
4028       }
4029 
4030       // When passing an array of floats, the array occupies consecutive
4031       // space in the argument area; only round up to the next doubleword
4032       // at the end of the array.  Otherwise, each float takes 8 bytes.
4033       if (CallConv != CallingConv::Fast || needsLoad) {
4034         ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
4035         ArgOffset += ArgSize;
4036         if (Flags.isInConsecutiveRegsLast())
4037           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4038       }
4039       break;
4040     case MVT::v4f32:
4041     case MVT::v4i32:
4042     case MVT::v8i16:
4043     case MVT::v16i8:
4044     case MVT::v2f64:
4045     case MVT::v2i64:
4046     case MVT::v1i128:
4047     case MVT::f128:
4048       if (!Subtarget.hasQPX()) {
4049         // These can be scalar arguments or elements of a vector array type
4050         // passed directly.  The latter are used to implement ELFv2 homogenous
4051         // vector aggregates.
4052         if (VR_idx != Num_VR_Regs) {
4053           unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
4054           ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4055           ++VR_idx;
4056         } else {
4057           if (CallConv == CallingConv::Fast)
4058             ComputeArgOffset();
4059           needsLoad = true;
4060         }
4061         if (CallConv != CallingConv::Fast || needsLoad)
4062           ArgOffset += 16;
4063         break;
4064       } // not QPX
4065 
4066       assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 &&
4067              "Invalid QPX parameter type");
4068       LLVM_FALLTHROUGH;
4069 
4070     case MVT::v4f64:
4071     case MVT::v4i1:
4072       // QPX vectors are treated like their scalar floating-point subregisters
4073       // (except that they're larger).
4074       unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32;
4075       if (QFPR_idx != Num_QFPR_Regs) {
4076         const TargetRegisterClass *RC;
4077         switch (ObjectVT.getSimpleVT().SimpleTy) {
4078         case MVT::v4f64: RC = &PPC::QFRCRegClass; break;
4079         case MVT::v4f32: RC = &PPC::QSRCRegClass; break;
4080         default:         RC = &PPC::QBRCRegClass; break;
4081         }
4082 
4083         unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC);
4084         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4085         ++QFPR_idx;
4086       } else {
4087         if (CallConv == CallingConv::Fast)
4088           ComputeArgOffset();
4089         needsLoad = true;
4090       }
4091       if (CallConv != CallingConv::Fast || needsLoad)
4092         ArgOffset += Sz;
4093       break;
4094     }
4095 
4096     // We need to load the argument to a virtual register if we determined
4097     // above that we ran out of physical registers of the appropriate type.
4098     if (needsLoad) {
4099       if (ObjSize < ArgSize && !isLittleEndian)
4100         CurArgOffset += ArgSize - ObjSize;
4101       int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
4102       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4103       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4104     }
4105 
4106     InVals.push_back(ArgVal);
4107   }
4108 
4109   // Area that is at least reserved in the caller of this function.
4110   unsigned MinReservedArea;
4111   if (HasParameterArea)
4112     MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
4113   else
4114     MinReservedArea = LinkageSize;
4115 
4116   // Set the size that is at least reserved in caller of this function.  Tail
4117   // call optimized functions' reserved stack space needs to be aligned so that
4118   // taking the difference between two stack areas will result in an aligned
4119   // stack.
4120   MinReservedArea =
4121       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4122   FuncInfo->setMinReservedArea(MinReservedArea);
4123 
4124   // If the function takes variable number of arguments, make a frame index for
4125   // the start of the first vararg value... for expansion of llvm.va_start.
4126   if (isVarArg) {
4127     int Depth = ArgOffset;
4128 
4129     FuncInfo->setVarArgsFrameIndex(
4130       MFI.CreateFixedObject(PtrByteSize, Depth, true));
4131     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4132 
4133     // If this function is vararg, store any remaining integer argument regs
4134     // to their spots on the stack so that they may be loaded by dereferencing
4135     // the result of va_next.
4136     for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4137          GPR_idx < Num_GPR_Regs; ++GPR_idx) {
4138       unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4139       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4140       SDValue Store =
4141           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4142       MemOps.push_back(Store);
4143       // Increment the address by four for the next argument to store
4144       SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
4145       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4146     }
4147   }
4148 
4149   if (!MemOps.empty())
4150     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4151 
4152   return Chain;
4153 }
4154 
4155 SDValue PPCTargetLowering::LowerFormalArguments_Darwin(
4156     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
4157     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4158     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4159   // TODO: add description of PPC stack frame format, or at least some docs.
4160   //
4161   MachineFunction &MF = DAG.getMachineFunction();
4162   MachineFrameInfo &MFI = MF.getFrameInfo();
4163   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
4164 
4165   EVT PtrVT = getPointerTy(MF.getDataLayout());
4166   bool isPPC64 = PtrVT == MVT::i64;
4167   // Potential tail calls could cause overwriting of argument stack slots.
4168   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
4169                        (CallConv == CallingConv::Fast));
4170   unsigned PtrByteSize = isPPC64 ? 8 : 4;
4171   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4172   unsigned ArgOffset = LinkageSize;
4173   // Area that is at least reserved in caller of this function.
4174   unsigned MinReservedArea = ArgOffset;
4175 
4176   static const MCPhysReg GPR_32[] = {           // 32-bit registers.
4177     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
4178     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
4179   };
4180   static const MCPhysReg GPR_64[] = {           // 64-bit registers.
4181     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4182     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4183   };
4184   static const MCPhysReg VR[] = {
4185     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4186     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4187   };
4188 
4189   const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
4190   const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
4191   const unsigned Num_VR_Regs  = array_lengthof( VR);
4192 
4193   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
4194 
4195   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
4196 
4197   // In 32-bit non-varargs functions, the stack space for vectors is after the
4198   // stack space for non-vectors.  We do not use this space unless we have
4199   // too many vectors to fit in registers, something that only occurs in
4200   // constructed examples:), but we have to walk the arglist to figure
4201   // that out...for the pathological case, compute VecArgOffset as the
4202   // start of the vector parameter area.  Computing VecArgOffset is the
4203   // entire point of the following loop.
4204   unsigned VecArgOffset = ArgOffset;
4205   if (!isVarArg && !isPPC64) {
4206     for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
4207          ++ArgNo) {
4208       EVT ObjectVT = Ins[ArgNo].VT;
4209       ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4210 
4211       if (Flags.isByVal()) {
4212         // ObjSize is the true size, ArgSize rounded up to multiple of regs.
4213         unsigned ObjSize = Flags.getByValSize();
4214         unsigned ArgSize =
4215                 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4216         VecArgOffset += ArgSize;
4217         continue;
4218       }
4219 
4220       switch(ObjectVT.getSimpleVT().SimpleTy) {
4221       default: llvm_unreachable("Unhandled argument type!");
4222       case MVT::i1:
4223       case MVT::i32:
4224       case MVT::f32:
4225         VecArgOffset += 4;
4226         break;
4227       case MVT::i64:  // PPC64
4228       case MVT::f64:
4229         // FIXME: We are guaranteed to be !isPPC64 at this point.
4230         // Does MVT::i64 apply?
4231         VecArgOffset += 8;
4232         break;
4233       case MVT::v4f32:
4234       case MVT::v4i32:
4235       case MVT::v8i16:
4236       case MVT::v16i8:
4237         // Nothing to do, we're only looking at Nonvector args here.
4238         break;
4239       }
4240     }
4241   }
4242   // We've found where the vector parameter area in memory is.  Skip the
4243   // first 12 parameters; these don't use that memory.
4244   VecArgOffset = ((VecArgOffset+15)/16)*16;
4245   VecArgOffset += 12*16;
4246 
4247   // Add DAG nodes to load the arguments or copy them out of registers.  On
4248   // entry to a function on PPC, the arguments start after the linkage area,
4249   // although the first ones are often in registers.
4250 
4251   SmallVector<SDValue, 8> MemOps;
4252   unsigned nAltivecParamsAtEnd = 0;
4253   Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
4254   unsigned CurArgIdx = 0;
4255   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
4256     SDValue ArgVal;
4257     bool needsLoad = false;
4258     EVT ObjectVT = Ins[ArgNo].VT;
4259     unsigned ObjSize = ObjectVT.getSizeInBits()/8;
4260     unsigned ArgSize = ObjSize;
4261     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4262     if (Ins[ArgNo].isOrigArg()) {
4263       std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
4264       CurArgIdx = Ins[ArgNo].getOrigArgIndex();
4265     }
4266     unsigned CurArgOffset = ArgOffset;
4267 
4268     // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary.
4269     if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
4270         ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
4271       if (isVarArg || isPPC64) {
4272         MinReservedArea = ((MinReservedArea+15)/16)*16;
4273         MinReservedArea += CalculateStackSlotSize(ObjectVT,
4274                                                   Flags,
4275                                                   PtrByteSize);
4276       } else  nAltivecParamsAtEnd++;
4277     } else
4278       // Calculate min reserved area.
4279       MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
4280                                                 Flags,
4281                                                 PtrByteSize);
4282 
4283     // FIXME the codegen can be much improved in some cases.
4284     // We do not have to keep everything in memory.
4285     if (Flags.isByVal()) {
4286       assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
4287 
4288       // ObjSize is the true size, ArgSize rounded up to multiple of registers.
4289       ObjSize = Flags.getByValSize();
4290       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4291       // Objects of size 1 and 2 are right justified, everything else is
4292       // left justified.  This means the memory address is adjusted forwards.
4293       if (ObjSize==1 || ObjSize==2) {
4294         CurArgOffset = CurArgOffset + (4 - ObjSize);
4295       }
4296       // The value of the object is its address.
4297       int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, false, true);
4298       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4299       InVals.push_back(FIN);
4300       if (ObjSize==1 || ObjSize==2) {
4301         if (GPR_idx != Num_GPR_Regs) {
4302           unsigned VReg;
4303           if (isPPC64)
4304             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4305           else
4306             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4307           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4308           EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16;
4309           SDValue Store =
4310               DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
4311                                 MachinePointerInfo(&*FuncArg), ObjType);
4312           MemOps.push_back(Store);
4313           ++GPR_idx;
4314         }
4315 
4316         ArgOffset += PtrByteSize;
4317 
4318         continue;
4319       }
4320       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
4321         // Store whatever pieces of the object are in registers
4322         // to memory.  ArgOffset will be the address of the beginning
4323         // of the object.
4324         if (GPR_idx != Num_GPR_Regs) {
4325           unsigned VReg;
4326           if (isPPC64)
4327             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4328           else
4329             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4330           int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
4331           SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4332           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4333           SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
4334                                        MachinePointerInfo(&*FuncArg, j));
4335           MemOps.push_back(Store);
4336           ++GPR_idx;
4337           ArgOffset += PtrByteSize;
4338         } else {
4339           ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
4340           break;
4341         }
4342       }
4343       continue;
4344     }
4345 
4346     switch (ObjectVT.getSimpleVT().SimpleTy) {
4347     default: llvm_unreachable("Unhandled argument type!");
4348     case MVT::i1:
4349     case MVT::i32:
4350       if (!isPPC64) {
4351         if (GPR_idx != Num_GPR_Regs) {
4352           unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4353           ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
4354 
4355           if (ObjectVT == MVT::i1)
4356             ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal);
4357 
4358           ++GPR_idx;
4359         } else {
4360           needsLoad = true;
4361           ArgSize = PtrByteSize;
4362         }
4363         // All int arguments reserve stack space in the Darwin ABI.
4364         ArgOffset += PtrByteSize;
4365         break;
4366       }
4367       LLVM_FALLTHROUGH;
4368     case MVT::i64:  // PPC64
4369       if (GPR_idx != Num_GPR_Regs) {
4370         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4371         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4372 
4373         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4374           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4375           // value to MVT::i64 and then truncate to the correct register size.
4376           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4377 
4378         ++GPR_idx;
4379       } else {
4380         needsLoad = true;
4381         ArgSize = PtrByteSize;
4382       }
4383       // All int arguments reserve stack space in the Darwin ABI.
4384       ArgOffset += 8;
4385       break;
4386 
4387     case MVT::f32:
4388     case MVT::f64:
4389       // Every 4 bytes of argument space consumes one of the GPRs available for
4390       // argument passing.
4391       if (GPR_idx != Num_GPR_Regs) {
4392         ++GPR_idx;
4393         if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
4394           ++GPR_idx;
4395       }
4396       if (FPR_idx != Num_FPR_Regs) {
4397         unsigned VReg;
4398 
4399         if (ObjectVT == MVT::f32)
4400           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
4401         else
4402           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);
4403 
4404         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4405         ++FPR_idx;
4406       } else {
4407         needsLoad = true;
4408       }
4409 
4410       // All FP arguments reserve stack space in the Darwin ABI.
4411       ArgOffset += isPPC64 ? 8 : ObjSize;
4412       break;
4413     case MVT::v4f32:
4414     case MVT::v4i32:
4415     case MVT::v8i16:
4416     case MVT::v16i8:
4417       // Note that vector arguments in registers don't reserve stack space,
4418       // except in varargs functions.
4419       if (VR_idx != Num_VR_Regs) {
4420         unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
4421         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4422         if (isVarArg) {
4423           while ((ArgOffset % 16) != 0) {
4424             ArgOffset += PtrByteSize;
4425             if (GPR_idx != Num_GPR_Regs)
4426               GPR_idx++;
4427           }
4428           ArgOffset += 16;
4429           GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
4430         }
4431         ++VR_idx;
4432       } else {
4433         if (!isVarArg && !isPPC64) {
4434           // Vectors go after all the nonvectors.
4435           CurArgOffset = VecArgOffset;
4436           VecArgOffset += 16;
4437         } else {
4438           // Vectors are aligned.
4439           ArgOffset = ((ArgOffset+15)/16)*16;
4440           CurArgOffset = ArgOffset;
4441           ArgOffset += 16;
4442         }
4443         needsLoad = true;
4444       }
4445       break;
4446     }
4447 
4448     // We need to load the argument to a virtual register if we determined above
4449     // that we ran out of physical registers of the appropriate type.
4450     if (needsLoad) {
4451       int FI = MFI.CreateFixedObject(ObjSize,
4452                                      CurArgOffset + (ArgSize - ObjSize),
4453                                      isImmutable);
4454       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4455       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4456     }
4457 
4458     InVals.push_back(ArgVal);
4459   }
4460 
4461   // Allow for Altivec parameters at the end, if needed.
4462   if (nAltivecParamsAtEnd) {
4463     MinReservedArea = ((MinReservedArea+15)/16)*16;
4464     MinReservedArea += 16*nAltivecParamsAtEnd;
4465   }
4466 
4467   // Area that is at least reserved in the caller of this function.
4468   MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);
4469 
4470   // Set the size that is at least reserved in caller of this function.  Tail
4471   // call optimized functions' reserved stack space needs to be aligned so that
4472   // taking the difference between two stack areas will result in an aligned
4473   // stack.
4474   MinReservedArea =
4475       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4476   FuncInfo->setMinReservedArea(MinReservedArea);
4477 
4478   // If the function takes variable number of arguments, make a frame index for
4479   // the start of the first vararg value... for expansion of llvm.va_start.
4480   if (isVarArg) {
4481     int Depth = ArgOffset;
4482 
4483     FuncInfo->setVarArgsFrameIndex(
4484       MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
4485                             Depth, true));
4486     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4487 
4488     // If this function is vararg, store any remaining integer argument regs
4489     // to their spots on the stack so that they may be loaded by dereferencing
4490     // the result of va_next.
4491     for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
4492       unsigned VReg;
4493 
4494       if (isPPC64)
4495         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4496       else
4497         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4498 
4499       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4500       SDValue Store =
4501           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4502       MemOps.push_back(Store);
4503       // Increment the address by four for the next argument to store
4504       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
4505       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4506     }
4507   }
4508 
4509   if (!MemOps.empty())
4510     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4511 
4512   return Chain;
4513 }
4514 
4515 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
4516 /// adjusted to accommodate the arguments for the tailcall.
4517 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
4518                                    unsigned ParamSize) {
4519 
4520   if (!isTailCall) return 0;
4521 
4522   PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
4523   unsigned CallerMinReservedArea = FI->getMinReservedArea();
4524   int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
4525   // Remember only if the new adjustment is bigger.
4526   if (SPDiff < FI->getTailCallSPDelta())
4527     FI->setTailCallSPDelta(SPDiff);
4528 
4529   return SPDiff;
4530 }
4531 
4532 static bool isFunctionGlobalAddress(SDValue Callee);
4533 
4534 static bool
4535 callsShareTOCBase(const Function *Caller, SDValue Callee,
4536                     const TargetMachine &TM) {
4537    // Callee is either a GlobalAddress or an ExternalSymbol. ExternalSymbols
4538    // don't have enough information to determine if the caller and calle share
4539    // the same  TOC base, so we have to pessimistically assume they don't for
4540    // correctness.
4541    GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4542    if (!G)
4543      return false;
4544 
4545    const GlobalValue *GV = G->getGlobal();
4546   // The medium and large code models are expected to provide a sufficiently
4547   // large TOC to provide all data addressing needs of a module with a
4548   // single TOC. Since each module will be addressed with a single TOC then we
4549   // only need to check that caller and callee don't cross dso boundaries.
4550   if (CodeModel::Medium == TM.getCodeModel() ||
4551       CodeModel::Large == TM.getCodeModel())
4552     return TM.shouldAssumeDSOLocal(*Caller->getParent(), GV);
4553 
4554   // Otherwise we need to ensure callee and caller are in the same section,
4555   // since the linker may allocate multiple TOCs, and we don't know which
4556   // sections will belong to the same TOC base.
4557 
4558   if (!GV->isStrongDefinitionForLinker())
4559     return false;
4560 
4561   // Any explicitly-specified sections and section prefixes must also match.
4562   // Also, if we're using -ffunction-sections, then each function is always in
4563   // a different section (the same is true for COMDAT functions).
4564   if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
4565       GV->getSection() != Caller->getSection())
4566     return false;
4567   if (const auto *F = dyn_cast<Function>(GV)) {
4568     if (F->getSectionPrefix() != Caller->getSectionPrefix())
4569       return false;
4570   }
4571 
4572   // If the callee might be interposed, then we can't assume the ultimate call
4573   // target will be in the same section. Even in cases where we can assume that
4574   // interposition won't happen, in any case where the linker might insert a
4575   // stub to allow for interposition, we must generate code as though
4576   // interposition might occur. To understand why this matters, consider a
4577   // situation where: a -> b -> c where the arrows indicate calls. b and c are
4578   // in the same section, but a is in a different module (i.e. has a different
4579   // TOC base pointer). If the linker allows for interposition between b and c,
4580   // then it will generate a stub for the call edge between b and c which will
4581   // save the TOC pointer into the designated stack slot allocated by b. If we
4582   // return true here, and therefore allow a tail call between b and c, that
4583   // stack slot won't exist and the b -> c stub will end up saving b'c TOC base
4584   // pointer into the stack slot allocated by a (where the a -> b stub saved
4585   // a's TOC base pointer). If we're not considering a tail call, but rather,
4586   // whether a nop is needed after the call instruction in b, because the linker
4587   // will insert a stub, it might complain about a missing nop if we omit it
4588   // (although many don't complain in this case).
4589   if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
4590     return false;
4591 
4592   return true;
4593 }
4594 
4595 static bool
4596 needStackSlotPassParameters(const PPCSubtarget &Subtarget,
4597                             const SmallVectorImpl<ISD::OutputArg> &Outs) {
4598   assert(Subtarget.is64BitELFABI());
4599 
4600   const unsigned PtrByteSize = 8;
4601   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4602 
4603   static const MCPhysReg GPR[] = {
4604     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4605     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4606   };
4607   static const MCPhysReg VR[] = {
4608     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4609     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4610   };
4611 
4612   const unsigned NumGPRs = array_lengthof(GPR);
4613   const unsigned NumFPRs = 13;
4614   const unsigned NumVRs = array_lengthof(VR);
4615   const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
4616 
4617   unsigned NumBytes = LinkageSize;
4618   unsigned AvailableFPRs = NumFPRs;
4619   unsigned AvailableVRs = NumVRs;
4620 
4621   for (const ISD::OutputArg& Param : Outs) {
4622     if (Param.Flags.isNest()) continue;
4623 
4624     if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags,
4625                                PtrByteSize, LinkageSize, ParamAreaSize,
4626                                NumBytes, AvailableFPRs, AvailableVRs,
4627                                Subtarget.hasQPX()))
4628       return true;
4629   }
4630   return false;
4631 }
4632 
4633 static bool
4634 hasSameArgumentList(const Function *CallerFn, ImmutableCallSite CS) {
4635   if (CS.arg_size() != CallerFn->arg_size())
4636     return false;
4637 
4638   ImmutableCallSite::arg_iterator CalleeArgIter = CS.arg_begin();
4639   ImmutableCallSite::arg_iterator CalleeArgEnd = CS.arg_end();
4640   Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();
4641 
4642   for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
4643     const Value* CalleeArg = *CalleeArgIter;
4644     const Value* CallerArg = &(*CallerArgIter);
4645     if (CalleeArg == CallerArg)
4646       continue;
4647 
4648     // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
4649     //        tail call @callee([4 x i64] undef, [4 x i64] %b)
4650     //      }
4651     // 1st argument of callee is undef and has the same type as caller.
4652     if (CalleeArg->getType() == CallerArg->getType() &&
4653         isa<UndefValue>(CalleeArg))
4654       continue;
4655 
4656     return false;
4657   }
4658 
4659   return true;
4660 }
4661 
4662 // Returns true if TCO is possible between the callers and callees
4663 // calling conventions.
4664 static bool
4665 areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
4666                                     CallingConv::ID CalleeCC) {
4667   // Tail calls are possible with fastcc and ccc.
4668   auto isTailCallableCC  = [] (CallingConv::ID CC){
4669       return  CC == CallingConv::C || CC == CallingConv::Fast;
4670   };
4671   if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
4672     return false;
4673 
4674   // We can safely tail call both fastcc and ccc callees from a c calling
4675   // convention caller. If the caller is fastcc, we may have less stack space
4676   // than a non-fastcc caller with the same signature so disable tail-calls in
4677   // that case.
4678   return CallerCC == CallingConv::C || CallerCC == CalleeCC;
4679 }
4680 
4681 bool
4682 PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
4683                                     SDValue Callee,
4684                                     CallingConv::ID CalleeCC,
4685                                     ImmutableCallSite CS,
4686                                     bool isVarArg,
4687                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
4688                                     const SmallVectorImpl<ISD::InputArg> &Ins,
4689                                     SelectionDAG& DAG) const {
4690   bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
4691 
4692   if (DisableSCO && !TailCallOpt) return false;
4693 
4694   // Variadic argument functions are not supported.
4695   if (isVarArg) return false;
4696 
4697   auto &Caller = DAG.getMachineFunction().getFunction();
4698   // Check that the calling conventions are compatible for tco.
4699   if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
4700     return false;
4701 
4702   // Caller contains any byval parameter is not supported.
4703   if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
4704     return false;
4705 
4706   // Callee contains any byval parameter is not supported, too.
4707   // Note: This is a quick work around, because in some cases, e.g.
4708   // caller's stack size > callee's stack size, we are still able to apply
4709   // sibling call optimization. For example, gcc is able to do SCO for caller1
4710   // in the following example, but not for caller2.
4711   //   struct test {
4712   //     long int a;
4713   //     char ary[56];
4714   //   } gTest;
4715   //   __attribute__((noinline)) int callee(struct test v, struct test *b) {
4716   //     b->a = v.a;
4717   //     return 0;
4718   //   }
4719   //   void caller1(struct test a, struct test c, struct test *b) {
4720   //     callee(gTest, b); }
4721   //   void caller2(struct test *b) { callee(gTest, b); }
4722   if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
4723     return false;
4724 
4725   // If callee and caller use different calling conventions, we cannot pass
4726   // parameters on stack since offsets for the parameter area may be different.
4727   if (Caller.getCallingConv() != CalleeCC &&
4728       needStackSlotPassParameters(Subtarget, Outs))
4729     return false;
4730 
4731   // No TCO/SCO on indirect call because Caller have to restore its TOC
4732   if (!isFunctionGlobalAddress(Callee) &&
4733       !isa<ExternalSymbolSDNode>(Callee))
4734     return false;
4735 
4736   // If the caller and callee potentially have different TOC bases then we
4737   // cannot tail call since we need to restore the TOC pointer after the call.
4738   // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
4739   if (!callsShareTOCBase(&Caller, Callee, getTargetMachine()))
4740     return false;
4741 
4742   // TCO allows altering callee ABI, so we don't have to check further.
4743   if (CalleeCC == CallingConv::Fast && TailCallOpt)
4744     return true;
4745 
4746   if (DisableSCO) return false;
4747 
4748   // If callee use the same argument list that caller is using, then we can
4749   // apply SCO on this case. If it is not, then we need to check if callee needs
4750   // stack for passing arguments.
4751   if (!hasSameArgumentList(&Caller, CS) &&
4752       needStackSlotPassParameters(Subtarget, Outs)) {
4753     return false;
4754   }
4755 
4756   return true;
4757 }
4758 
4759 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
4760 /// for tail call optimization. Targets which want to do tail call
4761 /// optimization should implement this function.
4762 bool
4763 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
4764                                                      CallingConv::ID CalleeCC,
4765                                                      bool isVarArg,
4766                                       const SmallVectorImpl<ISD::InputArg> &Ins,
4767                                                      SelectionDAG& DAG) const {
4768   if (!getTargetMachine().Options.GuaranteedTailCallOpt)
4769     return false;
4770 
4771   // Variable argument functions are not supported.
4772   if (isVarArg)
4773     return false;
4774 
4775   MachineFunction &MF = DAG.getMachineFunction();
4776   CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
4777   if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
4778     // Functions containing by val parameters are not supported.
4779     for (unsigned i = 0; i != Ins.size(); i++) {
4780        ISD::ArgFlagsTy Flags = Ins[i].Flags;
4781        if (Flags.isByVal()) return false;
4782     }
4783 
4784     // Non-PIC/GOT tail calls are supported.
4785     if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
4786       return true;
4787 
4788     // At the moment we can only do local tail calls (in same module, hidden
4789     // or protected) if we are generating PIC.
4790     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
4791       return G->getGlobal()->hasHiddenVisibility()
4792           || G->getGlobal()->hasProtectedVisibility();
4793   }
4794 
4795   return false;
4796 }
4797 
4798 /// isCallCompatibleAddress - Return the immediate to use if the specified
4799 /// 32-bit value is representable in the immediate field of a BxA instruction.
4800 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
4801   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
4802   if (!C) return nullptr;
4803 
4804   int Addr = C->getZExtValue();
4805   if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
4806       SignExtend32<26>(Addr) != Addr)
4807     return nullptr;  // Top 6 bits have to be sext of immediate.
4808 
4809   return DAG
4810       .getConstant(
4811           (int)C->getZExtValue() >> 2, SDLoc(Op),
4812           DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
4813       .getNode();
4814 }
4815 
4816 namespace {
4817 
4818 struct TailCallArgumentInfo {
4819   SDValue Arg;
4820   SDValue FrameIdxOp;
4821   int FrameIdx = 0;
4822 
4823   TailCallArgumentInfo() = default;
4824 };
4825 
4826 } // end anonymous namespace
4827 
4828 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
4829 static void StoreTailCallArgumentsToStackSlot(
4830     SelectionDAG &DAG, SDValue Chain,
4831     const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
4832     SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
4833   for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
4834     SDValue Arg = TailCallArgs[i].Arg;
4835     SDValue FIN = TailCallArgs[i].FrameIdxOp;
4836     int FI = TailCallArgs[i].FrameIdx;
4837     // Store relative to framepointer.
4838     MemOpChains.push_back(DAG.getStore(
4839         Chain, dl, Arg, FIN,
4840         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
4841   }
4842 }
4843 
4844 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
4845 /// the appropriate stack slot for the tail call optimized function call.
4846 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
4847                                              SDValue OldRetAddr, SDValue OldFP,
4848                                              int SPDiff, const SDLoc &dl) {
4849   if (SPDiff) {
4850     // Calculate the new stack slot for the return address.
4851     MachineFunction &MF = DAG.getMachineFunction();
4852     const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
4853     const PPCFrameLowering *FL = Subtarget.getFrameLowering();
4854     bool isPPC64 = Subtarget.isPPC64();
4855     int SlotSize = isPPC64 ? 8 : 4;
4856     int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
4857     int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize,
4858                                                          NewRetAddrLoc, true);
4859     EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4860     SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
4861     Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
4862                          MachinePointerInfo::getFixedStack(MF, NewRetAddr));
4863   }
4864   return Chain;
4865 }
4866 
4867 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate
4868 /// the position of the argument.
4869 static void
4870 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
4871                          SDValue Arg, int SPDiff, unsigned ArgOffset,
4872                      SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
4873   int Offset = ArgOffset + SPDiff;
4874   uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
4875   int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
4876   EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4877   SDValue FIN = DAG.getFrameIndex(FI, VT);
4878   TailCallArgumentInfo Info;
4879   Info.Arg = Arg;
4880   Info.FrameIdxOp = FIN;
4881   Info.FrameIdx = FI;
4882   TailCallArguments.push_back(Info);
4883 }
4884 
4885 /// EmitTCFPAndRetAddrLoad - Emit load from frame pointer and return address
4886 /// stack slot. Returns the chain as result and the loaded frame pointers in
4887 /// LROpOut/FPOpout. Used when tail calling.
4888 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
4889     SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
4890     SDValue &FPOpOut, const SDLoc &dl) const {
4891   if (SPDiff) {
4892     // Load the LR and FP stack slot for later adjusting.
4893     EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
4894     LROpOut = getReturnAddrFrameIndex(DAG);
4895     LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
4896     Chain = SDValue(LROpOut.getNode(), 1);
4897   }
4898   return Chain;
4899 }
4900 
4901 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
4902 /// by "Src" to address "Dst" of size "Size".  Alignment information is
4903 /// specified by the specific parameter attribute. The copy will be passed as
4904 /// a byval function parameter.
4905 /// Sometimes what we are copying is the end of a larger object, the part that
4906 /// does not fit in registers.
4907 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
4908                                          SDValue Chain, ISD::ArgFlagsTy Flags,
4909                                          SelectionDAG &DAG, const SDLoc &dl) {
4910   SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
4911   return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode,
4912                        Flags.getNonZeroByValAlign(), false, false, false,
4913                        MachinePointerInfo(), MachinePointerInfo());
4914 }
4915 
4916 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
4917 /// tail calls.
4918 static void LowerMemOpCallTo(
4919     SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
4920     SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
4921     bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
4922     SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
4923   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4924   if (!isTailCall) {
4925     if (isVector) {
4926       SDValue StackPtr;
4927       if (isPPC64)
4928         StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
4929       else
4930         StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
4931       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
4932                            DAG.getConstant(ArgOffset, dl, PtrVT));
4933     }
4934     MemOpChains.push_back(
4935         DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
4936     // Calculate and remember argument location.
4937   } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
4938                                   TailCallArguments);
4939 }
4940 
4941 static void
4942 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
4943                 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp,
4944                 SDValue FPOp,
4945                 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
4946   // Emit a sequence of copyto/copyfrom virtual registers for arguments that
4947   // might overwrite each other in case of tail call optimization.
4948   SmallVector<SDValue, 8> MemOpChains2;
4949   // Do not flag preceding copytoreg stuff together with the following stuff.
4950   InFlag = SDValue();
4951   StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
4952                                     MemOpChains2, dl);
4953   if (!MemOpChains2.empty())
4954     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
4955 
4956   // Store the return address to the appropriate stack slot.
4957   Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl);
4958 
4959   // Emit callseq_end just before tailcall node.
4960   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
4961                              DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
4962   InFlag = Chain.getValue(1);
4963 }
4964 
4965 // Is this global address that of a function that can be called by name? (as
4966 // opposed to something that must hold a descriptor for an indirect call).
4967 static bool isFunctionGlobalAddress(SDValue Callee) {
4968   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
4969     if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
4970         Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
4971       return false;
4972 
4973     return G->getGlobal()->getValueType()->isFunctionTy();
4974   }
4975 
4976   return false;
4977 }
4978 
4979 SDValue PPCTargetLowering::LowerCallResult(
4980     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
4981     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4982     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4983   SmallVector<CCValAssign, 16> RVLocs;
4984   CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
4985                     *DAG.getContext());
4986 
4987   CCRetInfo.AnalyzeCallResult(
4988       Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
4989                ? RetCC_PPC_Cold
4990                : RetCC_PPC);
4991 
4992   // Copy all of the result registers out of their specified physreg.
4993   for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
4994     CCValAssign &VA = RVLocs[i];
4995     assert(VA.isRegLoc() && "Can only return in registers!");
4996 
4997     SDValue Val;
4998 
4999     if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
5000       SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
5001                                       InFlag);
5002       Chain = Lo.getValue(1);
5003       InFlag = Lo.getValue(2);
5004       VA = RVLocs[++i]; // skip ahead to next loc
5005       SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
5006                                       InFlag);
5007       Chain = Hi.getValue(1);
5008       InFlag = Hi.getValue(2);
5009       if (!Subtarget.isLittleEndian())
5010         std::swap (Lo, Hi);
5011       Val = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, Lo, Hi);
5012     } else {
5013       Val = DAG.getCopyFromReg(Chain, dl,
5014                                VA.getLocReg(), VA.getLocVT(), InFlag);
5015       Chain = Val.getValue(1);
5016       InFlag = Val.getValue(2);
5017     }
5018 
5019     switch (VA.getLocInfo()) {
5020     default: llvm_unreachable("Unknown loc info!");
5021     case CCValAssign::Full: break;
5022     case CCValAssign::AExt:
5023       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5024       break;
5025     case CCValAssign::ZExt:
5026       Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
5027                         DAG.getValueType(VA.getValVT()));
5028       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5029       break;
5030     case CCValAssign::SExt:
5031       Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
5032                         DAG.getValueType(VA.getValVT()));
5033       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5034       break;
5035     }
5036 
5037     InVals.push_back(Val);
5038   }
5039 
5040   return Chain;
5041 }
5042 
5043 static bool isIndirectCall(const SDValue &Callee, SelectionDAG &DAG,
5044                            const PPCSubtarget &Subtarget, bool isPatchPoint) {
5045   // PatchPoint calls are not indirect.
5046   if (isPatchPoint)
5047     return false;
5048 
5049   if (isFunctionGlobalAddress(Callee) || dyn_cast<ExternalSymbolSDNode>(Callee))
5050     return false;
5051 
5052   // Darwin, and 32-bit ELF can use a BLA. The descriptor based ABIs can not
5053   // becuase the immediate function pointer points to a descriptor instead of
5054   // a function entry point. The ELFv2 ABI cannot use a BLA because the function
5055   // pointer immediate points to the global entry point, while the BLA would
5056   // need to jump to the local entry point (see rL211174).
5057   if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI() &&
5058       isBLACompatibleAddress(Callee, DAG))
5059     return false;
5060 
5061   return true;
5062 }
5063 
5064 static unsigned getCallOpcode(PPCTargetLowering::CallFlags CFlags,
5065                               const Function &Caller,
5066                               const SDValue &Callee,
5067                               const PPCSubtarget &Subtarget,
5068                               const TargetMachine &TM) {
5069   if (CFlags.IsTailCall)
5070     return PPCISD::TC_RETURN;
5071 
5072   // This is a call through a function pointer.
5073   if (CFlags.IsIndirect) {
5074     // AIX and the 64-bit ELF ABIs need to maintain the TOC pointer accross
5075     // indirect calls. The save of the caller's TOC pointer to the stack will be
5076     // inserted into the DAG as part of call lowering. The restore of the TOC
5077     // pointer is modeled by using a pseudo instruction for the call opcode that
5078     // represents the 2 instruction sequence of an indirect branch and link,
5079     // immediately followed by a load of the TOC pointer from the the stack save
5080     // slot into gpr2.
5081     if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI())
5082       return PPCISD::BCTRL_LOAD_TOC;
5083 
5084     // An indirect call that does not need a TOC restore.
5085     return PPCISD::BCTRL;
5086   }
5087 
5088   // The ABIs that maintain a TOC pointer accross calls need to have a nop
5089   // immediately following the call instruction if the caller and callee may
5090   // have different TOC bases. At link time if the linker determines the calls
5091   // may not share a TOC base, the call is redirected to a trampoline inserted
5092   // by the linker. The trampoline will (among other things) save the callers
5093   // TOC pointer at an ABI designated offset in the linkage area and the linker
5094   // will rewrite the nop to be a load of the TOC pointer from the linkage area
5095   // into gpr2.
5096   if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI())
5097     return callsShareTOCBase(&Caller, Callee, TM) ? PPCISD::CALL
5098                                                   : PPCISD::CALL_NOP;
5099 
5100   return PPCISD::CALL;
5101 }
5102 
5103 static SDValue transformCallee(const SDValue &Callee, SelectionDAG &DAG,
5104                                const SDLoc &dl, const PPCSubtarget &Subtarget) {
5105   if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI())
5106     if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
5107       return SDValue(Dest, 0);
5108 
5109   // Returns true if the callee is local, and false otherwise.
5110   auto isLocalCallee = [&]() {
5111     const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
5112     const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
5113     const GlobalValue *GV = G ? G->getGlobal() : nullptr;
5114 
5115     return DAG.getTarget().shouldAssumeDSOLocal(*Mod, GV) &&
5116            !dyn_cast_or_null<GlobalIFunc>(GV);
5117   };
5118 
5119   // The PLT is only used in 32-bit ELF PIC mode.  Attempting to use the PLT in
5120   // a static relocation model causes some versions of GNU LD (2.17.50, at
5121   // least) to force BSS-PLT, instead of secure-PLT, even if all objects are
5122   // built with secure-PLT.
5123   bool UsePlt =
5124       Subtarget.is32BitELFABI() && !isLocalCallee() &&
5125       Subtarget.getTargetMachine().getRelocationModel() == Reloc::PIC_;
5126 
5127   // On AIX, direct function calls reference the symbol for the function's
5128   // entry point, which is named by prepending a "." before the function's
5129   // C-linkage name.
5130   const auto getAIXFuncEntryPointSymbolSDNode =
5131       [&](StringRef FuncName, bool IsDeclaration,
5132           const XCOFF::StorageClass &SC) {
5133         auto &Context = DAG.getMachineFunction().getMMI().getContext();
5134 
5135         MCSymbolXCOFF *S = cast<MCSymbolXCOFF>(
5136             Context.getOrCreateSymbol(Twine(".") + Twine(FuncName)));
5137 
5138         if (IsDeclaration && !S->hasRepresentedCsectSet()) {
5139           // On AIX, an undefined symbol needs to be associated with a
5140           // MCSectionXCOFF to get the correct storage mapping class.
5141           // In this case, XCOFF::XMC_PR.
5142           MCSectionXCOFF *Sec = Context.getXCOFFSection(
5143               S->getName(), XCOFF::XMC_PR, XCOFF::XTY_ER, SC,
5144               SectionKind::getMetadata());
5145           S->setRepresentedCsect(Sec);
5146         }
5147 
5148         MVT PtrVT =
5149             DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
5150         return DAG.getMCSymbol(S, PtrVT);
5151       };
5152 
5153   if (isFunctionGlobalAddress(Callee)) {
5154     const GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee);
5155     const GlobalValue *GV = G->getGlobal();
5156 
5157     if (!Subtarget.isAIXABI())
5158       return DAG.getTargetGlobalAddress(GV, dl, Callee.getValueType(), 0,
5159                                         UsePlt ? PPCII::MO_PLT : 0);
5160 
5161     assert(!isa<GlobalIFunc>(GV) && "IFunc is not supported on AIX.");
5162     const GlobalObject *GO = cast<GlobalObject>(GV);
5163     const XCOFF::StorageClass SC =
5164         TargetLoweringObjectFileXCOFF::getStorageClassForGlobal(GO);
5165     return getAIXFuncEntryPointSymbolSDNode(GO->getName(), GO->isDeclaration(),
5166                                             SC);
5167   }
5168 
5169   if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
5170     const char *SymName = S->getSymbol();
5171     if (!Subtarget.isAIXABI())
5172       return DAG.getTargetExternalSymbol(SymName, Callee.getValueType(),
5173                                          UsePlt ? PPCII::MO_PLT : 0);
5174 
5175     // If there exists a user-declared function whose name is the same as the
5176     // ExternalSymbol's, then we pick up the user-declared version.
5177     const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
5178     if (const Function *F =
5179             dyn_cast_or_null<Function>(Mod->getNamedValue(SymName))) {
5180       const XCOFF::StorageClass SC =
5181           TargetLoweringObjectFileXCOFF::getStorageClassForGlobal(F);
5182       return getAIXFuncEntryPointSymbolSDNode(F->getName(), F->isDeclaration(),
5183                                               SC);
5184     }
5185 
5186     return getAIXFuncEntryPointSymbolSDNode(SymName, true, XCOFF::C_EXT);
5187   }
5188 
5189   // No transformation needed.
5190   assert(Callee.getNode() && "What no callee?");
5191   return Callee;
5192 }
5193 
5194 static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart) {
5195   assert(CallSeqStart.getOpcode() == ISD::CALLSEQ_START &&
5196          "Expected a CALLSEQ_STARTSDNode.");
5197 
5198   // The last operand is the chain, except when the node has glue. If the node
5199   // has glue, then the last operand is the glue, and the chain is the second
5200   // last operand.
5201   SDValue LastValue = CallSeqStart.getValue(CallSeqStart->getNumValues() - 1);
5202   if (LastValue.getValueType() != MVT::Glue)
5203     return LastValue;
5204 
5205   return CallSeqStart.getValue(CallSeqStart->getNumValues() - 2);
5206 }
5207 
5208 // Creates the node that moves a functions address into the count register
5209 // to prepare for an indirect call instruction.
5210 static void prepareIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5211                                 SDValue &Glue, SDValue &Chain,
5212                                 const SDLoc &dl) {
5213   SDValue MTCTROps[] = {Chain, Callee, Glue};
5214   EVT ReturnTypes[] = {MVT::Other, MVT::Glue};
5215   Chain = DAG.getNode(PPCISD::MTCTR, dl, makeArrayRef(ReturnTypes, 2),
5216                       makeArrayRef(MTCTROps, Glue.getNode() ? 3 : 2));
5217   // The glue is the second value produced.
5218   Glue = Chain.getValue(1);
5219 }
5220 
5221 static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5222                                           SDValue &Glue, SDValue &Chain,
5223                                           SDValue CallSeqStart,
5224                                           ImmutableCallSite CS, const SDLoc &dl,
5225                                           bool hasNest,
5226                                           const PPCSubtarget &Subtarget) {
5227   // Function pointers in the 64-bit SVR4 ABI do not point to the function
5228   // entry point, but to the function descriptor (the function entry point
5229   // address is part of the function descriptor though).
5230   // The function descriptor is a three doubleword structure with the
5231   // following fields: function entry point, TOC base address and
5232   // environment pointer.
5233   // Thus for a call through a function pointer, the following actions need
5234   // to be performed:
5235   //   1. Save the TOC of the caller in the TOC save area of its stack
5236   //      frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
5237   //   2. Load the address of the function entry point from the function
5238   //      descriptor.
5239   //   3. Load the TOC of the callee from the function descriptor into r2.
5240   //   4. Load the environment pointer from the function descriptor into
5241   //      r11.
5242   //   5. Branch to the function entry point address.
5243   //   6. On return of the callee, the TOC of the caller needs to be
5244   //      restored (this is done in FinishCall()).
5245   //
5246   // The loads are scheduled at the beginning of the call sequence, and the
5247   // register copies are flagged together to ensure that no other
5248   // operations can be scheduled in between. E.g. without flagging the
5249   // copies together, a TOC access in the caller could be scheduled between
5250   // the assignment of the callee TOC and the branch to the callee, which leads
5251   // to incorrect code.
5252 
5253   // Start by loading the function address from the descriptor.
5254   SDValue LDChain = getOutputChainFromCallSeq(CallSeqStart);
5255   auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
5256                       ? (MachineMemOperand::MODereferenceable |
5257                          MachineMemOperand::MOInvariant)
5258                       : MachineMemOperand::MONone;
5259 
5260   MachinePointerInfo MPI(CS ? CS.getCalledValue() : nullptr);
5261 
5262   // Registers used in building the DAG.
5263   const MCRegister EnvPtrReg = Subtarget.getEnvironmentPointerRegister();
5264   const MCRegister TOCReg = Subtarget.getTOCPointerRegister();
5265 
5266   // Offsets of descriptor members.
5267   const unsigned TOCAnchorOffset = Subtarget.descriptorTOCAnchorOffset();
5268   const unsigned EnvPtrOffset = Subtarget.descriptorEnvironmentPointerOffset();
5269 
5270   const MVT RegVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
5271   const unsigned Alignment = Subtarget.isPPC64() ? 8 : 4;
5272 
5273   // One load for the functions entry point address.
5274   SDValue LoadFuncPtr = DAG.getLoad(RegVT, dl, LDChain, Callee, MPI,
5275                                     Alignment, MMOFlags);
5276 
5277   // One for loading the TOC anchor for the module that contains the called
5278   // function.
5279   SDValue TOCOff = DAG.getIntPtrConstant(TOCAnchorOffset, dl);
5280   SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, Callee, TOCOff);
5281   SDValue TOCPtr =
5282       DAG.getLoad(RegVT, dl, LDChain, AddTOC,
5283                   MPI.getWithOffset(TOCAnchorOffset), Alignment, MMOFlags);
5284 
5285   // One for loading the environment pointer.
5286   SDValue PtrOff = DAG.getIntPtrConstant(EnvPtrOffset, dl);
5287   SDValue AddPtr = DAG.getNode(ISD::ADD, dl, RegVT, Callee, PtrOff);
5288   SDValue LoadEnvPtr =
5289       DAG.getLoad(RegVT, dl, LDChain, AddPtr,
5290                   MPI.getWithOffset(EnvPtrOffset), Alignment, MMOFlags);
5291 
5292 
5293   // Then copy the newly loaded TOC anchor to the TOC pointer.
5294   SDValue TOCVal = DAG.getCopyToReg(Chain, dl, TOCReg, TOCPtr, Glue);
5295   Chain = TOCVal.getValue(0);
5296   Glue = TOCVal.getValue(1);
5297 
5298   // If the function call has an explicit 'nest' parameter, it takes the
5299   // place of the environment pointer.
5300   assert((!hasNest || !Subtarget.isAIXABI()) &&
5301          "Nest parameter is not supported on AIX.");
5302   if (!hasNest) {
5303     SDValue EnvVal = DAG.getCopyToReg(Chain, dl, EnvPtrReg, LoadEnvPtr, Glue);
5304     Chain = EnvVal.getValue(0);
5305     Glue = EnvVal.getValue(1);
5306   }
5307 
5308   // The rest of the indirect call sequence is the same as the non-descriptor
5309   // DAG.
5310   prepareIndirectCall(DAG, LoadFuncPtr, Glue, Chain, dl);
5311 }
5312 
5313 static void
5314 buildCallOperands(SmallVectorImpl<SDValue> &Ops,
5315                   PPCTargetLowering::CallFlags CFlags, const SDLoc &dl,
5316                   SelectionDAG &DAG,
5317                   SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
5318                   SDValue Glue, SDValue Chain, SDValue &Callee, int SPDiff,
5319                   const PPCSubtarget &Subtarget) {
5320   const bool IsPPC64 = Subtarget.isPPC64();
5321   // MVT for a general purpose register.
5322   const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
5323 
5324   // First operand is always the chain.
5325   Ops.push_back(Chain);
5326 
5327   // If it's a direct call pass the callee as the second operand.
5328   if (!CFlags.IsIndirect)
5329     Ops.push_back(Callee);
5330   else {
5331     assert(!CFlags.IsPatchPoint && "Patch point calls are not indirect.");
5332 
5333     // For the TOC based ABIs, we have saved the TOC pointer to the linkage area
5334     // on the stack (this would have been done in `LowerCall_64SVR4` or
5335     // `LowerCall_AIX`). The call instruction is a pseudo instruction that
5336     // represents both the indirect branch and a load that restores the TOC
5337     // pointer from the linkage area. The operand for the TOC restore is an add
5338     // of the TOC save offset to the stack pointer. This must be the second
5339     // operand: after the chain input but before any other variadic arguments.
5340     if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
5341       const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
5342 
5343       SDValue StackPtr = DAG.getRegister(StackPtrReg, RegVT);
5344       unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
5345       SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
5346       SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, StackPtr, TOCOff);
5347       Ops.push_back(AddTOC);
5348     }
5349 
5350     // Add the register used for the environment pointer.
5351     if (Subtarget.usesFunctionDescriptors() && !CFlags.HasNest)
5352       Ops.push_back(DAG.getRegister(Subtarget.getEnvironmentPointerRegister(),
5353                                     RegVT));
5354 
5355 
5356     // Add CTR register as callee so a bctr can be emitted later.
5357     if (CFlags.IsTailCall)
5358       Ops.push_back(DAG.getRegister(IsPPC64 ? PPC::CTR8 : PPC::CTR, RegVT));
5359   }
5360 
5361   // If this is a tail call add stack pointer delta.
5362   if (CFlags.IsTailCall)
5363     Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32));
5364 
5365   // Add argument registers to the end of the list so that they are known live
5366   // into the call.
5367   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
5368     Ops.push_back(DAG.getRegister(RegsToPass[i].first,
5369                                   RegsToPass[i].second.getValueType()));
5370 
5371   // We cannot add R2/X2 as an operand here for PATCHPOINT, because there is
5372   // no way to mark dependencies as implicit here.
5373   // We will add the R2/X2 dependency in EmitInstrWithCustomInserter.
5374   if ((Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) &&
5375       !CFlags.IsPatchPoint)
5376     Ops.push_back(DAG.getRegister(Subtarget.getTOCPointerRegister(), RegVT));
5377 
5378   // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls
5379   if (CFlags.IsVarArg && Subtarget.is32BitELFABI())
5380     Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));
5381 
5382   // Add a register mask operand representing the call-preserved registers.
5383   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
5384   const uint32_t *Mask =
5385       TRI->getCallPreservedMask(DAG.getMachineFunction(), CFlags.CallConv);
5386   assert(Mask && "Missing call preserved mask for calling convention");
5387   Ops.push_back(DAG.getRegisterMask(Mask));
5388 
5389   // If the glue is valid, it is the last operand.
5390   if (Glue.getNode())
5391     Ops.push_back(Glue);
5392 }
5393 
5394 SDValue PPCTargetLowering::FinishCall(
5395     CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG,
5396     SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue Glue,
5397     SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
5398     unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
5399     SmallVectorImpl<SDValue> &InVals, ImmutableCallSite CS) const {
5400 
5401   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI())
5402     setUsesTOCBasePtr(DAG);
5403 
5404   unsigned CallOpc =
5405       getCallOpcode(CFlags, DAG.getMachineFunction().getFunction(), Callee,
5406                     Subtarget, DAG.getTarget());
5407 
5408   if (!CFlags.IsIndirect)
5409     Callee = transformCallee(Callee, DAG, dl, Subtarget);
5410   else if (Subtarget.usesFunctionDescriptors())
5411     prepareDescriptorIndirectCall(DAG, Callee, Glue, Chain, CallSeqStart, CS,
5412                                   dl, CFlags.HasNest, Subtarget);
5413   else
5414     prepareIndirectCall(DAG, Callee, Glue, Chain, dl);
5415 
5416   // Build the operand list for the call instruction.
5417   SmallVector<SDValue, 8> Ops;
5418   buildCallOperands(Ops, CFlags, dl, DAG, RegsToPass, Glue, Chain, Callee,
5419                     SPDiff, Subtarget);
5420 
5421   // Emit tail call.
5422   if (CFlags.IsTailCall) {
5423     assert(((Callee.getOpcode() == ISD::Register &&
5424              cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
5425             Callee.getOpcode() == ISD::TargetExternalSymbol ||
5426             Callee.getOpcode() == ISD::TargetGlobalAddress ||
5427             isa<ConstantSDNode>(Callee)) &&
5428            "Expecting a global address, external symbol, absolute value or "
5429            "register");
5430     assert(CallOpc == PPCISD::TC_RETURN &&
5431            "Unexpected call opcode for a tail call.");
5432     DAG.getMachineFunction().getFrameInfo().setHasTailCall();
5433     return DAG.getNode(CallOpc, dl, MVT::Other, Ops);
5434   }
5435 
5436   std::array<EVT, 2> ReturnTypes = {{MVT::Other, MVT::Glue}};
5437   Chain = DAG.getNode(CallOpc, dl, ReturnTypes, Ops);
5438   Glue = Chain.getValue(1);
5439 
5440   // When performing tail call optimization the callee pops its arguments off
5441   // the stack. Account for this here so these bytes can be pushed back on in
5442   // PPCFrameLowering::eliminateCallFramePseudoInstr.
5443   int BytesCalleePops = (CFlags.CallConv == CallingConv::Fast &&
5444                          getTargetMachine().Options.GuaranteedTailCallOpt)
5445                             ? NumBytes
5446                             : 0;
5447 
5448   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
5449                              DAG.getIntPtrConstant(BytesCalleePops, dl, true),
5450                              Glue, dl);
5451   Glue = Chain.getValue(1);
5452 
5453   return LowerCallResult(Chain, Glue, CFlags.CallConv, CFlags.IsVarArg, Ins, dl,
5454                          DAG, InVals);
5455 }
5456 
5457 SDValue
5458 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
5459                              SmallVectorImpl<SDValue> &InVals) const {
5460   SelectionDAG &DAG                     = CLI.DAG;
5461   SDLoc &dl                             = CLI.DL;
5462   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
5463   SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
5464   SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
5465   SDValue Chain                         = CLI.Chain;
5466   SDValue Callee                        = CLI.Callee;
5467   bool &isTailCall                      = CLI.IsTailCall;
5468   CallingConv::ID CallConv              = CLI.CallConv;
5469   bool isVarArg                         = CLI.IsVarArg;
5470   bool isPatchPoint                     = CLI.IsPatchPoint;
5471   ImmutableCallSite CS                  = CLI.CS;
5472 
5473   if (isTailCall) {
5474     if (Subtarget.useLongCalls() && !(CS && CS.isMustTailCall()))
5475       isTailCall = false;
5476     else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
5477       isTailCall =
5478         IsEligibleForTailCallOptimization_64SVR4(Callee, CallConv, CS,
5479                                                  isVarArg, Outs, Ins, DAG);
5480     else
5481       isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
5482                                                      Ins, DAG);
5483     if (isTailCall) {
5484       ++NumTailCalls;
5485       if (!getTargetMachine().Options.GuaranteedTailCallOpt)
5486         ++NumSiblingCalls;
5487 
5488       assert(isa<GlobalAddressSDNode>(Callee) &&
5489              "Callee should be an llvm::Function object.");
5490       LLVM_DEBUG(
5491           const GlobalValue *GV =
5492               cast<GlobalAddressSDNode>(Callee)->getGlobal();
5493           const unsigned Width =
5494               80 - strlen("TCO caller: ") - strlen(", callee linkage: 0, 0");
5495           dbgs() << "TCO caller: "
5496                  << left_justify(DAG.getMachineFunction().getName(), Width)
5497                  << ", callee linkage: " << GV->getVisibility() << ", "
5498                  << GV->getLinkage() << "\n");
5499     }
5500   }
5501 
5502   if (!isTailCall && CS && CS.isMustTailCall())
5503     report_fatal_error("failed to perform tail call elimination on a call "
5504                        "site marked musttail");
5505 
5506   // When long calls (i.e. indirect calls) are always used, calls are always
5507   // made via function pointer. If we have a function name, first translate it
5508   // into a pointer.
5509   if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
5510       !isTailCall)
5511     Callee = LowerGlobalAddress(Callee, DAG);
5512 
5513   CallFlags CFlags(
5514       CallConv, isTailCall, isVarArg, isPatchPoint,
5515       isIndirectCall(Callee, DAG, Subtarget, isPatchPoint),
5516       // hasNest
5517       Subtarget.is64BitELFABI() &&
5518           any_of(Outs, [](ISD::OutputArg Arg) { return Arg.Flags.isNest(); }));
5519 
5520   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
5521     return LowerCall_64SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5522                             InVals, CS);
5523 
5524   if (Subtarget.isSVR4ABI())
5525     return LowerCall_32SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5526                             InVals, CS);
5527 
5528   if (Subtarget.isAIXABI())
5529     return LowerCall_AIX(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5530                          InVals, CS);
5531 
5532   return LowerCall_Darwin(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5533                           InVals, CS);
5534 }
5535 
5536 SDValue PPCTargetLowering::LowerCall_32SVR4(
5537     SDValue Chain, SDValue Callee, CallFlags CFlags,
5538     const SmallVectorImpl<ISD::OutputArg> &Outs,
5539     const SmallVectorImpl<SDValue> &OutVals,
5540     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5541     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5542     ImmutableCallSite CS) const {
5543   // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
5544   // of the 32-bit SVR4 ABI stack frame layout.
5545 
5546   const CallingConv::ID CallConv = CFlags.CallConv;
5547   const bool IsVarArg = CFlags.IsVarArg;
5548   const bool IsTailCall = CFlags.IsTailCall;
5549 
5550   assert((CallConv == CallingConv::C ||
5551           CallConv == CallingConv::Cold ||
5552           CallConv == CallingConv::Fast) && "Unknown calling convention!");
5553 
5554   unsigned PtrByteSize = 4;
5555 
5556   MachineFunction &MF = DAG.getMachineFunction();
5557 
5558   // Mark this function as potentially containing a function that contains a
5559   // tail call. As a consequence the frame pointer will be used for dynamicalloc
5560   // and restoring the callers stack pointer in this functions epilog. This is
5561   // done because by tail calling the called function might overwrite the value
5562   // in this function's (MF) stack pointer stack slot 0(SP).
5563   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5564       CallConv == CallingConv::Fast)
5565     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5566 
5567   // Count how many bytes are to be pushed on the stack, including the linkage
5568   // area, parameter list area and the part of the local variable space which
5569   // contains copies of aggregates which are passed by value.
5570 
5571   // Assign locations to all of the outgoing arguments.
5572   SmallVector<CCValAssign, 16> ArgLocs;
5573   PPCCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
5574 
5575   // Reserve space for the linkage area on the stack.
5576   CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
5577                        PtrByteSize);
5578   if (useSoftFloat())
5579     CCInfo.PreAnalyzeCallOperands(Outs);
5580 
5581   if (IsVarArg) {
5582     // Handle fixed and variable vector arguments differently.
5583     // Fixed vector arguments go into registers as long as registers are
5584     // available. Variable vector arguments always go into memory.
5585     unsigned NumArgs = Outs.size();
5586 
5587     for (unsigned i = 0; i != NumArgs; ++i) {
5588       MVT ArgVT = Outs[i].VT;
5589       ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
5590       bool Result;
5591 
5592       if (Outs[i].IsFixed) {
5593         Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
5594                                CCInfo);
5595       } else {
5596         Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
5597                                       ArgFlags, CCInfo);
5598       }
5599 
5600       if (Result) {
5601 #ifndef NDEBUG
5602         errs() << "Call operand #" << i << " has unhandled type "
5603              << EVT(ArgVT).getEVTString() << "\n";
5604 #endif
5605         llvm_unreachable(nullptr);
5606       }
5607     }
5608   } else {
5609     // All arguments are treated the same.
5610     CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
5611   }
5612   CCInfo.clearWasPPCF128();
5613 
5614   // Assign locations to all of the outgoing aggregate by value arguments.
5615   SmallVector<CCValAssign, 16> ByValArgLocs;
5616   CCState CCByValInfo(CallConv, IsVarArg, MF, ByValArgLocs, *DAG.getContext());
5617 
5618   // Reserve stack space for the allocations in CCInfo.
5619   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
5620 
5621   CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
5622 
5623   // Size of the linkage area, parameter list area and the part of the local
5624   // space variable where copies of aggregates which are passed by value are
5625   // stored.
5626   unsigned NumBytes = CCByValInfo.getNextStackOffset();
5627 
5628   // Calculate by how many bytes the stack has to be adjusted in case of tail
5629   // call optimization.
5630   int SPDiff = CalculateTailCallSPDiff(DAG, IsTailCall, NumBytes);
5631 
5632   // Adjust the stack pointer for the new arguments...
5633   // These operations are automatically eliminated by the prolog/epilog pass
5634   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5635   SDValue CallSeqStart = Chain;
5636 
5637   // Load the return address and frame pointer so it can be moved somewhere else
5638   // later.
5639   SDValue LROp, FPOp;
5640   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5641 
5642   // Set up a copy of the stack pointer for use loading and storing any
5643   // arguments that may not fit in the registers available for argument
5644   // passing.
5645   SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5646 
5647   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5648   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5649   SmallVector<SDValue, 8> MemOpChains;
5650 
5651   bool seenFloatArg = false;
5652   // Walk the register/memloc assignments, inserting copies/loads.
5653   // i - Tracks the index into the list of registers allocated for the call
5654   // RealArgIdx - Tracks the index into the list of actual function arguments
5655   // j - Tracks the index into the list of byval arguments
5656   for (unsigned i = 0, RealArgIdx = 0, j = 0, e = ArgLocs.size();
5657        i != e;
5658        ++i, ++RealArgIdx) {
5659     CCValAssign &VA = ArgLocs[i];
5660     SDValue Arg = OutVals[RealArgIdx];
5661     ISD::ArgFlagsTy Flags = Outs[RealArgIdx].Flags;
5662 
5663     if (Flags.isByVal()) {
5664       // Argument is an aggregate which is passed by value, thus we need to
5665       // create a copy of it in the local variable space of the current stack
5666       // frame (which is the stack frame of the caller) and pass the address of
5667       // this copy to the callee.
5668       assert((j < ByValArgLocs.size()) && "Index out of bounds!");
5669       CCValAssign &ByValVA = ByValArgLocs[j++];
5670       assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
5671 
5672       // Memory reserved in the local variable space of the callers stack frame.
5673       unsigned LocMemOffset = ByValVA.getLocMemOffset();
5674 
5675       SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5676       PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5677                            StackPtr, PtrOff);
5678 
5679       // Create a copy of the argument in the local area of the current
5680       // stack frame.
5681       SDValue MemcpyCall =
5682         CreateCopyOfByValArgument(Arg, PtrOff,
5683                                   CallSeqStart.getNode()->getOperand(0),
5684                                   Flags, DAG, dl);
5685 
5686       // This must go outside the CALLSEQ_START..END.
5687       SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0,
5688                                                      SDLoc(MemcpyCall));
5689       DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5690                              NewCallSeqStart.getNode());
5691       Chain = CallSeqStart = NewCallSeqStart;
5692 
5693       // Pass the address of the aggregate copy on the stack either in a
5694       // physical register or in the parameter list area of the current stack
5695       // frame to the callee.
5696       Arg = PtrOff;
5697     }
5698 
5699     // When useCRBits() is true, there can be i1 arguments.
5700     // It is because getRegisterType(MVT::i1) => MVT::i1,
5701     // and for other integer types getRegisterType() => MVT::i32.
5702     // Extend i1 and ensure callee will get i32.
5703     if (Arg.getValueType() == MVT::i1)
5704       Arg = DAG.getNode(Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
5705                         dl, MVT::i32, Arg);
5706 
5707     if (VA.isRegLoc()) {
5708       seenFloatArg |= VA.getLocVT().isFloatingPoint();
5709       // Put argument in a physical register.
5710       if (Subtarget.hasSPE() && Arg.getValueType() == MVT::f64) {
5711         bool IsLE = Subtarget.isLittleEndian();
5712         SDValue SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5713                         DAG.getIntPtrConstant(IsLE ? 0 : 1, dl));
5714         RegsToPass.push_back(std::make_pair(VA.getLocReg(), SVal.getValue(0)));
5715         SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5716                            DAG.getIntPtrConstant(IsLE ? 1 : 0, dl));
5717         RegsToPass.push_back(std::make_pair(ArgLocs[++i].getLocReg(),
5718                              SVal.getValue(0)));
5719       } else
5720         RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
5721     } else {
5722       // Put argument in the parameter list area of the current stack frame.
5723       assert(VA.isMemLoc());
5724       unsigned LocMemOffset = VA.getLocMemOffset();
5725 
5726       if (!IsTailCall) {
5727         SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5728         PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5729                              StackPtr, PtrOff);
5730 
5731         MemOpChains.push_back(
5732             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
5733       } else {
5734         // Calculate and remember argument location.
5735         CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
5736                                  TailCallArguments);
5737       }
5738     }
5739   }
5740 
5741   if (!MemOpChains.empty())
5742     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
5743 
5744   // Build a sequence of copy-to-reg nodes chained together with token chain
5745   // and flag operands which copy the outgoing args into the appropriate regs.
5746   SDValue InFlag;
5747   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
5748     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
5749                              RegsToPass[i].second, InFlag);
5750     InFlag = Chain.getValue(1);
5751   }
5752 
5753   // Set CR bit 6 to true if this is a vararg call with floating args passed in
5754   // registers.
5755   if (IsVarArg) {
5756     SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
5757     SDValue Ops[] = { Chain, InFlag };
5758 
5759     Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
5760                         dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));
5761 
5762     InFlag = Chain.getValue(1);
5763   }
5764 
5765   if (IsTailCall)
5766     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
5767                     TailCallArguments);
5768 
5769   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
5770                     Callee, SPDiff, NumBytes, Ins, InVals, CS);
5771 }
5772 
5773 // Copy an argument into memory, being careful to do this outside the
5774 // call sequence for the call to which the argument belongs.
5775 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
5776     SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
5777     SelectionDAG &DAG, const SDLoc &dl) const {
5778   SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
5779                         CallSeqStart.getNode()->getOperand(0),
5780                         Flags, DAG, dl);
5781   // The MEMCPY must go outside the CALLSEQ_START..END.
5782   int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
5783   SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
5784                                                  SDLoc(MemcpyCall));
5785   DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5786                          NewCallSeqStart.getNode());
5787   return NewCallSeqStart;
5788 }
5789 
5790 SDValue PPCTargetLowering::LowerCall_64SVR4(
5791     SDValue Chain, SDValue Callee, CallFlags CFlags,
5792     const SmallVectorImpl<ISD::OutputArg> &Outs,
5793     const SmallVectorImpl<SDValue> &OutVals,
5794     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5795     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5796     ImmutableCallSite CS) const {
5797   bool isELFv2ABI = Subtarget.isELFv2ABI();
5798   bool isLittleEndian = Subtarget.isLittleEndian();
5799   unsigned NumOps = Outs.size();
5800   bool IsSibCall = false;
5801   bool IsFastCall = CFlags.CallConv == CallingConv::Fast;
5802 
5803   EVT PtrVT = getPointerTy(DAG.getDataLayout());
5804   unsigned PtrByteSize = 8;
5805 
5806   MachineFunction &MF = DAG.getMachineFunction();
5807 
5808   if (CFlags.IsTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
5809     IsSibCall = true;
5810 
5811   // Mark this function as potentially containing a function that contains a
5812   // tail call. As a consequence the frame pointer will be used for dynamicalloc
5813   // and restoring the callers stack pointer in this functions epilog. This is
5814   // done because by tail calling the called function might overwrite the value
5815   // in this function's (MF) stack pointer stack slot 0(SP).
5816   if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
5817     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5818 
5819   assert(!(IsFastCall && CFlags.IsVarArg) &&
5820          "fastcc not supported on varargs functions");
5821 
5822   // Count how many bytes are to be pushed on the stack, including the linkage
5823   // area, and parameter passing area.  On ELFv1, the linkage area is 48 bytes
5824   // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
5825   // area is 32 bytes reserved space for [SP][CR][LR][TOC].
5826   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
5827   unsigned NumBytes = LinkageSize;
5828   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
5829   unsigned &QFPR_idx = FPR_idx;
5830 
5831   static const MCPhysReg GPR[] = {
5832     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
5833     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
5834   };
5835   static const MCPhysReg VR[] = {
5836     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
5837     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
5838   };
5839 
5840   const unsigned NumGPRs = array_lengthof(GPR);
5841   const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
5842   const unsigned NumVRs  = array_lengthof(VR);
5843   const unsigned NumQFPRs = NumFPRs;
5844 
5845   // On ELFv2, we can avoid allocating the parameter area if all the arguments
5846   // can be passed to the callee in registers.
5847   // For the fast calling convention, there is another check below.
5848   // Note: We should keep consistent with LowerFormalArguments_64SVR4()
5849   bool HasParameterArea = !isELFv2ABI || CFlags.IsVarArg || IsFastCall;
5850   if (!HasParameterArea) {
5851     unsigned ParamAreaSize = NumGPRs * PtrByteSize;
5852     unsigned AvailableFPRs = NumFPRs;
5853     unsigned AvailableVRs = NumVRs;
5854     unsigned NumBytesTmp = NumBytes;
5855     for (unsigned i = 0; i != NumOps; ++i) {
5856       if (Outs[i].Flags.isNest()) continue;
5857       if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags,
5858                                 PtrByteSize, LinkageSize, ParamAreaSize,
5859                                 NumBytesTmp, AvailableFPRs, AvailableVRs,
5860                                 Subtarget.hasQPX()))
5861         HasParameterArea = true;
5862     }
5863   }
5864 
5865   // When using the fast calling convention, we don't provide backing for
5866   // arguments that will be in registers.
5867   unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
5868 
5869   // Avoid allocating parameter area for fastcc functions if all the arguments
5870   // can be passed in the registers.
5871   if (IsFastCall)
5872     HasParameterArea = false;
5873 
5874   // Add up all the space actually used.
5875   for (unsigned i = 0; i != NumOps; ++i) {
5876     ISD::ArgFlagsTy Flags = Outs[i].Flags;
5877     EVT ArgVT = Outs[i].VT;
5878     EVT OrigVT = Outs[i].ArgVT;
5879 
5880     if (Flags.isNest())
5881       continue;
5882 
5883     if (IsFastCall) {
5884       if (Flags.isByVal()) {
5885         NumGPRsUsed += (Flags.getByValSize()+7)/8;
5886         if (NumGPRsUsed > NumGPRs)
5887           HasParameterArea = true;
5888       } else {
5889         switch (ArgVT.getSimpleVT().SimpleTy) {
5890         default: llvm_unreachable("Unexpected ValueType for argument!");
5891         case MVT::i1:
5892         case MVT::i32:
5893         case MVT::i64:
5894           if (++NumGPRsUsed <= NumGPRs)
5895             continue;
5896           break;
5897         case MVT::v4i32:
5898         case MVT::v8i16:
5899         case MVT::v16i8:
5900         case MVT::v2f64:
5901         case MVT::v2i64:
5902         case MVT::v1i128:
5903         case MVT::f128:
5904           if (++NumVRsUsed <= NumVRs)
5905             continue;
5906           break;
5907         case MVT::v4f32:
5908           // When using QPX, this is handled like a FP register, otherwise, it
5909           // is an Altivec register.
5910           if (Subtarget.hasQPX()) {
5911             if (++NumFPRsUsed <= NumFPRs)
5912               continue;
5913           } else {
5914             if (++NumVRsUsed <= NumVRs)
5915               continue;
5916           }
5917           break;
5918         case MVT::f32:
5919         case MVT::f64:
5920         case MVT::v4f64: // QPX
5921         case MVT::v4i1:  // QPX
5922           if (++NumFPRsUsed <= NumFPRs)
5923             continue;
5924           break;
5925         }
5926         HasParameterArea = true;
5927       }
5928     }
5929 
5930     /* Respect alignment of argument on the stack.  */
5931     auto Alignement =
5932         CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
5933     NumBytes = alignTo(NumBytes, Alignement);
5934 
5935     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
5936     if (Flags.isInConsecutiveRegsLast())
5937       NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
5938   }
5939 
5940   unsigned NumBytesActuallyUsed = NumBytes;
5941 
5942   // In the old ELFv1 ABI,
5943   // the prolog code of the callee may store up to 8 GPR argument registers to
5944   // the stack, allowing va_start to index over them in memory if its varargs.
5945   // Because we cannot tell if this is needed on the caller side, we have to
5946   // conservatively assume that it is needed.  As such, make sure we have at
5947   // least enough stack space for the caller to store the 8 GPRs.
5948   // In the ELFv2 ABI, we allocate the parameter area iff a callee
5949   // really requires memory operands, e.g. a vararg function.
5950   if (HasParameterArea)
5951     NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
5952   else
5953     NumBytes = LinkageSize;
5954 
5955   // Tail call needs the stack to be aligned.
5956   if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
5957     NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
5958 
5959   int SPDiff = 0;
5960 
5961   // Calculate by how many bytes the stack has to be adjusted in case of tail
5962   // call optimization.
5963   if (!IsSibCall)
5964     SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);
5965 
5966   // To protect arguments on the stack from being clobbered in a tail call,
5967   // force all the loads to happen before doing any other lowering.
5968   if (CFlags.IsTailCall)
5969     Chain = DAG.getStackArgumentTokenFactor(Chain);
5970 
5971   // Adjust the stack pointer for the new arguments...
5972   // These operations are automatically eliminated by the prolog/epilog pass
5973   if (!IsSibCall)
5974     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5975   SDValue CallSeqStart = Chain;
5976 
5977   // Load the return address and frame pointer so it can be move somewhere else
5978   // later.
5979   SDValue LROp, FPOp;
5980   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5981 
5982   // Set up a copy of the stack pointer for use loading and storing any
5983   // arguments that may not fit in the registers available for argument
5984   // passing.
5985   SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
5986 
5987   // Figure out which arguments are going to go in registers, and which in
5988   // memory.  Also, if this is a vararg function, floating point operations
5989   // must be stored to our stack, and loaded into integer regs as well, if
5990   // any integer regs are available for argument passing.
5991   unsigned ArgOffset = LinkageSize;
5992 
5993   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5994   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5995 
5996   SmallVector<SDValue, 8> MemOpChains;
5997   for (unsigned i = 0; i != NumOps; ++i) {
5998     SDValue Arg = OutVals[i];
5999     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6000     EVT ArgVT = Outs[i].VT;
6001     EVT OrigVT = Outs[i].ArgVT;
6002 
6003     // PtrOff will be used to store the current argument to the stack if a
6004     // register cannot be found for it.
6005     SDValue PtrOff;
6006 
6007     // We re-align the argument offset for each argument, except when using the
6008     // fast calling convention, when we need to make sure we do that only when
6009     // we'll actually use a stack slot.
6010     auto ComputePtrOff = [&]() {
6011       /* Respect alignment of argument on the stack.  */
6012       auto Alignment =
6013           CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
6014       ArgOffset = alignTo(ArgOffset, Alignment);
6015 
6016       PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
6017 
6018       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6019     };
6020 
6021     if (!IsFastCall) {
6022       ComputePtrOff();
6023 
6024       /* Compute GPR index associated with argument offset.  */
6025       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
6026       GPR_idx = std::min(GPR_idx, NumGPRs);
6027     }
6028 
6029     // Promote integers to 64-bit values.
6030     if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
6031       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
6032       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6033       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
6034     }
6035 
6036     // FIXME memcpy is used way more than necessary.  Correctness first.
6037     // Note: "by value" is code for passing a structure by value, not
6038     // basic types.
6039     if (Flags.isByVal()) {
6040       // Note: Size includes alignment padding, so
6041       //   struct x { short a; char b; }
6042       // will have Size = 4.  With #pragma pack(1), it will have Size = 3.
6043       // These are the proper values we need for right-justifying the
6044       // aggregate in a parameter register.
6045       unsigned Size = Flags.getByValSize();
6046 
6047       // An empty aggregate parameter takes up no storage and no
6048       // registers.
6049       if (Size == 0)
6050         continue;
6051 
6052       if (IsFastCall)
6053         ComputePtrOff();
6054 
6055       // All aggregates smaller than 8 bytes must be passed right-justified.
6056       if (Size==1 || Size==2 || Size==4) {
6057         EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
6058         if (GPR_idx != NumGPRs) {
6059           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
6060                                         MachinePointerInfo(), VT);
6061           MemOpChains.push_back(Load.getValue(1));
6062           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6063 
6064           ArgOffset += PtrByteSize;
6065           continue;
6066         }
6067       }
6068 
6069       if (GPR_idx == NumGPRs && Size < 8) {
6070         SDValue AddPtr = PtrOff;
6071         if (!isLittleEndian) {
6072           SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
6073                                           PtrOff.getValueType());
6074           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6075         }
6076         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6077                                                           CallSeqStart,
6078                                                           Flags, DAG, dl);
6079         ArgOffset += PtrByteSize;
6080         continue;
6081       }
6082       // Copy entire object into memory.  There are cases where gcc-generated
6083       // code assumes it is there, even if it could be put entirely into
6084       // registers.  (This is not what the doc says.)
6085 
6086       // FIXME: The above statement is likely due to a misunderstanding of the
6087       // documents.  All arguments must be copied into the parameter area BY
6088       // THE CALLEE in the event that the callee takes the address of any
6089       // formal argument.  That has not yet been implemented.  However, it is
6090       // reasonable to use the stack area as a staging area for the register
6091       // load.
6092 
6093       // Skip this for small aggregates, as we will use the same slot for a
6094       // right-justified copy, below.
6095       if (Size >= 8)
6096         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
6097                                                           CallSeqStart,
6098                                                           Flags, DAG, dl);
6099 
6100       // When a register is available, pass a small aggregate right-justified.
6101       if (Size < 8 && GPR_idx != NumGPRs) {
6102         // The easiest way to get this right-justified in a register
6103         // is to copy the structure into the rightmost portion of a
6104         // local variable slot, then load the whole slot into the
6105         // register.
6106         // FIXME: The memcpy seems to produce pretty awful code for
6107         // small aggregates, particularly for packed ones.
6108         // FIXME: It would be preferable to use the slot in the
6109         // parameter save area instead of a new local variable.
6110         SDValue AddPtr = PtrOff;
6111         if (!isLittleEndian) {
6112           SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType());
6113           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6114         }
6115         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6116                                                           CallSeqStart,
6117                                                           Flags, DAG, dl);
6118 
6119         // Load the slot into the register.
6120         SDValue Load =
6121             DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo());
6122         MemOpChains.push_back(Load.getValue(1));
6123         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6124 
6125         // Done with this argument.
6126         ArgOffset += PtrByteSize;
6127         continue;
6128       }
6129 
6130       // For aggregates larger than PtrByteSize, copy the pieces of the
6131       // object that fit into registers from the parameter save area.
6132       for (unsigned j=0; j<Size; j+=PtrByteSize) {
6133         SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6134         SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6135         if (GPR_idx != NumGPRs) {
6136           SDValue Load =
6137               DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
6138           MemOpChains.push_back(Load.getValue(1));
6139           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6140           ArgOffset += PtrByteSize;
6141         } else {
6142           ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
6143           break;
6144         }
6145       }
6146       continue;
6147     }
6148 
6149     switch (Arg.getSimpleValueType().SimpleTy) {
6150     default: llvm_unreachable("Unexpected ValueType for argument!");
6151     case MVT::i1:
6152     case MVT::i32:
6153     case MVT::i64:
6154       if (Flags.isNest()) {
6155         // The 'nest' parameter, if any, is passed in R11.
6156         RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
6157         break;
6158       }
6159 
6160       // These can be scalar arguments or elements of an integer array type
6161       // passed directly.  Clang may use those instead of "byval" aggregate
6162       // types to avoid forcing arguments to memory unnecessarily.
6163       if (GPR_idx != NumGPRs) {
6164         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
6165       } else {
6166         if (IsFastCall)
6167           ComputePtrOff();
6168 
6169         assert(HasParameterArea &&
6170                "Parameter area must exist to pass an argument in memory.");
6171         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6172                          true, CFlags.IsTailCall, false, MemOpChains,
6173                          TailCallArguments, dl);
6174         if (IsFastCall)
6175           ArgOffset += PtrByteSize;
6176       }
6177       if (!IsFastCall)
6178         ArgOffset += PtrByteSize;
6179       break;
6180     case MVT::f32:
6181     case MVT::f64: {
6182       // These can be scalar arguments or elements of a float array type
6183       // passed directly.  The latter are used to implement ELFv2 homogenous
6184       // float aggregates.
6185 
6186       // Named arguments go into FPRs first, and once they overflow, the
6187       // remaining arguments go into GPRs and then the parameter save area.
6188       // Unnamed arguments for vararg functions always go to GPRs and
6189       // then the parameter save area.  For now, put all arguments to vararg
6190       // routines always in both locations (FPR *and* GPR or stack slot).
6191       bool NeedGPROrStack = CFlags.IsVarArg || FPR_idx == NumFPRs;
6192       bool NeededLoad = false;
6193 
6194       // First load the argument into the next available FPR.
6195       if (FPR_idx != NumFPRs)
6196         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6197 
6198       // Next, load the argument into GPR or stack slot if needed.
6199       if (!NeedGPROrStack)
6200         ;
6201       else if (GPR_idx != NumGPRs && !IsFastCall) {
6202         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
6203         // once we support fp <-> gpr moves.
6204 
6205         // In the non-vararg case, this can only ever happen in the
6206         // presence of f32 array types, since otherwise we never run
6207         // out of FPRs before running out of GPRs.
6208         SDValue ArgVal;
6209 
6210         // Double values are always passed in a single GPR.
6211         if (Arg.getValueType() != MVT::f32) {
6212           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
6213 
6214         // Non-array float values are extended and passed in a GPR.
6215         } else if (!Flags.isInConsecutiveRegs()) {
6216           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6217           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6218 
6219         // If we have an array of floats, we collect every odd element
6220         // together with its predecessor into one GPR.
6221         } else if (ArgOffset % PtrByteSize != 0) {
6222           SDValue Lo, Hi;
6223           Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
6224           Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6225           if (!isLittleEndian)
6226             std::swap(Lo, Hi);
6227           ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
6228 
6229         // The final element, if even, goes into the first half of a GPR.
6230         } else if (Flags.isInConsecutiveRegsLast()) {
6231           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6232           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6233           if (!isLittleEndian)
6234             ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
6235                                  DAG.getConstant(32, dl, MVT::i32));
6236 
6237         // Non-final even elements are skipped; they will be handled
6238         // together the with subsequent argument on the next go-around.
6239         } else
6240           ArgVal = SDValue();
6241 
6242         if (ArgVal.getNode())
6243           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
6244       } else {
6245         if (IsFastCall)
6246           ComputePtrOff();
6247 
6248         // Single-precision floating-point values are mapped to the
6249         // second (rightmost) word of the stack doubleword.
6250         if (Arg.getValueType() == MVT::f32 &&
6251             !isLittleEndian && !Flags.isInConsecutiveRegs()) {
6252           SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6253           PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6254         }
6255 
6256         assert(HasParameterArea &&
6257                "Parameter area must exist to pass an argument in memory.");
6258         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6259                          true, CFlags.IsTailCall, false, MemOpChains,
6260                          TailCallArguments, dl);
6261 
6262         NeededLoad = true;
6263       }
6264       // When passing an array of floats, the array occupies consecutive
6265       // space in the argument area; only round up to the next doubleword
6266       // at the end of the array.  Otherwise, each float takes 8 bytes.
6267       if (!IsFastCall || NeededLoad) {
6268         ArgOffset += (Arg.getValueType() == MVT::f32 &&
6269                       Flags.isInConsecutiveRegs()) ? 4 : 8;
6270         if (Flags.isInConsecutiveRegsLast())
6271           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
6272       }
6273       break;
6274     }
6275     case MVT::v4f32:
6276     case MVT::v4i32:
6277     case MVT::v8i16:
6278     case MVT::v16i8:
6279     case MVT::v2f64:
6280     case MVT::v2i64:
6281     case MVT::v1i128:
6282     case MVT::f128:
6283       if (!Subtarget.hasQPX()) {
6284       // These can be scalar arguments or elements of a vector array type
6285       // passed directly.  The latter are used to implement ELFv2 homogenous
6286       // vector aggregates.
6287 
6288       // For a varargs call, named arguments go into VRs or on the stack as
6289       // usual; unnamed arguments always go to the stack or the corresponding
6290       // GPRs when within range.  For now, we always put the value in both
6291       // locations (or even all three).
6292       if (CFlags.IsVarArg) {
6293         assert(HasParameterArea &&
6294                "Parameter area must exist if we have a varargs call.");
6295         // We could elide this store in the case where the object fits
6296         // entirely in R registers.  Maybe later.
6297         SDValue Store =
6298             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6299         MemOpChains.push_back(Store);
6300         if (VR_idx != NumVRs) {
6301           SDValue Load =
6302               DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6303           MemOpChains.push_back(Load.getValue(1));
6304           RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6305         }
6306         ArgOffset += 16;
6307         for (unsigned i=0; i<16; i+=PtrByteSize) {
6308           if (GPR_idx == NumGPRs)
6309             break;
6310           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6311                                    DAG.getConstant(i, dl, PtrVT));
6312           SDValue Load =
6313               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6314           MemOpChains.push_back(Load.getValue(1));
6315           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6316         }
6317         break;
6318       }
6319 
6320       // Non-varargs Altivec params go into VRs or on the stack.
6321       if (VR_idx != NumVRs) {
6322         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6323       } else {
6324         if (IsFastCall)
6325           ComputePtrOff();
6326 
6327         assert(HasParameterArea &&
6328                "Parameter area must exist to pass an argument in memory.");
6329         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6330                          true, CFlags.IsTailCall, true, MemOpChains,
6331                          TailCallArguments, dl);
6332         if (IsFastCall)
6333           ArgOffset += 16;
6334       }
6335 
6336       if (!IsFastCall)
6337         ArgOffset += 16;
6338       break;
6339       } // not QPX
6340 
6341       assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 &&
6342              "Invalid QPX parameter type");
6343 
6344       LLVM_FALLTHROUGH;
6345     case MVT::v4f64:
6346     case MVT::v4i1: {
6347       bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32;
6348       if (CFlags.IsVarArg) {
6349         assert(HasParameterArea &&
6350                "Parameter area must exist if we have a varargs call.");
6351         // We could elide this store in the case where the object fits
6352         // entirely in R registers.  Maybe later.
6353         SDValue Store =
6354             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6355         MemOpChains.push_back(Store);
6356         if (QFPR_idx != NumQFPRs) {
6357           SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, Store,
6358                                      PtrOff, MachinePointerInfo());
6359           MemOpChains.push_back(Load.getValue(1));
6360           RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load));
6361         }
6362         ArgOffset += (IsF32 ? 16 : 32);
6363         for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) {
6364           if (GPR_idx == NumGPRs)
6365             break;
6366           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6367                                    DAG.getConstant(i, dl, PtrVT));
6368           SDValue Load =
6369               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6370           MemOpChains.push_back(Load.getValue(1));
6371           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6372         }
6373         break;
6374       }
6375 
6376       // Non-varargs QPX params go into registers or on the stack.
6377       if (QFPR_idx != NumQFPRs) {
6378         RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg));
6379       } else {
6380         if (IsFastCall)
6381           ComputePtrOff();
6382 
6383         assert(HasParameterArea &&
6384                "Parameter area must exist to pass an argument in memory.");
6385         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6386                          true, CFlags.IsTailCall, true, MemOpChains,
6387                          TailCallArguments, dl);
6388         if (IsFastCall)
6389           ArgOffset += (IsF32 ? 16 : 32);
6390       }
6391 
6392       if (!IsFastCall)
6393         ArgOffset += (IsF32 ? 16 : 32);
6394       break;
6395       }
6396     }
6397   }
6398 
6399   assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
6400          "mismatch in size of parameter area");
6401   (void)NumBytesActuallyUsed;
6402 
6403   if (!MemOpChains.empty())
6404     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6405 
6406   // Check if this is an indirect call (MTCTR/BCTRL).
6407   // See prepareDescriptorIndirectCall and buildCallOperands for more
6408   // information about calls through function pointers in the 64-bit SVR4 ABI.
6409   if (CFlags.IsIndirect) {
6410     assert(!CFlags.IsTailCall &&  "Indirect tails calls not supported");
6411     // Load r2 into a virtual register and store it to the TOC save area.
6412     setUsesTOCBasePtr(DAG);
6413     SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
6414     // TOC save area offset.
6415     unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
6416     SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
6417     SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6418     Chain = DAG.getStore(
6419         Val.getValue(1), dl, Val, AddPtr,
6420         MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
6421     // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
6422     // This does not mean the MTCTR instruction must use R12; it's easier
6423     // to model this as an extra parameter, so do that.
6424     if (isELFv2ABI && !CFlags.IsPatchPoint)
6425       RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
6426   }
6427 
6428   // Build a sequence of copy-to-reg nodes chained together with token chain
6429   // and flag operands which copy the outgoing args into the appropriate regs.
6430   SDValue InFlag;
6431   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6432     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6433                              RegsToPass[i].second, InFlag);
6434     InFlag = Chain.getValue(1);
6435   }
6436 
6437   if (CFlags.IsTailCall && !IsSibCall)
6438     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6439                     TailCallArguments);
6440 
6441   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
6442                     Callee, SPDiff, NumBytes, Ins, InVals, CS);
6443 }
6444 
6445 SDValue PPCTargetLowering::LowerCall_Darwin(
6446     SDValue Chain, SDValue Callee, CallFlags CFlags,
6447     const SmallVectorImpl<ISD::OutputArg> &Outs,
6448     const SmallVectorImpl<SDValue> &OutVals,
6449     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6450     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
6451     ImmutableCallSite CS) const {
6452   unsigned NumOps = Outs.size();
6453 
6454   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6455   bool isPPC64 = PtrVT == MVT::i64;
6456   unsigned PtrByteSize = isPPC64 ? 8 : 4;
6457 
6458   MachineFunction &MF = DAG.getMachineFunction();
6459 
6460   // Mark this function as potentially containing a function that contains a
6461   // tail call. As a consequence the frame pointer will be used for dynamicalloc
6462   // and restoring the callers stack pointer in this functions epilog. This is
6463   // done because by tail calling the called function might overwrite the value
6464   // in this function's (MF) stack pointer stack slot 0(SP).
6465   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6466       CFlags.CallConv == CallingConv::Fast)
6467     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
6468 
6469   // Count how many bytes are to be pushed on the stack, including the linkage
6470   // area, and parameter passing area.  We start with 24/48 bytes, which is
6471   // prereserved space for [SP][CR][LR][3 x unused].
6472   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6473   unsigned NumBytes = LinkageSize;
6474 
6475   // Add up all the space actually used.
6476   // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
6477   // they all go in registers, but we must reserve stack space for them for
6478   // possible use by the caller.  In varargs or 64-bit calls, parameters are
6479   // assigned stack space in order, with padding so Altivec parameters are
6480   // 16-byte aligned.
6481   unsigned nAltivecParamsAtEnd = 0;
6482   for (unsigned i = 0; i != NumOps; ++i) {
6483     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6484     EVT ArgVT = Outs[i].VT;
6485     // Varargs Altivec parameters are padded to a 16 byte boundary.
6486     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
6487         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
6488         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
6489       if (!CFlags.IsVarArg && !isPPC64) {
6490         // Non-varargs Altivec parameters go after all the non-Altivec
6491         // parameters; handle those later so we know how much padding we need.
6492         nAltivecParamsAtEnd++;
6493         continue;
6494       }
6495       // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary.
6496       NumBytes = ((NumBytes+15)/16)*16;
6497     }
6498     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
6499   }
6500 
6501   // Allow for Altivec parameters at the end, if needed.
6502   if (nAltivecParamsAtEnd) {
6503     NumBytes = ((NumBytes+15)/16)*16;
6504     NumBytes += 16*nAltivecParamsAtEnd;
6505   }
6506 
6507   // The prolog code of the callee may store up to 8 GPR argument registers to
6508   // the stack, allowing va_start to index over them in memory if its varargs.
6509   // Because we cannot tell if this is needed on the caller side, we have to
6510   // conservatively assume that it is needed.  As such, make sure we have at
6511   // least enough stack space for the caller to store the 8 GPRs.
6512   NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
6513 
6514   // Tail call needs the stack to be aligned.
6515   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6516       CFlags.CallConv == CallingConv::Fast)
6517     NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
6518 
6519   // Calculate by how many bytes the stack has to be adjusted in case of tail
6520   // call optimization.
6521   int SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);
6522 
6523   // To protect arguments on the stack from being clobbered in a tail call,
6524   // force all the loads to happen before doing any other lowering.
6525   if (CFlags.IsTailCall)
6526     Chain = DAG.getStackArgumentTokenFactor(Chain);
6527 
6528   // Adjust the stack pointer for the new arguments...
6529   // These operations are automatically eliminated by the prolog/epilog pass
6530   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6531   SDValue CallSeqStart = Chain;
6532 
6533   // Load the return address and frame pointer so it can be move somewhere else
6534   // later.
6535   SDValue LROp, FPOp;
6536   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
6537 
6538   // Set up a copy of the stack pointer for use loading and storing any
6539   // arguments that may not fit in the registers available for argument
6540   // passing.
6541   SDValue StackPtr;
6542   if (isPPC64)
6543     StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
6544   else
6545     StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
6546 
6547   // Figure out which arguments are going to go in registers, and which in
6548   // memory.  Also, if this is a vararg function, floating point operations
6549   // must be stored to our stack, and loaded into integer regs as well, if
6550   // any integer regs are available for argument passing.
6551   unsigned ArgOffset = LinkageSize;
6552   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
6553 
6554   static const MCPhysReg GPR_32[] = {           // 32-bit registers.
6555     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6556     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
6557   };
6558   static const MCPhysReg GPR_64[] = {           // 64-bit registers.
6559     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6560     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
6561   };
6562   static const MCPhysReg VR[] = {
6563     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
6564     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
6565   };
6566   const unsigned NumGPRs = array_lengthof(GPR_32);
6567   const unsigned NumFPRs = 13;
6568   const unsigned NumVRs  = array_lengthof(VR);
6569 
6570   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
6571 
6572   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6573   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
6574 
6575   SmallVector<SDValue, 8> MemOpChains;
6576   for (unsigned i = 0; i != NumOps; ++i) {
6577     SDValue Arg = OutVals[i];
6578     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6579 
6580     // PtrOff will be used to store the current argument to the stack if a
6581     // register cannot be found for it.
6582     SDValue PtrOff;
6583 
6584     PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
6585 
6586     PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6587 
6588     // On PPC64, promote integers to 64-bit values.
6589     if (isPPC64 && Arg.getValueType() == MVT::i32) {
6590       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
6591       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6592       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
6593     }
6594 
6595     // FIXME memcpy is used way more than necessary.  Correctness first.
6596     // Note: "by value" is code for passing a structure by value, not
6597     // basic types.
6598     if (Flags.isByVal()) {
6599       unsigned Size = Flags.getByValSize();
6600       // Very small objects are passed right-justified.  Everything else is
6601       // passed left-justified.
6602       if (Size==1 || Size==2) {
6603         EVT VT = (Size==1) ? MVT::i8 : MVT::i16;
6604         if (GPR_idx != NumGPRs) {
6605           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
6606                                         MachinePointerInfo(), VT);
6607           MemOpChains.push_back(Load.getValue(1));
6608           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6609 
6610           ArgOffset += PtrByteSize;
6611         } else {
6612           SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
6613                                           PtrOff.getValueType());
6614           SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6615           Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6616                                                             CallSeqStart,
6617                                                             Flags, DAG, dl);
6618           ArgOffset += PtrByteSize;
6619         }
6620         continue;
6621       }
6622       // Copy entire object into memory.  There are cases where gcc-generated
6623       // code assumes it is there, even if it could be put entirely into
6624       // registers.  (This is not what the doc says.)
6625       Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
6626                                                         CallSeqStart,
6627                                                         Flags, DAG, dl);
6628 
6629       // For small aggregates (Darwin only) and aggregates >= PtrByteSize,
6630       // copy the pieces of the object that fit into registers from the
6631       // parameter save area.
6632       for (unsigned j=0; j<Size; j+=PtrByteSize) {
6633         SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6634         SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6635         if (GPR_idx != NumGPRs) {
6636           SDValue Load =
6637               DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
6638           MemOpChains.push_back(Load.getValue(1));
6639           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6640           ArgOffset += PtrByteSize;
6641         } else {
6642           ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
6643           break;
6644         }
6645       }
6646       continue;
6647     }
6648 
6649     switch (Arg.getSimpleValueType().SimpleTy) {
6650     default: llvm_unreachable("Unexpected ValueType for argument!");
6651     case MVT::i1:
6652     case MVT::i32:
6653     case MVT::i64:
6654       if (GPR_idx != NumGPRs) {
6655         if (Arg.getValueType() == MVT::i1)
6656           Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg);
6657 
6658         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
6659       } else {
6660         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6661                          isPPC64, CFlags.IsTailCall, false, MemOpChains,
6662                          TailCallArguments, dl);
6663       }
6664       ArgOffset += PtrByteSize;
6665       break;
6666     case MVT::f32:
6667     case MVT::f64:
6668       if (FPR_idx != NumFPRs) {
6669         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6670 
6671         if (CFlags.IsVarArg) {
6672           SDValue Store =
6673               DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6674           MemOpChains.push_back(Store);
6675 
6676           // Float varargs are always shadowed in available integer registers
6677           if (GPR_idx != NumGPRs) {
6678             SDValue Load =
6679                 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
6680             MemOpChains.push_back(Load.getValue(1));
6681             RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6682           }
6683           if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
6684             SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6685             PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6686             SDValue Load =
6687                 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
6688             MemOpChains.push_back(Load.getValue(1));
6689             RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6690           }
6691         } else {
6692           // If we have any FPRs remaining, we may also have GPRs remaining.
6693           // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
6694           // GPRs.
6695           if (GPR_idx != NumGPRs)
6696             ++GPR_idx;
6697           if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
6698               !isPPC64)  // PPC64 has 64-bit GPR's obviously :)
6699             ++GPR_idx;
6700         }
6701       } else
6702         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6703                          isPPC64, CFlags.IsTailCall, false, MemOpChains,
6704                          TailCallArguments, dl);
6705       if (isPPC64)
6706         ArgOffset += 8;
6707       else
6708         ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
6709       break;
6710     case MVT::v4f32:
6711     case MVT::v4i32:
6712     case MVT::v8i16:
6713     case MVT::v16i8:
6714       if (CFlags.IsVarArg) {
6715         // These go aligned on the stack, or in the corresponding R registers
6716         // when within range.  The Darwin PPC ABI doc claims they also go in
6717         // V registers; in fact gcc does this only for arguments that are
6718         // prototyped, not for those that match the ...  We do it for all
6719         // arguments, seems to work.
6720         while (ArgOffset % 16 !=0) {
6721           ArgOffset += PtrByteSize;
6722           if (GPR_idx != NumGPRs)
6723             GPR_idx++;
6724         }
6725         // We could elide this store in the case where the object fits
6726         // entirely in R registers.  Maybe later.
6727         PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
6728                              DAG.getConstant(ArgOffset, dl, PtrVT));
6729         SDValue Store =
6730             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6731         MemOpChains.push_back(Store);
6732         if (VR_idx != NumVRs) {
6733           SDValue Load =
6734               DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6735           MemOpChains.push_back(Load.getValue(1));
6736           RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6737         }
6738         ArgOffset += 16;
6739         for (unsigned i=0; i<16; i+=PtrByteSize) {
6740           if (GPR_idx == NumGPRs)
6741             break;
6742           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6743                                    DAG.getConstant(i, dl, PtrVT));
6744           SDValue Load =
6745               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6746           MemOpChains.push_back(Load.getValue(1));
6747           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6748         }
6749         break;
6750       }
6751 
6752       // Non-varargs Altivec params generally go in registers, but have
6753       // stack space allocated at the end.
6754       if (VR_idx != NumVRs) {
6755         // Doesn't have GPR space allocated.
6756         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6757       } else if (nAltivecParamsAtEnd==0) {
6758         // We are emitting Altivec params in order.
6759         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6760                          isPPC64, CFlags.IsTailCall, true, MemOpChains,
6761                          TailCallArguments, dl);
6762         ArgOffset += 16;
6763       }
6764       break;
6765     }
6766   }
6767   // If all Altivec parameters fit in registers, as they usually do,
6768   // they get stack space following the non-Altivec parameters.  We
6769   // don't track this here because nobody below needs it.
6770   // If there are more Altivec parameters than fit in registers emit
6771   // the stores here.
6772   if (!CFlags.IsVarArg && nAltivecParamsAtEnd > NumVRs) {
6773     unsigned j = 0;
6774     // Offset is aligned; skip 1st 12 params which go in V registers.
6775     ArgOffset = ((ArgOffset+15)/16)*16;
6776     ArgOffset += 12*16;
6777     for (unsigned i = 0; i != NumOps; ++i) {
6778       SDValue Arg = OutVals[i];
6779       EVT ArgType = Outs[i].VT;
6780       if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
6781           ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
6782         if (++j > NumVRs) {
6783           SDValue PtrOff;
6784           // We are emitting Altivec params in order.
6785           LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6786                            isPPC64, CFlags.IsTailCall, true, MemOpChains,
6787                            TailCallArguments, dl);
6788           ArgOffset += 16;
6789         }
6790       }
6791     }
6792   }
6793 
6794   if (!MemOpChains.empty())
6795     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6796 
6797   // On Darwin, R12 must contain the address of an indirect callee.  This does
6798   // not mean the MTCTR instruction must use R12; it's easier to model this as
6799   // an extra parameter, so do that.
6800   if (CFlags.IsIndirect) {
6801     assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
6802     RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 :
6803                                                    PPC::R12), Callee));
6804   }
6805 
6806   // Build a sequence of copy-to-reg nodes chained together with token chain
6807   // and flag operands which copy the outgoing args into the appropriate regs.
6808   SDValue InFlag;
6809   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6810     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6811                              RegsToPass[i].second, InFlag);
6812     InFlag = Chain.getValue(1);
6813   }
6814 
6815   if (CFlags.IsTailCall)
6816     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6817                     TailCallArguments);
6818 
6819   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
6820                     Callee, SPDiff, NumBytes, Ins, InVals, CS);
6821 }
6822 
6823 static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
6824                    CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
6825                    CCState &State) {
6826 
6827   const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>(
6828       State.getMachineFunction().getSubtarget());
6829   const bool IsPPC64 = Subtarget.isPPC64();
6830   const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
6831   const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
6832 
6833   assert((!ValVT.isInteger() ||
6834           (ValVT.getSizeInBits() <= RegVT.getSizeInBits())) &&
6835          "Integer argument exceeds register size: should have been legalized");
6836 
6837   if (ValVT == MVT::f128)
6838     report_fatal_error("f128 is unimplemented on AIX.");
6839 
6840   if (ArgFlags.isNest())
6841     report_fatal_error("Nest arguments are unimplemented.");
6842 
6843   if (ValVT.isVector() || LocVT.isVector())
6844     report_fatal_error("Vector arguments are unimplemented on AIX.");
6845 
6846   static const MCPhysReg GPR_32[] = {// 32-bit registers.
6847                                      PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6848                                      PPC::R7, PPC::R8, PPC::R9, PPC::R10};
6849   static const MCPhysReg GPR_64[] = {// 64-bit registers.
6850                                      PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6851                                      PPC::X7, PPC::X8, PPC::X9, PPC::X10};
6852 
6853   if (ArgFlags.isByVal()) {
6854     if (ArgFlags.getNonZeroByValAlign() > PtrByteSize)
6855       report_fatal_error("Pass-by-value arguments with alignment greater than "
6856                          "register width are not supported.");
6857 
6858     const unsigned ByValSize = ArgFlags.getByValSize();
6859 
6860     // An empty aggregate parameter takes up no storage and no registers,
6861     // but needs a MemLoc for a stack slot for the formal arguments side.
6862     if (ByValSize == 0) {
6863       State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
6864                                        State.getNextStackOffset(), RegVT,
6865                                        LocInfo));
6866       return false;
6867     }
6868 
6869     State.AllocateStack(alignTo(ByValSize, PtrByteSize), PtrByteSize);
6870 
6871     for (unsigned I = 0, E = ByValSize; I < E; I += PtrByteSize) {
6872       if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
6873         State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6874       else
6875         report_fatal_error(
6876             "Pass-by-value arguments are only supported in registers.");
6877     }
6878     return false;
6879   }
6880 
6881   // Arguments always reserve parameter save area.
6882   switch (ValVT.SimpleTy) {
6883   default:
6884     report_fatal_error("Unhandled value type for argument.");
6885   case MVT::i64:
6886     // i64 arguments should have been split to i32 for PPC32.
6887     assert(IsPPC64 && "PPC32 should have split i64 values.");
6888     LLVM_FALLTHROUGH;
6889   case MVT::i1:
6890   case MVT::i32: {
6891     const unsigned Offset = State.AllocateStack(PtrByteSize, PtrByteSize);
6892     // AIX integer arguments are always passed in register width.
6893     if (ValVT.getSizeInBits() < RegVT.getSizeInBits())
6894       LocInfo = ArgFlags.isSExt() ? CCValAssign::LocInfo::SExt
6895                                   : CCValAssign::LocInfo::ZExt;
6896     if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
6897       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6898     else
6899       State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, RegVT, LocInfo));
6900 
6901     return false;
6902   }
6903   case MVT::f32:
6904   case MVT::f64: {
6905     // Parameter save area (PSA) is reserved even if the float passes in fpr.
6906     const unsigned StoreSize = LocVT.getStoreSize();
6907     // Floats are always 4-byte aligned in the PSA on AIX.
6908     // This includes f64 in 64-bit mode for ABI compatibility.
6909     const unsigned Offset = State.AllocateStack(IsPPC64 ? 8 : StoreSize, 4);
6910     unsigned FReg = State.AllocateReg(FPR);
6911     if (FReg)
6912       State.addLoc(CCValAssign::getReg(ValNo, ValVT, FReg, LocVT, LocInfo));
6913 
6914     // Reserve and initialize GPRs or initialize the PSA as required.
6915     for (unsigned I = 0; I < StoreSize; I += PtrByteSize) {
6916       if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) {
6917         assert(FReg && "An FPR should be available when a GPR is reserved.");
6918         if (State.isVarArg()) {
6919           // Successfully reserved GPRs are only initialized for vararg calls.
6920           // Custom handling is required for:
6921           //   f64 in PPC32 needs to be split into 2 GPRs.
6922           //   f32 in PPC64 needs to occupy only lower 32 bits of 64-bit GPR.
6923           State.addLoc(
6924               CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6925         }
6926       } else {
6927         // If there are insufficient GPRs, the PSA needs to be initialized.
6928         // Initialization occurs even if an FPR was initialized for
6929         // compatibility with the AIX XL compiler. The full memory for the
6930         // argument will be initialized even if a prior word is saved in GPR.
6931         // A custom memLoc is used when the argument also passes in FPR so
6932         // that the callee handling can skip over it easily.
6933         State.addLoc(
6934             FReg ? CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT,
6935                                              LocInfo)
6936                  : CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6937         break;
6938       }
6939     }
6940 
6941     return false;
6942   }
6943   }
6944   return true;
6945 }
6946 
6947 static const TargetRegisterClass *getRegClassForSVT(MVT::SimpleValueType SVT,
6948                                                     bool IsPPC64) {
6949   assert((IsPPC64 || SVT != MVT::i64) &&
6950          "i64 should have been split for 32-bit codegen.");
6951 
6952   switch (SVT) {
6953   default:
6954     report_fatal_error("Unexpected value type for formal argument");
6955   case MVT::i1:
6956   case MVT::i32:
6957   case MVT::i64:
6958     return IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
6959   case MVT::f32:
6960     return &PPC::F4RCRegClass;
6961   case MVT::f64:
6962     return &PPC::F8RCRegClass;
6963   }
6964 }
6965 
6966 static SDValue truncateScalarIntegerArg(ISD::ArgFlagsTy Flags, EVT ValVT,
6967                                         SelectionDAG &DAG, SDValue ArgValue,
6968                                         MVT LocVT, const SDLoc &dl) {
6969   assert(ValVT.isScalarInteger() && LocVT.isScalarInteger());
6970   assert(ValVT.getSizeInBits() < LocVT.getSizeInBits());
6971 
6972   if (Flags.isSExt())
6973     ArgValue = DAG.getNode(ISD::AssertSext, dl, LocVT, ArgValue,
6974                            DAG.getValueType(ValVT));
6975   else if (Flags.isZExt())
6976     ArgValue = DAG.getNode(ISD::AssertZext, dl, LocVT, ArgValue,
6977                            DAG.getValueType(ValVT));
6978 
6979   return DAG.getNode(ISD::TRUNCATE, dl, ValVT, ArgValue);
6980 }
6981 
6982 static unsigned mapArgRegToOffsetAIX(unsigned Reg, const PPCFrameLowering *FL) {
6983   const unsigned LASize = FL->getLinkageSize();
6984 
6985   if (PPC::GPRCRegClass.contains(Reg)) {
6986     assert(Reg >= PPC::R3 && Reg <= PPC::R10 &&
6987            "Reg must be a valid argument register!");
6988     return LASize + 4 * (Reg - PPC::R3);
6989   }
6990 
6991   if (PPC::G8RCRegClass.contains(Reg)) {
6992     assert(Reg >= PPC::X3 && Reg <= PPC::X10 &&
6993            "Reg must be a valid argument register!");
6994     return LASize + 8 * (Reg - PPC::X3);
6995   }
6996 
6997   llvm_unreachable("Only general purpose registers expected.");
6998 }
6999 
7000 SDValue PPCTargetLowering::LowerFormalArguments_AIX(
7001     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
7002     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
7003     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
7004 
7005   assert((CallConv == CallingConv::C || CallConv == CallingConv::Cold ||
7006           CallConv == CallingConv::Fast) &&
7007          "Unexpected calling convention!");
7008 
7009   if (isVarArg)
7010     report_fatal_error("This call type is unimplemented on AIX.");
7011 
7012   if (getTargetMachine().Options.GuaranteedTailCallOpt)
7013     report_fatal_error("Tail call support is unimplemented on AIX.");
7014 
7015   if (useSoftFloat())
7016     report_fatal_error("Soft float support is unimplemented on AIX.");
7017 
7018   const PPCSubtarget &Subtarget =
7019       static_cast<const PPCSubtarget &>(DAG.getSubtarget());
7020   if (Subtarget.hasQPX())
7021     report_fatal_error("QPX support is not supported on AIX.");
7022 
7023   const bool IsPPC64 = Subtarget.isPPC64();
7024   const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
7025 
7026   // Assign locations to all of the incoming arguments.
7027   SmallVector<CCValAssign, 16> ArgLocs;
7028   MachineFunction &MF = DAG.getMachineFunction();
7029   CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
7030 
7031   const EVT PtrVT = getPointerTy(MF.getDataLayout());
7032   // Reserve space for the linkage area on the stack.
7033   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
7034   CCInfo.AllocateStack(LinkageSize, PtrByteSize);
7035   CCInfo.AnalyzeFormalArguments(Ins, CC_AIX);
7036 
7037   SmallVector<SDValue, 8> MemOps;
7038 
7039   for (CCValAssign &VA : ArgLocs) {
7040     EVT ValVT = VA.getValVT();
7041     MVT LocVT = VA.getLocVT();
7042     ISD::ArgFlagsTy Flags = Ins[VA.getValNo()].Flags;
7043     assert((VA.isRegLoc() || VA.isMemLoc()) &&
7044            "Unexpected location for function call argument.");
7045 
7046     // For compatibility with the AIX XL compiler, the float args in the
7047     // parameter save area are initialized even if the argument is available
7048     // in register.  The caller is required to initialize both the register
7049     // and memory, however, the callee can choose to expect it in either.
7050     // The memloc is dismissed here because the argument is retrieved from
7051     // the register.
7052     if (VA.isMemLoc() && VA.needsCustom())
7053       continue;
7054 
7055     if (Flags.isByVal() && VA.isMemLoc()) {
7056       if (Flags.getByValSize() != 0)
7057         report_fatal_error(
7058             "ByVal arguments passed on stack not implemented yet");
7059 
7060       const int FI = MF.getFrameInfo().CreateFixedObject(
7061           PtrByteSize, VA.getLocMemOffset(), /* IsImmutable */ false,
7062           /* IsAliased */ true);
7063       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7064       InVals.push_back(FIN);
7065 
7066       continue;
7067     }
7068 
7069     if (Flags.isByVal()) {
7070       assert(VA.isRegLoc() && "MemLocs should already be handled.");
7071 
7072       const unsigned ByValSize = Flags.getByValSize();
7073       if (ByValSize > PtrByteSize)
7074         report_fatal_error("Formal arguments greater then register size not "
7075                            "implemented yet.");
7076 
7077       const MCPhysReg ArgReg = VA.getLocReg();
7078       const PPCFrameLowering *FL = Subtarget.getFrameLowering();
7079       const unsigned Offset = mapArgRegToOffsetAIX(ArgReg, FL);
7080 
7081       const unsigned StackSize = alignTo(ByValSize, PtrByteSize);
7082       const int FI = MF.getFrameInfo().CreateFixedObject(
7083           StackSize, Offset, /* IsImmutable */ false, /* IsAliased */ true);
7084       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7085 
7086       InVals.push_back(FIN);
7087 
7088       const unsigned VReg = MF.addLiveIn(ArgReg, IsPPC64 ? &PPC::G8RCRegClass
7089                                                          : &PPC::GPRCRegClass);
7090 
7091       // Since the callers side has left justified the aggregate in the
7092       // register, we can simply store the entire register into the stack
7093       // slot.
7094       // The store to the fixedstack object is needed becuase accessing a
7095       // field of the ByVal will use a gep and load. Ideally we will optimize
7096       // to extracting the value from the register directly, and elide the
7097       // stores when the arguments address is not taken, but that will need to
7098       // be future work.
7099       SDValue CopyFrom = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
7100       SDValue Store =
7101           DAG.getStore(CopyFrom.getValue(1), dl, CopyFrom, FIN,
7102                        MachinePointerInfo::getFixedStack(MF, FI, 0));
7103 
7104       MemOps.push_back(Store);
7105       continue;
7106     }
7107 
7108     if (VA.isRegLoc()) {
7109       MVT::SimpleValueType SVT = ValVT.getSimpleVT().SimpleTy;
7110       unsigned VReg =
7111           MF.addLiveIn(VA.getLocReg(), getRegClassForSVT(SVT, IsPPC64));
7112       SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
7113       if (ValVT.isScalarInteger() &&
7114           (ValVT.getSizeInBits() < LocVT.getSizeInBits())) {
7115         ArgValue =
7116             truncateScalarIntegerArg(Flags, ValVT, DAG, ArgValue, LocVT, dl);
7117       }
7118       InVals.push_back(ArgValue);
7119       continue;
7120     }
7121 
7122     const unsigned LocSize = LocVT.getStoreSize();
7123     const unsigned ValSize = ValVT.getStoreSize();
7124     assert((ValSize <= LocSize) && "Object size is larger than size of MemLoc");
7125     int CurArgOffset = VA.getLocMemOffset();
7126     // Objects are right-justified because AIX is big-endian.
7127     if (LocSize > ValSize)
7128       CurArgOffset += LocSize - ValSize;
7129     MachineFrameInfo &MFI = MF.getFrameInfo();
7130     // Potential tail calls could cause overwriting of argument stack slots.
7131     const bool IsImmutable =
7132         !(getTargetMachine().Options.GuaranteedTailCallOpt &&
7133           (CallConv == CallingConv::Fast));
7134     int FI = MFI.CreateFixedObject(ValSize, CurArgOffset, IsImmutable);
7135     SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7136     SDValue ArgValue = DAG.getLoad(ValVT, dl, Chain, FIN, MachinePointerInfo());
7137     InVals.push_back(ArgValue);
7138   }
7139 
7140   // On AIX a minimum of 8 words is saved to the parameter save area.
7141   const unsigned MinParameterSaveArea = 8 * PtrByteSize;
7142   // Area that is at least reserved in the caller of this function.
7143   unsigned CallerReservedArea =
7144       std::max(CCInfo.getNextStackOffset(), LinkageSize + MinParameterSaveArea);
7145 
7146   // Set the size that is at least reserved in caller of this function. Tail
7147   // call optimized function's reserved stack space needs to be aligned so
7148   // that taking the difference between two stack areas will result in an
7149   // aligned stack.
7150   CallerReservedArea =
7151       EnsureStackAlignment(Subtarget.getFrameLowering(), CallerReservedArea);
7152   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
7153   FuncInfo->setMinReservedArea(CallerReservedArea);
7154 
7155   if (!MemOps.empty())
7156     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
7157 
7158   return Chain;
7159 }
7160 
7161 SDValue PPCTargetLowering::LowerCall_AIX(
7162     SDValue Chain, SDValue Callee, CallFlags CFlags,
7163     const SmallVectorImpl<ISD::OutputArg> &Outs,
7164     const SmallVectorImpl<SDValue> &OutVals,
7165     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
7166     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
7167     ImmutableCallSite CS) const {
7168 
7169   assert((CFlags.CallConv == CallingConv::C ||
7170           CFlags.CallConv == CallingConv::Cold ||
7171           CFlags.CallConv == CallingConv::Fast) &&
7172          "Unexpected calling convention!");
7173 
7174   if (CFlags.IsPatchPoint)
7175     report_fatal_error("This call type is unimplemented on AIX.");
7176 
7177   const PPCSubtarget& Subtarget =
7178       static_cast<const PPCSubtarget&>(DAG.getSubtarget());
7179   if (Subtarget.hasQPX())
7180     report_fatal_error("QPX is not supported on AIX.");
7181   if (Subtarget.hasAltivec())
7182     report_fatal_error("Altivec support is unimplemented on AIX.");
7183 
7184   MachineFunction &MF = DAG.getMachineFunction();
7185   SmallVector<CCValAssign, 16> ArgLocs;
7186   CCState CCInfo(CFlags.CallConv, CFlags.IsVarArg, MF, ArgLocs,
7187                  *DAG.getContext());
7188 
7189   // Reserve space for the linkage save area (LSA) on the stack.
7190   // In both PPC32 and PPC64 there are 6 reserved slots in the LSA:
7191   //   [SP][CR][LR][2 x reserved][TOC].
7192   // The LSA is 24 bytes (6x4) in PPC32 and 48 bytes (6x8) in PPC64.
7193   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
7194   const bool IsPPC64 = Subtarget.isPPC64();
7195   const EVT PtrVT = getPointerTy(DAG.getDataLayout());
7196   const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
7197   CCInfo.AllocateStack(LinkageSize, PtrByteSize);
7198   CCInfo.AnalyzeCallOperands(Outs, CC_AIX);
7199 
7200   // The prolog code of the callee may store up to 8 GPR argument registers to
7201   // the stack, allowing va_start to index over them in memory if the callee
7202   // is variadic.
7203   // Because we cannot tell if this is needed on the caller side, we have to
7204   // conservatively assume that it is needed.  As such, make sure we have at
7205   // least enough stack space for the caller to store the 8 GPRs.
7206   const unsigned MinParameterSaveAreaSize = 8 * PtrByteSize;
7207   const unsigned NumBytes = std::max(LinkageSize + MinParameterSaveAreaSize,
7208                                      CCInfo.getNextStackOffset());
7209 
7210   // Adjust the stack pointer for the new arguments...
7211   // These operations are automatically eliminated by the prolog/epilog pass.
7212   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
7213   SDValue CallSeqStart = Chain;
7214 
7215   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
7216   SmallVector<SDValue, 8> MemOpChains;
7217 
7218   // Set up a copy of the stack pointer for loading and storing any
7219   // arguments that may not fit in the registers available for argument
7220   // passing.
7221   const SDValue StackPtr = IsPPC64 ? DAG.getRegister(PPC::X1, MVT::i64)
7222                                    : DAG.getRegister(PPC::R1, MVT::i32);
7223 
7224   for (unsigned I = 0, E = ArgLocs.size(); I != E;) {
7225     const unsigned ValNo = ArgLocs[I].getValNo();
7226     SDValue Arg = OutVals[ValNo];
7227     ISD::ArgFlagsTy Flags = Outs[ValNo].Flags;
7228 
7229     if (Flags.isByVal()) {
7230       const unsigned ByValSize = Flags.getByValSize();
7231 
7232       // Nothing to do for zero-sized ByVals on the caller side.
7233       if (!ByValSize) {
7234         ++I;
7235         continue;
7236       }
7237 
7238       auto GetLoad = [&](EVT VT, unsigned LoadOffset) {
7239         return DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain,
7240                               (LoadOffset != 0)
7241                                   ? DAG.getObjectPtrOffset(dl, Arg, LoadOffset)
7242                                   : Arg,
7243                               MachinePointerInfo(), VT);
7244       };
7245 
7246       unsigned LoadOffset = 0;
7247 
7248       // Initialize registers, which are fully occupied by the by-val argument.
7249       while (I != E && LoadOffset + PtrByteSize <= ByValSize) {
7250         SDValue Load = GetLoad(PtrVT, LoadOffset);
7251         MemOpChains.push_back(Load.getValue(1));
7252         LoadOffset += PtrByteSize;
7253         const CCValAssign &ByValVA = ArgLocs[I++];
7254         assert(ByValVA.isRegLoc() && ByValVA.getValNo() == ValNo &&
7255                "Unexpected location for pass-by-value argument.");
7256         RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), Load));
7257       }
7258 
7259       if (LoadOffset == ByValSize)
7260         continue;
7261 
7262       const unsigned ResidueBytes = ByValSize % PtrByteSize;
7263       assert(ResidueBytes != 0 && LoadOffset + PtrByteSize > ByValSize &&
7264              "Unexpected register residue for by-value argument.");
7265 
7266       // Initialize the final register residue.
7267       // Any residue that occupies the final by-val arg register must be
7268       // left-justified on AIX. Loads must be a power-of-2 size and cannot be
7269       // larger than the ByValSize. For example: a 7 byte by-val arg requires 4,
7270       // 2 and 1 byte loads.
7271       SDValue ResidueVal;
7272       for (unsigned Bytes = 0; Bytes != ResidueBytes;) {
7273         const unsigned N = PowerOf2Floor(ResidueBytes - Bytes);
7274         const MVT VT =
7275             N == 1 ? MVT::i8
7276                    : ((N == 2) ? MVT::i16 : (N == 4 ? MVT::i32 : MVT::i64));
7277         SDValue Load = GetLoad(VT, LoadOffset);
7278         MemOpChains.push_back(Load.getValue(1));
7279         LoadOffset += N;
7280         Bytes += N;
7281 
7282         // By-val arguments are passed left-justfied in register.
7283         // Every load here needs to be shifted, otherwise a full register load
7284         // should have been used.
7285         assert(PtrVT.getSimpleVT().getSizeInBits() > (Bytes * 8) &&
7286                "Unexpected load emitted during handling of pass-by-value "
7287                "argument.");
7288         unsigned NumSHLBits = PtrVT.getSimpleVT().getSizeInBits() - (Bytes * 8);
7289         EVT ShiftAmountTy =
7290             getShiftAmountTy(Load->getValueType(0), DAG.getDataLayout());
7291         SDValue SHLAmt = DAG.getConstant(NumSHLBits, dl, ShiftAmountTy);
7292         SDValue ShiftedLoad =
7293             DAG.getNode(ISD::SHL, dl, Load.getValueType(), Load, SHLAmt);
7294         ResidueVal = ResidueVal ? DAG.getNode(ISD::OR, dl, PtrVT, ResidueVal,
7295                                               ShiftedLoad)
7296                                 : ShiftedLoad;
7297       }
7298 
7299       const CCValAssign &ByValVA = ArgLocs[I++];
7300       assert(ByValVA.isRegLoc() && ByValVA.getValNo() == ValNo &&
7301              "Additional register location expected for by-value argument.");
7302       RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), ResidueVal));
7303       continue;
7304     }
7305 
7306     CCValAssign &VA = ArgLocs[I++];
7307     const MVT LocVT = VA.getLocVT();
7308     const MVT ValVT = VA.getValVT();
7309 
7310     switch (VA.getLocInfo()) {
7311     default:
7312       report_fatal_error("Unexpected argument extension type.");
7313     case CCValAssign::Full:
7314       break;
7315     case CCValAssign::ZExt:
7316       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
7317       break;
7318     case CCValAssign::SExt:
7319       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
7320       break;
7321     }
7322 
7323     if (VA.isRegLoc() && !VA.needsCustom()) {
7324       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
7325       continue;
7326     }
7327 
7328     if (VA.isMemLoc()) {
7329       SDValue PtrOff =
7330           DAG.getConstant(VA.getLocMemOffset(), dl, StackPtr.getValueType());
7331       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7332       MemOpChains.push_back(
7333           DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
7334 
7335       continue;
7336     }
7337 
7338     // Custom handling is used for GPR initializations for vararg float
7339     // arguments.
7340     assert(VA.isRegLoc() && VA.needsCustom() && CFlags.IsVarArg &&
7341            ValVT.isFloatingPoint() && LocVT.isInteger() &&
7342            "Unexpected register handling for calling convention.");
7343 
7344     SDValue ArgAsInt =
7345         DAG.getBitcast(MVT::getIntegerVT(ValVT.getSizeInBits()), Arg);
7346 
7347     if (Arg.getValueType().getStoreSize() == LocVT.getStoreSize())
7348       // f32 in 32-bit GPR
7349       // f64 in 64-bit GPR
7350       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgAsInt));
7351     else if (Arg.getValueType().getSizeInBits() < LocVT.getSizeInBits())
7352       // f32 in 64-bit GPR.
7353       RegsToPass.push_back(std::make_pair(
7354           VA.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, LocVT)));
7355     else {
7356       // f64 in two 32-bit GPRs
7357       // The 2 GPRs are marked custom and expected to be adjacent in ArgLocs.
7358       assert(Arg.getValueType() == MVT::f64 && CFlags.IsVarArg && !IsPPC64 &&
7359              "Unexpected custom register for argument!");
7360       CCValAssign &GPR1 = VA;
7361       SDValue MSWAsI64 = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgAsInt,
7362                                      DAG.getConstant(32, dl, MVT::i8));
7363       RegsToPass.push_back(std::make_pair(
7364           GPR1.getLocReg(), DAG.getZExtOrTrunc(MSWAsI64, dl, MVT::i32)));
7365 
7366       if (I != E) {
7367         // If only 1 GPR was available, there will only be one custom GPR and
7368         // the argument will also pass in memory.
7369         CCValAssign &PeekArg = ArgLocs[I];
7370         if (PeekArg.isRegLoc() && PeekArg.getValNo() == PeekArg.getValNo()) {
7371           assert(PeekArg.needsCustom() && "A second custom GPR is expected.");
7372           CCValAssign &GPR2 = ArgLocs[I++];
7373           RegsToPass.push_back(std::make_pair(
7374               GPR2.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, MVT::i32)));
7375         }
7376       }
7377     }
7378   }
7379 
7380   if (!MemOpChains.empty())
7381     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
7382 
7383   // For indirect calls, we need to save the TOC base to the stack for
7384   // restoration after the call.
7385   if (CFlags.IsIndirect) {
7386     assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
7387     const MCRegister TOCBaseReg = Subtarget.getTOCPointerRegister();
7388     const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
7389     const MVT PtrVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
7390     const unsigned TOCSaveOffset =
7391         Subtarget.getFrameLowering()->getTOCSaveOffset();
7392 
7393     setUsesTOCBasePtr(DAG);
7394     SDValue Val = DAG.getCopyFromReg(Chain, dl, TOCBaseReg, PtrVT);
7395     SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
7396     SDValue StackPtr = DAG.getRegister(StackPtrReg, PtrVT);
7397     SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7398     Chain = DAG.getStore(
7399         Val.getValue(1), dl, Val, AddPtr,
7400         MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
7401   }
7402 
7403   // Build a sequence of copy-to-reg nodes chained together with token chain
7404   // and flag operands which copy the outgoing args into the appropriate regs.
7405   SDValue InFlag;
7406   for (auto Reg : RegsToPass) {
7407     Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, InFlag);
7408     InFlag = Chain.getValue(1);
7409   }
7410 
7411   const int SPDiff = 0;
7412   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
7413                     Callee, SPDiff, NumBytes, Ins, InVals, CS);
7414 }
7415 
7416 bool
7417 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
7418                                   MachineFunction &MF, bool isVarArg,
7419                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
7420                                   LLVMContext &Context) const {
7421   SmallVector<CCValAssign, 16> RVLocs;
7422   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
7423   return CCInfo.CheckReturn(
7424       Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7425                 ? RetCC_PPC_Cold
7426                 : RetCC_PPC);
7427 }
7428 
7429 SDValue
7430 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
7431                                bool isVarArg,
7432                                const SmallVectorImpl<ISD::OutputArg> &Outs,
7433                                const SmallVectorImpl<SDValue> &OutVals,
7434                                const SDLoc &dl, SelectionDAG &DAG) const {
7435   SmallVector<CCValAssign, 16> RVLocs;
7436   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
7437                  *DAG.getContext());
7438   CCInfo.AnalyzeReturn(Outs,
7439                        (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7440                            ? RetCC_PPC_Cold
7441                            : RetCC_PPC);
7442 
7443   SDValue Flag;
7444   SmallVector<SDValue, 4> RetOps(1, Chain);
7445 
7446   // Copy the result values into the output registers.
7447   for (unsigned i = 0, RealResIdx = 0; i != RVLocs.size(); ++i, ++RealResIdx) {
7448     CCValAssign &VA = RVLocs[i];
7449     assert(VA.isRegLoc() && "Can only return in registers!");
7450 
7451     SDValue Arg = OutVals[RealResIdx];
7452 
7453     switch (VA.getLocInfo()) {
7454     default: llvm_unreachable("Unknown loc info!");
7455     case CCValAssign::Full: break;
7456     case CCValAssign::AExt:
7457       Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
7458       break;
7459     case CCValAssign::ZExt:
7460       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
7461       break;
7462     case CCValAssign::SExt:
7463       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
7464       break;
7465     }
7466     if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
7467       bool isLittleEndian = Subtarget.isLittleEndian();
7468       // Legalize ret f64 -> ret 2 x i32.
7469       SDValue SVal =
7470           DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7471                       DAG.getIntPtrConstant(isLittleEndian ? 0 : 1, dl));
7472       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7473       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7474       SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7475                          DAG.getIntPtrConstant(isLittleEndian ? 1 : 0, dl));
7476       Flag = Chain.getValue(1);
7477       VA = RVLocs[++i]; // skip ahead to next loc
7478       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7479     } else
7480       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
7481     Flag = Chain.getValue(1);
7482     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7483   }
7484 
7485   const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
7486   const MCPhysReg *I =
7487     TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
7488   if (I) {
7489     for (; *I; ++I) {
7490 
7491       if (PPC::G8RCRegClass.contains(*I))
7492         RetOps.push_back(DAG.getRegister(*I, MVT::i64));
7493       else if (PPC::F8RCRegClass.contains(*I))
7494         RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
7495       else if (PPC::CRRCRegClass.contains(*I))
7496         RetOps.push_back(DAG.getRegister(*I, MVT::i1));
7497       else if (PPC::VRRCRegClass.contains(*I))
7498         RetOps.push_back(DAG.getRegister(*I, MVT::Other));
7499       else
7500         llvm_unreachable("Unexpected register class in CSRsViaCopy!");
7501     }
7502   }
7503 
7504   RetOps[0] = Chain;  // Update chain.
7505 
7506   // Add the flag if we have it.
7507   if (Flag.getNode())
7508     RetOps.push_back(Flag);
7509 
7510   return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
7511 }
7512 
7513 SDValue
7514 PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
7515                                                 SelectionDAG &DAG) const {
7516   SDLoc dl(Op);
7517 
7518   // Get the correct type for integers.
7519   EVT IntVT = Op.getValueType();
7520 
7521   // Get the inputs.
7522   SDValue Chain = Op.getOperand(0);
7523   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7524   // Build a DYNAREAOFFSET node.
7525   SDValue Ops[2] = {Chain, FPSIdx};
7526   SDVTList VTs = DAG.getVTList(IntVT);
7527   return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
7528 }
7529 
7530 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
7531                                              SelectionDAG &DAG) const {
7532   // When we pop the dynamic allocation we need to restore the SP link.
7533   SDLoc dl(Op);
7534 
7535   // Get the correct type for pointers.
7536   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7537 
7538   // Construct the stack pointer operand.
7539   bool isPPC64 = Subtarget.isPPC64();
7540   unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
7541   SDValue StackPtr = DAG.getRegister(SP, PtrVT);
7542 
7543   // Get the operands for the STACKRESTORE.
7544   SDValue Chain = Op.getOperand(0);
7545   SDValue SaveSP = Op.getOperand(1);
7546 
7547   // Load the old link SP.
7548   SDValue LoadLinkSP =
7549       DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());
7550 
7551   // Restore the stack pointer.
7552   Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
7553 
7554   // Store the old link SP.
7555   return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
7556 }
7557 
7558 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
7559   MachineFunction &MF = DAG.getMachineFunction();
7560   bool isPPC64 = Subtarget.isPPC64();
7561   EVT PtrVT = getPointerTy(MF.getDataLayout());
7562 
7563   // Get current frame pointer save index.  The users of this index will be
7564   // primarily DYNALLOC instructions.
7565   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7566   int RASI = FI->getReturnAddrSaveIndex();
7567 
7568   // If the frame pointer save index hasn't been defined yet.
7569   if (!RASI) {
7570     // Find out what the fix offset of the frame pointer save area.
7571     int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
7572     // Allocate the frame index for frame pointer save area.
7573     RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
7574     // Save the result.
7575     FI->setReturnAddrSaveIndex(RASI);
7576   }
7577   return DAG.getFrameIndex(RASI, PtrVT);
7578 }
7579 
7580 SDValue
7581 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
7582   MachineFunction &MF = DAG.getMachineFunction();
7583   bool isPPC64 = Subtarget.isPPC64();
7584   EVT PtrVT = getPointerTy(MF.getDataLayout());
7585 
7586   // Get current frame pointer save index.  The users of this index will be
7587   // primarily DYNALLOC instructions.
7588   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7589   int FPSI = FI->getFramePointerSaveIndex();
7590 
7591   // If the frame pointer save index hasn't been defined yet.
7592   if (!FPSI) {
7593     // Find out what the fix offset of the frame pointer save area.
7594     int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
7595     // Allocate the frame index for frame pointer save area.
7596     FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
7597     // Save the result.
7598     FI->setFramePointerSaveIndex(FPSI);
7599   }
7600   return DAG.getFrameIndex(FPSI, PtrVT);
7601 }
7602 
7603 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
7604                                                    SelectionDAG &DAG) const {
7605   // Get the inputs.
7606   SDValue Chain = Op.getOperand(0);
7607   SDValue Size  = Op.getOperand(1);
7608   SDLoc dl(Op);
7609 
7610   // Get the correct type for pointers.
7611   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7612   // Negate the size.
7613   SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
7614                                 DAG.getConstant(0, dl, PtrVT), Size);
7615   // Construct a node for the frame pointer save index.
7616   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7617   // Build a DYNALLOC node.
7618   SDValue Ops[3] = { Chain, NegSize, FPSIdx };
7619   SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
7620   return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
7621 }
7622 
7623 SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
7624                                                      SelectionDAG &DAG) const {
7625   MachineFunction &MF = DAG.getMachineFunction();
7626 
7627   bool isPPC64 = Subtarget.isPPC64();
7628   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7629 
7630   int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false);
7631   return DAG.getFrameIndex(FI, PtrVT);
7632 }
7633 
7634 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
7635                                                SelectionDAG &DAG) const {
7636   SDLoc DL(Op);
7637   return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
7638                      DAG.getVTList(MVT::i32, MVT::Other),
7639                      Op.getOperand(0), Op.getOperand(1));
7640 }
7641 
7642 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
7643                                                 SelectionDAG &DAG) const {
7644   SDLoc DL(Op);
7645   return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
7646                      Op.getOperand(0), Op.getOperand(1));
7647 }
7648 
7649 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
7650   if (Op.getValueType().isVector())
7651     return LowerVectorLoad(Op, DAG);
7652 
7653   assert(Op.getValueType() == MVT::i1 &&
7654          "Custom lowering only for i1 loads");
7655 
7656   // First, load 8 bits into 32 bits, then truncate to 1 bit.
7657 
7658   SDLoc dl(Op);
7659   LoadSDNode *LD = cast<LoadSDNode>(Op);
7660 
7661   SDValue Chain = LD->getChain();
7662   SDValue BasePtr = LD->getBasePtr();
7663   MachineMemOperand *MMO = LD->getMemOperand();
7664 
7665   SDValue NewLD =
7666       DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain,
7667                      BasePtr, MVT::i8, MMO);
7668   SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);
7669 
7670   SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
7671   return DAG.getMergeValues(Ops, dl);
7672 }
7673 
7674 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
7675   if (Op.getOperand(1).getValueType().isVector())
7676     return LowerVectorStore(Op, DAG);
7677 
7678   assert(Op.getOperand(1).getValueType() == MVT::i1 &&
7679          "Custom lowering only for i1 stores");
7680 
7681   // First, zero extend to 32 bits, then use a truncating store to 8 bits.
7682 
7683   SDLoc dl(Op);
7684   StoreSDNode *ST = cast<StoreSDNode>(Op);
7685 
7686   SDValue Chain = ST->getChain();
7687   SDValue BasePtr = ST->getBasePtr();
7688   SDValue Value = ST->getValue();
7689   MachineMemOperand *MMO = ST->getMemOperand();
7690 
7691   Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()),
7692                       Value);
7693   return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
7694 }
7695 
7696 // FIXME: Remove this once the ANDI glue bug is fixed:
7697 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
7698   assert(Op.getValueType() == MVT::i1 &&
7699          "Custom lowering only for i1 results");
7700 
7701   SDLoc DL(Op);
7702   return DAG.getNode(PPCISD::ANDI_rec_1_GT_BIT, DL, MVT::i1, Op.getOperand(0));
7703 }
7704 
7705 SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op,
7706                                                SelectionDAG &DAG) const {
7707 
7708   // Implements a vector truncate that fits in a vector register as a shuffle.
7709   // We want to legalize vector truncates down to where the source fits in
7710   // a vector register (and target is therefore smaller than vector register
7711   // size).  At that point legalization will try to custom lower the sub-legal
7712   // result and get here - where we can contain the truncate as a single target
7713   // operation.
7714 
7715   // For example a trunc <2 x i16> to <2 x i8> could be visualized as follows:
7716   //   <MSB1|LSB1, MSB2|LSB2> to <LSB1, LSB2>
7717   //
7718   // We will implement it for big-endian ordering as this (where x denotes
7719   // undefined):
7720   //   < MSB1|LSB1, MSB2|LSB2, uu, uu, uu, uu, uu, uu> to
7721   //   < LSB1, LSB2, u, u, u, u, u, u, u, u, u, u, u, u, u, u>
7722   //
7723   // The same operation in little-endian ordering will be:
7724   //   <uu, uu, uu, uu, uu, uu, LSB2|MSB2, LSB1|MSB1> to
7725   //   <u, u, u, u, u, u, u, u, u, u, u, u, u, u, LSB2, LSB1>
7726 
7727   assert(Op.getValueType().isVector() && "Vector type expected.");
7728 
7729   SDLoc DL(Op);
7730   SDValue N1 = Op.getOperand(0);
7731   unsigned SrcSize = N1.getValueType().getSizeInBits();
7732   assert(SrcSize <= 128 && "Source must fit in an Altivec/VSX vector");
7733   SDValue WideSrc = SrcSize == 128 ? N1 : widenVec(DAG, N1, DL);
7734 
7735   EVT TrgVT = Op.getValueType();
7736   unsigned TrgNumElts = TrgVT.getVectorNumElements();
7737   EVT EltVT = TrgVT.getVectorElementType();
7738   unsigned WideNumElts = 128 / EltVT.getSizeInBits();
7739   EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
7740 
7741   // First list the elements we want to keep.
7742   unsigned SizeMult = SrcSize / TrgVT.getSizeInBits();
7743   SmallVector<int, 16> ShuffV;
7744   if (Subtarget.isLittleEndian())
7745     for (unsigned i = 0; i < TrgNumElts; ++i)
7746       ShuffV.push_back(i * SizeMult);
7747   else
7748     for (unsigned i = 1; i <= TrgNumElts; ++i)
7749       ShuffV.push_back(i * SizeMult - 1);
7750 
7751   // Populate the remaining elements with undefs.
7752   for (unsigned i = TrgNumElts; i < WideNumElts; ++i)
7753     // ShuffV.push_back(i + WideNumElts);
7754     ShuffV.push_back(WideNumElts + 1);
7755 
7756   SDValue Conv = DAG.getNode(ISD::BITCAST, DL, WideVT, WideSrc);
7757   return DAG.getVectorShuffle(WideVT, DL, Conv, DAG.getUNDEF(WideVT), ShuffV);
7758 }
7759 
7760 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when
7761 /// possible.
7762 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
7763   // Not FP? Not a fsel.
7764   if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
7765       !Op.getOperand(2).getValueType().isFloatingPoint())
7766     return Op;
7767 
7768   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
7769 
7770   EVT ResVT = Op.getValueType();
7771   EVT CmpVT = Op.getOperand(0).getValueType();
7772   SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
7773   SDValue TV  = Op.getOperand(2), FV  = Op.getOperand(3);
7774   SDLoc dl(Op);
7775 
7776   // We have xsmaxcdp/xsmincdp which are OK to emit even in the
7777   // presence of infinities.
7778   if (Subtarget.hasP9Vector() && LHS == TV && RHS == FV) {
7779     switch (CC) {
7780     default:
7781       break;
7782     case ISD::SETOGT:
7783     case ISD::SETGT:
7784       return DAG.getNode(PPCISD::XSMAXCDP, dl, Op.getValueType(), LHS, RHS);
7785     case ISD::SETOLT:
7786     case ISD::SETLT:
7787       return DAG.getNode(PPCISD::XSMINCDP, dl, Op.getValueType(), LHS, RHS);
7788     }
7789   }
7790 
7791   // We might be able to do better than this under some circumstances, but in
7792   // general, fsel-based lowering of select is a finite-math-only optimization.
7793   // For more information, see section F.3 of the 2.06 ISA specification.
7794   // With ISA 3.0
7795   if (!DAG.getTarget().Options.NoInfsFPMath ||
7796       !DAG.getTarget().Options.NoNaNsFPMath)
7797     return Op;
7798 
7799   // TODO: Propagate flags from the select rather than global settings.
7800   SDNodeFlags Flags;
7801   Flags.setNoInfs(true);
7802   Flags.setNoNaNs(true);
7803 
7804   // If the RHS of the comparison is a 0.0, we don't need to do the
7805   // subtraction at all.
7806   SDValue Sel1;
7807   if (isFloatingPointZero(RHS))
7808     switch (CC) {
7809     default: break;       // SETUO etc aren't handled by fsel.
7810     case ISD::SETNE:
7811       std::swap(TV, FV);
7812       LLVM_FALLTHROUGH;
7813     case ISD::SETEQ:
7814       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
7815         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7816       Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
7817       if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
7818         Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
7819       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7820                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
7821     case ISD::SETULT:
7822     case ISD::SETLT:
7823       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
7824       LLVM_FALLTHROUGH;
7825     case ISD::SETOGE:
7826     case ISD::SETGE:
7827       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
7828         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7829       return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
7830     case ISD::SETUGT:
7831     case ISD::SETGT:
7832       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
7833       LLVM_FALLTHROUGH;
7834     case ISD::SETOLE:
7835     case ISD::SETLE:
7836       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
7837         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7838       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7839                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
7840     }
7841 
7842   SDValue Cmp;
7843   switch (CC) {
7844   default: break;       // SETUO etc aren't handled by fsel.
7845   case ISD::SETNE:
7846     std::swap(TV, FV);
7847     LLVM_FALLTHROUGH;
7848   case ISD::SETEQ:
7849     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
7850     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7851       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7852     Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
7853     if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
7854       Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
7855     return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7856                        DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
7857   case ISD::SETULT:
7858   case ISD::SETLT:
7859     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
7860     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7861       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7862     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
7863   case ISD::SETOGE:
7864   case ISD::SETGE:
7865     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
7866     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7867       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7868     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
7869   case ISD::SETUGT:
7870   case ISD::SETGT:
7871     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
7872     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7873       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7874     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
7875   case ISD::SETOLE:
7876   case ISD::SETLE:
7877     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
7878     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7879       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7880     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
7881   }
7882   return Op;
7883 }
7884 
7885 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
7886                                                SelectionDAG &DAG,
7887                                                const SDLoc &dl) const {
7888   assert(Op.getOperand(0).getValueType().isFloatingPoint());
7889   SDValue Src = Op.getOperand(0);
7890   if (Src.getValueType() == MVT::f32)
7891     Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
7892 
7893   SDValue Tmp;
7894   switch (Op.getSimpleValueType().SimpleTy) {
7895   default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
7896   case MVT::i32:
7897     Tmp = DAG.getNode(
7898         Op.getOpcode() == ISD::FP_TO_SINT
7899             ? PPCISD::FCTIWZ
7900             : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ),
7901         dl, MVT::f64, Src);
7902     break;
7903   case MVT::i64:
7904     assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) &&
7905            "i64 FP_TO_UINT is supported only with FPCVT");
7906     Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
7907                                                         PPCISD::FCTIDUZ,
7908                       dl, MVT::f64, Src);
7909     break;
7910   }
7911 
7912   // Convert the FP value to an int value through memory.
7913   bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() &&
7914     (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT());
7915   SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64);
7916   int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
7917   MachinePointerInfo MPI =
7918       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
7919 
7920   // Emit a store to the stack slot.
7921   SDValue Chain;
7922   if (i32Stack) {
7923     MachineFunction &MF = DAG.getMachineFunction();
7924     MachineMemOperand *MMO =
7925         MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(4));
7926     SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr };
7927     Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
7928               DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
7929   } else
7930     Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI);
7931 
7932   // Result is a load from the stack slot.  If loading 4 bytes, make sure to
7933   // add in a bias on big endian.
7934   if (Op.getValueType() == MVT::i32 && !i32Stack) {
7935     FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
7936                         DAG.getConstant(4, dl, FIPtr.getValueType()));
7937     MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4);
7938   }
7939 
7940   RLI.Chain = Chain;
7941   RLI.Ptr = FIPtr;
7942   RLI.MPI = MPI;
7943 }
7944 
7945 /// Custom lowers floating point to integer conversions to use
7946 /// the direct move instructions available in ISA 2.07 to avoid the
7947 /// need for load/store combinations.
7948 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op,
7949                                                     SelectionDAG &DAG,
7950                                                     const SDLoc &dl) const {
7951   assert(Op.getOperand(0).getValueType().isFloatingPoint());
7952   SDValue Src = Op.getOperand(0);
7953 
7954   if (Src.getValueType() == MVT::f32)
7955     Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
7956 
7957   SDValue Tmp;
7958   switch (Op.getSimpleValueType().SimpleTy) {
7959   default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
7960   case MVT::i32:
7961     Tmp = DAG.getNode(
7962         Op.getOpcode() == ISD::FP_TO_SINT
7963             ? PPCISD::FCTIWZ
7964             : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ),
7965         dl, MVT::f64, Src);
7966     Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp);
7967     break;
7968   case MVT::i64:
7969     assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) &&
7970            "i64 FP_TO_UINT is supported only with FPCVT");
7971     Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
7972                                                         PPCISD::FCTIDUZ,
7973                       dl, MVT::f64, Src);
7974     Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp);
7975     break;
7976   }
7977   return Tmp;
7978 }
7979 
7980 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
7981                                           const SDLoc &dl) const {
7982 
7983   // FP to INT conversions are legal for f128.
7984   if (EnableQuadPrecision && (Op->getOperand(0).getValueType() == MVT::f128))
7985     return Op;
7986 
7987   // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
7988   // PPC (the libcall is not available).
7989   if (Op.getOperand(0).getValueType() == MVT::ppcf128) {
7990     if (Op.getValueType() == MVT::i32) {
7991       if (Op.getOpcode() == ISD::FP_TO_SINT) {
7992         SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
7993                                  MVT::f64, Op.getOperand(0),
7994                                  DAG.getIntPtrConstant(0, dl));
7995         SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
7996                                  MVT::f64, Op.getOperand(0),
7997                                  DAG.getIntPtrConstant(1, dl));
7998 
7999         // Add the two halves of the long double in round-to-zero mode.
8000         SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi);
8001 
8002         // Now use a smaller FP_TO_SINT.
8003         return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res);
8004       }
8005       if (Op.getOpcode() == ISD::FP_TO_UINT) {
8006         const uint64_t TwoE31[] = {0x41e0000000000000LL, 0};
8007         APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31));
8008         SDValue Tmp = DAG.getConstantFP(APF, dl, MVT::ppcf128);
8009         //  X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X
8010         // FIXME: generated code sucks.
8011         // TODO: Are there fast-math-flags to propagate to this FSUB?
8012         SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128,
8013                                    Op.getOperand(0), Tmp);
8014         True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True);
8015         True = DAG.getNode(ISD::ADD, dl, MVT::i32, True,
8016                            DAG.getConstant(0x80000000, dl, MVT::i32));
8017         SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32,
8018                                     Op.getOperand(0));
8019         return DAG.getSelectCC(dl, Op.getOperand(0), Tmp, True, False,
8020                                ISD::SETGE);
8021       }
8022     }
8023 
8024     return SDValue();
8025   }
8026 
8027   if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
8028     return LowerFP_TO_INTDirectMove(Op, DAG, dl);
8029 
8030   ReuseLoadInfo RLI;
8031   LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
8032 
8033   return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI,
8034                      RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
8035 }
8036 
8037 // We're trying to insert a regular store, S, and then a load, L. If the
8038 // incoming value, O, is a load, we might just be able to have our load use the
8039 // address used by O. However, we don't know if anything else will store to
8040 // that address before we can load from it. To prevent this situation, we need
8041 // to insert our load, L, into the chain as a peer of O. To do this, we give L
8042 // the same chain operand as O, we create a token factor from the chain results
8043 // of O and L, and we replace all uses of O's chain result with that token
8044 // factor (see spliceIntoChain below for this last part).
8045 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
8046                                             ReuseLoadInfo &RLI,
8047                                             SelectionDAG &DAG,
8048                                             ISD::LoadExtType ET) const {
8049   SDLoc dl(Op);
8050   if (ET == ISD::NON_EXTLOAD &&
8051       (Op.getOpcode() == ISD::FP_TO_UINT ||
8052        Op.getOpcode() == ISD::FP_TO_SINT) &&
8053       isOperationLegalOrCustom(Op.getOpcode(),
8054                                Op.getOperand(0).getValueType())) {
8055 
8056     LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
8057     return true;
8058   }
8059 
8060   LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
8061   if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
8062       LD->isNonTemporal())
8063     return false;
8064   if (LD->getMemoryVT() != MemVT)
8065     return false;
8066 
8067   RLI.Ptr = LD->getBasePtr();
8068   if (LD->isIndexed() && !LD->getOffset().isUndef()) {
8069     assert(LD->getAddressingMode() == ISD::PRE_INC &&
8070            "Non-pre-inc AM on PPC?");
8071     RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
8072                           LD->getOffset());
8073   }
8074 
8075   RLI.Chain = LD->getChain();
8076   RLI.MPI = LD->getPointerInfo();
8077   RLI.IsDereferenceable = LD->isDereferenceable();
8078   RLI.IsInvariant = LD->isInvariant();
8079   RLI.Alignment = LD->getAlign();
8080   RLI.AAInfo = LD->getAAInfo();
8081   RLI.Ranges = LD->getRanges();
8082 
8083   RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
8084   return true;
8085 }
8086 
8087 // Given the head of the old chain, ResChain, insert a token factor containing
8088 // it and NewResChain, and make users of ResChain now be users of that token
8089 // factor.
8090 // TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead.
8091 void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
8092                                         SDValue NewResChain,
8093                                         SelectionDAG &DAG) const {
8094   if (!ResChain)
8095     return;
8096 
8097   SDLoc dl(NewResChain);
8098 
8099   SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
8100                            NewResChain, DAG.getUNDEF(MVT::Other));
8101   assert(TF.getNode() != NewResChain.getNode() &&
8102          "A new TF really is required here");
8103 
8104   DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
8105   DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
8106 }
8107 
8108 /// Analyze profitability of direct move
8109 /// prefer float load to int load plus direct move
8110 /// when there is no integer use of int load
8111 bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
8112   SDNode *Origin = Op.getOperand(0).getNode();
8113   if (Origin->getOpcode() != ISD::LOAD)
8114     return true;
8115 
8116   // If there is no LXSIBZX/LXSIHZX, like Power8,
8117   // prefer direct move if the memory size is 1 or 2 bytes.
8118   MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand();
8119   if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2)
8120     return true;
8121 
8122   for (SDNode::use_iterator UI = Origin->use_begin(),
8123                             UE = Origin->use_end();
8124        UI != UE; ++UI) {
8125 
8126     // Only look at the users of the loaded value.
8127     if (UI.getUse().get().getResNo() != 0)
8128       continue;
8129 
8130     if (UI->getOpcode() != ISD::SINT_TO_FP &&
8131         UI->getOpcode() != ISD::UINT_TO_FP)
8132       return true;
8133   }
8134 
8135   return false;
8136 }
8137 
8138 /// Custom lowers integer to floating point conversions to use
8139 /// the direct move instructions available in ISA 2.07 to avoid the
8140 /// need for load/store combinations.
8141 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
8142                                                     SelectionDAG &DAG,
8143                                                     const SDLoc &dl) const {
8144   assert((Op.getValueType() == MVT::f32 ||
8145           Op.getValueType() == MVT::f64) &&
8146          "Invalid floating point type as target of conversion");
8147   assert(Subtarget.hasFPCVT() &&
8148          "Int to FP conversions with direct moves require FPCVT");
8149   SDValue FP;
8150   SDValue Src = Op.getOperand(0);
8151   bool SinglePrec = Op.getValueType() == MVT::f32;
8152   bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
8153   bool Signed = Op.getOpcode() == ISD::SINT_TO_FP;
8154   unsigned ConvOp = Signed ? (SinglePrec ? PPCISD::FCFIDS : PPCISD::FCFID) :
8155                              (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU);
8156 
8157   if (WordInt) {
8158     FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ,
8159                      dl, MVT::f64, Src);
8160     FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
8161   }
8162   else {
8163     FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src);
8164     FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
8165   }
8166 
8167   return FP;
8168 }
8169 
8170 static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl) {
8171 
8172   EVT VecVT = Vec.getValueType();
8173   assert(VecVT.isVector() && "Expected a vector type.");
8174   assert(VecVT.getSizeInBits() < 128 && "Vector is already full width.");
8175 
8176   EVT EltVT = VecVT.getVectorElementType();
8177   unsigned WideNumElts = 128 / EltVT.getSizeInBits();
8178   EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
8179 
8180   unsigned NumConcat = WideNumElts / VecVT.getVectorNumElements();
8181   SmallVector<SDValue, 16> Ops(NumConcat);
8182   Ops[0] = Vec;
8183   SDValue UndefVec = DAG.getUNDEF(VecVT);
8184   for (unsigned i = 1; i < NumConcat; ++i)
8185     Ops[i] = UndefVec;
8186 
8187   return DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Ops);
8188 }
8189 
8190 SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
8191                                                 const SDLoc &dl) const {
8192 
8193   unsigned Opc = Op.getOpcode();
8194   assert((Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP) &&
8195          "Unexpected conversion type");
8196   assert((Op.getValueType() == MVT::v2f64 || Op.getValueType() == MVT::v4f32) &&
8197          "Supports conversions to v2f64/v4f32 only.");
8198 
8199   bool SignedConv = Opc == ISD::SINT_TO_FP;
8200   bool FourEltRes = Op.getValueType() == MVT::v4f32;
8201 
8202   SDValue Wide = widenVec(DAG, Op.getOperand(0), dl);
8203   EVT WideVT = Wide.getValueType();
8204   unsigned WideNumElts = WideVT.getVectorNumElements();
8205   MVT IntermediateVT = FourEltRes ? MVT::v4i32 : MVT::v2i64;
8206 
8207   SmallVector<int, 16> ShuffV;
8208   for (unsigned i = 0; i < WideNumElts; ++i)
8209     ShuffV.push_back(i + WideNumElts);
8210 
8211   int Stride = FourEltRes ? WideNumElts / 4 : WideNumElts / 2;
8212   int SaveElts = FourEltRes ? 4 : 2;
8213   if (Subtarget.isLittleEndian())
8214     for (int i = 0; i < SaveElts; i++)
8215       ShuffV[i * Stride] = i;
8216   else
8217     for (int i = 1; i <= SaveElts; i++)
8218       ShuffV[i * Stride - 1] = i - 1;
8219 
8220   SDValue ShuffleSrc2 =
8221       SignedConv ? DAG.getUNDEF(WideVT) : DAG.getConstant(0, dl, WideVT);
8222   SDValue Arrange = DAG.getVectorShuffle(WideVT, dl, Wide, ShuffleSrc2, ShuffV);
8223 
8224   SDValue Extend;
8225   if (SignedConv) {
8226     Arrange = DAG.getBitcast(IntermediateVT, Arrange);
8227     EVT ExtVT = Op.getOperand(0).getValueType();
8228     if (Subtarget.hasP9Altivec())
8229       ExtVT = EVT::getVectorVT(*DAG.getContext(), WideVT.getVectorElementType(),
8230                                IntermediateVT.getVectorNumElements());
8231 
8232     Extend = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, IntermediateVT, Arrange,
8233                          DAG.getValueType(ExtVT));
8234   } else
8235     Extend = DAG.getNode(ISD::BITCAST, dl, IntermediateVT, Arrange);
8236 
8237   return DAG.getNode(Opc, dl, Op.getValueType(), Extend);
8238 }
8239 
8240 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
8241                                           SelectionDAG &DAG) const {
8242   SDLoc dl(Op);
8243 
8244   EVT InVT = Op.getOperand(0).getValueType();
8245   EVT OutVT = Op.getValueType();
8246   if (OutVT.isVector() && OutVT.isFloatingPoint() &&
8247       isOperationCustom(Op.getOpcode(), InVT))
8248     return LowerINT_TO_FPVector(Op, DAG, dl);
8249 
8250   // Conversions to f128 are legal.
8251   if (EnableQuadPrecision && (Op.getValueType() == MVT::f128))
8252     return Op;
8253 
8254   if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) {
8255     if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64)
8256       return SDValue();
8257 
8258     SDValue Value = Op.getOperand(0);
8259     // The values are now known to be -1 (false) or 1 (true). To convert this
8260     // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
8261     // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
8262     Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
8263 
8264     SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
8265 
8266     Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
8267 
8268     if (Op.getValueType() != MVT::v4f64)
8269       Value = DAG.getNode(ISD::FP_ROUND, dl,
8270                           Op.getValueType(), Value,
8271                           DAG.getIntPtrConstant(1, dl));
8272     return Value;
8273   }
8274 
8275   // Don't handle ppc_fp128 here; let it be lowered to a libcall.
8276   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
8277     return SDValue();
8278 
8279   if (Op.getOperand(0).getValueType() == MVT::i1)
8280     return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0),
8281                        DAG.getConstantFP(1.0, dl, Op.getValueType()),
8282                        DAG.getConstantFP(0.0, dl, Op.getValueType()));
8283 
8284   // If we have direct moves, we can do all the conversion, skip the store/load
8285   // however, without FPCVT we can't do most conversions.
8286   if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
8287       Subtarget.isPPC64() && Subtarget.hasFPCVT())
8288     return LowerINT_TO_FPDirectMove(Op, DAG, dl);
8289 
8290   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
8291          "UINT_TO_FP is supported only with FPCVT");
8292 
8293   // If we have FCFIDS, then use it when converting to single-precision.
8294   // Otherwise, convert to double-precision and then round.
8295   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
8296                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
8297                                                             : PPCISD::FCFIDS)
8298                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
8299                                                             : PPCISD::FCFID);
8300   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
8301                   ? MVT::f32
8302                   : MVT::f64;
8303 
8304   if (Op.getOperand(0).getValueType() == MVT::i64) {
8305     SDValue SINT = Op.getOperand(0);
8306     // When converting to single-precision, we actually need to convert
8307     // to double-precision first and then round to single-precision.
8308     // To avoid double-rounding effects during that operation, we have
8309     // to prepare the input operand.  Bits that might be truncated when
8310     // converting to double-precision are replaced by a bit that won't
8311     // be lost at this stage, but is below the single-precision rounding
8312     // position.
8313     //
8314     // However, if -enable-unsafe-fp-math is in effect, accept double
8315     // rounding to avoid the extra overhead.
8316     if (Op.getValueType() == MVT::f32 &&
8317         !Subtarget.hasFPCVT() &&
8318         !DAG.getTarget().Options.UnsafeFPMath) {
8319 
8320       // Twiddle input to make sure the low 11 bits are zero.  (If this
8321       // is the case, we are guaranteed the value will fit into the 53 bit
8322       // mantissa of an IEEE double-precision value without rounding.)
8323       // If any of those low 11 bits were not zero originally, make sure
8324       // bit 12 (value 2048) is set instead, so that the final rounding
8325       // to single-precision gets the correct result.
8326       SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64,
8327                                   SINT, DAG.getConstant(2047, dl, MVT::i64));
8328       Round = DAG.getNode(ISD::ADD, dl, MVT::i64,
8329                           Round, DAG.getConstant(2047, dl, MVT::i64));
8330       Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT);
8331       Round = DAG.getNode(ISD::AND, dl, MVT::i64,
8332                           Round, DAG.getConstant(-2048, dl, MVT::i64));
8333 
8334       // However, we cannot use that value unconditionally: if the magnitude
8335       // of the input value is small, the bit-twiddling we did above might
8336       // end up visibly changing the output.  Fortunately, in that case, we
8337       // don't need to twiddle bits since the original input will convert
8338       // exactly to double-precision floating-point already.  Therefore,
8339       // construct a conditional to use the original value if the top 11
8340       // bits are all sign-bit copies, and use the rounded value computed
8341       // above otherwise.
8342       SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64,
8343                                  SINT, DAG.getConstant(53, dl, MVT::i32));
8344       Cond = DAG.getNode(ISD::ADD, dl, MVT::i64,
8345                          Cond, DAG.getConstant(1, dl, MVT::i64));
8346       Cond = DAG.getSetCC(
8347           dl,
8348           getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
8349           Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT);
8350 
8351       SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT);
8352     }
8353 
8354     ReuseLoadInfo RLI;
8355     SDValue Bits;
8356 
8357     MachineFunction &MF = DAG.getMachineFunction();
8358     if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
8359       Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI,
8360                          RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
8361       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8362     } else if (Subtarget.hasLFIWAX() &&
8363                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) {
8364       MachineMemOperand *MMO =
8365         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8366                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8367       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8368       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl,
8369                                      DAG.getVTList(MVT::f64, MVT::Other),
8370                                      Ops, MVT::i32, MMO);
8371       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8372     } else if (Subtarget.hasFPCVT() &&
8373                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) {
8374       MachineMemOperand *MMO =
8375         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8376                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8377       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8378       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl,
8379                                      DAG.getVTList(MVT::f64, MVT::Other),
8380                                      Ops, MVT::i32, MMO);
8381       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8382     } else if (((Subtarget.hasLFIWAX() &&
8383                  SINT.getOpcode() == ISD::SIGN_EXTEND) ||
8384                 (Subtarget.hasFPCVT() &&
8385                  SINT.getOpcode() == ISD::ZERO_EXTEND)) &&
8386                SINT.getOperand(0).getValueType() == MVT::i32) {
8387       MachineFrameInfo &MFI = MF.getFrameInfo();
8388       EVT PtrVT = getPointerTy(DAG.getDataLayout());
8389 
8390       int FrameIdx = MFI.CreateStackObject(4, 4, false);
8391       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8392 
8393       SDValue Store =
8394           DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx,
8395                        MachinePointerInfo::getFixedStack(
8396                            DAG.getMachineFunction(), FrameIdx));
8397 
8398       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8399              "Expected an i32 store");
8400 
8401       RLI.Ptr = FIdx;
8402       RLI.Chain = Store;
8403       RLI.MPI =
8404           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8405       RLI.Alignment = Align(4);
8406 
8407       MachineMemOperand *MMO =
8408         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8409                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8410       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8411       Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ?
8412                                      PPCISD::LFIWZX : PPCISD::LFIWAX,
8413                                      dl, DAG.getVTList(MVT::f64, MVT::Other),
8414                                      Ops, MVT::i32, MMO);
8415     } else
8416       Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);
8417 
8418     SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits);
8419 
8420     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
8421       FP = DAG.getNode(ISD::FP_ROUND, dl,
8422                        MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
8423     return FP;
8424   }
8425 
8426   assert(Op.getOperand(0).getValueType() == MVT::i32 &&
8427          "Unhandled INT_TO_FP type in custom expander!");
8428   // Since we only generate this in 64-bit mode, we can take advantage of
8429   // 64-bit registers.  In particular, sign extend the input value into the
8430   // 64-bit register with extsw, store the WHOLE 64-bit value into the stack
8431   // then lfd it and fcfid it.
8432   MachineFunction &MF = DAG.getMachineFunction();
8433   MachineFrameInfo &MFI = MF.getFrameInfo();
8434   EVT PtrVT = getPointerTy(MF.getDataLayout());
8435 
8436   SDValue Ld;
8437   if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
8438     ReuseLoadInfo RLI;
8439     bool ReusingLoad;
8440     if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI,
8441                                             DAG))) {
8442       int FrameIdx = MFI.CreateStackObject(4, 4, false);
8443       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8444 
8445       SDValue Store =
8446           DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
8447                        MachinePointerInfo::getFixedStack(
8448                            DAG.getMachineFunction(), FrameIdx));
8449 
8450       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8451              "Expected an i32 store");
8452 
8453       RLI.Ptr = FIdx;
8454       RLI.Chain = Store;
8455       RLI.MPI =
8456           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8457       RLI.Alignment = Align(4);
8458     }
8459 
8460     MachineMemOperand *MMO =
8461       MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8462                               RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8463     SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8464     Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ?
8465                                    PPCISD::LFIWZX : PPCISD::LFIWAX,
8466                                  dl, DAG.getVTList(MVT::f64, MVT::Other),
8467                                  Ops, MVT::i32, MMO);
8468     if (ReusingLoad)
8469       spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
8470   } else {
8471     assert(Subtarget.isPPC64() &&
8472            "i32->FP without LFIWAX supported only on PPC64");
8473 
8474     int FrameIdx = MFI.CreateStackObject(8, 8, false);
8475     SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8476 
8477     SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64,
8478                                 Op.getOperand(0));
8479 
8480     // STD the extended value into the stack slot.
8481     SDValue Store = DAG.getStore(
8482         DAG.getEntryNode(), dl, Ext64, FIdx,
8483         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8484 
8485     // Load the value as a double.
8486     Ld = DAG.getLoad(
8487         MVT::f64, dl, Store, FIdx,
8488         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8489   }
8490 
8491   // FCFID it and return it.
8492   SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
8493   if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
8494     FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
8495                      DAG.getIntPtrConstant(0, dl));
8496   return FP;
8497 }
8498 
8499 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
8500                                             SelectionDAG &DAG) const {
8501   SDLoc dl(Op);
8502   /*
8503    The rounding mode is in bits 30:31 of FPSR, and has the following
8504    settings:
8505      00 Round to nearest
8506      01 Round to 0
8507      10 Round to +inf
8508      11 Round to -inf
8509 
8510   FLT_ROUNDS, on the other hand, expects the following:
8511     -1 Undefined
8512      0 Round to 0
8513      1 Round to nearest
8514      2 Round to +inf
8515      3 Round to -inf
8516 
8517   To perform the conversion, we do:
8518     ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
8519   */
8520 
8521   MachineFunction &MF = DAG.getMachineFunction();
8522   EVT VT = Op.getValueType();
8523   EVT PtrVT = getPointerTy(MF.getDataLayout());
8524 
8525   // Save FP Control Word to register
8526   SDValue Chain = Op.getOperand(0);
8527   SDValue MFFS = DAG.getNode(PPCISD::MFFS, dl, {MVT::f64, MVT::Other}, Chain);
8528   Chain = MFFS.getValue(1);
8529 
8530   // Save FP register to stack slot
8531   int SSFI = MF.getFrameInfo().CreateStackObject(8, 8, false);
8532   SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
8533   Chain = DAG.getStore(Chain, dl, MFFS, StackSlot, MachinePointerInfo());
8534 
8535   // Load FP Control Word from low 32 bits of stack slot.
8536   SDValue Four = DAG.getConstant(4, dl, PtrVT);
8537   SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
8538   SDValue CWD = DAG.getLoad(MVT::i32, dl, Chain, Addr, MachinePointerInfo());
8539   Chain = CWD.getValue(1);
8540 
8541   // Transform as necessary
8542   SDValue CWD1 =
8543     DAG.getNode(ISD::AND, dl, MVT::i32,
8544                 CWD, DAG.getConstant(3, dl, MVT::i32));
8545   SDValue CWD2 =
8546     DAG.getNode(ISD::SRL, dl, MVT::i32,
8547                 DAG.getNode(ISD::AND, dl, MVT::i32,
8548                             DAG.getNode(ISD::XOR, dl, MVT::i32,
8549                                         CWD, DAG.getConstant(3, dl, MVT::i32)),
8550                             DAG.getConstant(3, dl, MVT::i32)),
8551                 DAG.getConstant(1, dl, MVT::i32));
8552 
8553   SDValue RetVal =
8554     DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
8555 
8556   RetVal =
8557       DAG.getNode((VT.getSizeInBits() < 16 ? ISD::TRUNCATE : ISD::ZERO_EXTEND),
8558                   dl, VT, RetVal);
8559 
8560   return DAG.getMergeValues({RetVal, Chain}, dl);
8561 }
8562 
8563 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
8564   EVT VT = Op.getValueType();
8565   unsigned BitWidth = VT.getSizeInBits();
8566   SDLoc dl(Op);
8567   assert(Op.getNumOperands() == 3 &&
8568          VT == Op.getOperand(1).getValueType() &&
8569          "Unexpected SHL!");
8570 
8571   // Expand into a bunch of logical ops.  Note that these ops
8572   // depend on the PPC behavior for oversized shift amounts.
8573   SDValue Lo = Op.getOperand(0);
8574   SDValue Hi = Op.getOperand(1);
8575   SDValue Amt = Op.getOperand(2);
8576   EVT AmtVT = Amt.getValueType();
8577 
8578   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8579                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8580   SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
8581   SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
8582   SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3);
8583   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8584                              DAG.getConstant(-BitWidth, dl, AmtVT));
8585   SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
8586   SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
8587   SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
8588   SDValue OutOps[] = { OutLo, OutHi };
8589   return DAG.getMergeValues(OutOps, dl);
8590 }
8591 
8592 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
8593   EVT VT = Op.getValueType();
8594   SDLoc dl(Op);
8595   unsigned BitWidth = VT.getSizeInBits();
8596   assert(Op.getNumOperands() == 3 &&
8597          VT == Op.getOperand(1).getValueType() &&
8598          "Unexpected SRL!");
8599 
8600   // Expand into a bunch of logical ops.  Note that these ops
8601   // depend on the PPC behavior for oversized shift amounts.
8602   SDValue Lo = Op.getOperand(0);
8603   SDValue Hi = Op.getOperand(1);
8604   SDValue Amt = Op.getOperand(2);
8605   EVT AmtVT = Amt.getValueType();
8606 
8607   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8608                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8609   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
8610   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
8611   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
8612   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8613                              DAG.getConstant(-BitWidth, dl, AmtVT));
8614   SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
8615   SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
8616   SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
8617   SDValue OutOps[] = { OutLo, OutHi };
8618   return DAG.getMergeValues(OutOps, dl);
8619 }
8620 
8621 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
8622   SDLoc dl(Op);
8623   EVT VT = Op.getValueType();
8624   unsigned BitWidth = VT.getSizeInBits();
8625   assert(Op.getNumOperands() == 3 &&
8626          VT == Op.getOperand(1).getValueType() &&
8627          "Unexpected SRA!");
8628 
8629   // Expand into a bunch of logical ops, followed by a select_cc.
8630   SDValue Lo = Op.getOperand(0);
8631   SDValue Hi = Op.getOperand(1);
8632   SDValue Amt = Op.getOperand(2);
8633   EVT AmtVT = Amt.getValueType();
8634 
8635   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8636                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8637   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
8638   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
8639   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
8640   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8641                              DAG.getConstant(-BitWidth, dl, AmtVT));
8642   SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
8643   SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
8644   SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT),
8645                                   Tmp4, Tmp6, ISD::SETLE);
8646   SDValue OutOps[] = { OutLo, OutHi };
8647   return DAG.getMergeValues(OutOps, dl);
8648 }
8649 
8650 //===----------------------------------------------------------------------===//
8651 // Vector related lowering.
8652 //
8653 
8654 /// BuildSplatI - Build a canonical splati of Val with an element size of
8655 /// SplatSize.  Cast the result to VT.
8656 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT,
8657                            SelectionDAG &DAG, const SDLoc &dl) {
8658   static const MVT VTys[] = { // canonical VT to use for each size.
8659     MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
8660   };
8661 
8662   EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
8663 
8664   // Force vspltis[hw] -1 to vspltisb -1 to canonicalize.
8665   if (Val == -1)
8666     SplatSize = 1;
8667 
8668   EVT CanonicalVT = VTys[SplatSize-1];
8669 
8670   // Build a canonical splat for this value.
8671   return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT));
8672 }
8673 
8674 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the
8675 /// specified intrinsic ID.
8676 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG,
8677                                 const SDLoc &dl, EVT DestVT = MVT::Other) {
8678   if (DestVT == MVT::Other) DestVT = Op.getValueType();
8679   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8680                      DAG.getConstant(IID, dl, MVT::i32), Op);
8681 }
8682 
8683 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the
8684 /// specified intrinsic ID.
8685 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
8686                                 SelectionDAG &DAG, const SDLoc &dl,
8687                                 EVT DestVT = MVT::Other) {
8688   if (DestVT == MVT::Other) DestVT = LHS.getValueType();
8689   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8690                      DAG.getConstant(IID, dl, MVT::i32), LHS, RHS);
8691 }
8692 
8693 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
8694 /// specified intrinsic ID.
8695 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
8696                                 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl,
8697                                 EVT DestVT = MVT::Other) {
8698   if (DestVT == MVT::Other) DestVT = Op0.getValueType();
8699   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8700                      DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
8701 }
8702 
8703 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
8704 /// amount.  The result has the specified value type.
8705 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT,
8706                            SelectionDAG &DAG, const SDLoc &dl) {
8707   // Force LHS/RHS to be the right type.
8708   LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
8709   RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);
8710 
8711   int Ops[16];
8712   for (unsigned i = 0; i != 16; ++i)
8713     Ops[i] = i + Amt;
8714   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
8715   return DAG.getNode(ISD::BITCAST, dl, VT, T);
8716 }
8717 
8718 /// Do we have an efficient pattern in a .td file for this node?
8719 ///
8720 /// \param V - pointer to the BuildVectorSDNode being matched
8721 /// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves?
8722 ///
8723 /// There are some patterns where it is beneficial to keep a BUILD_VECTOR
8724 /// node as a BUILD_VECTOR node rather than expanding it. The patterns where
8725 /// the opposite is true (expansion is beneficial) are:
8726 /// - The node builds a vector out of integers that are not 32 or 64-bits
8727 /// - The node builds a vector out of constants
8728 /// - The node is a "load-and-splat"
8729 /// In all other cases, we will choose to keep the BUILD_VECTOR.
8730 static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V,
8731                                             bool HasDirectMove,
8732                                             bool HasP8Vector) {
8733   EVT VecVT = V->getValueType(0);
8734   bool RightType = VecVT == MVT::v2f64 ||
8735     (HasP8Vector && VecVT == MVT::v4f32) ||
8736     (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32));
8737   if (!RightType)
8738     return false;
8739 
8740   bool IsSplat = true;
8741   bool IsLoad = false;
8742   SDValue Op0 = V->getOperand(0);
8743 
8744   // This function is called in a block that confirms the node is not a constant
8745   // splat. So a constant BUILD_VECTOR here means the vector is built out of
8746   // different constants.
8747   if (V->isConstant())
8748     return false;
8749   for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
8750     if (V->getOperand(i).isUndef())
8751       return false;
8752     // We want to expand nodes that represent load-and-splat even if the
8753     // loaded value is a floating point truncation or conversion to int.
8754     if (V->getOperand(i).getOpcode() == ISD::LOAD ||
8755         (V->getOperand(i).getOpcode() == ISD::FP_ROUND &&
8756          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
8757         (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT &&
8758          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
8759         (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT &&
8760          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD))
8761       IsLoad = true;
8762     // If the operands are different or the input is not a load and has more
8763     // uses than just this BV node, then it isn't a splat.
8764     if (V->getOperand(i) != Op0 ||
8765         (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode())))
8766       IsSplat = false;
8767   }
8768   return !(IsSplat && IsLoad);
8769 }
8770 
8771 // Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128.
8772 SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
8773 
8774   SDLoc dl(Op);
8775   SDValue Op0 = Op->getOperand(0);
8776 
8777   if (!EnableQuadPrecision ||
8778       (Op.getValueType() != MVT::f128 ) ||
8779       (Op0.getOpcode() != ISD::BUILD_PAIR) ||
8780       (Op0.getOperand(0).getValueType() !=  MVT::i64) ||
8781       (Op0.getOperand(1).getValueType() != MVT::i64))
8782     return SDValue();
8783 
8784   return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0),
8785                      Op0.getOperand(1));
8786 }
8787 
8788 static const SDValue *getNormalLoadInput(const SDValue &Op) {
8789   const SDValue *InputLoad = &Op;
8790   if (InputLoad->getOpcode() == ISD::BITCAST)
8791     InputLoad = &InputLoad->getOperand(0);
8792   if (InputLoad->getOpcode() == ISD::SCALAR_TO_VECTOR)
8793     InputLoad = &InputLoad->getOperand(0);
8794   if (InputLoad->getOpcode() != ISD::LOAD)
8795     return nullptr;
8796   LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
8797   return ISD::isNormalLoad(LD) ? InputLoad : nullptr;
8798 }
8799 
8800 // If this is a case we can't handle, return null and let the default
8801 // expansion code take care of it.  If we CAN select this case, and if it
8802 // selects to a single instruction, return Op.  Otherwise, if we can codegen
8803 // this case more efficiently than a constant pool load, lower it to the
8804 // sequence of ops that should be used.
8805 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
8806                                              SelectionDAG &DAG) const {
8807   SDLoc dl(Op);
8808   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
8809   assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
8810 
8811   if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) {
8812     // We first build an i32 vector, load it into a QPX register,
8813     // then convert it to a floating-point vector and compare it
8814     // to a zero vector to get the boolean result.
8815     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
8816     int FrameIdx = MFI.CreateStackObject(16, 16, false);
8817     MachinePointerInfo PtrInfo =
8818         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8819     EVT PtrVT = getPointerTy(DAG.getDataLayout());
8820     SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8821 
8822     assert(BVN->getNumOperands() == 4 &&
8823       "BUILD_VECTOR for v4i1 does not have 4 operands");
8824 
8825     bool IsConst = true;
8826     for (unsigned i = 0; i < 4; ++i) {
8827       if (BVN->getOperand(i).isUndef()) continue;
8828       if (!isa<ConstantSDNode>(BVN->getOperand(i))) {
8829         IsConst = false;
8830         break;
8831       }
8832     }
8833 
8834     if (IsConst) {
8835       Constant *One =
8836         ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0);
8837       Constant *NegOne =
8838         ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0);
8839 
8840       Constant *CV[4];
8841       for (unsigned i = 0; i < 4; ++i) {
8842         if (BVN->getOperand(i).isUndef())
8843           CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext()));
8844         else if (isNullConstant(BVN->getOperand(i)))
8845           CV[i] = NegOne;
8846         else
8847           CV[i] = One;
8848       }
8849 
8850       Constant *CP = ConstantVector::get(CV);
8851       SDValue CPIdx = DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()),
8852                                           16 /* alignment */);
8853 
8854       SDValue Ops[] = {DAG.getEntryNode(), CPIdx};
8855       SDVTList VTs = DAG.getVTList({MVT::v4i1, /*chain*/ MVT::Other});
8856       return DAG.getMemIntrinsicNode(
8857           PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32,
8858           MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
8859     }
8860 
8861     SmallVector<SDValue, 4> Stores;
8862     for (unsigned i = 0; i < 4; ++i) {
8863       if (BVN->getOperand(i).isUndef()) continue;
8864 
8865       unsigned Offset = 4*i;
8866       SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
8867       Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
8868 
8869       unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize();
8870       if (StoreSize > 4) {
8871         Stores.push_back(
8872             DAG.getTruncStore(DAG.getEntryNode(), dl, BVN->getOperand(i), Idx,
8873                               PtrInfo.getWithOffset(Offset), MVT::i32));
8874       } else {
8875         SDValue StoreValue = BVN->getOperand(i);
8876         if (StoreSize < 4)
8877           StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue);
8878 
8879         Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, StoreValue, Idx,
8880                                       PtrInfo.getWithOffset(Offset)));
8881       }
8882     }
8883 
8884     SDValue StoreChain;
8885     if (!Stores.empty())
8886       StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
8887     else
8888       StoreChain = DAG.getEntryNode();
8889 
8890     // Now load from v4i32 into the QPX register; this will extend it to
8891     // v4i64 but not yet convert it to a floating point. Nevertheless, this
8892     // is typed as v4f64 because the QPX register integer states are not
8893     // explicitly represented.
8894 
8895     SDValue Ops[] = {StoreChain,
8896                      DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32),
8897                      FIdx};
8898     SDVTList VTs = DAG.getVTList({MVT::v4f64, /*chain*/ MVT::Other});
8899 
8900     SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN,
8901       dl, VTs, Ops, MVT::v4i32, PtrInfo);
8902     LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
8903       DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32),
8904       LoadedVect);
8905 
8906     SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::v4f64);
8907 
8908     return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ);
8909   }
8910 
8911   // All other QPX vectors are handled by generic code.
8912   if (Subtarget.hasQPX())
8913     return SDValue();
8914 
8915   // Check if this is a splat of a constant value.
8916   APInt APSplatBits, APSplatUndef;
8917   unsigned SplatBitSize;
8918   bool HasAnyUndefs;
8919   if (! BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
8920                              HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
8921       SplatBitSize > 32) {
8922 
8923     const SDValue *InputLoad = getNormalLoadInput(Op.getOperand(0));
8924     // Handle load-and-splat patterns as we have instructions that will do this
8925     // in one go.
8926     if (InputLoad && DAG.isSplatValue(Op, true)) {
8927       LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
8928 
8929       // We have handling for 4 and 8 byte elements.
8930       unsigned ElementSize = LD->getMemoryVT().getScalarSizeInBits();
8931 
8932       // Checking for a single use of this load, we have to check for vector
8933       // width (128 bits) / ElementSize uses (since each operand of the
8934       // BUILD_VECTOR is a separate use of the value.
8935       if (InputLoad->getNode()->hasNUsesOfValue(128 / ElementSize, 0) &&
8936           ((Subtarget.hasVSX() && ElementSize == 64) ||
8937            (Subtarget.hasP9Vector() && ElementSize == 32))) {
8938         SDValue Ops[] = {
8939           LD->getChain(),    // Chain
8940           LD->getBasePtr(),  // Ptr
8941           DAG.getValueType(Op.getValueType()) // VT
8942         };
8943         return
8944           DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl,
8945                                   DAG.getVTList(Op.getValueType(), MVT::Other),
8946                                   Ops, LD->getMemoryVT(), LD->getMemOperand());
8947       }
8948     }
8949 
8950     // BUILD_VECTOR nodes that are not constant splats of up to 32-bits can be
8951     // lowered to VSX instructions under certain conditions.
8952     // Without VSX, there is no pattern more efficient than expanding the node.
8953     if (Subtarget.hasVSX() &&
8954         haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
8955                                         Subtarget.hasP8Vector()))
8956       return Op;
8957     return SDValue();
8958   }
8959 
8960   unsigned SplatBits = APSplatBits.getZExtValue();
8961   unsigned SplatUndef = APSplatUndef.getZExtValue();
8962   unsigned SplatSize = SplatBitSize / 8;
8963 
8964   // First, handle single instruction cases.
8965 
8966   // All zeros?
8967   if (SplatBits == 0) {
8968     // Canonicalize all zero vectors to be v4i32.
8969     if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
8970       SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
8971       Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
8972     }
8973     return Op;
8974   }
8975 
8976   // We have XXSPLTIB for constant splats one byte wide
8977   // FIXME: SplatBits is an unsigned int being cast to an int while passing it
8978   // as an argument to BuildSplatiI. Given SplatSize == 1 it is okay here.
8979   if (Subtarget.hasP9Vector() && SplatSize == 1)
8980     return BuildSplatI(SplatBits, SplatSize, Op.getValueType(), DAG, dl);
8981 
8982   // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
8983   int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >>
8984                     (32-SplatBitSize));
8985   if (SextVal >= -16 && SextVal <= 15)
8986     return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl);
8987 
8988   // Two instruction sequences.
8989 
8990   // If this value is in the range [-32,30] and is even, use:
8991   //     VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
8992   // If this value is in the range [17,31] and is odd, use:
8993   //     VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
8994   // If this value is in the range [-31,-17] and is odd, use:
8995   //     VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
8996   // Note the last two are three-instruction sequences.
8997   if (SextVal >= -32 && SextVal <= 31) {
8998     // To avoid having these optimizations undone by constant folding,
8999     // we convert to a pseudo that will be expanded later into one of
9000     // the above forms.
9001     SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
9002     EVT VT = (SplatSize == 1 ? MVT::v16i8 :
9003               (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
9004     SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
9005     SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
9006     if (VT == Op.getValueType())
9007       return RetVal;
9008     else
9009       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
9010   }
9011 
9012   // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
9013   // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
9014   // for fneg/fabs.
9015   if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
9016     // Make -1 and vspltisw -1:
9017     SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);
9018 
9019     // Make the VSLW intrinsic, computing 0x8000_0000.
9020     SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
9021                                    OnesV, DAG, dl);
9022 
9023     // xor by OnesV to invert it.
9024     Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
9025     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9026   }
9027 
9028   // Check to see if this is a wide variety of vsplti*, binop self cases.
9029   static const signed char SplatCsts[] = {
9030     -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
9031     -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
9032   };
9033 
9034   for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
9035     // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
9036     // cases which are ambiguous (e.g. formation of 0x8000_0000).  'vsplti -1'
9037     int i = SplatCsts[idx];
9038 
9039     // Figure out what shift amount will be used by altivec if shifted by i in
9040     // this splat size.
9041     unsigned TypeShiftAmt = i & (SplatBitSize-1);
9042 
9043     // vsplti + shl self.
9044     if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
9045       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
9046       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9047         Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
9048         Intrinsic::ppc_altivec_vslw
9049       };
9050       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9051       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9052     }
9053 
9054     // vsplti + srl self.
9055     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
9056       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
9057       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9058         Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
9059         Intrinsic::ppc_altivec_vsrw
9060       };
9061       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9062       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9063     }
9064 
9065     // vsplti + sra self.
9066     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
9067       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
9068       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9069         Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
9070         Intrinsic::ppc_altivec_vsraw
9071       };
9072       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9073       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9074     }
9075 
9076     // vsplti + rol self.
9077     if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
9078                          ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
9079       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
9080       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9081         Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
9082         Intrinsic::ppc_altivec_vrlw
9083       };
9084       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9085       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9086     }
9087 
9088     // t = vsplti c, result = vsldoi t, t, 1
9089     if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
9090       SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
9091       unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
9092       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9093     }
9094     // t = vsplti c, result = vsldoi t, t, 2
9095     if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
9096       SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
9097       unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
9098       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9099     }
9100     // t = vsplti c, result = vsldoi t, t, 3
9101     if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
9102       SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
9103       unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
9104       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9105     }
9106   }
9107 
9108   return SDValue();
9109 }
9110 
9111 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
9112 /// the specified operations to build the shuffle.
9113 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
9114                                       SDValue RHS, SelectionDAG &DAG,
9115                                       const SDLoc &dl) {
9116   unsigned OpNum = (PFEntry >> 26) & 0x0F;
9117   unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
9118   unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
9119 
9120   enum {
9121     OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
9122     OP_VMRGHW,
9123     OP_VMRGLW,
9124     OP_VSPLTISW0,
9125     OP_VSPLTISW1,
9126     OP_VSPLTISW2,
9127     OP_VSPLTISW3,
9128     OP_VSLDOI4,
9129     OP_VSLDOI8,
9130     OP_VSLDOI12
9131   };
9132 
9133   if (OpNum == OP_COPY) {
9134     if (LHSID == (1*9+2)*9+3) return LHS;
9135     assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
9136     return RHS;
9137   }
9138 
9139   SDValue OpLHS, OpRHS;
9140   OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
9141   OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
9142 
9143   int ShufIdxs[16];
9144   switch (OpNum) {
9145   default: llvm_unreachable("Unknown i32 permute!");
9146   case OP_VMRGHW:
9147     ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
9148     ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
9149     ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
9150     ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
9151     break;
9152   case OP_VMRGLW:
9153     ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
9154     ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
9155     ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
9156     ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
9157     break;
9158   case OP_VSPLTISW0:
9159     for (unsigned i = 0; i != 16; ++i)
9160       ShufIdxs[i] = (i&3)+0;
9161     break;
9162   case OP_VSPLTISW1:
9163     for (unsigned i = 0; i != 16; ++i)
9164       ShufIdxs[i] = (i&3)+4;
9165     break;
9166   case OP_VSPLTISW2:
9167     for (unsigned i = 0; i != 16; ++i)
9168       ShufIdxs[i] = (i&3)+8;
9169     break;
9170   case OP_VSPLTISW3:
9171     for (unsigned i = 0; i != 16; ++i)
9172       ShufIdxs[i] = (i&3)+12;
9173     break;
9174   case OP_VSLDOI4:
9175     return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
9176   case OP_VSLDOI8:
9177     return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
9178   case OP_VSLDOI12:
9179     return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
9180   }
9181   EVT VT = OpLHS.getValueType();
9182   OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
9183   OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
9184   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
9185   return DAG.getNode(ISD::BITCAST, dl, VT, T);
9186 }
9187 
9188 /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
9189 /// by the VINSERTB instruction introduced in ISA 3.0, else just return default
9190 /// SDValue.
9191 SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
9192                                            SelectionDAG &DAG) const {
9193   const unsigned BytesInVector = 16;
9194   bool IsLE = Subtarget.isLittleEndian();
9195   SDLoc dl(N);
9196   SDValue V1 = N->getOperand(0);
9197   SDValue V2 = N->getOperand(1);
9198   unsigned ShiftElts = 0, InsertAtByte = 0;
9199   bool Swap = false;
9200 
9201   // Shifts required to get the byte we want at element 7.
9202   unsigned LittleEndianShifts[] = {8, 7,  6,  5,  4,  3,  2,  1,
9203                                    0, 15, 14, 13, 12, 11, 10, 9};
9204   unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
9205                                 1, 2,  3,  4,  5,  6,  7,  8};
9206 
9207   ArrayRef<int> Mask = N->getMask();
9208   int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
9209 
9210   // For each mask element, find out if we're just inserting something
9211   // from V2 into V1 or vice versa.
9212   // Possible permutations inserting an element from V2 into V1:
9213   //   X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9214   //   0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9215   //   ...
9216   //   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
9217   // Inserting from V1 into V2 will be similar, except mask range will be
9218   // [16,31].
9219 
9220   bool FoundCandidate = false;
9221   // If both vector operands for the shuffle are the same vector, the mask
9222   // will contain only elements from the first one and the second one will be
9223   // undef.
9224   unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
9225   // Go through the mask of half-words to find an element that's being moved
9226   // from one vector to the other.
9227   for (unsigned i = 0; i < BytesInVector; ++i) {
9228     unsigned CurrentElement = Mask[i];
9229     // If 2nd operand is undefined, we should only look for element 7 in the
9230     // Mask.
9231     if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
9232       continue;
9233 
9234     bool OtherElementsInOrder = true;
9235     // Examine the other elements in the Mask to see if they're in original
9236     // order.
9237     for (unsigned j = 0; j < BytesInVector; ++j) {
9238       if (j == i)
9239         continue;
9240       // If CurrentElement is from V1 [0,15], then we the rest of the Mask to be
9241       // from V2 [16,31] and vice versa.  Unless the 2nd operand is undefined,
9242       // in which we always assume we're always picking from the 1st operand.
9243       int MaskOffset =
9244           (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
9245       if (Mask[j] != OriginalOrder[j] + MaskOffset) {
9246         OtherElementsInOrder = false;
9247         break;
9248       }
9249     }
9250     // If other elements are in original order, we record the number of shifts
9251     // we need to get the element we want into element 7. Also record which byte
9252     // in the vector we should insert into.
9253     if (OtherElementsInOrder) {
9254       // If 2nd operand is undefined, we assume no shifts and no swapping.
9255       if (V2.isUndef()) {
9256         ShiftElts = 0;
9257         Swap = false;
9258       } else {
9259         // Only need the last 4-bits for shifts because operands will be swapped if CurrentElement is >= 2^4.
9260         ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
9261                          : BigEndianShifts[CurrentElement & 0xF];
9262         Swap = CurrentElement < BytesInVector;
9263       }
9264       InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
9265       FoundCandidate = true;
9266       break;
9267     }
9268   }
9269 
9270   if (!FoundCandidate)
9271     return SDValue();
9272 
9273   // Candidate found, construct the proper SDAG sequence with VINSERTB,
9274   // optionally with VECSHL if shift is required.
9275   if (Swap)
9276     std::swap(V1, V2);
9277   if (V2.isUndef())
9278     V2 = V1;
9279   if (ShiftElts) {
9280     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9281                               DAG.getConstant(ShiftElts, dl, MVT::i32));
9282     return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl,
9283                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
9284   }
9285   return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2,
9286                      DAG.getConstant(InsertAtByte, dl, MVT::i32));
9287 }
9288 
9289 /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled
9290 /// by the VINSERTH instruction introduced in ISA 3.0, else just return default
9291 /// SDValue.
9292 SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
9293                                            SelectionDAG &DAG) const {
9294   const unsigned NumHalfWords = 8;
9295   const unsigned BytesInVector = NumHalfWords * 2;
9296   // Check that the shuffle is on half-words.
9297   if (!isNByteElemShuffleMask(N, 2, 1))
9298     return SDValue();
9299 
9300   bool IsLE = Subtarget.isLittleEndian();
9301   SDLoc dl(N);
9302   SDValue V1 = N->getOperand(0);
9303   SDValue V2 = N->getOperand(1);
9304   unsigned ShiftElts = 0, InsertAtByte = 0;
9305   bool Swap = false;
9306 
9307   // Shifts required to get the half-word we want at element 3.
9308   unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
9309   unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};
9310 
9311   uint32_t Mask = 0;
9312   uint32_t OriginalOrderLow = 0x1234567;
9313   uint32_t OriginalOrderHigh = 0x89ABCDEF;
9314   // Now we look at mask elements 0,2,4,6,8,10,12,14.  Pack the mask into a
9315   // 32-bit space, only need 4-bit nibbles per element.
9316   for (unsigned i = 0; i < NumHalfWords; ++i) {
9317     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9318     Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift);
9319   }
9320 
9321   // For each mask element, find out if we're just inserting something
9322   // from V2 into V1 or vice versa.  Possible permutations inserting an element
9323   // from V2 into V1:
9324   //   X, 1, 2, 3, 4, 5, 6, 7
9325   //   0, X, 2, 3, 4, 5, 6, 7
9326   //   0, 1, X, 3, 4, 5, 6, 7
9327   //   0, 1, 2, X, 4, 5, 6, 7
9328   //   0, 1, 2, 3, X, 5, 6, 7
9329   //   0, 1, 2, 3, 4, X, 6, 7
9330   //   0, 1, 2, 3, 4, 5, X, 7
9331   //   0, 1, 2, 3, 4, 5, 6, X
9332   // Inserting from V1 into V2 will be similar, except mask range will be [8,15].
9333 
9334   bool FoundCandidate = false;
9335   // Go through the mask of half-words to find an element that's being moved
9336   // from one vector to the other.
9337   for (unsigned i = 0; i < NumHalfWords; ++i) {
9338     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9339     uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF;
9340     uint32_t MaskOtherElts = ~(0xF << MaskShift);
9341     uint32_t TargetOrder = 0x0;
9342 
9343     // If both vector operands for the shuffle are the same vector, the mask
9344     // will contain only elements from the first one and the second one will be
9345     // undef.
9346     if (V2.isUndef()) {
9347       ShiftElts = 0;
9348       unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
9349       TargetOrder = OriginalOrderLow;
9350       Swap = false;
9351       // Skip if not the correct element or mask of other elements don't equal
9352       // to our expected order.
9353       if (MaskOneElt == VINSERTHSrcElem &&
9354           (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9355         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9356         FoundCandidate = true;
9357         break;
9358       }
9359     } else { // If both operands are defined.
9360       // Target order is [8,15] if the current mask is between [0,7].
9361       TargetOrder =
9362           (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
9363       // Skip if mask of other elements don't equal our expected order.
9364       if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9365         // We only need the last 3 bits for the number of shifts.
9366         ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
9367                          : BigEndianShifts[MaskOneElt & 0x7];
9368         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9369         Swap = MaskOneElt < NumHalfWords;
9370         FoundCandidate = true;
9371         break;
9372       }
9373     }
9374   }
9375 
9376   if (!FoundCandidate)
9377     return SDValue();
9378 
9379   // Candidate found, construct the proper SDAG sequence with VINSERTH,
9380   // optionally with VECSHL if shift is required.
9381   if (Swap)
9382     std::swap(V1, V2);
9383   if (V2.isUndef())
9384     V2 = V1;
9385   SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
9386   if (ShiftElts) {
9387     // Double ShiftElts because we're left shifting on v16i8 type.
9388     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9389                               DAG.getConstant(2 * ShiftElts, dl, MVT::i32));
9390     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl);
9391     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9392                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
9393     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9394   }
9395   SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
9396   SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9397                             DAG.getConstant(InsertAtByte, dl, MVT::i32));
9398   return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9399 }
9400 
9401 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
9402 /// is a shuffle we can handle in a single instruction, return it.  Otherwise,
9403 /// return the code it can be lowered into.  Worst case, it can always be
9404 /// lowered into a vperm.
9405 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
9406                                                SelectionDAG &DAG) const {
9407   SDLoc dl(Op);
9408   SDValue V1 = Op.getOperand(0);
9409   SDValue V2 = Op.getOperand(1);
9410   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9411   EVT VT = Op.getValueType();
9412   bool isLittleEndian = Subtarget.isLittleEndian();
9413 
9414   unsigned ShiftElts, InsertAtByte;
9415   bool Swap = false;
9416 
9417   // If this is a load-and-splat, we can do that with a single instruction
9418   // in some cases. However if the load has multiple uses, we don't want to
9419   // combine it because that will just produce multiple loads.
9420   const SDValue *InputLoad = getNormalLoadInput(V1);
9421   if (InputLoad && Subtarget.hasVSX() && V2.isUndef() &&
9422       (PPC::isSplatShuffleMask(SVOp, 4) || PPC::isSplatShuffleMask(SVOp, 8)) &&
9423       InputLoad->hasOneUse()) {
9424     bool IsFourByte = PPC::isSplatShuffleMask(SVOp, 4);
9425     int SplatIdx =
9426       PPC::getSplatIdxForPPCMnemonics(SVOp, IsFourByte ? 4 : 8, DAG);
9427 
9428     LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9429     // For 4-byte load-and-splat, we need Power9.
9430     if ((IsFourByte && Subtarget.hasP9Vector()) || !IsFourByte) {
9431       uint64_t Offset = 0;
9432       if (IsFourByte)
9433         Offset = isLittleEndian ? (3 - SplatIdx) * 4 : SplatIdx * 4;
9434       else
9435         Offset = isLittleEndian ? (1 - SplatIdx) * 8 : SplatIdx * 8;
9436       SDValue BasePtr = LD->getBasePtr();
9437       if (Offset != 0)
9438         BasePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
9439                               BasePtr, DAG.getIntPtrConstant(Offset, dl));
9440       SDValue Ops[] = {
9441         LD->getChain(),    // Chain
9442         BasePtr,           // BasePtr
9443         DAG.getValueType(Op.getValueType()) // VT
9444       };
9445       SDVTList VTL =
9446         DAG.getVTList(IsFourByte ? MVT::v4i32 : MVT::v2i64, MVT::Other);
9447       SDValue LdSplt =
9448         DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl, VTL,
9449                                 Ops, LD->getMemoryVT(), LD->getMemOperand());
9450       if (LdSplt.getValueType() != SVOp->getValueType(0))
9451         LdSplt = DAG.getBitcast(SVOp->getValueType(0), LdSplt);
9452       return LdSplt;
9453     }
9454   }
9455   if (Subtarget.hasP9Vector() &&
9456       PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
9457                            isLittleEndian)) {
9458     if (Swap)
9459       std::swap(V1, V2);
9460     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9461     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2);
9462     if (ShiftElts) {
9463       SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2,
9464                                 DAG.getConstant(ShiftElts, dl, MVT::i32));
9465       SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl,
9466                                 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9467       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9468     }
9469     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2,
9470                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
9471     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9472   }
9473 
9474   if (Subtarget.hasP9Altivec()) {
9475     SDValue NewISDNode;
9476     if ((NewISDNode = lowerToVINSERTH(SVOp, DAG)))
9477       return NewISDNode;
9478 
9479     if ((NewISDNode = lowerToVINSERTB(SVOp, DAG)))
9480       return NewISDNode;
9481   }
9482 
9483   if (Subtarget.hasVSX() &&
9484       PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
9485     if (Swap)
9486       std::swap(V1, V2);
9487     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9488     SDValue Conv2 =
9489         DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2);
9490 
9491     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2,
9492                               DAG.getConstant(ShiftElts, dl, MVT::i32));
9493     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl);
9494   }
9495 
9496   if (Subtarget.hasVSX() &&
9497     PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
9498     if (Swap)
9499       std::swap(V1, V2);
9500     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
9501     SDValue Conv2 =
9502         DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2);
9503 
9504     SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2,
9505                               DAG.getConstant(ShiftElts, dl, MVT::i32));
9506     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI);
9507   }
9508 
9509   if (Subtarget.hasP9Vector()) {
9510      if (PPC::isXXBRHShuffleMask(SVOp)) {
9511       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
9512       SDValue ReveHWord = DAG.getNode(ISD::BSWAP, dl, MVT::v8i16, Conv);
9513       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord);
9514     } else if (PPC::isXXBRWShuffleMask(SVOp)) {
9515       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9516       SDValue ReveWord = DAG.getNode(ISD::BSWAP, dl, MVT::v4i32, Conv);
9517       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord);
9518     } else if (PPC::isXXBRDShuffleMask(SVOp)) {
9519       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
9520       SDValue ReveDWord = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Conv);
9521       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord);
9522     } else if (PPC::isXXBRQShuffleMask(SVOp)) {
9523       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1);
9524       SDValue ReveQWord = DAG.getNode(ISD::BSWAP, dl, MVT::v1i128, Conv);
9525       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord);
9526     }
9527   }
9528 
9529   if (Subtarget.hasVSX()) {
9530     if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
9531       int SplatIdx = PPC::getSplatIdxForPPCMnemonics(SVOp, 4, DAG);
9532 
9533       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9534       SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
9535                                   DAG.getConstant(SplatIdx, dl, MVT::i32));
9536       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
9537     }
9538 
9539     // Left shifts of 8 bytes are actually swaps. Convert accordingly.
9540     if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
9541       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
9542       SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
9543       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
9544     }
9545   }
9546 
9547   if (Subtarget.hasQPX()) {
9548     if (VT.getVectorNumElements() != 4)
9549       return SDValue();
9550 
9551     if (V2.isUndef()) V2 = V1;
9552 
9553     int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp);
9554     if (AlignIdx != -1) {
9555       return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2,
9556                          DAG.getConstant(AlignIdx, dl, MVT::i32));
9557     } else if (SVOp->isSplat()) {
9558       int SplatIdx = SVOp->getSplatIndex();
9559       if (SplatIdx >= 4) {
9560         std::swap(V1, V2);
9561         SplatIdx -= 4;
9562       }
9563 
9564       return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1,
9565                          DAG.getConstant(SplatIdx, dl, MVT::i32));
9566     }
9567 
9568     // Lower this into a qvgpci/qvfperm pair.
9569 
9570     // Compute the qvgpci literal
9571     unsigned idx = 0;
9572     for (unsigned i = 0; i < 4; ++i) {
9573       int m = SVOp->getMaskElt(i);
9574       unsigned mm = m >= 0 ? (unsigned) m : i;
9575       idx |= mm << (3-i)*3;
9576     }
9577 
9578     SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64,
9579                              DAG.getConstant(idx, dl, MVT::i32));
9580     return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3);
9581   }
9582 
9583   // Cases that are handled by instructions that take permute immediates
9584   // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
9585   // selected by the instruction selector.
9586   if (V2.isUndef()) {
9587     if (PPC::isSplatShuffleMask(SVOp, 1) ||
9588         PPC::isSplatShuffleMask(SVOp, 2) ||
9589         PPC::isSplatShuffleMask(SVOp, 4) ||
9590         PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
9591         PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
9592         PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
9593         PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
9594         PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
9595         PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
9596         PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
9597         PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
9598         PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) ||
9599         (Subtarget.hasP8Altivec() && (
9600          PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) ||
9601          PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) ||
9602          PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) {
9603       return Op;
9604     }
9605   }
9606 
9607   // Altivec has a variety of "shuffle immediates" that take two vector inputs
9608   // and produce a fixed permutation.  If any of these match, do not lower to
9609   // VPERM.
9610   unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
9611   if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9612       PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9613       PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
9614       PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
9615       PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
9616       PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
9617       PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
9618       PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
9619       PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
9620       (Subtarget.hasP8Altivec() && (
9621        PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9622        PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) ||
9623        PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG))))
9624     return Op;
9625 
9626   // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
9627   // perfect shuffle table to emit an optimal matching sequence.
9628   ArrayRef<int> PermMask = SVOp->getMask();
9629 
9630   unsigned PFIndexes[4];
9631   bool isFourElementShuffle = true;
9632   for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
9633     unsigned EltNo = 8;   // Start out undef.
9634     for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
9635       if (PermMask[i*4+j] < 0)
9636         continue;   // Undef, ignore it.
9637 
9638       unsigned ByteSource = PermMask[i*4+j];
9639       if ((ByteSource & 3) != j) {
9640         isFourElementShuffle = false;
9641         break;
9642       }
9643 
9644       if (EltNo == 8) {
9645         EltNo = ByteSource/4;
9646       } else if (EltNo != ByteSource/4) {
9647         isFourElementShuffle = false;
9648         break;
9649       }
9650     }
9651     PFIndexes[i] = EltNo;
9652   }
9653 
9654   // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
9655   // perfect shuffle vector to determine if it is cost effective to do this as
9656   // discrete instructions, or whether we should use a vperm.
9657   // For now, we skip this for little endian until such time as we have a
9658   // little-endian perfect shuffle table.
9659   if (isFourElementShuffle && !isLittleEndian) {
9660     // Compute the index in the perfect shuffle table.
9661     unsigned PFTableIndex =
9662       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
9663 
9664     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
9665     unsigned Cost  = (PFEntry >> 30);
9666 
9667     // Determining when to avoid vperm is tricky.  Many things affect the cost
9668     // of vperm, particularly how many times the perm mask needs to be computed.
9669     // For example, if the perm mask can be hoisted out of a loop or is already
9670     // used (perhaps because there are multiple permutes with the same shuffle
9671     // mask?) the vperm has a cost of 1.  OTOH, hoisting the permute mask out of
9672     // the loop requires an extra register.
9673     //
9674     // As a compromise, we only emit discrete instructions if the shuffle can be
9675     // generated in 3 or fewer operations.  When we have loop information
9676     // available, if this block is within a loop, we should avoid using vperm
9677     // for 3-operation perms and use a constant pool load instead.
9678     if (Cost < 3)
9679       return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
9680   }
9681 
9682   // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
9683   // vector that will get spilled to the constant pool.
9684   if (V2.isUndef()) V2 = V1;
9685 
9686   // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
9687   // that it is in input element units, not in bytes.  Convert now.
9688 
9689   // For little endian, the order of the input vectors is reversed, and
9690   // the permutation mask is complemented with respect to 31.  This is
9691   // necessary to produce proper semantics with the big-endian-biased vperm
9692   // instruction.
9693   EVT EltVT = V1.getValueType().getVectorElementType();
9694   unsigned BytesPerElement = EltVT.getSizeInBits()/8;
9695 
9696   SmallVector<SDValue, 16> ResultMask;
9697   for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
9698     unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
9699 
9700     for (unsigned j = 0; j != BytesPerElement; ++j)
9701       if (isLittleEndian)
9702         ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
9703                                              dl, MVT::i32));
9704       else
9705         ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
9706                                              MVT::i32));
9707   }
9708 
9709   SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
9710   if (isLittleEndian)
9711     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
9712                        V2, V1, VPermMask);
9713   else
9714     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
9715                        V1, V2, VPermMask);
9716 }
9717 
9718 /// getVectorCompareInfo - Given an intrinsic, return false if it is not a
9719 /// vector comparison.  If it is, return true and fill in Opc/isDot with
9720 /// information about the intrinsic.
9721 static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
9722                                  bool &isDot, const PPCSubtarget &Subtarget) {
9723   unsigned IntrinsicID =
9724       cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
9725   CompareOpc = -1;
9726   isDot = false;
9727   switch (IntrinsicID) {
9728   default:
9729     return false;
9730   // Comparison predicates.
9731   case Intrinsic::ppc_altivec_vcmpbfp_p:
9732     CompareOpc = 966;
9733     isDot = true;
9734     break;
9735   case Intrinsic::ppc_altivec_vcmpeqfp_p:
9736     CompareOpc = 198;
9737     isDot = true;
9738     break;
9739   case Intrinsic::ppc_altivec_vcmpequb_p:
9740     CompareOpc = 6;
9741     isDot = true;
9742     break;
9743   case Intrinsic::ppc_altivec_vcmpequh_p:
9744     CompareOpc = 70;
9745     isDot = true;
9746     break;
9747   case Intrinsic::ppc_altivec_vcmpequw_p:
9748     CompareOpc = 134;
9749     isDot = true;
9750     break;
9751   case Intrinsic::ppc_altivec_vcmpequd_p:
9752     if (Subtarget.hasP8Altivec()) {
9753       CompareOpc = 199;
9754       isDot = true;
9755     } else
9756       return false;
9757     break;
9758   case Intrinsic::ppc_altivec_vcmpneb_p:
9759   case Intrinsic::ppc_altivec_vcmpneh_p:
9760   case Intrinsic::ppc_altivec_vcmpnew_p:
9761   case Intrinsic::ppc_altivec_vcmpnezb_p:
9762   case Intrinsic::ppc_altivec_vcmpnezh_p:
9763   case Intrinsic::ppc_altivec_vcmpnezw_p:
9764     if (Subtarget.hasP9Altivec()) {
9765       switch (IntrinsicID) {
9766       default:
9767         llvm_unreachable("Unknown comparison intrinsic.");
9768       case Intrinsic::ppc_altivec_vcmpneb_p:
9769         CompareOpc = 7;
9770         break;
9771       case Intrinsic::ppc_altivec_vcmpneh_p:
9772         CompareOpc = 71;
9773         break;
9774       case Intrinsic::ppc_altivec_vcmpnew_p:
9775         CompareOpc = 135;
9776         break;
9777       case Intrinsic::ppc_altivec_vcmpnezb_p:
9778         CompareOpc = 263;
9779         break;
9780       case Intrinsic::ppc_altivec_vcmpnezh_p:
9781         CompareOpc = 327;
9782         break;
9783       case Intrinsic::ppc_altivec_vcmpnezw_p:
9784         CompareOpc = 391;
9785         break;
9786       }
9787       isDot = true;
9788     } else
9789       return false;
9790     break;
9791   case Intrinsic::ppc_altivec_vcmpgefp_p:
9792     CompareOpc = 454;
9793     isDot = true;
9794     break;
9795   case Intrinsic::ppc_altivec_vcmpgtfp_p:
9796     CompareOpc = 710;
9797     isDot = true;
9798     break;
9799   case Intrinsic::ppc_altivec_vcmpgtsb_p:
9800     CompareOpc = 774;
9801     isDot = true;
9802     break;
9803   case Intrinsic::ppc_altivec_vcmpgtsh_p:
9804     CompareOpc = 838;
9805     isDot = true;
9806     break;
9807   case Intrinsic::ppc_altivec_vcmpgtsw_p:
9808     CompareOpc = 902;
9809     isDot = true;
9810     break;
9811   case Intrinsic::ppc_altivec_vcmpgtsd_p:
9812     if (Subtarget.hasP8Altivec()) {
9813       CompareOpc = 967;
9814       isDot = true;
9815     } else
9816       return false;
9817     break;
9818   case Intrinsic::ppc_altivec_vcmpgtub_p:
9819     CompareOpc = 518;
9820     isDot = true;
9821     break;
9822   case Intrinsic::ppc_altivec_vcmpgtuh_p:
9823     CompareOpc = 582;
9824     isDot = true;
9825     break;
9826   case Intrinsic::ppc_altivec_vcmpgtuw_p:
9827     CompareOpc = 646;
9828     isDot = true;
9829     break;
9830   case Intrinsic::ppc_altivec_vcmpgtud_p:
9831     if (Subtarget.hasP8Altivec()) {
9832       CompareOpc = 711;
9833       isDot = true;
9834     } else
9835       return false;
9836     break;
9837 
9838   // VSX predicate comparisons use the same infrastructure
9839   case Intrinsic::ppc_vsx_xvcmpeqdp_p:
9840   case Intrinsic::ppc_vsx_xvcmpgedp_p:
9841   case Intrinsic::ppc_vsx_xvcmpgtdp_p:
9842   case Intrinsic::ppc_vsx_xvcmpeqsp_p:
9843   case Intrinsic::ppc_vsx_xvcmpgesp_p:
9844   case Intrinsic::ppc_vsx_xvcmpgtsp_p:
9845     if (Subtarget.hasVSX()) {
9846       switch (IntrinsicID) {
9847       case Intrinsic::ppc_vsx_xvcmpeqdp_p:
9848         CompareOpc = 99;
9849         break;
9850       case Intrinsic::ppc_vsx_xvcmpgedp_p:
9851         CompareOpc = 115;
9852         break;
9853       case Intrinsic::ppc_vsx_xvcmpgtdp_p:
9854         CompareOpc = 107;
9855         break;
9856       case Intrinsic::ppc_vsx_xvcmpeqsp_p:
9857         CompareOpc = 67;
9858         break;
9859       case Intrinsic::ppc_vsx_xvcmpgesp_p:
9860         CompareOpc = 83;
9861         break;
9862       case Intrinsic::ppc_vsx_xvcmpgtsp_p:
9863         CompareOpc = 75;
9864         break;
9865       }
9866       isDot = true;
9867     } else
9868       return false;
9869     break;
9870 
9871   // Normal Comparisons.
9872   case Intrinsic::ppc_altivec_vcmpbfp:
9873     CompareOpc = 966;
9874     break;
9875   case Intrinsic::ppc_altivec_vcmpeqfp:
9876     CompareOpc = 198;
9877     break;
9878   case Intrinsic::ppc_altivec_vcmpequb:
9879     CompareOpc = 6;
9880     break;
9881   case Intrinsic::ppc_altivec_vcmpequh:
9882     CompareOpc = 70;
9883     break;
9884   case Intrinsic::ppc_altivec_vcmpequw:
9885     CompareOpc = 134;
9886     break;
9887   case Intrinsic::ppc_altivec_vcmpequd:
9888     if (Subtarget.hasP8Altivec())
9889       CompareOpc = 199;
9890     else
9891       return false;
9892     break;
9893   case Intrinsic::ppc_altivec_vcmpneb:
9894   case Intrinsic::ppc_altivec_vcmpneh:
9895   case Intrinsic::ppc_altivec_vcmpnew:
9896   case Intrinsic::ppc_altivec_vcmpnezb:
9897   case Intrinsic::ppc_altivec_vcmpnezh:
9898   case Intrinsic::ppc_altivec_vcmpnezw:
9899     if (Subtarget.hasP9Altivec())
9900       switch (IntrinsicID) {
9901       default:
9902         llvm_unreachable("Unknown comparison intrinsic.");
9903       case Intrinsic::ppc_altivec_vcmpneb:
9904         CompareOpc = 7;
9905         break;
9906       case Intrinsic::ppc_altivec_vcmpneh:
9907         CompareOpc = 71;
9908         break;
9909       case Intrinsic::ppc_altivec_vcmpnew:
9910         CompareOpc = 135;
9911         break;
9912       case Intrinsic::ppc_altivec_vcmpnezb:
9913         CompareOpc = 263;
9914         break;
9915       case Intrinsic::ppc_altivec_vcmpnezh:
9916         CompareOpc = 327;
9917         break;
9918       case Intrinsic::ppc_altivec_vcmpnezw:
9919         CompareOpc = 391;
9920         break;
9921       }
9922     else
9923       return false;
9924     break;
9925   case Intrinsic::ppc_altivec_vcmpgefp:
9926     CompareOpc = 454;
9927     break;
9928   case Intrinsic::ppc_altivec_vcmpgtfp:
9929     CompareOpc = 710;
9930     break;
9931   case Intrinsic::ppc_altivec_vcmpgtsb:
9932     CompareOpc = 774;
9933     break;
9934   case Intrinsic::ppc_altivec_vcmpgtsh:
9935     CompareOpc = 838;
9936     break;
9937   case Intrinsic::ppc_altivec_vcmpgtsw:
9938     CompareOpc = 902;
9939     break;
9940   case Intrinsic::ppc_altivec_vcmpgtsd:
9941     if (Subtarget.hasP8Altivec())
9942       CompareOpc = 967;
9943     else
9944       return false;
9945     break;
9946   case Intrinsic::ppc_altivec_vcmpgtub:
9947     CompareOpc = 518;
9948     break;
9949   case Intrinsic::ppc_altivec_vcmpgtuh:
9950     CompareOpc = 582;
9951     break;
9952   case Intrinsic::ppc_altivec_vcmpgtuw:
9953     CompareOpc = 646;
9954     break;
9955   case Intrinsic::ppc_altivec_vcmpgtud:
9956     if (Subtarget.hasP8Altivec())
9957       CompareOpc = 711;
9958     else
9959       return false;
9960     break;
9961   }
9962   return true;
9963 }
9964 
9965 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
9966 /// lower, do it, otherwise return null.
9967 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
9968                                                    SelectionDAG &DAG) const {
9969   unsigned IntrinsicID =
9970     cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
9971 
9972   SDLoc dl(Op);
9973 
9974   if (IntrinsicID == Intrinsic::thread_pointer) {
9975     // Reads the thread pointer register, used for __builtin_thread_pointer.
9976     if (Subtarget.isPPC64())
9977       return DAG.getRegister(PPC::X13, MVT::i64);
9978     return DAG.getRegister(PPC::R2, MVT::i32);
9979   }
9980 
9981   // If this is a lowered altivec predicate compare, CompareOpc is set to the
9982   // opcode number of the comparison.
9983   int CompareOpc;
9984   bool isDot;
9985   if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget))
9986     return SDValue();    // Don't custom lower most intrinsics.
9987 
9988   // If this is a non-dot comparison, make the VCMP node and we are done.
9989   if (!isDot) {
9990     SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
9991                               Op.getOperand(1), Op.getOperand(2),
9992                               DAG.getConstant(CompareOpc, dl, MVT::i32));
9993     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
9994   }
9995 
9996   // Create the PPCISD altivec 'dot' comparison node.
9997   SDValue Ops[] = {
9998     Op.getOperand(2),  // LHS
9999     Op.getOperand(3),  // RHS
10000     DAG.getConstant(CompareOpc, dl, MVT::i32)
10001   };
10002   EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
10003   SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);
10004 
10005   // Now that we have the comparison, emit a copy from the CR to a GPR.
10006   // This is flagged to the above dot comparison.
10007   SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
10008                                 DAG.getRegister(PPC::CR6, MVT::i32),
10009                                 CompNode.getValue(1));
10010 
10011   // Unpack the result based on how the target uses it.
10012   unsigned BitNo;   // Bit # of CR6.
10013   bool InvertBit;   // Invert result?
10014   switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
10015   default:  // Can't happen, don't crash on invalid number though.
10016   case 0:   // Return the value of the EQ bit of CR6.
10017     BitNo = 0; InvertBit = false;
10018     break;
10019   case 1:   // Return the inverted value of the EQ bit of CR6.
10020     BitNo = 0; InvertBit = true;
10021     break;
10022   case 2:   // Return the value of the LT bit of CR6.
10023     BitNo = 2; InvertBit = false;
10024     break;
10025   case 3:   // Return the inverted value of the LT bit of CR6.
10026     BitNo = 2; InvertBit = true;
10027     break;
10028   }
10029 
10030   // Shift the bit into the low position.
10031   Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
10032                       DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
10033   // Isolate the bit.
10034   Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
10035                       DAG.getConstant(1, dl, MVT::i32));
10036 
10037   // If we are supposed to, toggle the bit.
10038   if (InvertBit)
10039     Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
10040                         DAG.getConstant(1, dl, MVT::i32));
10041   return Flags;
10042 }
10043 
10044 SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
10045                                                SelectionDAG &DAG) const {
10046   // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to
10047   // the beginning of the argument list.
10048   int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
10049   SDLoc DL(Op);
10050   switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) {
10051   case Intrinsic::ppc_cfence: {
10052     assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
10053     assert(Subtarget.isPPC64() && "Only 64-bit is supported for now.");
10054     return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other,
10055                                       DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
10056                                                   Op.getOperand(ArgStart + 1)),
10057                                       Op.getOperand(0)),
10058                    0);
10059   }
10060   default:
10061     break;
10062   }
10063   return SDValue();
10064 }
10065 
10066 SDValue PPCTargetLowering::LowerREM(SDValue Op, SelectionDAG &DAG) const {
10067   // Check for a DIV with the same operands as this REM.
10068   for (auto UI : Op.getOperand(1)->uses()) {
10069     if ((Op.getOpcode() == ISD::SREM && UI->getOpcode() == ISD::SDIV) ||
10070         (Op.getOpcode() == ISD::UREM && UI->getOpcode() == ISD::UDIV))
10071       if (UI->getOperand(0) == Op.getOperand(0) &&
10072           UI->getOperand(1) == Op.getOperand(1))
10073         return SDValue();
10074   }
10075   return Op;
10076 }
10077 
10078 // Lower scalar BSWAP64 to xxbrd.
10079 SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const {
10080   SDLoc dl(Op);
10081   // MTVSRDD
10082   Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0),
10083                    Op.getOperand(0));
10084   // XXBRD
10085   Op = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Op);
10086   // MFVSRD
10087   int VectorIndex = 0;
10088   if (Subtarget.isLittleEndian())
10089     VectorIndex = 1;
10090   Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op,
10091                    DAG.getTargetConstant(VectorIndex, dl, MVT::i32));
10092   return Op;
10093 }
10094 
10095 // ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be
10096 // compared to a value that is atomically loaded (atomic loads zero-extend).
10097 SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op,
10098                                                 SelectionDAG &DAG) const {
10099   assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP &&
10100          "Expecting an atomic compare-and-swap here.");
10101   SDLoc dl(Op);
10102   auto *AtomicNode = cast<AtomicSDNode>(Op.getNode());
10103   EVT MemVT = AtomicNode->getMemoryVT();
10104   if (MemVT.getSizeInBits() >= 32)
10105     return Op;
10106 
10107   SDValue CmpOp = Op.getOperand(2);
10108   // If this is already correctly zero-extended, leave it alone.
10109   auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits());
10110   if (DAG.MaskedValueIsZero(CmpOp, HighBits))
10111     return Op;
10112 
10113   // Clear the high bits of the compare operand.
10114   unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1;
10115   SDValue NewCmpOp =
10116     DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp,
10117                 DAG.getConstant(MaskVal, dl, MVT::i32));
10118 
10119   // Replace the existing compare operand with the properly zero-extended one.
10120   SmallVector<SDValue, 4> Ops;
10121   for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
10122     Ops.push_back(AtomicNode->getOperand(i));
10123   Ops[2] = NewCmpOp;
10124   MachineMemOperand *MMO = AtomicNode->getMemOperand();
10125   SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other);
10126   auto NodeTy =
10127     (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16;
10128   return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO);
10129 }
10130 
10131 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
10132                                                  SelectionDAG &DAG) const {
10133   SDLoc dl(Op);
10134   // Create a stack slot that is 16-byte aligned.
10135   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
10136   int FrameIdx = MFI.CreateStackObject(16, 16, false);
10137   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10138   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
10139 
10140   // Store the input value into Value#0 of the stack slot.
10141   SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
10142                                MachinePointerInfo());
10143   // Load it out.
10144   return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
10145 }
10146 
10147 SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
10148                                                   SelectionDAG &DAG) const {
10149   assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
10150          "Should only be called for ISD::INSERT_VECTOR_ELT");
10151 
10152   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));
10153   // We have legal lowering for constant indices but not for variable ones.
10154   if (!C)
10155     return SDValue();
10156 
10157   EVT VT = Op.getValueType();
10158   SDLoc dl(Op);
10159   SDValue V1 = Op.getOperand(0);
10160   SDValue V2 = Op.getOperand(1);
10161   // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types.
10162   if (VT == MVT::v8i16 || VT == MVT::v16i8) {
10163     SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2);
10164     unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8;
10165     unsigned InsertAtElement = C->getZExtValue();
10166     unsigned InsertAtByte = InsertAtElement * BytesInEachElement;
10167     if (Subtarget.isLittleEndian()) {
10168       InsertAtByte = (16 - BytesInEachElement) - InsertAtByte;
10169     }
10170     return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz,
10171                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
10172   }
10173   return Op;
10174 }
10175 
10176 SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
10177                                                    SelectionDAG &DAG) const {
10178   SDLoc dl(Op);
10179   SDNode *N = Op.getNode();
10180 
10181   assert(N->getOperand(0).getValueType() == MVT::v4i1 &&
10182          "Unknown extract_vector_elt type");
10183 
10184   SDValue Value = N->getOperand(0);
10185 
10186   // The first part of this is like the store lowering except that we don't
10187   // need to track the chain.
10188 
10189   // The values are now known to be -1 (false) or 1 (true). To convert this
10190   // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
10191   // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
10192   Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
10193 
10194   // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
10195   // understand how to form the extending load.
10196   SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
10197 
10198   Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
10199 
10200   // Now convert to an integer and store.
10201   Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
10202     DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
10203     Value);
10204 
10205   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
10206   int FrameIdx = MFI.CreateStackObject(16, 16, false);
10207   MachinePointerInfo PtrInfo =
10208       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
10209   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10210   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
10211 
10212   SDValue StoreChain = DAG.getEntryNode();
10213   SDValue Ops[] = {StoreChain,
10214                    DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
10215                    Value, FIdx};
10216   SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);
10217 
10218   StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
10219     dl, VTs, Ops, MVT::v4i32, PtrInfo);
10220 
10221   // Extract the value requested.
10222   unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
10223   SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
10224   Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
10225 
10226   SDValue IntVal =
10227       DAG.getLoad(MVT::i32, dl, StoreChain, Idx, PtrInfo.getWithOffset(Offset));
10228 
10229   if (!Subtarget.useCRBits())
10230     return IntVal;
10231 
10232   return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal);
10233 }
10234 
10235 /// Lowering for QPX v4i1 loads
10236 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
10237                                            SelectionDAG &DAG) const {
10238   SDLoc dl(Op);
10239   LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
10240   SDValue LoadChain = LN->getChain();
10241   SDValue BasePtr = LN->getBasePtr();
10242 
10243   if (Op.getValueType() == MVT::v4f64 ||
10244       Op.getValueType() == MVT::v4f32) {
10245     EVT MemVT = LN->getMemoryVT();
10246     unsigned Alignment = LN->getAlignment();
10247 
10248     // If this load is properly aligned, then it is legal.
10249     if (Alignment >= MemVT.getStoreSize())
10250       return Op;
10251 
10252     EVT ScalarVT = Op.getValueType().getScalarType(),
10253         ScalarMemVT = MemVT.getScalarType();
10254     unsigned Stride = ScalarMemVT.getStoreSize();
10255 
10256     SDValue Vals[4], LoadChains[4];
10257     for (unsigned Idx = 0; Idx < 4; ++Idx) {
10258       SDValue Load;
10259       if (ScalarVT != ScalarMemVT)
10260         Load = DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain,
10261                               BasePtr,
10262                               LN->getPointerInfo().getWithOffset(Idx * Stride),
10263                               ScalarMemVT, MinAlign(Alignment, Idx * Stride),
10264                               LN->getMemOperand()->getFlags(), LN->getAAInfo());
10265       else
10266         Load = DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr,
10267                            LN->getPointerInfo().getWithOffset(Idx * Stride),
10268                            MinAlign(Alignment, Idx * Stride),
10269                            LN->getMemOperand()->getFlags(), LN->getAAInfo());
10270 
10271       if (Idx == 0 && LN->isIndexed()) {
10272         assert(LN->getAddressingMode() == ISD::PRE_INC &&
10273                "Unknown addressing mode on vector load");
10274         Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(),
10275                                   LN->getAddressingMode());
10276       }
10277 
10278       Vals[Idx] = Load;
10279       LoadChains[Idx] = Load.getValue(1);
10280 
10281       BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
10282                             DAG.getConstant(Stride, dl,
10283                                             BasePtr.getValueType()));
10284     }
10285 
10286     SDValue TF =  DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
10287     SDValue Value = DAG.getBuildVector(Op.getValueType(), dl, Vals);
10288 
10289     if (LN->isIndexed()) {
10290       SDValue RetOps[] = { Value, Vals[0].getValue(1), TF };
10291       return DAG.getMergeValues(RetOps, dl);
10292     }
10293 
10294     SDValue RetOps[] = { Value, TF };
10295     return DAG.getMergeValues(RetOps, dl);
10296   }
10297 
10298   assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower");
10299   assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported");
10300 
10301   // To lower v4i1 from a byte array, we load the byte elements of the
10302   // vector and then reuse the BUILD_VECTOR logic.
10303 
10304   SDValue VectElmts[4], VectElmtChains[4];
10305   for (unsigned i = 0; i < 4; ++i) {
10306     SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
10307     Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);
10308 
10309     VectElmts[i] = DAG.getExtLoad(
10310         ISD::EXTLOAD, dl, MVT::i32, LoadChain, Idx,
10311         LN->getPointerInfo().getWithOffset(i), MVT::i8,
10312         /* Alignment = */ 1, LN->getMemOperand()->getFlags(), LN->getAAInfo());
10313     VectElmtChains[i] = VectElmts[i].getValue(1);
10314   }
10315 
10316   LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains);
10317   SDValue Value = DAG.getBuildVector(MVT::v4i1, dl, VectElmts);
10318 
10319   SDValue RVals[] = { Value, LoadChain };
10320   return DAG.getMergeValues(RVals, dl);
10321 }
10322 
10323 /// Lowering for QPX v4i1 stores
10324 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
10325                                             SelectionDAG &DAG) const {
10326   SDLoc dl(Op);
10327   StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
10328   SDValue StoreChain = SN->getChain();
10329   SDValue BasePtr = SN->getBasePtr();
10330   SDValue Value = SN->getValue();
10331 
10332   if (Value.getValueType() == MVT::v4f64 ||
10333       Value.getValueType() == MVT::v4f32) {
10334     EVT MemVT = SN->getMemoryVT();
10335     unsigned Alignment = SN->getAlignment();
10336 
10337     // If this store is properly aligned, then it is legal.
10338     if (Alignment >= MemVT.getStoreSize())
10339       return Op;
10340 
10341     EVT ScalarVT = Value.getValueType().getScalarType(),
10342         ScalarMemVT = MemVT.getScalarType();
10343     unsigned Stride = ScalarMemVT.getStoreSize();
10344 
10345     SDValue Stores[4];
10346     for (unsigned Idx = 0; Idx < 4; ++Idx) {
10347       SDValue Ex = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value,
10348                                DAG.getVectorIdxConstant(Idx, dl));
10349       SDValue Store;
10350       if (ScalarVT != ScalarMemVT)
10351         Store =
10352             DAG.getTruncStore(StoreChain, dl, Ex, BasePtr,
10353                               SN->getPointerInfo().getWithOffset(Idx * Stride),
10354                               ScalarMemVT, MinAlign(Alignment, Idx * Stride),
10355                               SN->getMemOperand()->getFlags(), SN->getAAInfo());
10356       else
10357         Store = DAG.getStore(StoreChain, dl, Ex, BasePtr,
10358                              SN->getPointerInfo().getWithOffset(Idx * Stride),
10359                              MinAlign(Alignment, Idx * Stride),
10360                              SN->getMemOperand()->getFlags(), SN->getAAInfo());
10361 
10362       if (Idx == 0 && SN->isIndexed()) {
10363         assert(SN->getAddressingMode() == ISD::PRE_INC &&
10364                "Unknown addressing mode on vector store");
10365         Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(),
10366                                     SN->getAddressingMode());
10367       }
10368 
10369       BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
10370                             DAG.getConstant(Stride, dl,
10371                                             BasePtr.getValueType()));
10372       Stores[Idx] = Store;
10373     }
10374 
10375     SDValue TF =  DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
10376 
10377     if (SN->isIndexed()) {
10378       SDValue RetOps[] = { TF, Stores[0].getValue(1) };
10379       return DAG.getMergeValues(RetOps, dl);
10380     }
10381 
10382     return TF;
10383   }
10384 
10385   assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported");
10386   assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower");
10387 
10388   // The values are now known to be -1 (false) or 1 (true). To convert this
10389   // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
10390   // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
10391   Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
10392 
10393   // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
10394   // understand how to form the extending load.
10395   SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
10396 
10397   Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
10398 
10399   // Now convert to an integer and store.
10400   Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
10401     DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
10402     Value);
10403 
10404   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
10405   int FrameIdx = MFI.CreateStackObject(16, 16, false);
10406   MachinePointerInfo PtrInfo =
10407       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
10408   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10409   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
10410 
10411   SDValue Ops[] = {StoreChain,
10412                    DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
10413                    Value, FIdx};
10414   SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);
10415 
10416   StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
10417     dl, VTs, Ops, MVT::v4i32, PtrInfo);
10418 
10419   // Move data into the byte array.
10420   SDValue Loads[4], LoadChains[4];
10421   for (unsigned i = 0; i < 4; ++i) {
10422     unsigned Offset = 4*i;
10423     SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
10424     Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
10425 
10426     Loads[i] = DAG.getLoad(MVT::i32, dl, StoreChain, Idx,
10427                            PtrInfo.getWithOffset(Offset));
10428     LoadChains[i] = Loads[i].getValue(1);
10429   }
10430 
10431   StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
10432 
10433   SDValue Stores[4];
10434   for (unsigned i = 0; i < 4; ++i) {
10435     SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
10436     Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);
10437 
10438     Stores[i] = DAG.getTruncStore(
10439         StoreChain, dl, Loads[i], Idx, SN->getPointerInfo().getWithOffset(i),
10440         MVT::i8, /* Alignment = */ 1, SN->getMemOperand()->getFlags(),
10441         SN->getAAInfo());
10442   }
10443 
10444   StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
10445 
10446   return StoreChain;
10447 }
10448 
10449 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
10450   SDLoc dl(Op);
10451   if (Op.getValueType() == MVT::v4i32) {
10452     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
10453 
10454     SDValue Zero  = BuildSplatI(  0, 1, MVT::v4i32, DAG, dl);
10455     SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);//+16 as shift amt.
10456 
10457     SDValue RHSSwap =   // = vrlw RHS, 16
10458       BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
10459 
10460     // Shrinkify inputs to v8i16.
10461     LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
10462     RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
10463     RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);
10464 
10465     // Low parts multiplied together, generating 32-bit results (we ignore the
10466     // top parts).
10467     SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
10468                                         LHS, RHS, DAG, dl, MVT::v4i32);
10469 
10470     SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
10471                                       LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
10472     // Shift the high parts up 16 bits.
10473     HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
10474                               Neg16, DAG, dl);
10475     return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
10476   } else if (Op.getValueType() == MVT::v16i8) {
10477     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
10478     bool isLittleEndian = Subtarget.isLittleEndian();
10479 
10480     // Multiply the even 8-bit parts, producing 16-bit sums.
10481     SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
10482                                            LHS, RHS, DAG, dl, MVT::v8i16);
10483     EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);
10484 
10485     // Multiply the odd 8-bit parts, producing 16-bit sums.
10486     SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
10487                                           LHS, RHS, DAG, dl, MVT::v8i16);
10488     OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);
10489 
10490     // Merge the results together.  Because vmuleub and vmuloub are
10491     // instructions with a big-endian bias, we must reverse the
10492     // element numbering and reverse the meaning of "odd" and "even"
10493     // when generating little endian code.
10494     int Ops[16];
10495     for (unsigned i = 0; i != 8; ++i) {
10496       if (isLittleEndian) {
10497         Ops[i*2  ] = 2*i;
10498         Ops[i*2+1] = 2*i+16;
10499       } else {
10500         Ops[i*2  ] = 2*i+1;
10501         Ops[i*2+1] = 2*i+1+16;
10502       }
10503     }
10504     if (isLittleEndian)
10505       return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
10506     else
10507       return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
10508   } else {
10509     llvm_unreachable("Unknown mul to lower!");
10510   }
10511 }
10512 
10513 SDValue PPCTargetLowering::LowerABS(SDValue Op, SelectionDAG &DAG) const {
10514 
10515   assert(Op.getOpcode() == ISD::ABS && "Should only be called for ISD::ABS");
10516 
10517   EVT VT = Op.getValueType();
10518   assert(VT.isVector() &&
10519          "Only set vector abs as custom, scalar abs shouldn't reach here!");
10520   assert((VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
10521           VT == MVT::v16i8) &&
10522          "Unexpected vector element type!");
10523   assert((VT != MVT::v2i64 || Subtarget.hasP8Altivec()) &&
10524          "Current subtarget doesn't support smax v2i64!");
10525 
10526   // For vector abs, it can be lowered to:
10527   // abs x
10528   // ==>
10529   // y = -x
10530   // smax(x, y)
10531 
10532   SDLoc dl(Op);
10533   SDValue X = Op.getOperand(0);
10534   SDValue Zero = DAG.getConstant(0, dl, VT);
10535   SDValue Y = DAG.getNode(ISD::SUB, dl, VT, Zero, X);
10536 
10537   // SMAX patch https://reviews.llvm.org/D47332
10538   // hasn't landed yet, so use intrinsic first here.
10539   // TODO: Should use SMAX directly once SMAX patch landed
10540   Intrinsic::ID BifID = Intrinsic::ppc_altivec_vmaxsw;
10541   if (VT == MVT::v2i64)
10542     BifID = Intrinsic::ppc_altivec_vmaxsd;
10543   else if (VT == MVT::v8i16)
10544     BifID = Intrinsic::ppc_altivec_vmaxsh;
10545   else if (VT == MVT::v16i8)
10546     BifID = Intrinsic::ppc_altivec_vmaxsb;
10547 
10548   return BuildIntrinsicOp(BifID, X, Y, DAG, dl, VT);
10549 }
10550 
10551 // Custom lowering for fpext vf32 to v2f64
10552 SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
10553 
10554   assert(Op.getOpcode() == ISD::FP_EXTEND &&
10555          "Should only be called for ISD::FP_EXTEND");
10556 
10557   // We only want to custom lower an extend from v2f32 to v2f64.
10558   if (Op.getValueType() != MVT::v2f64 ||
10559       Op.getOperand(0).getValueType() != MVT::v2f32)
10560     return SDValue();
10561 
10562   SDLoc dl(Op);
10563   SDValue Op0 = Op.getOperand(0);
10564 
10565   switch (Op0.getOpcode()) {
10566   default:
10567     return SDValue();
10568   case ISD::EXTRACT_SUBVECTOR: {
10569     assert(Op0.getNumOperands() == 2 &&
10570            isa<ConstantSDNode>(Op0->getOperand(1)) &&
10571            "Node should have 2 operands with second one being a constant!");
10572 
10573     if (Op0.getOperand(0).getValueType() != MVT::v4f32)
10574       return SDValue();
10575 
10576     // Custom lower is only done for high or low doubleword.
10577     int Idx = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
10578     if (Idx % 2 != 0)
10579       return SDValue();
10580 
10581     // Since input is v4f32, at this point Idx is either 0 or 2.
10582     // Shift to get the doubleword position we want.
10583     int DWord = Idx >> 1;
10584 
10585     // High and low word positions are different on little endian.
10586     if (Subtarget.isLittleEndian())
10587       DWord ^= 0x1;
10588 
10589     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64,
10590                        Op0.getOperand(0), DAG.getConstant(DWord, dl, MVT::i32));
10591   }
10592   case ISD::FADD:
10593   case ISD::FMUL:
10594   case ISD::FSUB: {
10595     SDValue NewLoad[2];
10596     for (unsigned i = 0, ie = Op0.getNumOperands(); i != ie; ++i) {
10597       // Ensure both input are loads.
10598       SDValue LdOp = Op0.getOperand(i);
10599       if (LdOp.getOpcode() != ISD::LOAD)
10600         return SDValue();
10601       // Generate new load node.
10602       LoadSDNode *LD = cast<LoadSDNode>(LdOp);
10603       SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
10604       NewLoad[i] = DAG.getMemIntrinsicNode(
10605           PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
10606           LD->getMemoryVT(), LD->getMemOperand());
10607     }
10608     SDValue NewOp =
10609         DAG.getNode(Op0.getOpcode(), SDLoc(Op0), MVT::v4f32, NewLoad[0],
10610                     NewLoad[1], Op0.getNode()->getFlags());
10611     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewOp,
10612                        DAG.getConstant(0, dl, MVT::i32));
10613   }
10614   case ISD::LOAD: {
10615     LoadSDNode *LD = cast<LoadSDNode>(Op0);
10616     SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
10617     SDValue NewLd = DAG.getMemIntrinsicNode(
10618         PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
10619         LD->getMemoryVT(), LD->getMemOperand());
10620     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewLd,
10621                        DAG.getConstant(0, dl, MVT::i32));
10622   }
10623   }
10624   llvm_unreachable("ERROR:Should return for all cases within swtich.");
10625 }
10626 
10627 /// LowerOperation - Provide custom lowering hooks for some operations.
10628 ///
10629 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
10630   switch (Op.getOpcode()) {
10631   default: llvm_unreachable("Wasn't expecting to be able to lower this!");
10632   case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
10633   case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
10634   case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
10635   case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
10636   case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
10637   case ISD::SETCC:              return LowerSETCC(Op, DAG);
10638   case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
10639   case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
10640 
10641   // Variable argument lowering.
10642   case ISD::VASTART:            return LowerVASTART(Op, DAG);
10643   case ISD::VAARG:              return LowerVAARG(Op, DAG);
10644   case ISD::VACOPY:             return LowerVACOPY(Op, DAG);
10645 
10646   case ISD::STACKRESTORE:       return LowerSTACKRESTORE(Op, DAG);
10647   case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
10648   case ISD::GET_DYNAMIC_AREA_OFFSET:
10649     return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
10650 
10651   // Exception handling lowering.
10652   case ISD::EH_DWARF_CFA:       return LowerEH_DWARF_CFA(Op, DAG);
10653   case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
10654   case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);
10655 
10656   case ISD::LOAD:               return LowerLOAD(Op, DAG);
10657   case ISD::STORE:              return LowerSTORE(Op, DAG);
10658   case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
10659   case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
10660   case ISD::FP_TO_UINT:
10661   case ISD::FP_TO_SINT:         return LowerFP_TO_INT(Op, DAG, SDLoc(Op));
10662   case ISD::UINT_TO_FP:
10663   case ISD::SINT_TO_FP:         return LowerINT_TO_FP(Op, DAG);
10664   case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);
10665 
10666   // Lower 64-bit shifts.
10667   case ISD::SHL_PARTS:          return LowerSHL_PARTS(Op, DAG);
10668   case ISD::SRL_PARTS:          return LowerSRL_PARTS(Op, DAG);
10669   case ISD::SRA_PARTS:          return LowerSRA_PARTS(Op, DAG);
10670 
10671   // Vector-related lowering.
10672   case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
10673   case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
10674   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
10675   case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
10676   case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
10677   case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
10678   case ISD::MUL:                return LowerMUL(Op, DAG);
10679   case ISD::ABS:                return LowerABS(Op, DAG);
10680   case ISD::FP_EXTEND:          return LowerFP_EXTEND(Op, DAG);
10681 
10682   // For counter-based loop handling.
10683   case ISD::INTRINSIC_W_CHAIN:  return SDValue();
10684 
10685   case ISD::BITCAST:            return LowerBITCAST(Op, DAG);
10686 
10687   // Frame & Return address.
10688   case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
10689   case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
10690 
10691   case ISD::INTRINSIC_VOID:
10692     return LowerINTRINSIC_VOID(Op, DAG);
10693   case ISD::SREM:
10694   case ISD::UREM:
10695     return LowerREM(Op, DAG);
10696   case ISD::BSWAP:
10697     return LowerBSWAP(Op, DAG);
10698   case ISD::ATOMIC_CMP_SWAP:
10699     return LowerATOMIC_CMP_SWAP(Op, DAG);
10700   }
10701 }
10702 
10703 void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
10704                                            SmallVectorImpl<SDValue>&Results,
10705                                            SelectionDAG &DAG) const {
10706   SDLoc dl(N);
10707   switch (N->getOpcode()) {
10708   default:
10709     llvm_unreachable("Do not know how to custom type legalize this operation!");
10710   case ISD::READCYCLECOUNTER: {
10711     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
10712     SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0));
10713 
10714     Results.push_back(
10715         DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, RTB, RTB.getValue(1)));
10716     Results.push_back(RTB.getValue(2));
10717     break;
10718   }
10719   case ISD::INTRINSIC_W_CHAIN: {
10720     if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
10721         Intrinsic::loop_decrement)
10722       break;
10723 
10724     assert(N->getValueType(0) == MVT::i1 &&
10725            "Unexpected result type for CTR decrement intrinsic");
10726     EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
10727                                  N->getValueType(0));
10728     SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
10729     SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
10730                                  N->getOperand(1));
10731 
10732     Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt));
10733     Results.push_back(NewInt.getValue(1));
10734     break;
10735   }
10736   case ISD::VAARG: {
10737     if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
10738       return;
10739 
10740     EVT VT = N->getValueType(0);
10741 
10742     if (VT == MVT::i64) {
10743       SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);
10744 
10745       Results.push_back(NewNode);
10746       Results.push_back(NewNode.getValue(1));
10747     }
10748     return;
10749   }
10750   case ISD::FP_TO_SINT:
10751   case ISD::FP_TO_UINT:
10752     // LowerFP_TO_INT() can only handle f32 and f64.
10753     if (N->getOperand(0).getValueType() == MVT::ppcf128)
10754       return;
10755     Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
10756     return;
10757   case ISD::TRUNCATE: {
10758     EVT TrgVT = N->getValueType(0);
10759     EVT OpVT = N->getOperand(0).getValueType();
10760     if (TrgVT.isVector() &&
10761         isOperationCustom(N->getOpcode(), TrgVT) &&
10762         OpVT.getSizeInBits() <= 128 &&
10763         isPowerOf2_32(OpVT.getVectorElementType().getSizeInBits()))
10764       Results.push_back(LowerTRUNCATEVector(SDValue(N, 0), DAG));
10765     return;
10766   }
10767   case ISD::BITCAST:
10768     // Don't handle bitcast here.
10769     return;
10770   }
10771 }
10772 
10773 //===----------------------------------------------------------------------===//
10774 //  Other Lowering Code
10775 //===----------------------------------------------------------------------===//
10776 
10777 static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
10778   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
10779   Function *Func = Intrinsic::getDeclaration(M, Id);
10780   return Builder.CreateCall(Func, {});
10781 }
10782 
10783 // The mappings for emitLeading/TrailingFence is taken from
10784 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
10785 Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
10786                                                  Instruction *Inst,
10787                                                  AtomicOrdering Ord) const {
10788   if (Ord == AtomicOrdering::SequentiallyConsistent)
10789     return callIntrinsic(Builder, Intrinsic::ppc_sync);
10790   if (isReleaseOrStronger(Ord))
10791     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
10792   return nullptr;
10793 }
10794 
10795 Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
10796                                                   Instruction *Inst,
10797                                                   AtomicOrdering Ord) const {
10798   if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
10799     // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
10800     // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
10801     // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
10802     if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
10803       return Builder.CreateCall(
10804           Intrinsic::getDeclaration(
10805               Builder.GetInsertBlock()->getParent()->getParent(),
10806               Intrinsic::ppc_cfence, {Inst->getType()}),
10807           {Inst});
10808     // FIXME: Can use isync for rmw operation.
10809     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
10810   }
10811   return nullptr;
10812 }
10813 
10814 MachineBasicBlock *
10815 PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
10816                                     unsigned AtomicSize,
10817                                     unsigned BinOpcode,
10818                                     unsigned CmpOpcode,
10819                                     unsigned CmpPred) const {
10820   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
10821   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10822 
10823   auto LoadMnemonic = PPC::LDARX;
10824   auto StoreMnemonic = PPC::STDCX;
10825   switch (AtomicSize) {
10826   default:
10827     llvm_unreachable("Unexpected size of atomic entity");
10828   case 1:
10829     LoadMnemonic = PPC::LBARX;
10830     StoreMnemonic = PPC::STBCX;
10831     assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
10832     break;
10833   case 2:
10834     LoadMnemonic = PPC::LHARX;
10835     StoreMnemonic = PPC::STHCX;
10836     assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
10837     break;
10838   case 4:
10839     LoadMnemonic = PPC::LWARX;
10840     StoreMnemonic = PPC::STWCX;
10841     break;
10842   case 8:
10843     LoadMnemonic = PPC::LDARX;
10844     StoreMnemonic = PPC::STDCX;
10845     break;
10846   }
10847 
10848   const BasicBlock *LLVM_BB = BB->getBasicBlock();
10849   MachineFunction *F = BB->getParent();
10850   MachineFunction::iterator It = ++BB->getIterator();
10851 
10852   Register dest = MI.getOperand(0).getReg();
10853   Register ptrA = MI.getOperand(1).getReg();
10854   Register ptrB = MI.getOperand(2).getReg();
10855   Register incr = MI.getOperand(3).getReg();
10856   DebugLoc dl = MI.getDebugLoc();
10857 
10858   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
10859   MachineBasicBlock *loop2MBB =
10860     CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
10861   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
10862   F->insert(It, loopMBB);
10863   if (CmpOpcode)
10864     F->insert(It, loop2MBB);
10865   F->insert(It, exitMBB);
10866   exitMBB->splice(exitMBB->begin(), BB,
10867                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
10868   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
10869 
10870   MachineRegisterInfo &RegInfo = F->getRegInfo();
10871   Register TmpReg = (!BinOpcode) ? incr :
10872     RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass
10873                                            : &PPC::GPRCRegClass);
10874 
10875   //  thisMBB:
10876   //   ...
10877   //   fallthrough --> loopMBB
10878   BB->addSuccessor(loopMBB);
10879 
10880   //  loopMBB:
10881   //   l[wd]arx dest, ptr
10882   //   add r0, dest, incr
10883   //   st[wd]cx. r0, ptr
10884   //   bne- loopMBB
10885   //   fallthrough --> exitMBB
10886 
10887   // For max/min...
10888   //  loopMBB:
10889   //   l[wd]arx dest, ptr
10890   //   cmpl?[wd] incr, dest
10891   //   bgt exitMBB
10892   //  loop2MBB:
10893   //   st[wd]cx. dest, ptr
10894   //   bne- loopMBB
10895   //   fallthrough --> exitMBB
10896 
10897   BB = loopMBB;
10898   BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
10899     .addReg(ptrA).addReg(ptrB);
10900   if (BinOpcode)
10901     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
10902   if (CmpOpcode) {
10903     // Signed comparisons of byte or halfword values must be sign-extended.
10904     if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
10905       Register ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
10906       BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
10907               ExtReg).addReg(dest);
10908       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
10909         .addReg(incr).addReg(ExtReg);
10910     } else
10911       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
10912         .addReg(incr).addReg(dest);
10913 
10914     BuildMI(BB, dl, TII->get(PPC::BCC))
10915       .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
10916     BB->addSuccessor(loop2MBB);
10917     BB->addSuccessor(exitMBB);
10918     BB = loop2MBB;
10919   }
10920   BuildMI(BB, dl, TII->get(StoreMnemonic))
10921     .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
10922   BuildMI(BB, dl, TII->get(PPC::BCC))
10923     .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
10924   BB->addSuccessor(loopMBB);
10925   BB->addSuccessor(exitMBB);
10926 
10927   //  exitMBB:
10928   //   ...
10929   BB = exitMBB;
10930   return BB;
10931 }
10932 
10933 MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary(
10934     MachineInstr &MI, MachineBasicBlock *BB,
10935     bool is8bit, // operation
10936     unsigned BinOpcode, unsigned CmpOpcode, unsigned CmpPred) const {
10937   // If we support part-word atomic mnemonics, just use them
10938   if (Subtarget.hasPartwordAtomics())
10939     return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, CmpOpcode,
10940                             CmpPred);
10941 
10942   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
10943   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10944   // In 64 bit mode we have to use 64 bits for addresses, even though the
10945   // lwarx/stwcx are 32 bits.  With the 32-bit atomics we can use address
10946   // registers without caring whether they're 32 or 64, but here we're
10947   // doing actual arithmetic on the addresses.
10948   bool is64bit = Subtarget.isPPC64();
10949   bool isLittleEndian = Subtarget.isLittleEndian();
10950   unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
10951 
10952   const BasicBlock *LLVM_BB = BB->getBasicBlock();
10953   MachineFunction *F = BB->getParent();
10954   MachineFunction::iterator It = ++BB->getIterator();
10955 
10956   Register dest = MI.getOperand(0).getReg();
10957   Register ptrA = MI.getOperand(1).getReg();
10958   Register ptrB = MI.getOperand(2).getReg();
10959   Register incr = MI.getOperand(3).getReg();
10960   DebugLoc dl = MI.getDebugLoc();
10961 
10962   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
10963   MachineBasicBlock *loop2MBB =
10964       CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
10965   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
10966   F->insert(It, loopMBB);
10967   if (CmpOpcode)
10968     F->insert(It, loop2MBB);
10969   F->insert(It, exitMBB);
10970   exitMBB->splice(exitMBB->begin(), BB,
10971                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
10972   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
10973 
10974   MachineRegisterInfo &RegInfo = F->getRegInfo();
10975   const TargetRegisterClass *RC =
10976       is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
10977   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
10978 
10979   Register PtrReg = RegInfo.createVirtualRegister(RC);
10980   Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
10981   Register ShiftReg =
10982       isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
10983   Register Incr2Reg = RegInfo.createVirtualRegister(GPRC);
10984   Register MaskReg = RegInfo.createVirtualRegister(GPRC);
10985   Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
10986   Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
10987   Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
10988   Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
10989   Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
10990   Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
10991   Register Ptr1Reg;
10992   Register TmpReg =
10993       (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC);
10994 
10995   //  thisMBB:
10996   //   ...
10997   //   fallthrough --> loopMBB
10998   BB->addSuccessor(loopMBB);
10999 
11000   // The 4-byte load must be aligned, while a char or short may be
11001   // anywhere in the word.  Hence all this nasty bookkeeping code.
11002   //   add ptr1, ptrA, ptrB [copy if ptrA==0]
11003   //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
11004   //   xori shift, shift1, 24 [16]
11005   //   rlwinm ptr, ptr1, 0, 0, 29
11006   //   slw incr2, incr, shift
11007   //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
11008   //   slw mask, mask2, shift
11009   //  loopMBB:
11010   //   lwarx tmpDest, ptr
11011   //   add tmp, tmpDest, incr2
11012   //   andc tmp2, tmpDest, mask
11013   //   and tmp3, tmp, mask
11014   //   or tmp4, tmp3, tmp2
11015   //   stwcx. tmp4, ptr
11016   //   bne- loopMBB
11017   //   fallthrough --> exitMBB
11018   //   srw dest, tmpDest, shift
11019   if (ptrA != ZeroReg) {
11020     Ptr1Reg = RegInfo.createVirtualRegister(RC);
11021     BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
11022         .addReg(ptrA)
11023         .addReg(ptrB);
11024   } else {
11025     Ptr1Reg = ptrB;
11026   }
11027   // We need use 32-bit subregister to avoid mismatch register class in 64-bit
11028   // mode.
11029   BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
11030       .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
11031       .addImm(3)
11032       .addImm(27)
11033       .addImm(is8bit ? 28 : 27);
11034   if (!isLittleEndian)
11035     BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
11036         .addReg(Shift1Reg)
11037         .addImm(is8bit ? 24 : 16);
11038   if (is64bit)
11039     BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
11040         .addReg(Ptr1Reg)
11041         .addImm(0)
11042         .addImm(61);
11043   else
11044     BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
11045         .addReg(Ptr1Reg)
11046         .addImm(0)
11047         .addImm(0)
11048         .addImm(29);
11049   BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg).addReg(incr).addReg(ShiftReg);
11050   if (is8bit)
11051     BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
11052   else {
11053     BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
11054     BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
11055         .addReg(Mask3Reg)
11056         .addImm(65535);
11057   }
11058   BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
11059       .addReg(Mask2Reg)
11060       .addReg(ShiftReg);
11061 
11062   BB = loopMBB;
11063   BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
11064       .addReg(ZeroReg)
11065       .addReg(PtrReg);
11066   if (BinOpcode)
11067     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
11068         .addReg(Incr2Reg)
11069         .addReg(TmpDestReg);
11070   BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
11071       .addReg(TmpDestReg)
11072       .addReg(MaskReg);
11073   BuildMI(BB, dl, TII->get(PPC::AND), Tmp3Reg).addReg(TmpReg).addReg(MaskReg);
11074   if (CmpOpcode) {
11075     // For unsigned comparisons, we can directly compare the shifted values.
11076     // For signed comparisons we shift and sign extend.
11077     Register SReg = RegInfo.createVirtualRegister(GPRC);
11078     BuildMI(BB, dl, TII->get(PPC::AND), SReg)
11079         .addReg(TmpDestReg)
11080         .addReg(MaskReg);
11081     unsigned ValueReg = SReg;
11082     unsigned CmpReg = Incr2Reg;
11083     if (CmpOpcode == PPC::CMPW) {
11084       ValueReg = RegInfo.createVirtualRegister(GPRC);
11085       BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
11086           .addReg(SReg)
11087           .addReg(ShiftReg);
11088       Register ValueSReg = RegInfo.createVirtualRegister(GPRC);
11089       BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
11090           .addReg(ValueReg);
11091       ValueReg = ValueSReg;
11092       CmpReg = incr;
11093     }
11094     BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
11095         .addReg(CmpReg)
11096         .addReg(ValueReg);
11097     BuildMI(BB, dl, TII->get(PPC::BCC))
11098         .addImm(CmpPred)
11099         .addReg(PPC::CR0)
11100         .addMBB(exitMBB);
11101     BB->addSuccessor(loop2MBB);
11102     BB->addSuccessor(exitMBB);
11103     BB = loop2MBB;
11104   }
11105   BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg).addReg(Tmp3Reg).addReg(Tmp2Reg);
11106   BuildMI(BB, dl, TII->get(PPC::STWCX))
11107       .addReg(Tmp4Reg)
11108       .addReg(ZeroReg)
11109       .addReg(PtrReg);
11110   BuildMI(BB, dl, TII->get(PPC::BCC))
11111       .addImm(PPC::PRED_NE)
11112       .addReg(PPC::CR0)
11113       .addMBB(loopMBB);
11114   BB->addSuccessor(loopMBB);
11115   BB->addSuccessor(exitMBB);
11116 
11117   //  exitMBB:
11118   //   ...
11119   BB = exitMBB;
11120   BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
11121       .addReg(TmpDestReg)
11122       .addReg(ShiftReg);
11123   return BB;
11124 }
11125 
11126 llvm::MachineBasicBlock *
11127 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
11128                                     MachineBasicBlock *MBB) const {
11129   DebugLoc DL = MI.getDebugLoc();
11130   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11131   const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
11132 
11133   MachineFunction *MF = MBB->getParent();
11134   MachineRegisterInfo &MRI = MF->getRegInfo();
11135 
11136   const BasicBlock *BB = MBB->getBasicBlock();
11137   MachineFunction::iterator I = ++MBB->getIterator();
11138 
11139   Register DstReg = MI.getOperand(0).getReg();
11140   const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
11141   assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
11142   Register mainDstReg = MRI.createVirtualRegister(RC);
11143   Register restoreDstReg = MRI.createVirtualRegister(RC);
11144 
11145   MVT PVT = getPointerTy(MF->getDataLayout());
11146   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
11147          "Invalid Pointer Size!");
11148   // For v = setjmp(buf), we generate
11149   //
11150   // thisMBB:
11151   //  SjLjSetup mainMBB
11152   //  bl mainMBB
11153   //  v_restore = 1
11154   //  b sinkMBB
11155   //
11156   // mainMBB:
11157   //  buf[LabelOffset] = LR
11158   //  v_main = 0
11159   //
11160   // sinkMBB:
11161   //  v = phi(main, restore)
11162   //
11163 
11164   MachineBasicBlock *thisMBB = MBB;
11165   MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
11166   MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
11167   MF->insert(I, mainMBB);
11168   MF->insert(I, sinkMBB);
11169 
11170   MachineInstrBuilder MIB;
11171 
11172   // Transfer the remainder of BB and its successor edges to sinkMBB.
11173   sinkMBB->splice(sinkMBB->begin(), MBB,
11174                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
11175   sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
11176 
11177   // Note that the structure of the jmp_buf used here is not compatible
11178   // with that used by libc, and is not designed to be. Specifically, it
11179   // stores only those 'reserved' registers that LLVM does not otherwise
11180   // understand how to spill. Also, by convention, by the time this
11181   // intrinsic is called, Clang has already stored the frame address in the
11182   // first slot of the buffer and stack address in the third. Following the
11183   // X86 target code, we'll store the jump address in the second slot. We also
11184   // need to save the TOC pointer (R2) to handle jumps between shared
11185   // libraries, and that will be stored in the fourth slot. The thread
11186   // identifier (R13) is not affected.
11187 
11188   // thisMBB:
11189   const int64_t LabelOffset = 1 * PVT.getStoreSize();
11190   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
11191   const int64_t BPOffset    = 4 * PVT.getStoreSize();
11192 
11193   // Prepare IP either in reg.
11194   const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
11195   Register LabelReg = MRI.createVirtualRegister(PtrRC);
11196   Register BufReg = MI.getOperand(1).getReg();
11197 
11198   if (Subtarget.is64BitELFABI()) {
11199     setUsesTOCBasePtr(*MBB->getParent());
11200     MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
11201               .addReg(PPC::X2)
11202               .addImm(TOCOffset)
11203               .addReg(BufReg)
11204               .cloneMemRefs(MI);
11205   }
11206 
11207   // Naked functions never have a base pointer, and so we use r1. For all
11208   // other functions, this decision must be delayed until during PEI.
11209   unsigned BaseReg;
11210   if (MF->getFunction().hasFnAttribute(Attribute::Naked))
11211     BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
11212   else
11213     BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;
11214 
11215   MIB = BuildMI(*thisMBB, MI, DL,
11216                 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
11217             .addReg(BaseReg)
11218             .addImm(BPOffset)
11219             .addReg(BufReg)
11220             .cloneMemRefs(MI);
11221 
11222   // Setup
11223   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
11224   MIB.addRegMask(TRI->getNoPreservedMask());
11225 
11226   BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);
11227 
11228   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
11229           .addMBB(mainMBB);
11230   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);
11231 
11232   thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
11233   thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());
11234 
11235   // mainMBB:
11236   //  mainDstReg = 0
11237   MIB =
11238       BuildMI(mainMBB, DL,
11239               TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
11240 
11241   // Store IP
11242   if (Subtarget.isPPC64()) {
11243     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
11244             .addReg(LabelReg)
11245             .addImm(LabelOffset)
11246             .addReg(BufReg);
11247   } else {
11248     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
11249             .addReg(LabelReg)
11250             .addImm(LabelOffset)
11251             .addReg(BufReg);
11252   }
11253   MIB.cloneMemRefs(MI);
11254 
11255   BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
11256   mainMBB->addSuccessor(sinkMBB);
11257 
11258   // sinkMBB:
11259   BuildMI(*sinkMBB, sinkMBB->begin(), DL,
11260           TII->get(PPC::PHI), DstReg)
11261     .addReg(mainDstReg).addMBB(mainMBB)
11262     .addReg(restoreDstReg).addMBB(thisMBB);
11263 
11264   MI.eraseFromParent();
11265   return sinkMBB;
11266 }
11267 
11268 MachineBasicBlock *
11269 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
11270                                      MachineBasicBlock *MBB) const {
11271   DebugLoc DL = MI.getDebugLoc();
11272   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11273 
11274   MachineFunction *MF = MBB->getParent();
11275   MachineRegisterInfo &MRI = MF->getRegInfo();
11276 
11277   MVT PVT = getPointerTy(MF->getDataLayout());
11278   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
11279          "Invalid Pointer Size!");
11280 
11281   const TargetRegisterClass *RC =
11282     (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11283   Register Tmp = MRI.createVirtualRegister(RC);
11284   // Since FP is only updated here but NOT referenced, it's treated as GPR.
11285   unsigned FP  = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
11286   unsigned SP  = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
11287   unsigned BP =
11288       (PVT == MVT::i64)
11289           ? PPC::X30
11290           : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
11291                                                               : PPC::R30);
11292 
11293   MachineInstrBuilder MIB;
11294 
11295   const int64_t LabelOffset = 1 * PVT.getStoreSize();
11296   const int64_t SPOffset    = 2 * PVT.getStoreSize();
11297   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
11298   const int64_t BPOffset    = 4 * PVT.getStoreSize();
11299 
11300   Register BufReg = MI.getOperand(0).getReg();
11301 
11302   // Reload FP (the jumped-to function may not have had a
11303   // frame pointer, and if so, then its r31 will be restored
11304   // as necessary).
11305   if (PVT == MVT::i64) {
11306     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
11307             .addImm(0)
11308             .addReg(BufReg);
11309   } else {
11310     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
11311             .addImm(0)
11312             .addReg(BufReg);
11313   }
11314   MIB.cloneMemRefs(MI);
11315 
11316   // Reload IP
11317   if (PVT == MVT::i64) {
11318     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
11319             .addImm(LabelOffset)
11320             .addReg(BufReg);
11321   } else {
11322     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
11323             .addImm(LabelOffset)
11324             .addReg(BufReg);
11325   }
11326   MIB.cloneMemRefs(MI);
11327 
11328   // Reload SP
11329   if (PVT == MVT::i64) {
11330     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
11331             .addImm(SPOffset)
11332             .addReg(BufReg);
11333   } else {
11334     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
11335             .addImm(SPOffset)
11336             .addReg(BufReg);
11337   }
11338   MIB.cloneMemRefs(MI);
11339 
11340   // Reload BP
11341   if (PVT == MVT::i64) {
11342     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
11343             .addImm(BPOffset)
11344             .addReg(BufReg);
11345   } else {
11346     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
11347             .addImm(BPOffset)
11348             .addReg(BufReg);
11349   }
11350   MIB.cloneMemRefs(MI);
11351 
11352   // Reload TOC
11353   if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
11354     setUsesTOCBasePtr(*MBB->getParent());
11355     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
11356               .addImm(TOCOffset)
11357               .addReg(BufReg)
11358               .cloneMemRefs(MI);
11359   }
11360 
11361   // Jump
11362   BuildMI(*MBB, MI, DL,
11363           TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
11364   BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));
11365 
11366   MI.eraseFromParent();
11367   return MBB;
11368 }
11369 
11370 MachineBasicBlock *
11371 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
11372                                                MachineBasicBlock *BB) const {
11373   if (MI.getOpcode() == TargetOpcode::STACKMAP ||
11374       MI.getOpcode() == TargetOpcode::PATCHPOINT) {
11375     if (Subtarget.is64BitELFABI() &&
11376         MI.getOpcode() == TargetOpcode::PATCHPOINT) {
11377       // Call lowering should have added an r2 operand to indicate a dependence
11378       // on the TOC base pointer value. It can't however, because there is no
11379       // way to mark the dependence as implicit there, and so the stackmap code
11380       // will confuse it with a regular operand. Instead, add the dependence
11381       // here.
11382       MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
11383     }
11384 
11385     return emitPatchPoint(MI, BB);
11386   }
11387 
11388   if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
11389       MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
11390     return emitEHSjLjSetJmp(MI, BB);
11391   } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
11392              MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
11393     return emitEHSjLjLongJmp(MI, BB);
11394   }
11395 
11396   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11397 
11398   // To "insert" these instructions we actually have to insert their
11399   // control-flow patterns.
11400   const BasicBlock *LLVM_BB = BB->getBasicBlock();
11401   MachineFunction::iterator It = ++BB->getIterator();
11402 
11403   MachineFunction *F = BB->getParent();
11404 
11405   if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
11406       MI.getOpcode() == PPC::SELECT_CC_I8 || MI.getOpcode() == PPC::SELECT_I4 ||
11407       MI.getOpcode() == PPC::SELECT_I8) {
11408     SmallVector<MachineOperand, 2> Cond;
11409     if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
11410         MI.getOpcode() == PPC::SELECT_CC_I8)
11411       Cond.push_back(MI.getOperand(4));
11412     else
11413       Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
11414     Cond.push_back(MI.getOperand(1));
11415 
11416     DebugLoc dl = MI.getDebugLoc();
11417     TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
11418                       MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
11419   } else if (MI.getOpcode() == PPC::SELECT_CC_F4 ||
11420              MI.getOpcode() == PPC::SELECT_CC_F8 ||
11421              MI.getOpcode() == PPC::SELECT_CC_F16 ||
11422              MI.getOpcode() == PPC::SELECT_CC_QFRC ||
11423              MI.getOpcode() == PPC::SELECT_CC_QSRC ||
11424              MI.getOpcode() == PPC::SELECT_CC_QBRC ||
11425              MI.getOpcode() == PPC::SELECT_CC_VRRC ||
11426              MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
11427              MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
11428              MI.getOpcode() == PPC::SELECT_CC_VSRC ||
11429              MI.getOpcode() == PPC::SELECT_CC_SPE4 ||
11430              MI.getOpcode() == PPC::SELECT_CC_SPE ||
11431              MI.getOpcode() == PPC::SELECT_F4 ||
11432              MI.getOpcode() == PPC::SELECT_F8 ||
11433              MI.getOpcode() == PPC::SELECT_F16 ||
11434              MI.getOpcode() == PPC::SELECT_QFRC ||
11435              MI.getOpcode() == PPC::SELECT_QSRC ||
11436              MI.getOpcode() == PPC::SELECT_QBRC ||
11437              MI.getOpcode() == PPC::SELECT_SPE ||
11438              MI.getOpcode() == PPC::SELECT_SPE4 ||
11439              MI.getOpcode() == PPC::SELECT_VRRC ||
11440              MI.getOpcode() == PPC::SELECT_VSFRC ||
11441              MI.getOpcode() == PPC::SELECT_VSSRC ||
11442              MI.getOpcode() == PPC::SELECT_VSRC) {
11443     // The incoming instruction knows the destination vreg to set, the
11444     // condition code register to branch on, the true/false values to
11445     // select between, and a branch opcode to use.
11446 
11447     //  thisMBB:
11448     //  ...
11449     //   TrueVal = ...
11450     //   cmpTY ccX, r1, r2
11451     //   bCC copy1MBB
11452     //   fallthrough --> copy0MBB
11453     MachineBasicBlock *thisMBB = BB;
11454     MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
11455     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
11456     DebugLoc dl = MI.getDebugLoc();
11457     F->insert(It, copy0MBB);
11458     F->insert(It, sinkMBB);
11459 
11460     // Transfer the remainder of BB and its successor edges to sinkMBB.
11461     sinkMBB->splice(sinkMBB->begin(), BB,
11462                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11463     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
11464 
11465     // Next, add the true and fallthrough blocks as its successors.
11466     BB->addSuccessor(copy0MBB);
11467     BB->addSuccessor(sinkMBB);
11468 
11469     if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 ||
11470         MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 ||
11471         MI.getOpcode() == PPC::SELECT_F16 ||
11472         MI.getOpcode() == PPC::SELECT_SPE4 ||
11473         MI.getOpcode() == PPC::SELECT_SPE ||
11474         MI.getOpcode() == PPC::SELECT_QFRC ||
11475         MI.getOpcode() == PPC::SELECT_QSRC ||
11476         MI.getOpcode() == PPC::SELECT_QBRC ||
11477         MI.getOpcode() == PPC::SELECT_VRRC ||
11478         MI.getOpcode() == PPC::SELECT_VSFRC ||
11479         MI.getOpcode() == PPC::SELECT_VSSRC ||
11480         MI.getOpcode() == PPC::SELECT_VSRC) {
11481       BuildMI(BB, dl, TII->get(PPC::BC))
11482           .addReg(MI.getOperand(1).getReg())
11483           .addMBB(sinkMBB);
11484     } else {
11485       unsigned SelectPred = MI.getOperand(4).getImm();
11486       BuildMI(BB, dl, TII->get(PPC::BCC))
11487           .addImm(SelectPred)
11488           .addReg(MI.getOperand(1).getReg())
11489           .addMBB(sinkMBB);
11490     }
11491 
11492     //  copy0MBB:
11493     //   %FalseValue = ...
11494     //   # fallthrough to sinkMBB
11495     BB = copy0MBB;
11496 
11497     // Update machine-CFG edges
11498     BB->addSuccessor(sinkMBB);
11499 
11500     //  sinkMBB:
11501     //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
11502     //  ...
11503     BB = sinkMBB;
11504     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg())
11505         .addReg(MI.getOperand(3).getReg())
11506         .addMBB(copy0MBB)
11507         .addReg(MI.getOperand(2).getReg())
11508         .addMBB(thisMBB);
11509   } else if (MI.getOpcode() == PPC::ReadTB) {
11510     // To read the 64-bit time-base register on a 32-bit target, we read the
11511     // two halves. Should the counter have wrapped while it was being read, we
11512     // need to try again.
11513     // ...
11514     // readLoop:
11515     // mfspr Rx,TBU # load from TBU
11516     // mfspr Ry,TB  # load from TB
11517     // mfspr Rz,TBU # load from TBU
11518     // cmpw crX,Rx,Rz # check if 'old'='new'
11519     // bne readLoop   # branch if they're not equal
11520     // ...
11521 
11522     MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
11523     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
11524     DebugLoc dl = MI.getDebugLoc();
11525     F->insert(It, readMBB);
11526     F->insert(It, sinkMBB);
11527 
11528     // Transfer the remainder of BB and its successor edges to sinkMBB.
11529     sinkMBB->splice(sinkMBB->begin(), BB,
11530                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11531     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
11532 
11533     BB->addSuccessor(readMBB);
11534     BB = readMBB;
11535 
11536     MachineRegisterInfo &RegInfo = F->getRegInfo();
11537     Register ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
11538     Register LoReg = MI.getOperand(0).getReg();
11539     Register HiReg = MI.getOperand(1).getReg();
11540 
11541     BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
11542     BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
11543     BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269);
11544 
11545     Register CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
11546 
11547     BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
11548         .addReg(HiReg)
11549         .addReg(ReadAgainReg);
11550     BuildMI(BB, dl, TII->get(PPC::BCC))
11551         .addImm(PPC::PRED_NE)
11552         .addReg(CmpReg)
11553         .addMBB(readMBB);
11554 
11555     BB->addSuccessor(readMBB);
11556     BB->addSuccessor(sinkMBB);
11557   } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
11558     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
11559   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
11560     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
11561   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
11562     BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4);
11563   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
11564     BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8);
11565 
11566   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
11567     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
11568   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
11569     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
11570   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
11571     BB = EmitAtomicBinary(MI, BB, 4, PPC::AND);
11572   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
11573     BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8);
11574 
11575   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
11576     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
11577   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
11578     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
11579   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
11580     BB = EmitAtomicBinary(MI, BB, 4, PPC::OR);
11581   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
11582     BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8);
11583 
11584   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
11585     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
11586   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
11587     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
11588   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
11589     BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR);
11590   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
11591     BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8);
11592 
11593   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
11594     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
11595   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
11596     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
11597   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
11598     BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND);
11599   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
11600     BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8);
11601 
11602   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
11603     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
11604   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
11605     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
11606   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
11607     BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
11608   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
11609     BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);
11610 
11611   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
11612     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE);
11613   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
11614     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE);
11615   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
11616     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE);
11617   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
11618     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE);
11619 
11620   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
11621     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE);
11622   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
11623     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE);
11624   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
11625     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE);
11626   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
11627     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE);
11628 
11629   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
11630     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE);
11631   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
11632     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE);
11633   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
11634     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE);
11635   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
11636     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE);
11637 
11638   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
11639     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE);
11640   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
11641     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE);
11642   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
11643     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE);
11644   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
11645     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE);
11646 
11647   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
11648     BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
11649   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
11650     BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
11651   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
11652     BB = EmitAtomicBinary(MI, BB, 4, 0);
11653   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
11654     BB = EmitAtomicBinary(MI, BB, 8, 0);
11655   else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
11656            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
11657            (Subtarget.hasPartwordAtomics() &&
11658             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
11659            (Subtarget.hasPartwordAtomics() &&
11660             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
11661     bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
11662 
11663     auto LoadMnemonic = PPC::LDARX;
11664     auto StoreMnemonic = PPC::STDCX;
11665     switch (MI.getOpcode()) {
11666     default:
11667       llvm_unreachable("Compare and swap of unknown size");
11668     case PPC::ATOMIC_CMP_SWAP_I8:
11669       LoadMnemonic = PPC::LBARX;
11670       StoreMnemonic = PPC::STBCX;
11671       assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
11672       break;
11673     case PPC::ATOMIC_CMP_SWAP_I16:
11674       LoadMnemonic = PPC::LHARX;
11675       StoreMnemonic = PPC::STHCX;
11676       assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
11677       break;
11678     case PPC::ATOMIC_CMP_SWAP_I32:
11679       LoadMnemonic = PPC::LWARX;
11680       StoreMnemonic = PPC::STWCX;
11681       break;
11682     case PPC::ATOMIC_CMP_SWAP_I64:
11683       LoadMnemonic = PPC::LDARX;
11684       StoreMnemonic = PPC::STDCX;
11685       break;
11686     }
11687     Register dest = MI.getOperand(0).getReg();
11688     Register ptrA = MI.getOperand(1).getReg();
11689     Register ptrB = MI.getOperand(2).getReg();
11690     Register oldval = MI.getOperand(3).getReg();
11691     Register newval = MI.getOperand(4).getReg();
11692     DebugLoc dl = MI.getDebugLoc();
11693 
11694     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
11695     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
11696     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
11697     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11698     F->insert(It, loop1MBB);
11699     F->insert(It, loop2MBB);
11700     F->insert(It, midMBB);
11701     F->insert(It, exitMBB);
11702     exitMBB->splice(exitMBB->begin(), BB,
11703                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11704     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11705 
11706     //  thisMBB:
11707     //   ...
11708     //   fallthrough --> loopMBB
11709     BB->addSuccessor(loop1MBB);
11710 
11711     // loop1MBB:
11712     //   l[bhwd]arx dest, ptr
11713     //   cmp[wd] dest, oldval
11714     //   bne- midMBB
11715     // loop2MBB:
11716     //   st[bhwd]cx. newval, ptr
11717     //   bne- loopMBB
11718     //   b exitBB
11719     // midMBB:
11720     //   st[bhwd]cx. dest, ptr
11721     // exitBB:
11722     BB = loop1MBB;
11723     BuildMI(BB, dl, TII->get(LoadMnemonic), dest).addReg(ptrA).addReg(ptrB);
11724     BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
11725         .addReg(oldval)
11726         .addReg(dest);
11727     BuildMI(BB, dl, TII->get(PPC::BCC))
11728         .addImm(PPC::PRED_NE)
11729         .addReg(PPC::CR0)
11730         .addMBB(midMBB);
11731     BB->addSuccessor(loop2MBB);
11732     BB->addSuccessor(midMBB);
11733 
11734     BB = loop2MBB;
11735     BuildMI(BB, dl, TII->get(StoreMnemonic))
11736         .addReg(newval)
11737         .addReg(ptrA)
11738         .addReg(ptrB);
11739     BuildMI(BB, dl, TII->get(PPC::BCC))
11740         .addImm(PPC::PRED_NE)
11741         .addReg(PPC::CR0)
11742         .addMBB(loop1MBB);
11743     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
11744     BB->addSuccessor(loop1MBB);
11745     BB->addSuccessor(exitMBB);
11746 
11747     BB = midMBB;
11748     BuildMI(BB, dl, TII->get(StoreMnemonic))
11749         .addReg(dest)
11750         .addReg(ptrA)
11751         .addReg(ptrB);
11752     BB->addSuccessor(exitMBB);
11753 
11754     //  exitMBB:
11755     //   ...
11756     BB = exitMBB;
11757   } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
11758              MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
11759     // We must use 64-bit registers for addresses when targeting 64-bit,
11760     // since we're actually doing arithmetic on them.  Other registers
11761     // can be 32-bit.
11762     bool is64bit = Subtarget.isPPC64();
11763     bool isLittleEndian = Subtarget.isLittleEndian();
11764     bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
11765 
11766     Register dest = MI.getOperand(0).getReg();
11767     Register ptrA = MI.getOperand(1).getReg();
11768     Register ptrB = MI.getOperand(2).getReg();
11769     Register oldval = MI.getOperand(3).getReg();
11770     Register newval = MI.getOperand(4).getReg();
11771     DebugLoc dl = MI.getDebugLoc();
11772 
11773     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
11774     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
11775     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
11776     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11777     F->insert(It, loop1MBB);
11778     F->insert(It, loop2MBB);
11779     F->insert(It, midMBB);
11780     F->insert(It, exitMBB);
11781     exitMBB->splice(exitMBB->begin(), BB,
11782                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11783     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11784 
11785     MachineRegisterInfo &RegInfo = F->getRegInfo();
11786     const TargetRegisterClass *RC =
11787         is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11788     const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
11789 
11790     Register PtrReg = RegInfo.createVirtualRegister(RC);
11791     Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
11792     Register ShiftReg =
11793         isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
11794     Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
11795     Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
11796     Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
11797     Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
11798     Register MaskReg = RegInfo.createVirtualRegister(GPRC);
11799     Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
11800     Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
11801     Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
11802     Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
11803     Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
11804     Register Ptr1Reg;
11805     Register TmpReg = RegInfo.createVirtualRegister(GPRC);
11806     Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
11807     //  thisMBB:
11808     //   ...
11809     //   fallthrough --> loopMBB
11810     BB->addSuccessor(loop1MBB);
11811 
11812     // The 4-byte load must be aligned, while a char or short may be
11813     // anywhere in the word.  Hence all this nasty bookkeeping code.
11814     //   add ptr1, ptrA, ptrB [copy if ptrA==0]
11815     //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
11816     //   xori shift, shift1, 24 [16]
11817     //   rlwinm ptr, ptr1, 0, 0, 29
11818     //   slw newval2, newval, shift
11819     //   slw oldval2, oldval,shift
11820     //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
11821     //   slw mask, mask2, shift
11822     //   and newval3, newval2, mask
11823     //   and oldval3, oldval2, mask
11824     // loop1MBB:
11825     //   lwarx tmpDest, ptr
11826     //   and tmp, tmpDest, mask
11827     //   cmpw tmp, oldval3
11828     //   bne- midMBB
11829     // loop2MBB:
11830     //   andc tmp2, tmpDest, mask
11831     //   or tmp4, tmp2, newval3
11832     //   stwcx. tmp4, ptr
11833     //   bne- loop1MBB
11834     //   b exitBB
11835     // midMBB:
11836     //   stwcx. tmpDest, ptr
11837     // exitBB:
11838     //   srw dest, tmpDest, shift
11839     if (ptrA != ZeroReg) {
11840       Ptr1Reg = RegInfo.createVirtualRegister(RC);
11841       BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
11842           .addReg(ptrA)
11843           .addReg(ptrB);
11844     } else {
11845       Ptr1Reg = ptrB;
11846     }
11847 
11848     // We need use 32-bit subregister to avoid mismatch register class in 64-bit
11849     // mode.
11850     BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
11851         .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
11852         .addImm(3)
11853         .addImm(27)
11854         .addImm(is8bit ? 28 : 27);
11855     if (!isLittleEndian)
11856       BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
11857           .addReg(Shift1Reg)
11858           .addImm(is8bit ? 24 : 16);
11859     if (is64bit)
11860       BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
11861           .addReg(Ptr1Reg)
11862           .addImm(0)
11863           .addImm(61);
11864     else
11865       BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
11866           .addReg(Ptr1Reg)
11867           .addImm(0)
11868           .addImm(0)
11869           .addImm(29);
11870     BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
11871         .addReg(newval)
11872         .addReg(ShiftReg);
11873     BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
11874         .addReg(oldval)
11875         .addReg(ShiftReg);
11876     if (is8bit)
11877       BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
11878     else {
11879       BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
11880       BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
11881           .addReg(Mask3Reg)
11882           .addImm(65535);
11883     }
11884     BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
11885         .addReg(Mask2Reg)
11886         .addReg(ShiftReg);
11887     BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
11888         .addReg(NewVal2Reg)
11889         .addReg(MaskReg);
11890     BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
11891         .addReg(OldVal2Reg)
11892         .addReg(MaskReg);
11893 
11894     BB = loop1MBB;
11895     BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
11896         .addReg(ZeroReg)
11897         .addReg(PtrReg);
11898     BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
11899         .addReg(TmpDestReg)
11900         .addReg(MaskReg);
11901     BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
11902         .addReg(TmpReg)
11903         .addReg(OldVal3Reg);
11904     BuildMI(BB, dl, TII->get(PPC::BCC))
11905         .addImm(PPC::PRED_NE)
11906         .addReg(PPC::CR0)
11907         .addMBB(midMBB);
11908     BB->addSuccessor(loop2MBB);
11909     BB->addSuccessor(midMBB);
11910 
11911     BB = loop2MBB;
11912     BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
11913         .addReg(TmpDestReg)
11914         .addReg(MaskReg);
11915     BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
11916         .addReg(Tmp2Reg)
11917         .addReg(NewVal3Reg);
11918     BuildMI(BB, dl, TII->get(PPC::STWCX))
11919         .addReg(Tmp4Reg)
11920         .addReg(ZeroReg)
11921         .addReg(PtrReg);
11922     BuildMI(BB, dl, TII->get(PPC::BCC))
11923         .addImm(PPC::PRED_NE)
11924         .addReg(PPC::CR0)
11925         .addMBB(loop1MBB);
11926     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
11927     BB->addSuccessor(loop1MBB);
11928     BB->addSuccessor(exitMBB);
11929 
11930     BB = midMBB;
11931     BuildMI(BB, dl, TII->get(PPC::STWCX))
11932         .addReg(TmpDestReg)
11933         .addReg(ZeroReg)
11934         .addReg(PtrReg);
11935     BB->addSuccessor(exitMBB);
11936 
11937     //  exitMBB:
11938     //   ...
11939     BB = exitMBB;
11940     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
11941         .addReg(TmpReg)
11942         .addReg(ShiftReg);
11943   } else if (MI.getOpcode() == PPC::FADDrtz) {
11944     // This pseudo performs an FADD with rounding mode temporarily forced
11945     // to round-to-zero.  We emit this via custom inserter since the FPSCR
11946     // is not modeled at the SelectionDAG level.
11947     Register Dest = MI.getOperand(0).getReg();
11948     Register Src1 = MI.getOperand(1).getReg();
11949     Register Src2 = MI.getOperand(2).getReg();
11950     DebugLoc dl = MI.getDebugLoc();
11951 
11952     MachineRegisterInfo &RegInfo = F->getRegInfo();
11953     Register MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
11954 
11955     // Save FPSCR value.
11956     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);
11957 
11958     // Set rounding mode to round-to-zero.
11959     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31);
11960     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30);
11961 
11962     // Perform addition.
11963     BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2);
11964 
11965     // Restore FPSCR value.
11966     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
11967   } else if (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
11968              MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT ||
11969              MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
11970              MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) {
11971     unsigned Opcode = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
11972                        MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8)
11973                           ? PPC::ANDI8_rec
11974                           : PPC::ANDI_rec;
11975     bool IsEQ = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
11976                  MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8);
11977 
11978     MachineRegisterInfo &RegInfo = F->getRegInfo();
11979     Register Dest = RegInfo.createVirtualRegister(
11980         Opcode == PPC::ANDI_rec ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);
11981 
11982     DebugLoc Dl = MI.getDebugLoc();
11983     BuildMI(*BB, MI, Dl, TII->get(Opcode), Dest)
11984         .addReg(MI.getOperand(1).getReg())
11985         .addImm(1);
11986     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
11987             MI.getOperand(0).getReg())
11988         .addReg(IsEQ ? PPC::CR0EQ : PPC::CR0GT);
11989   } else if (MI.getOpcode() == PPC::TCHECK_RET) {
11990     DebugLoc Dl = MI.getDebugLoc();
11991     MachineRegisterInfo &RegInfo = F->getRegInfo();
11992     Register CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
11993     BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
11994     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
11995             MI.getOperand(0).getReg())
11996         .addReg(CRReg);
11997   } else if (MI.getOpcode() == PPC::TBEGIN_RET) {
11998     DebugLoc Dl = MI.getDebugLoc();
11999     unsigned Imm = MI.getOperand(1).getImm();
12000     BuildMI(*BB, MI, Dl, TII->get(PPC::TBEGIN)).addImm(Imm);
12001     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12002             MI.getOperand(0).getReg())
12003         .addReg(PPC::CR0EQ);
12004   } else if (MI.getOpcode() == PPC::SETRNDi) {
12005     DebugLoc dl = MI.getDebugLoc();
12006     Register OldFPSCRReg = MI.getOperand(0).getReg();
12007 
12008     // Save FPSCR value.
12009     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
12010 
12011     // The floating point rounding mode is in the bits 62:63 of FPCSR, and has
12012     // the following settings:
12013     //   00 Round to nearest
12014     //   01 Round to 0
12015     //   10 Round to +inf
12016     //   11 Round to -inf
12017 
12018     // When the operand is immediate, using the two least significant bits of
12019     // the immediate to set the bits 62:63 of FPSCR.
12020     unsigned Mode = MI.getOperand(1).getImm();
12021     BuildMI(*BB, MI, dl, TII->get((Mode & 1) ? PPC::MTFSB1 : PPC::MTFSB0))
12022       .addImm(31);
12023 
12024     BuildMI(*BB, MI, dl, TII->get((Mode & 2) ? PPC::MTFSB1 : PPC::MTFSB0))
12025       .addImm(30);
12026   } else if (MI.getOpcode() == PPC::SETRND) {
12027     DebugLoc dl = MI.getDebugLoc();
12028 
12029     // Copy register from F8RCRegClass::SrcReg to G8RCRegClass::DestReg
12030     // or copy register from G8RCRegClass::SrcReg to F8RCRegClass::DestReg.
12031     // If the target doesn't have DirectMove, we should use stack to do the
12032     // conversion, because the target doesn't have the instructions like mtvsrd
12033     // or mfvsrd to do this conversion directly.
12034     auto copyRegFromG8RCOrF8RC = [&] (unsigned DestReg, unsigned SrcReg) {
12035       if (Subtarget.hasDirectMove()) {
12036         BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), DestReg)
12037           .addReg(SrcReg);
12038       } else {
12039         // Use stack to do the register copy.
12040         unsigned StoreOp = PPC::STD, LoadOp = PPC::LFD;
12041         MachineRegisterInfo &RegInfo = F->getRegInfo();
12042         const TargetRegisterClass *RC = RegInfo.getRegClass(SrcReg);
12043         if (RC == &PPC::F8RCRegClass) {
12044           // Copy register from F8RCRegClass to G8RCRegclass.
12045           assert((RegInfo.getRegClass(DestReg) == &PPC::G8RCRegClass) &&
12046                  "Unsupported RegClass.");
12047 
12048           StoreOp = PPC::STFD;
12049           LoadOp = PPC::LD;
12050         } else {
12051           // Copy register from G8RCRegClass to F8RCRegclass.
12052           assert((RegInfo.getRegClass(SrcReg) == &PPC::G8RCRegClass) &&
12053                  (RegInfo.getRegClass(DestReg) == &PPC::F8RCRegClass) &&
12054                  "Unsupported RegClass.");
12055         }
12056 
12057         MachineFrameInfo &MFI = F->getFrameInfo();
12058         int FrameIdx = MFI.CreateStackObject(8, 8, false);
12059 
12060         MachineMemOperand *MMOStore = F->getMachineMemOperand(
12061             MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
12062             MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx),
12063             MFI.getObjectAlign(FrameIdx));
12064 
12065         // Store the SrcReg into the stack.
12066         BuildMI(*BB, MI, dl, TII->get(StoreOp))
12067           .addReg(SrcReg)
12068           .addImm(0)
12069           .addFrameIndex(FrameIdx)
12070           .addMemOperand(MMOStore);
12071 
12072         MachineMemOperand *MMOLoad = F->getMachineMemOperand(
12073             MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
12074             MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
12075             MFI.getObjectAlign(FrameIdx));
12076 
12077         // Load from the stack where SrcReg is stored, and save to DestReg,
12078         // so we have done the RegClass conversion from RegClass::SrcReg to
12079         // RegClass::DestReg.
12080         BuildMI(*BB, MI, dl, TII->get(LoadOp), DestReg)
12081           .addImm(0)
12082           .addFrameIndex(FrameIdx)
12083           .addMemOperand(MMOLoad);
12084       }
12085     };
12086 
12087     Register OldFPSCRReg = MI.getOperand(0).getReg();
12088 
12089     // Save FPSCR value.
12090     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
12091 
12092     // When the operand is gprc register, use two least significant bits of the
12093     // register and mtfsf instruction to set the bits 62:63 of FPSCR.
12094     //
12095     // copy OldFPSCRTmpReg, OldFPSCRReg
12096     // (INSERT_SUBREG ExtSrcReg, (IMPLICIT_DEF ImDefReg), SrcOp, 1)
12097     // rldimi NewFPSCRTmpReg, ExtSrcReg, OldFPSCRReg, 0, 62
12098     // copy NewFPSCRReg, NewFPSCRTmpReg
12099     // mtfsf 255, NewFPSCRReg
12100     MachineOperand SrcOp = MI.getOperand(1);
12101     MachineRegisterInfo &RegInfo = F->getRegInfo();
12102     Register OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12103 
12104     copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg);
12105 
12106     Register ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12107     Register ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12108 
12109     // The first operand of INSERT_SUBREG should be a register which has
12110     // subregisters, we only care about its RegClass, so we should use an
12111     // IMPLICIT_DEF register.
12112     BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), ImDefReg);
12113     BuildMI(*BB, MI, dl, TII->get(PPC::INSERT_SUBREG), ExtSrcReg)
12114       .addReg(ImDefReg)
12115       .add(SrcOp)
12116       .addImm(1);
12117 
12118     Register NewFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12119     BuildMI(*BB, MI, dl, TII->get(PPC::RLDIMI), NewFPSCRTmpReg)
12120       .addReg(OldFPSCRTmpReg)
12121       .addReg(ExtSrcReg)
12122       .addImm(0)
12123       .addImm(62);
12124 
12125     Register NewFPSCRReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
12126     copyRegFromG8RCOrF8RC(NewFPSCRReg, NewFPSCRTmpReg);
12127 
12128     // The mask 255 means that put the 32:63 bits of NewFPSCRReg to the 32:63
12129     // bits of FPSCR.
12130     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF))
12131       .addImm(255)
12132       .addReg(NewFPSCRReg)
12133       .addImm(0)
12134       .addImm(0);
12135   } else {
12136     llvm_unreachable("Unexpected instr type to insert");
12137   }
12138 
12139   MI.eraseFromParent(); // The pseudo instruction is gone now.
12140   return BB;
12141 }
12142 
12143 //===----------------------------------------------------------------------===//
12144 // Target Optimization Hooks
12145 //===----------------------------------------------------------------------===//
12146 
12147 static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {
12148   // For the estimates, convergence is quadratic, so we essentially double the
12149   // number of digits correct after every iteration. For both FRE and FRSQRTE,
12150   // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(),
12151   // this is 2^-14. IEEE float has 23 digits and double has 52 digits.
12152   int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
12153   if (VT.getScalarType() == MVT::f64)
12154     RefinementSteps++;
12155   return RefinementSteps;
12156 }
12157 
12158 SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
12159                                            int Enabled, int &RefinementSteps,
12160                                            bool &UseOneConstNR,
12161                                            bool Reciprocal) const {
12162   EVT VT = Operand.getValueType();
12163   if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
12164       (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
12165       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
12166       (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
12167       (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
12168       (VT == MVT::v4f64 && Subtarget.hasQPX())) {
12169     if (RefinementSteps == ReciprocalEstimate::Unspecified)
12170       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
12171 
12172     // The Newton-Raphson computation with a single constant does not provide
12173     // enough accuracy on some CPUs.
12174     UseOneConstNR = !Subtarget.needsTwoConstNR();
12175     return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
12176   }
12177   return SDValue();
12178 }
12179 
12180 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
12181                                             int Enabled,
12182                                             int &RefinementSteps) const {
12183   EVT VT = Operand.getValueType();
12184   if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
12185       (VT == MVT::f64 && Subtarget.hasFRE()) ||
12186       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
12187       (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
12188       (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
12189       (VT == MVT::v4f64 && Subtarget.hasQPX())) {
12190     if (RefinementSteps == ReciprocalEstimate::Unspecified)
12191       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
12192     return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
12193   }
12194   return SDValue();
12195 }
12196 
12197 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
12198   // Note: This functionality is used only when unsafe-fp-math is enabled, and
12199   // on cores with reciprocal estimates (which are used when unsafe-fp-math is
12200   // enabled for division), this functionality is redundant with the default
12201   // combiner logic (once the division -> reciprocal/multiply transformation
12202   // has taken place). As a result, this matters more for older cores than for
12203   // newer ones.
12204 
12205   // Combine multiple FDIVs with the same divisor into multiple FMULs by the
12206   // reciprocal if there are two or more FDIVs (for embedded cores with only
12207   // one FP pipeline) for three or more FDIVs (for generic OOO cores).
12208   switch (Subtarget.getCPUDirective()) {
12209   default:
12210     return 3;
12211   case PPC::DIR_440:
12212   case PPC::DIR_A2:
12213   case PPC::DIR_E500:
12214   case PPC::DIR_E500mc:
12215   case PPC::DIR_E5500:
12216     return 2;
12217   }
12218 }
12219 
12220 // isConsecutiveLSLoc needs to work even if all adds have not yet been
12221 // collapsed, and so we need to look through chains of them.
12222 static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
12223                                      int64_t& Offset, SelectionDAG &DAG) {
12224   if (DAG.isBaseWithConstantOffset(Loc)) {
12225     Base = Loc.getOperand(0);
12226     Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
12227 
12228     // The base might itself be a base plus an offset, and if so, accumulate
12229     // that as well.
12230     getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG);
12231   }
12232 }
12233 
12234 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
12235                             unsigned Bytes, int Dist,
12236                             SelectionDAG &DAG) {
12237   if (VT.getSizeInBits() / 8 != Bytes)
12238     return false;
12239 
12240   SDValue BaseLoc = Base->getBasePtr();
12241   if (Loc.getOpcode() == ISD::FrameIndex) {
12242     if (BaseLoc.getOpcode() != ISD::FrameIndex)
12243       return false;
12244     const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
12245     int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
12246     int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
12247     int FS  = MFI.getObjectSize(FI);
12248     int BFS = MFI.getObjectSize(BFI);
12249     if (FS != BFS || FS != (int)Bytes) return false;
12250     return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
12251   }
12252 
12253   SDValue Base1 = Loc, Base2 = BaseLoc;
12254   int64_t Offset1 = 0, Offset2 = 0;
12255   getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);
12256   getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);
12257   if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
12258     return true;
12259 
12260   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12261   const GlobalValue *GV1 = nullptr;
12262   const GlobalValue *GV2 = nullptr;
12263   Offset1 = 0;
12264   Offset2 = 0;
12265   bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
12266   bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
12267   if (isGA1 && isGA2 && GV1 == GV2)
12268     return Offset1 == (Offset2 + Dist*Bytes);
12269   return false;
12270 }
12271 
12272 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
12273 // not enforce equality of the chain operands.
12274 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
12275                             unsigned Bytes, int Dist,
12276                             SelectionDAG &DAG) {
12277   if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
12278     EVT VT = LS->getMemoryVT();
12279     SDValue Loc = LS->getBasePtr();
12280     return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
12281   }
12282 
12283   if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
12284     EVT VT;
12285     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12286     default: return false;
12287     case Intrinsic::ppc_qpx_qvlfd:
12288     case Intrinsic::ppc_qpx_qvlfda:
12289       VT = MVT::v4f64;
12290       break;
12291     case Intrinsic::ppc_qpx_qvlfs:
12292     case Intrinsic::ppc_qpx_qvlfsa:
12293       VT = MVT::v4f32;
12294       break;
12295     case Intrinsic::ppc_qpx_qvlfcd:
12296     case Intrinsic::ppc_qpx_qvlfcda:
12297       VT = MVT::v2f64;
12298       break;
12299     case Intrinsic::ppc_qpx_qvlfcs:
12300     case Intrinsic::ppc_qpx_qvlfcsa:
12301       VT = MVT::v2f32;
12302       break;
12303     case Intrinsic::ppc_qpx_qvlfiwa:
12304     case Intrinsic::ppc_qpx_qvlfiwz:
12305     case Intrinsic::ppc_altivec_lvx:
12306     case Intrinsic::ppc_altivec_lvxl:
12307     case Intrinsic::ppc_vsx_lxvw4x:
12308     case Intrinsic::ppc_vsx_lxvw4x_be:
12309       VT = MVT::v4i32;
12310       break;
12311     case Intrinsic::ppc_vsx_lxvd2x:
12312     case Intrinsic::ppc_vsx_lxvd2x_be:
12313       VT = MVT::v2f64;
12314       break;
12315     case Intrinsic::ppc_altivec_lvebx:
12316       VT = MVT::i8;
12317       break;
12318     case Intrinsic::ppc_altivec_lvehx:
12319       VT = MVT::i16;
12320       break;
12321     case Intrinsic::ppc_altivec_lvewx:
12322       VT = MVT::i32;
12323       break;
12324     }
12325 
12326     return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
12327   }
12328 
12329   if (N->getOpcode() == ISD::INTRINSIC_VOID) {
12330     EVT VT;
12331     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12332     default: return false;
12333     case Intrinsic::ppc_qpx_qvstfd:
12334     case Intrinsic::ppc_qpx_qvstfda:
12335       VT = MVT::v4f64;
12336       break;
12337     case Intrinsic::ppc_qpx_qvstfs:
12338     case Intrinsic::ppc_qpx_qvstfsa:
12339       VT = MVT::v4f32;
12340       break;
12341     case Intrinsic::ppc_qpx_qvstfcd:
12342     case Intrinsic::ppc_qpx_qvstfcda:
12343       VT = MVT::v2f64;
12344       break;
12345     case Intrinsic::ppc_qpx_qvstfcs:
12346     case Intrinsic::ppc_qpx_qvstfcsa:
12347       VT = MVT::v2f32;
12348       break;
12349     case Intrinsic::ppc_qpx_qvstfiw:
12350     case Intrinsic::ppc_qpx_qvstfiwa:
12351     case Intrinsic::ppc_altivec_stvx:
12352     case Intrinsic::ppc_altivec_stvxl:
12353     case Intrinsic::ppc_vsx_stxvw4x:
12354       VT = MVT::v4i32;
12355       break;
12356     case Intrinsic::ppc_vsx_stxvd2x:
12357       VT = MVT::v2f64;
12358       break;
12359     case Intrinsic::ppc_vsx_stxvw4x_be:
12360       VT = MVT::v4i32;
12361       break;
12362     case Intrinsic::ppc_vsx_stxvd2x_be:
12363       VT = MVT::v2f64;
12364       break;
12365     case Intrinsic::ppc_altivec_stvebx:
12366       VT = MVT::i8;
12367       break;
12368     case Intrinsic::ppc_altivec_stvehx:
12369       VT = MVT::i16;
12370       break;
12371     case Intrinsic::ppc_altivec_stvewx:
12372       VT = MVT::i32;
12373       break;
12374     }
12375 
12376     return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
12377   }
12378 
12379   return false;
12380 }
12381 
12382 // Return true is there is a nearyby consecutive load to the one provided
12383 // (regardless of alignment). We search up and down the chain, looking though
12384 // token factors and other loads (but nothing else). As a result, a true result
12385 // indicates that it is safe to create a new consecutive load adjacent to the
12386 // load provided.
12387 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
12388   SDValue Chain = LD->getChain();
12389   EVT VT = LD->getMemoryVT();
12390 
12391   SmallSet<SDNode *, 16> LoadRoots;
12392   SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
12393   SmallSet<SDNode *, 16> Visited;
12394 
12395   // First, search up the chain, branching to follow all token-factor operands.
12396   // If we find a consecutive load, then we're done, otherwise, record all
12397   // nodes just above the top-level loads and token factors.
12398   while (!Queue.empty()) {
12399     SDNode *ChainNext = Queue.pop_back_val();
12400     if (!Visited.insert(ChainNext).second)
12401       continue;
12402 
12403     if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
12404       if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
12405         return true;
12406 
12407       if (!Visited.count(ChainLD->getChain().getNode()))
12408         Queue.push_back(ChainLD->getChain().getNode());
12409     } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
12410       for (const SDUse &O : ChainNext->ops())
12411         if (!Visited.count(O.getNode()))
12412           Queue.push_back(O.getNode());
12413     } else
12414       LoadRoots.insert(ChainNext);
12415   }
12416 
12417   // Second, search down the chain, starting from the top-level nodes recorded
12418   // in the first phase. These top-level nodes are the nodes just above all
12419   // loads and token factors. Starting with their uses, recursively look though
12420   // all loads (just the chain uses) and token factors to find a consecutive
12421   // load.
12422   Visited.clear();
12423   Queue.clear();
12424 
12425   for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
12426        IE = LoadRoots.end(); I != IE; ++I) {
12427     Queue.push_back(*I);
12428 
12429     while (!Queue.empty()) {
12430       SDNode *LoadRoot = Queue.pop_back_val();
12431       if (!Visited.insert(LoadRoot).second)
12432         continue;
12433 
12434       if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
12435         if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
12436           return true;
12437 
12438       for (SDNode::use_iterator UI = LoadRoot->use_begin(),
12439            UE = LoadRoot->use_end(); UI != UE; ++UI)
12440         if (((isa<MemSDNode>(*UI) &&
12441             cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
12442             UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
12443           Queue.push_back(*UI);
12444     }
12445   }
12446 
12447   return false;
12448 }
12449 
12450 /// This function is called when we have proved that a SETCC node can be replaced
12451 /// by subtraction (and other supporting instructions) so that the result of
12452 /// comparison is kept in a GPR instead of CR. This function is purely for
12453 /// codegen purposes and has some flags to guide the codegen process.
12454 static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement,
12455                                      bool Swap, SDLoc &DL, SelectionDAG &DAG) {
12456   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
12457 
12458   // Zero extend the operands to the largest legal integer. Originally, they
12459   // must be of a strictly smaller size.
12460   auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0),
12461                          DAG.getConstant(Size, DL, MVT::i32));
12462   auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1),
12463                          DAG.getConstant(Size, DL, MVT::i32));
12464 
12465   // Swap if needed. Depends on the condition code.
12466   if (Swap)
12467     std::swap(Op0, Op1);
12468 
12469   // Subtract extended integers.
12470   auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1);
12471 
12472   // Move the sign bit to the least significant position and zero out the rest.
12473   // Now the least significant bit carries the result of original comparison.
12474   auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode,
12475                              DAG.getConstant(Size - 1, DL, MVT::i32));
12476   auto Final = Shifted;
12477 
12478   // Complement the result if needed. Based on the condition code.
12479   if (Complement)
12480     Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted,
12481                         DAG.getConstant(1, DL, MVT::i64));
12482 
12483   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final);
12484 }
12485 
12486 SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
12487                                                   DAGCombinerInfo &DCI) const {
12488   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
12489 
12490   SelectionDAG &DAG = DCI.DAG;
12491   SDLoc DL(N);
12492 
12493   // Size of integers being compared has a critical role in the following
12494   // analysis, so we prefer to do this when all types are legal.
12495   if (!DCI.isAfterLegalizeDAG())
12496     return SDValue();
12497 
12498   // If all users of SETCC extend its value to a legal integer type
12499   // then we replace SETCC with a subtraction
12500   for (SDNode::use_iterator UI = N->use_begin(),
12501        UE = N->use_end(); UI != UE; ++UI) {
12502     if (UI->getOpcode() != ISD::ZERO_EXTEND)
12503       return SDValue();
12504   }
12505 
12506   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
12507   auto OpSize = N->getOperand(0).getValueSizeInBits();
12508 
12509   unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();
12510 
12511   if (OpSize < Size) {
12512     switch (CC) {
12513     default: break;
12514     case ISD::SETULT:
12515       return generateEquivalentSub(N, Size, false, false, DL, DAG);
12516     case ISD::SETULE:
12517       return generateEquivalentSub(N, Size, true, true, DL, DAG);
12518     case ISD::SETUGT:
12519       return generateEquivalentSub(N, Size, false, true, DL, DAG);
12520     case ISD::SETUGE:
12521       return generateEquivalentSub(N, Size, true, false, DL, DAG);
12522     }
12523   }
12524 
12525   return SDValue();
12526 }
12527 
12528 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
12529                                                   DAGCombinerInfo &DCI) const {
12530   SelectionDAG &DAG = DCI.DAG;
12531   SDLoc dl(N);
12532 
12533   assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
12534   // If we're tracking CR bits, we need to be careful that we don't have:
12535   //   trunc(binary-ops(zext(x), zext(y)))
12536   // or
12537   //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
12538   // such that we're unnecessarily moving things into GPRs when it would be
12539   // better to keep them in CR bits.
12540 
12541   // Note that trunc here can be an actual i1 trunc, or can be the effective
12542   // truncation that comes from a setcc or select_cc.
12543   if (N->getOpcode() == ISD::TRUNCATE &&
12544       N->getValueType(0) != MVT::i1)
12545     return SDValue();
12546 
12547   if (N->getOperand(0).getValueType() != MVT::i32 &&
12548       N->getOperand(0).getValueType() != MVT::i64)
12549     return SDValue();
12550 
12551   if (N->getOpcode() == ISD::SETCC ||
12552       N->getOpcode() == ISD::SELECT_CC) {
12553     // If we're looking at a comparison, then we need to make sure that the
12554     // high bits (all except for the first) don't matter the result.
12555     ISD::CondCode CC =
12556       cast<CondCodeSDNode>(N->getOperand(
12557         N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
12558     unsigned OpBits = N->getOperand(0).getValueSizeInBits();
12559 
12560     if (ISD::isSignedIntSetCC(CC)) {
12561       if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
12562           DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
12563         return SDValue();
12564     } else if (ISD::isUnsignedIntSetCC(CC)) {
12565       if (!DAG.MaskedValueIsZero(N->getOperand(0),
12566                                  APInt::getHighBitsSet(OpBits, OpBits-1)) ||
12567           !DAG.MaskedValueIsZero(N->getOperand(1),
12568                                  APInt::getHighBitsSet(OpBits, OpBits-1)))
12569         return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
12570                                              : SDValue());
12571     } else {
12572       // This is neither a signed nor an unsigned comparison, just make sure
12573       // that the high bits are equal.
12574       KnownBits Op1Known = DAG.computeKnownBits(N->getOperand(0));
12575       KnownBits Op2Known = DAG.computeKnownBits(N->getOperand(1));
12576 
12577       // We don't really care about what is known about the first bit (if
12578       // anything), so clear it in all masks prior to comparing them.
12579       Op1Known.Zero.clearBit(0); Op1Known.One.clearBit(0);
12580       Op2Known.Zero.clearBit(0); Op2Known.One.clearBit(0);
12581 
12582       if (Op1Known.Zero != Op2Known.Zero || Op1Known.One != Op2Known.One)
12583         return SDValue();
12584     }
12585   }
12586 
12587   // We now know that the higher-order bits are irrelevant, we just need to
12588   // make sure that all of the intermediate operations are bit operations, and
12589   // all inputs are extensions.
12590   if (N->getOperand(0).getOpcode() != ISD::AND &&
12591       N->getOperand(0).getOpcode() != ISD::OR  &&
12592       N->getOperand(0).getOpcode() != ISD::XOR &&
12593       N->getOperand(0).getOpcode() != ISD::SELECT &&
12594       N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
12595       N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
12596       N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
12597       N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
12598       N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
12599     return SDValue();
12600 
12601   if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
12602       N->getOperand(1).getOpcode() != ISD::AND &&
12603       N->getOperand(1).getOpcode() != ISD::OR  &&
12604       N->getOperand(1).getOpcode() != ISD::XOR &&
12605       N->getOperand(1).getOpcode() != ISD::SELECT &&
12606       N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
12607       N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
12608       N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
12609       N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
12610       N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
12611     return SDValue();
12612 
12613   SmallVector<SDValue, 4> Inputs;
12614   SmallVector<SDValue, 8> BinOps, PromOps;
12615   SmallPtrSet<SDNode *, 16> Visited;
12616 
12617   for (unsigned i = 0; i < 2; ++i) {
12618     if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
12619           N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
12620           N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
12621           N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
12622         isa<ConstantSDNode>(N->getOperand(i)))
12623       Inputs.push_back(N->getOperand(i));
12624     else
12625       BinOps.push_back(N->getOperand(i));
12626 
12627     if (N->getOpcode() == ISD::TRUNCATE)
12628       break;
12629   }
12630 
12631   // Visit all inputs, collect all binary operations (and, or, xor and
12632   // select) that are all fed by extensions.
12633   while (!BinOps.empty()) {
12634     SDValue BinOp = BinOps.back();
12635     BinOps.pop_back();
12636 
12637     if (!Visited.insert(BinOp.getNode()).second)
12638       continue;
12639 
12640     PromOps.push_back(BinOp);
12641 
12642     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
12643       // The condition of the select is not promoted.
12644       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
12645         continue;
12646       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
12647         continue;
12648 
12649       if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
12650             BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
12651             BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
12652            BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
12653           isa<ConstantSDNode>(BinOp.getOperand(i))) {
12654         Inputs.push_back(BinOp.getOperand(i));
12655       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
12656                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
12657                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
12658                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
12659                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
12660                  BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
12661                  BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
12662                  BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
12663                  BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
12664         BinOps.push_back(BinOp.getOperand(i));
12665       } else {
12666         // We have an input that is not an extension or another binary
12667         // operation; we'll abort this transformation.
12668         return SDValue();
12669       }
12670     }
12671   }
12672 
12673   // Make sure that this is a self-contained cluster of operations (which
12674   // is not quite the same thing as saying that everything has only one
12675   // use).
12676   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12677     if (isa<ConstantSDNode>(Inputs[i]))
12678       continue;
12679 
12680     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
12681                               UE = Inputs[i].getNode()->use_end();
12682          UI != UE; ++UI) {
12683       SDNode *User = *UI;
12684       if (User != N && !Visited.count(User))
12685         return SDValue();
12686 
12687       // Make sure that we're not going to promote the non-output-value
12688       // operand(s) or SELECT or SELECT_CC.
12689       // FIXME: Although we could sometimes handle this, and it does occur in
12690       // practice that one of the condition inputs to the select is also one of
12691       // the outputs, we currently can't deal with this.
12692       if (User->getOpcode() == ISD::SELECT) {
12693         if (User->getOperand(0) == Inputs[i])
12694           return SDValue();
12695       } else if (User->getOpcode() == ISD::SELECT_CC) {
12696         if (User->getOperand(0) == Inputs[i] ||
12697             User->getOperand(1) == Inputs[i])
12698           return SDValue();
12699       }
12700     }
12701   }
12702 
12703   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
12704     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
12705                               UE = PromOps[i].getNode()->use_end();
12706          UI != UE; ++UI) {
12707       SDNode *User = *UI;
12708       if (User != N && !Visited.count(User))
12709         return SDValue();
12710 
12711       // Make sure that we're not going to promote the non-output-value
12712       // operand(s) or SELECT or SELECT_CC.
12713       // FIXME: Although we could sometimes handle this, and it does occur in
12714       // practice that one of the condition inputs to the select is also one of
12715       // the outputs, we currently can't deal with this.
12716       if (User->getOpcode() == ISD::SELECT) {
12717         if (User->getOperand(0) == PromOps[i])
12718           return SDValue();
12719       } else if (User->getOpcode() == ISD::SELECT_CC) {
12720         if (User->getOperand(0) == PromOps[i] ||
12721             User->getOperand(1) == PromOps[i])
12722           return SDValue();
12723       }
12724     }
12725   }
12726 
12727   // Replace all inputs with the extension operand.
12728   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12729     // Constants may have users outside the cluster of to-be-promoted nodes,
12730     // and so we need to replace those as we do the promotions.
12731     if (isa<ConstantSDNode>(Inputs[i]))
12732       continue;
12733     else
12734       DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
12735   }
12736 
12737   std::list<HandleSDNode> PromOpHandles;
12738   for (auto &PromOp : PromOps)
12739     PromOpHandles.emplace_back(PromOp);
12740 
12741   // Replace all operations (these are all the same, but have a different
12742   // (i1) return type). DAG.getNode will validate that the types of
12743   // a binary operator match, so go through the list in reverse so that
12744   // we've likely promoted both operands first. Any intermediate truncations or
12745   // extensions disappear.
12746   while (!PromOpHandles.empty()) {
12747     SDValue PromOp = PromOpHandles.back().getValue();
12748     PromOpHandles.pop_back();
12749 
12750     if (PromOp.getOpcode() == ISD::TRUNCATE ||
12751         PromOp.getOpcode() == ISD::SIGN_EXTEND ||
12752         PromOp.getOpcode() == ISD::ZERO_EXTEND ||
12753         PromOp.getOpcode() == ISD::ANY_EXTEND) {
12754       if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
12755           PromOp.getOperand(0).getValueType() != MVT::i1) {
12756         // The operand is not yet ready (see comment below).
12757         PromOpHandles.emplace_front(PromOp);
12758         continue;
12759       }
12760 
12761       SDValue RepValue = PromOp.getOperand(0);
12762       if (isa<ConstantSDNode>(RepValue))
12763         RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);
12764 
12765       DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
12766       continue;
12767     }
12768 
12769     unsigned C;
12770     switch (PromOp.getOpcode()) {
12771     default:             C = 0; break;
12772     case ISD::SELECT:    C = 1; break;
12773     case ISD::SELECT_CC: C = 2; break;
12774     }
12775 
12776     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
12777          PromOp.getOperand(C).getValueType() != MVT::i1) ||
12778         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
12779          PromOp.getOperand(C+1).getValueType() != MVT::i1)) {
12780       // The to-be-promoted operands of this node have not yet been
12781       // promoted (this should be rare because we're going through the
12782       // list backward, but if one of the operands has several users in
12783       // this cluster of to-be-promoted nodes, it is possible).
12784       PromOpHandles.emplace_front(PromOp);
12785       continue;
12786     }
12787 
12788     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
12789                                 PromOp.getNode()->op_end());
12790 
12791     // If there are any constant inputs, make sure they're replaced now.
12792     for (unsigned i = 0; i < 2; ++i)
12793       if (isa<ConstantSDNode>(Ops[C+i]))
12794         Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]);
12795 
12796     DAG.ReplaceAllUsesOfValueWith(PromOp,
12797       DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
12798   }
12799 
12800   // Now we're left with the initial truncation itself.
12801   if (N->getOpcode() == ISD::TRUNCATE)
12802     return N->getOperand(0);
12803 
12804   // Otherwise, this is a comparison. The operands to be compared have just
12805   // changed type (to i1), but everything else is the same.
12806   return SDValue(N, 0);
12807 }
12808 
12809 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
12810                                                   DAGCombinerInfo &DCI) const {
12811   SelectionDAG &DAG = DCI.DAG;
12812   SDLoc dl(N);
12813 
12814   // If we're tracking CR bits, we need to be careful that we don't have:
12815   //   zext(binary-ops(trunc(x), trunc(y)))
12816   // or
12817   //   zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
12818   // such that we're unnecessarily moving things into CR bits that can more
12819   // efficiently stay in GPRs. Note that if we're not certain that the high
12820   // bits are set as required by the final extension, we still may need to do
12821   // some masking to get the proper behavior.
12822 
12823   // This same functionality is important on PPC64 when dealing with
12824   // 32-to-64-bit extensions; these occur often when 32-bit values are used as
12825   // the return values of functions. Because it is so similar, it is handled
12826   // here as well.
12827 
12828   if (N->getValueType(0) != MVT::i32 &&
12829       N->getValueType(0) != MVT::i64)
12830     return SDValue();
12831 
12832   if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
12833         (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
12834     return SDValue();
12835 
12836   if (N->getOperand(0).getOpcode() != ISD::AND &&
12837       N->getOperand(0).getOpcode() != ISD::OR  &&
12838       N->getOperand(0).getOpcode() != ISD::XOR &&
12839       N->getOperand(0).getOpcode() != ISD::SELECT &&
12840       N->getOperand(0).getOpcode() != ISD::SELECT_CC)
12841     return SDValue();
12842 
12843   SmallVector<SDValue, 4> Inputs;
12844   SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
12845   SmallPtrSet<SDNode *, 16> Visited;
12846 
12847   // Visit all inputs, collect all binary operations (and, or, xor and
12848   // select) that are all fed by truncations.
12849   while (!BinOps.empty()) {
12850     SDValue BinOp = BinOps.back();
12851     BinOps.pop_back();
12852 
12853     if (!Visited.insert(BinOp.getNode()).second)
12854       continue;
12855 
12856     PromOps.push_back(BinOp);
12857 
12858     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
12859       // The condition of the select is not promoted.
12860       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
12861         continue;
12862       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
12863         continue;
12864 
12865       if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
12866           isa<ConstantSDNode>(BinOp.getOperand(i))) {
12867         Inputs.push_back(BinOp.getOperand(i));
12868       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
12869                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
12870                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
12871                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
12872                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
12873         BinOps.push_back(BinOp.getOperand(i));
12874       } else {
12875         // We have an input that is not a truncation or another binary
12876         // operation; we'll abort this transformation.
12877         return SDValue();
12878       }
12879     }
12880   }
12881 
12882   // The operands of a select that must be truncated when the select is
12883   // promoted because the operand is actually part of the to-be-promoted set.
12884   DenseMap<SDNode *, EVT> SelectTruncOp[2];
12885 
12886   // Make sure that this is a self-contained cluster of operations (which
12887   // is not quite the same thing as saying that everything has only one
12888   // use).
12889   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12890     if (isa<ConstantSDNode>(Inputs[i]))
12891       continue;
12892 
12893     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
12894                               UE = Inputs[i].getNode()->use_end();
12895          UI != UE; ++UI) {
12896       SDNode *User = *UI;
12897       if (User != N && !Visited.count(User))
12898         return SDValue();
12899 
12900       // If we're going to promote the non-output-value operand(s) or SELECT or
12901       // SELECT_CC, record them for truncation.
12902       if (User->getOpcode() == ISD::SELECT) {
12903         if (User->getOperand(0) == Inputs[i])
12904           SelectTruncOp[0].insert(std::make_pair(User,
12905                                     User->getOperand(0).getValueType()));
12906       } else if (User->getOpcode() == ISD::SELECT_CC) {
12907         if (User->getOperand(0) == Inputs[i])
12908           SelectTruncOp[0].insert(std::make_pair(User,
12909                                     User->getOperand(0).getValueType()));
12910         if (User->getOperand(1) == Inputs[i])
12911           SelectTruncOp[1].insert(std::make_pair(User,
12912                                     User->getOperand(1).getValueType()));
12913       }
12914     }
12915   }
12916 
12917   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
12918     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
12919                               UE = PromOps[i].getNode()->use_end();
12920          UI != UE; ++UI) {
12921       SDNode *User = *UI;
12922       if (User != N && !Visited.count(User))
12923         return SDValue();
12924 
12925       // If we're going to promote the non-output-value operand(s) or SELECT or
12926       // SELECT_CC, record them for truncation.
12927       if (User->getOpcode() == ISD::SELECT) {
12928         if (User->getOperand(0) == PromOps[i])
12929           SelectTruncOp[0].insert(std::make_pair(User,
12930                                     User->getOperand(0).getValueType()));
12931       } else if (User->getOpcode() == ISD::SELECT_CC) {
12932         if (User->getOperand(0) == PromOps[i])
12933           SelectTruncOp[0].insert(std::make_pair(User,
12934                                     User->getOperand(0).getValueType()));
12935         if (User->getOperand(1) == PromOps[i])
12936           SelectTruncOp[1].insert(std::make_pair(User,
12937                                     User->getOperand(1).getValueType()));
12938       }
12939     }
12940   }
12941 
12942   unsigned PromBits = N->getOperand(0).getValueSizeInBits();
12943   bool ReallyNeedsExt = false;
12944   if (N->getOpcode() != ISD::ANY_EXTEND) {
12945     // If all of the inputs are not already sign/zero extended, then
12946     // we'll still need to do that at the end.
12947     for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12948       if (isa<ConstantSDNode>(Inputs[i]))
12949         continue;
12950 
12951       unsigned OpBits =
12952         Inputs[i].getOperand(0).getValueSizeInBits();
12953       assert(PromBits < OpBits && "Truncation not to a smaller bit count?");
12954 
12955       if ((N->getOpcode() == ISD::ZERO_EXTEND &&
12956            !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
12957                                   APInt::getHighBitsSet(OpBits,
12958                                                         OpBits-PromBits))) ||
12959           (N->getOpcode() == ISD::SIGN_EXTEND &&
12960            DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
12961              (OpBits-(PromBits-1)))) {
12962         ReallyNeedsExt = true;
12963         break;
12964       }
12965     }
12966   }
12967 
12968   // Replace all inputs, either with the truncation operand, or a
12969   // truncation or extension to the final output type.
12970   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12971     // Constant inputs need to be replaced with the to-be-promoted nodes that
12972     // use them because they might have users outside of the cluster of
12973     // promoted nodes.
12974     if (isa<ConstantSDNode>(Inputs[i]))
12975       continue;
12976 
12977     SDValue InSrc = Inputs[i].getOperand(0);
12978     if (Inputs[i].getValueType() == N->getValueType(0))
12979       DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
12980     else if (N->getOpcode() == ISD::SIGN_EXTEND)
12981       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
12982         DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
12983     else if (N->getOpcode() == ISD::ZERO_EXTEND)
12984       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
12985         DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
12986     else
12987       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
12988         DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
12989   }
12990 
12991   std::list<HandleSDNode> PromOpHandles;
12992   for (auto &PromOp : PromOps)
12993     PromOpHandles.emplace_back(PromOp);
12994 
12995   // Replace all operations (these are all the same, but have a different
12996   // (promoted) return type). DAG.getNode will validate that the types of
12997   // a binary operator match, so go through the list in reverse so that
12998   // we've likely promoted both operands first.
12999   while (!PromOpHandles.empty()) {
13000     SDValue PromOp = PromOpHandles.back().getValue();
13001     PromOpHandles.pop_back();
13002 
13003     unsigned C;
13004     switch (PromOp.getOpcode()) {
13005     default:             C = 0; break;
13006     case ISD::SELECT:    C = 1; break;
13007     case ISD::SELECT_CC: C = 2; break;
13008     }
13009 
13010     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
13011          PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
13012         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
13013          PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) {
13014       // The to-be-promoted operands of this node have not yet been
13015       // promoted (this should be rare because we're going through the
13016       // list backward, but if one of the operands has several users in
13017       // this cluster of to-be-promoted nodes, it is possible).
13018       PromOpHandles.emplace_front(PromOp);
13019       continue;
13020     }
13021 
13022     // For SELECT and SELECT_CC nodes, we do a similar check for any
13023     // to-be-promoted comparison inputs.
13024     if (PromOp.getOpcode() == ISD::SELECT ||
13025         PromOp.getOpcode() == ISD::SELECT_CC) {
13026       if ((SelectTruncOp[0].count(PromOp.getNode()) &&
13027            PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
13028           (SelectTruncOp[1].count(PromOp.getNode()) &&
13029            PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
13030         PromOpHandles.emplace_front(PromOp);
13031         continue;
13032       }
13033     }
13034 
13035     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
13036                                 PromOp.getNode()->op_end());
13037 
13038     // If this node has constant inputs, then they'll need to be promoted here.
13039     for (unsigned i = 0; i < 2; ++i) {
13040       if (!isa<ConstantSDNode>(Ops[C+i]))
13041         continue;
13042       if (Ops[C+i].getValueType() == N->getValueType(0))
13043         continue;
13044 
13045       if (N->getOpcode() == ISD::SIGN_EXTEND)
13046         Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13047       else if (N->getOpcode() == ISD::ZERO_EXTEND)
13048         Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13049       else
13050         Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13051     }
13052 
13053     // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
13054     // truncate them again to the original value type.
13055     if (PromOp.getOpcode() == ISD::SELECT ||
13056         PromOp.getOpcode() == ISD::SELECT_CC) {
13057       auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
13058       if (SI0 != SelectTruncOp[0].end())
13059         Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
13060       auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
13061       if (SI1 != SelectTruncOp[1].end())
13062         Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
13063     }
13064 
13065     DAG.ReplaceAllUsesOfValueWith(PromOp,
13066       DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
13067   }
13068 
13069   // Now we're left with the initial extension itself.
13070   if (!ReallyNeedsExt)
13071     return N->getOperand(0);
13072 
13073   // To zero extend, just mask off everything except for the first bit (in the
13074   // i1 case).
13075   if (N->getOpcode() == ISD::ZERO_EXTEND)
13076     return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
13077                        DAG.getConstant(APInt::getLowBitsSet(
13078                                          N->getValueSizeInBits(0), PromBits),
13079                                        dl, N->getValueType(0)));
13080 
13081   assert(N->getOpcode() == ISD::SIGN_EXTEND &&
13082          "Invalid extension type");
13083   EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
13084   SDValue ShiftCst =
13085       DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
13086   return DAG.getNode(
13087       ISD::SRA, dl, N->getValueType(0),
13088       DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
13089       ShiftCst);
13090 }
13091 
13092 SDValue PPCTargetLowering::combineSetCC(SDNode *N,
13093                                         DAGCombinerInfo &DCI) const {
13094   assert(N->getOpcode() == ISD::SETCC &&
13095          "Should be called with a SETCC node");
13096 
13097   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
13098   if (CC == ISD::SETNE || CC == ISD::SETEQ) {
13099     SDValue LHS = N->getOperand(0);
13100     SDValue RHS = N->getOperand(1);
13101 
13102     // If there is a '0 - y' pattern, canonicalize the pattern to the RHS.
13103     if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
13104         LHS.hasOneUse())
13105       std::swap(LHS, RHS);
13106 
13107     // x == 0-y --> x+y == 0
13108     // x != 0-y --> x+y != 0
13109     if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
13110         RHS.hasOneUse()) {
13111       SDLoc DL(N);
13112       SelectionDAG &DAG = DCI.DAG;
13113       EVT VT = N->getValueType(0);
13114       EVT OpVT = LHS.getValueType();
13115       SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
13116       return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
13117     }
13118   }
13119 
13120   return DAGCombineTruncBoolExt(N, DCI);
13121 }
13122 
13123 // Is this an extending load from an f32 to an f64?
13124 static bool isFPExtLoad(SDValue Op) {
13125   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()))
13126     return LD->getExtensionType() == ISD::EXTLOAD &&
13127       Op.getValueType() == MVT::f64;
13128   return false;
13129 }
13130 
13131 /// Reduces the number of fp-to-int conversion when building a vector.
13132 ///
13133 /// If this vector is built out of floating to integer conversions,
13134 /// transform it to a vector built out of floating point values followed by a
13135 /// single floating to integer conversion of the vector.
13136 /// Namely  (build_vector (fptosi $A), (fptosi $B), ...)
13137 /// becomes (fptosi (build_vector ($A, $B, ...)))
13138 SDValue PPCTargetLowering::
13139 combineElementTruncationToVectorTruncation(SDNode *N,
13140                                            DAGCombinerInfo &DCI) const {
13141   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13142          "Should be called with a BUILD_VECTOR node");
13143 
13144   SelectionDAG &DAG = DCI.DAG;
13145   SDLoc dl(N);
13146 
13147   SDValue FirstInput = N->getOperand(0);
13148   assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
13149          "The input operand must be an fp-to-int conversion.");
13150 
13151   // This combine happens after legalization so the fp_to_[su]i nodes are
13152   // already converted to PPCSISD nodes.
13153   unsigned FirstConversion = FirstInput.getOperand(0).getOpcode();
13154   if (FirstConversion == PPCISD::FCTIDZ ||
13155       FirstConversion == PPCISD::FCTIDUZ ||
13156       FirstConversion == PPCISD::FCTIWZ ||
13157       FirstConversion == PPCISD::FCTIWUZ) {
13158     bool IsSplat = true;
13159     bool Is32Bit = FirstConversion == PPCISD::FCTIWZ ||
13160       FirstConversion == PPCISD::FCTIWUZ;
13161     EVT SrcVT = FirstInput.getOperand(0).getValueType();
13162     SmallVector<SDValue, 4> Ops;
13163     EVT TargetVT = N->getValueType(0);
13164     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
13165       SDValue NextOp = N->getOperand(i);
13166       if (NextOp.getOpcode() != PPCISD::MFVSR)
13167         return SDValue();
13168       unsigned NextConversion = NextOp.getOperand(0).getOpcode();
13169       if (NextConversion != FirstConversion)
13170         return SDValue();
13171       // If we are converting to 32-bit integers, we need to add an FP_ROUND.
13172       // This is not valid if the input was originally double precision. It is
13173       // also not profitable to do unless this is an extending load in which
13174       // case doing this combine will allow us to combine consecutive loads.
13175       if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0)))
13176         return SDValue();
13177       if (N->getOperand(i) != FirstInput)
13178         IsSplat = false;
13179     }
13180 
13181     // If this is a splat, we leave it as-is since there will be only a single
13182     // fp-to-int conversion followed by a splat of the integer. This is better
13183     // for 32-bit and smaller ints and neutral for 64-bit ints.
13184     if (IsSplat)
13185       return SDValue();
13186 
13187     // Now that we know we have the right type of node, get its operands
13188     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
13189       SDValue In = N->getOperand(i).getOperand(0);
13190       if (Is32Bit) {
13191         // For 32-bit values, we need to add an FP_ROUND node (if we made it
13192         // here, we know that all inputs are extending loads so this is safe).
13193         if (In.isUndef())
13194           Ops.push_back(DAG.getUNDEF(SrcVT));
13195         else {
13196           SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl,
13197                                       MVT::f32, In.getOperand(0),
13198                                       DAG.getIntPtrConstant(1, dl));
13199           Ops.push_back(Trunc);
13200         }
13201       } else
13202         Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
13203     }
13204 
13205     unsigned Opcode;
13206     if (FirstConversion == PPCISD::FCTIDZ ||
13207         FirstConversion == PPCISD::FCTIWZ)
13208       Opcode = ISD::FP_TO_SINT;
13209     else
13210       Opcode = ISD::FP_TO_UINT;
13211 
13212     EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;
13213     SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
13214     return DAG.getNode(Opcode, dl, TargetVT, BV);
13215   }
13216   return SDValue();
13217 }
13218 
13219 /// Reduce the number of loads when building a vector.
13220 ///
13221 /// Building a vector out of multiple loads can be converted to a load
13222 /// of the vector type if the loads are consecutive. If the loads are
13223 /// consecutive but in descending order, a shuffle is added at the end
13224 /// to reorder the vector.
13225 static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) {
13226   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13227          "Should be called with a BUILD_VECTOR node");
13228 
13229   SDLoc dl(N);
13230 
13231   // Return early for non byte-sized type, as they can't be consecutive.
13232   if (!N->getValueType(0).getVectorElementType().isByteSized())
13233     return SDValue();
13234 
13235   bool InputsAreConsecutiveLoads = true;
13236   bool InputsAreReverseConsecutive = true;
13237   unsigned ElemSize = N->getValueType(0).getScalarType().getStoreSize();
13238   SDValue FirstInput = N->getOperand(0);
13239   bool IsRoundOfExtLoad = false;
13240 
13241   if (FirstInput.getOpcode() == ISD::FP_ROUND &&
13242       FirstInput.getOperand(0).getOpcode() == ISD::LOAD) {
13243     LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0));
13244     IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD;
13245   }
13246   // Not a build vector of (possibly fp_rounded) loads.
13247   if ((!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) ||
13248       N->getNumOperands() == 1)
13249     return SDValue();
13250 
13251   for (int i = 1, e = N->getNumOperands(); i < e; ++i) {
13252     // If any inputs are fp_round(extload), they all must be.
13253     if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND)
13254       return SDValue();
13255 
13256     SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) :
13257       N->getOperand(i);
13258     if (NextInput.getOpcode() != ISD::LOAD)
13259       return SDValue();
13260 
13261     SDValue PreviousInput =
13262       IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1);
13263     LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput);
13264     LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput);
13265 
13266     // If any inputs are fp_round(extload), they all must be.
13267     if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD)
13268       return SDValue();
13269 
13270     if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG))
13271       InputsAreConsecutiveLoads = false;
13272     if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG))
13273       InputsAreReverseConsecutive = false;
13274 
13275     // Exit early if the loads are neither consecutive nor reverse consecutive.
13276     if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
13277       return SDValue();
13278   }
13279 
13280   assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
13281          "The loads cannot be both consecutive and reverse consecutive.");
13282 
13283   SDValue FirstLoadOp =
13284     IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput;
13285   SDValue LastLoadOp =
13286     IsRoundOfExtLoad ? N->getOperand(N->getNumOperands()-1).getOperand(0) :
13287                        N->getOperand(N->getNumOperands()-1);
13288 
13289   LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
13290   LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
13291   if (InputsAreConsecutiveLoads) {
13292     assert(LD1 && "Input needs to be a LoadSDNode.");
13293     return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
13294                        LD1->getBasePtr(), LD1->getPointerInfo(),
13295                        LD1->getAlignment());
13296   }
13297   if (InputsAreReverseConsecutive) {
13298     assert(LDL && "Input needs to be a LoadSDNode.");
13299     SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
13300                                LDL->getBasePtr(), LDL->getPointerInfo(),
13301                                LDL->getAlignment());
13302     SmallVector<int, 16> Ops;
13303     for (int i = N->getNumOperands() - 1; i >= 0; i--)
13304       Ops.push_back(i);
13305 
13306     return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
13307                                 DAG.getUNDEF(N->getValueType(0)), Ops);
13308   }
13309   return SDValue();
13310 }
13311 
13312 // This function adds the required vector_shuffle needed to get
13313 // the elements of the vector extract in the correct position
13314 // as specified by the CorrectElems encoding.
13315 static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
13316                                       SDValue Input, uint64_t Elems,
13317                                       uint64_t CorrectElems) {
13318   SDLoc dl(N);
13319 
13320   unsigned NumElems = Input.getValueType().getVectorNumElements();
13321   SmallVector<int, 16> ShuffleMask(NumElems, -1);
13322 
13323   // Knowing the element indices being extracted from the original
13324   // vector and the order in which they're being inserted, just put
13325   // them at element indices required for the instruction.
13326   for (unsigned i = 0; i < N->getNumOperands(); i++) {
13327     if (DAG.getDataLayout().isLittleEndian())
13328       ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
13329     else
13330       ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
13331     CorrectElems = CorrectElems >> 8;
13332     Elems = Elems >> 8;
13333   }
13334 
13335   SDValue Shuffle =
13336       DAG.getVectorShuffle(Input.getValueType(), dl, Input,
13337                            DAG.getUNDEF(Input.getValueType()), ShuffleMask);
13338 
13339   EVT VT = N->getValueType(0);
13340   SDValue Conv = DAG.getBitcast(VT, Shuffle);
13341 
13342   EVT ExtVT = EVT::getVectorVT(*DAG.getContext(),
13343                                Input.getValueType().getVectorElementType(),
13344                                VT.getVectorNumElements());
13345   return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Conv,
13346                      DAG.getValueType(ExtVT));
13347 }
13348 
13349 // Look for build vector patterns where input operands come from sign
13350 // extended vector_extract elements of specific indices. If the correct indices
13351 // aren't used, add a vector shuffle to fix up the indices and create
13352 // SIGN_EXTEND_INREG node which selects the vector sign extend instructions
13353 // during instruction selection.
13354 static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
13355   // This array encodes the indices that the vector sign extend instructions
13356   // extract from when extending from one type to another for both BE and LE.
13357   // The right nibble of each byte corresponds to the LE incides.
13358   // and the left nibble of each byte corresponds to the BE incides.
13359   // For example: 0x3074B8FC  byte->word
13360   // For LE: the allowed indices are: 0x0,0x4,0x8,0xC
13361   // For BE: the allowed indices are: 0x3,0x7,0xB,0xF
13362   // For example: 0x000070F8  byte->double word
13363   // For LE: the allowed indices are: 0x0,0x8
13364   // For BE: the allowed indices are: 0x7,0xF
13365   uint64_t TargetElems[] = {
13366       0x3074B8FC, // b->w
13367       0x000070F8, // b->d
13368       0x10325476, // h->w
13369       0x00003074, // h->d
13370       0x00001032, // w->d
13371   };
13372 
13373   uint64_t Elems = 0;
13374   int Index;
13375   SDValue Input;
13376 
13377   auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
13378     if (!Op)
13379       return false;
13380     if (Op.getOpcode() != ISD::SIGN_EXTEND &&
13381         Op.getOpcode() != ISD::SIGN_EXTEND_INREG)
13382       return false;
13383 
13384     // A SIGN_EXTEND_INREG might be fed by an ANY_EXTEND to produce a value
13385     // of the right width.
13386     SDValue Extract = Op.getOperand(0);
13387     if (Extract.getOpcode() == ISD::ANY_EXTEND)
13388       Extract = Extract.getOperand(0);
13389     if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
13390       return false;
13391 
13392     ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
13393     if (!ExtOp)
13394       return false;
13395 
13396     Index = ExtOp->getZExtValue();
13397     if (Input && Input != Extract.getOperand(0))
13398       return false;
13399 
13400     if (!Input)
13401       Input = Extract.getOperand(0);
13402 
13403     Elems = Elems << 8;
13404     Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
13405     Elems |= Index;
13406 
13407     return true;
13408   };
13409 
13410   // If the build vector operands aren't sign extended vector extracts,
13411   // of the same input vector, then return.
13412   for (unsigned i = 0; i < N->getNumOperands(); i++) {
13413     if (!isSExtOfVecExtract(N->getOperand(i))) {
13414       return SDValue();
13415     }
13416   }
13417 
13418   // If the vector extract indicies are not correct, add the appropriate
13419   // vector_shuffle.
13420   int TgtElemArrayIdx;
13421   int InputSize = Input.getValueType().getScalarSizeInBits();
13422   int OutputSize = N->getValueType(0).getScalarSizeInBits();
13423   if (InputSize + OutputSize == 40)
13424     TgtElemArrayIdx = 0;
13425   else if (InputSize + OutputSize == 72)
13426     TgtElemArrayIdx = 1;
13427   else if (InputSize + OutputSize == 48)
13428     TgtElemArrayIdx = 2;
13429   else if (InputSize + OutputSize == 80)
13430     TgtElemArrayIdx = 3;
13431   else if (InputSize + OutputSize == 96)
13432     TgtElemArrayIdx = 4;
13433   else
13434     return SDValue();
13435 
13436   uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
13437   CorrectElems = DAG.getDataLayout().isLittleEndian()
13438                      ? CorrectElems & 0x0F0F0F0F0F0F0F0F
13439                      : CorrectElems & 0xF0F0F0F0F0F0F0F0;
13440   if (Elems != CorrectElems) {
13441     return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
13442   }
13443 
13444   // Regular lowering will catch cases where a shuffle is not needed.
13445   return SDValue();
13446 }
13447 
13448 SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
13449                                                  DAGCombinerInfo &DCI) const {
13450   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13451          "Should be called with a BUILD_VECTOR node");
13452 
13453   SelectionDAG &DAG = DCI.DAG;
13454   SDLoc dl(N);
13455 
13456   if (!Subtarget.hasVSX())
13457     return SDValue();
13458 
13459   // The target independent DAG combiner will leave a build_vector of
13460   // float-to-int conversions intact. We can generate MUCH better code for
13461   // a float-to-int conversion of a vector of floats.
13462   SDValue FirstInput = N->getOperand(0);
13463   if (FirstInput.getOpcode() == PPCISD::MFVSR) {
13464     SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
13465     if (Reduced)
13466       return Reduced;
13467   }
13468 
13469   // If we're building a vector out of consecutive loads, just load that
13470   // vector type.
13471   SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG);
13472   if (Reduced)
13473     return Reduced;
13474 
13475   // If we're building a vector out of extended elements from another vector
13476   // we have P9 vector integer extend instructions. The code assumes legal
13477   // input types (i.e. it can't handle things like v4i16) so do not run before
13478   // legalization.
13479   if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) {
13480     Reduced = combineBVOfVecSExt(N, DAG);
13481     if (Reduced)
13482       return Reduced;
13483   }
13484 
13485 
13486   if (N->getValueType(0) != MVT::v2f64)
13487     return SDValue();
13488 
13489   // Looking for:
13490   // (build_vector ([su]int_to_fp (extractelt 0)), [su]int_to_fp (extractelt 1))
13491   if (FirstInput.getOpcode() != ISD::SINT_TO_FP &&
13492       FirstInput.getOpcode() != ISD::UINT_TO_FP)
13493     return SDValue();
13494   if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
13495       N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
13496     return SDValue();
13497   if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
13498     return SDValue();
13499 
13500   SDValue Ext1 = FirstInput.getOperand(0);
13501   SDValue Ext2 = N->getOperand(1).getOperand(0);
13502   if(Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
13503      Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
13504     return SDValue();
13505 
13506   ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
13507   ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
13508   if (!Ext1Op || !Ext2Op)
13509     return SDValue();
13510   if (Ext1.getOperand(0).getValueType() != MVT::v4i32 ||
13511       Ext1.getOperand(0) != Ext2.getOperand(0))
13512     return SDValue();
13513 
13514   int FirstElem = Ext1Op->getZExtValue();
13515   int SecondElem = Ext2Op->getZExtValue();
13516   int SubvecIdx;
13517   if (FirstElem == 0 && SecondElem == 1)
13518     SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
13519   else if (FirstElem == 2 && SecondElem == 3)
13520     SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
13521   else
13522     return SDValue();
13523 
13524   SDValue SrcVec = Ext1.getOperand(0);
13525   auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
13526     PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
13527   return DAG.getNode(NodeType, dl, MVT::v2f64,
13528                      SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
13529 }
13530 
13531 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
13532                                               DAGCombinerInfo &DCI) const {
13533   assert((N->getOpcode() == ISD::SINT_TO_FP ||
13534           N->getOpcode() == ISD::UINT_TO_FP) &&
13535          "Need an int -> FP conversion node here");
13536 
13537   if (useSoftFloat() || !Subtarget.has64BitSupport())
13538     return SDValue();
13539 
13540   SelectionDAG &DAG = DCI.DAG;
13541   SDLoc dl(N);
13542   SDValue Op(N, 0);
13543 
13544   // Don't handle ppc_fp128 here or conversions that are out-of-range capable
13545   // from the hardware.
13546   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
13547     return SDValue();
13548   if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
13549       Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
13550     return SDValue();
13551 
13552   SDValue FirstOperand(Op.getOperand(0));
13553   bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
13554     (FirstOperand.getValueType() == MVT::i8 ||
13555      FirstOperand.getValueType() == MVT::i16);
13556   if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
13557     bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
13558     bool DstDouble = Op.getValueType() == MVT::f64;
13559     unsigned ConvOp = Signed ?
13560       (DstDouble ? PPCISD::FCFID  : PPCISD::FCFIDS) :
13561       (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
13562     SDValue WidthConst =
13563       DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
13564                             dl, false);
13565     LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
13566     SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
13567     SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
13568                                          DAG.getVTList(MVT::f64, MVT::Other),
13569                                          Ops, MVT::i8, LDN->getMemOperand());
13570 
13571     // For signed conversion, we need to sign-extend the value in the VSR
13572     if (Signed) {
13573       SDValue ExtOps[] = { Ld, WidthConst };
13574       SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
13575       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
13576     } else
13577       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
13578   }
13579 
13580 
13581   // For i32 intermediate values, unfortunately, the conversion functions
13582   // leave the upper 32 bits of the value are undefined. Within the set of
13583   // scalar instructions, we have no method for zero- or sign-extending the
13584   // value. Thus, we cannot handle i32 intermediate values here.
13585   if (Op.getOperand(0).getValueType() == MVT::i32)
13586     return SDValue();
13587 
13588   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
13589          "UINT_TO_FP is supported only with FPCVT");
13590 
13591   // If we have FCFIDS, then use it when converting to single-precision.
13592   // Otherwise, convert to double-precision and then round.
13593   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
13594                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
13595                                                             : PPCISD::FCFIDS)
13596                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
13597                                                             : PPCISD::FCFID);
13598   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
13599                   ? MVT::f32
13600                   : MVT::f64;
13601 
13602   // If we're converting from a float, to an int, and back to a float again,
13603   // then we don't need the store/load pair at all.
13604   if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
13605        Subtarget.hasFPCVT()) ||
13606       (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
13607     SDValue Src = Op.getOperand(0).getOperand(0);
13608     if (Src.getValueType() == MVT::f32) {
13609       Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
13610       DCI.AddToWorklist(Src.getNode());
13611     } else if (Src.getValueType() != MVT::f64) {
13612       // Make sure that we don't pick up a ppc_fp128 source value.
13613       return SDValue();
13614     }
13615 
13616     unsigned FCTOp =
13617       Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
13618                                                         PPCISD::FCTIDUZ;
13619 
13620     SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
13621     SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);
13622 
13623     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
13624       FP = DAG.getNode(ISD::FP_ROUND, dl,
13625                        MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
13626       DCI.AddToWorklist(FP.getNode());
13627     }
13628 
13629     return FP;
13630   }
13631 
13632   return SDValue();
13633 }
13634 
13635 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
13636 // builtins) into loads with swaps.
13637 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
13638                                               DAGCombinerInfo &DCI) const {
13639   SelectionDAG &DAG = DCI.DAG;
13640   SDLoc dl(N);
13641   SDValue Chain;
13642   SDValue Base;
13643   MachineMemOperand *MMO;
13644 
13645   switch (N->getOpcode()) {
13646   default:
13647     llvm_unreachable("Unexpected opcode for little endian VSX load");
13648   case ISD::LOAD: {
13649     LoadSDNode *LD = cast<LoadSDNode>(N);
13650     Chain = LD->getChain();
13651     Base = LD->getBasePtr();
13652     MMO = LD->getMemOperand();
13653     // If the MMO suggests this isn't a load of a full vector, leave
13654     // things alone.  For a built-in, we have to make the change for
13655     // correctness, so if there is a size problem that will be a bug.
13656     if (MMO->getSize() < 16)
13657       return SDValue();
13658     break;
13659   }
13660   case ISD::INTRINSIC_W_CHAIN: {
13661     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
13662     Chain = Intrin->getChain();
13663     // Similarly to the store case below, Intrin->getBasePtr() doesn't get
13664     // us what we want. Get operand 2 instead.
13665     Base = Intrin->getOperand(2);
13666     MMO = Intrin->getMemOperand();
13667     break;
13668   }
13669   }
13670 
13671   MVT VecTy = N->getValueType(0).getSimpleVT();
13672 
13673   // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is
13674   // aligned and the type is a vector with elements up to 4 bytes
13675   if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
13676       VecTy.getScalarSizeInBits() <= 32) {
13677     return SDValue();
13678   }
13679 
13680   SDValue LoadOps[] = { Chain, Base };
13681   SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
13682                                          DAG.getVTList(MVT::v2f64, MVT::Other),
13683                                          LoadOps, MVT::v2f64, MMO);
13684 
13685   DCI.AddToWorklist(Load.getNode());
13686   Chain = Load.getValue(1);
13687   SDValue Swap = DAG.getNode(
13688       PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load);
13689   DCI.AddToWorklist(Swap.getNode());
13690 
13691   // Add a bitcast if the resulting load type doesn't match v2f64.
13692   if (VecTy != MVT::v2f64) {
13693     SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap);
13694     DCI.AddToWorklist(N.getNode());
13695     // Package {bitcast value, swap's chain} to match Load's shape.
13696     return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other),
13697                        N, Swap.getValue(1));
13698   }
13699 
13700   return Swap;
13701 }
13702 
13703 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
13704 // builtins) into stores with swaps.
13705 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
13706                                                DAGCombinerInfo &DCI) const {
13707   SelectionDAG &DAG = DCI.DAG;
13708   SDLoc dl(N);
13709   SDValue Chain;
13710   SDValue Base;
13711   unsigned SrcOpnd;
13712   MachineMemOperand *MMO;
13713 
13714   switch (N->getOpcode()) {
13715   default:
13716     llvm_unreachable("Unexpected opcode for little endian VSX store");
13717   case ISD::STORE: {
13718     StoreSDNode *ST = cast<StoreSDNode>(N);
13719     Chain = ST->getChain();
13720     Base = ST->getBasePtr();
13721     MMO = ST->getMemOperand();
13722     SrcOpnd = 1;
13723     // If the MMO suggests this isn't a store of a full vector, leave
13724     // things alone.  For a built-in, we have to make the change for
13725     // correctness, so if there is a size problem that will be a bug.
13726     if (MMO->getSize() < 16)
13727       return SDValue();
13728     break;
13729   }
13730   case ISD::INTRINSIC_VOID: {
13731     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
13732     Chain = Intrin->getChain();
13733     // Intrin->getBasePtr() oddly does not get what we want.
13734     Base = Intrin->getOperand(3);
13735     MMO = Intrin->getMemOperand();
13736     SrcOpnd = 2;
13737     break;
13738   }
13739   }
13740 
13741   SDValue Src = N->getOperand(SrcOpnd);
13742   MVT VecTy = Src.getValueType().getSimpleVT();
13743 
13744   // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the load is
13745   // aligned and the type is a vector with elements up to 4 bytes
13746   if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
13747       VecTy.getScalarSizeInBits() <= 32) {
13748     return SDValue();
13749   }
13750 
13751   // All stores are done as v2f64 and possible bit cast.
13752   if (VecTy != MVT::v2f64) {
13753     Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
13754     DCI.AddToWorklist(Src.getNode());
13755   }
13756 
13757   SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
13758                              DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
13759   DCI.AddToWorklist(Swap.getNode());
13760   Chain = Swap.getValue(1);
13761   SDValue StoreOps[] = { Chain, Swap, Base };
13762   SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
13763                                           DAG.getVTList(MVT::Other),
13764                                           StoreOps, VecTy, MMO);
13765   DCI.AddToWorklist(Store.getNode());
13766   return Store;
13767 }
13768 
13769 // Handle DAG combine for STORE (FP_TO_INT F).
13770 SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
13771                                                DAGCombinerInfo &DCI) const {
13772 
13773   SelectionDAG &DAG = DCI.DAG;
13774   SDLoc dl(N);
13775   unsigned Opcode = N->getOperand(1).getOpcode();
13776 
13777   assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT)
13778          && "Not a FP_TO_INT Instruction!");
13779 
13780   SDValue Val = N->getOperand(1).getOperand(0);
13781   EVT Op1VT = N->getOperand(1).getValueType();
13782   EVT ResVT = Val.getValueType();
13783 
13784   // Floating point types smaller than 32 bits are not legal on Power.
13785   if (ResVT.getScalarSizeInBits() < 32)
13786     return SDValue();
13787 
13788   // Only perform combine for conversion to i64/i32 or power9 i16/i8.
13789   bool ValidTypeForStoreFltAsInt =
13790         (Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
13791          (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));
13792 
13793   if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Vector() ||
13794       cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
13795     return SDValue();
13796 
13797   // Extend f32 values to f64
13798   if (ResVT.getScalarSizeInBits() == 32) {
13799     Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
13800     DCI.AddToWorklist(Val.getNode());
13801   }
13802 
13803   // Set signed or unsigned conversion opcode.
13804   unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT) ?
13805                           PPCISD::FP_TO_SINT_IN_VSR :
13806                           PPCISD::FP_TO_UINT_IN_VSR;
13807 
13808   Val = DAG.getNode(ConvOpcode,
13809                     dl, ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val);
13810   DCI.AddToWorklist(Val.getNode());
13811 
13812   // Set number of bytes being converted.
13813   unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8;
13814   SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2),
13815                     DAG.getIntPtrConstant(ByteSize, dl, false),
13816                     DAG.getValueType(Op1VT) };
13817 
13818   Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl,
13819           DAG.getVTList(MVT::Other), Ops,
13820           cast<StoreSDNode>(N)->getMemoryVT(),
13821           cast<StoreSDNode>(N)->getMemOperand());
13822 
13823   DCI.AddToWorklist(Val.getNode());
13824   return Val;
13825 }
13826 
13827 SDValue PPCTargetLowering::combineVReverseMemOP(ShuffleVectorSDNode *SVN,
13828                                                 LSBaseSDNode *LSBase,
13829                                                 DAGCombinerInfo &DCI) const {
13830   assert((ISD::isNormalLoad(LSBase) || ISD::isNormalStore(LSBase)) &&
13831         "Not a reverse memop pattern!");
13832 
13833   auto IsElementReverse = [](const ShuffleVectorSDNode *SVN) -> bool {
13834     auto Mask = SVN->getMask();
13835     int i = 0;
13836     auto I = Mask.rbegin();
13837     auto E = Mask.rend();
13838 
13839     for (; I != E; ++I) {
13840       if (*I != i)
13841         return false;
13842       i++;
13843     }
13844     return true;
13845   };
13846 
13847   SelectionDAG &DAG = DCI.DAG;
13848   EVT VT = SVN->getValueType(0);
13849 
13850   if (!isTypeLegal(VT) || !Subtarget.isLittleEndian() || !Subtarget.hasVSX())
13851     return SDValue();
13852 
13853   // Before P9, we have PPCVSXSwapRemoval pass to hack the element order.
13854   // See comment in PPCVSXSwapRemoval.cpp.
13855   // It is conflict with PPCVSXSwapRemoval opt. So we don't do it.
13856   if (!Subtarget.hasP9Vector())
13857     return SDValue();
13858 
13859   if(!IsElementReverse(SVN))
13860     return SDValue();
13861 
13862   if (LSBase->getOpcode() == ISD::LOAD) {
13863     SDLoc dl(SVN);
13864     SDValue LoadOps[] = {LSBase->getChain(), LSBase->getBasePtr()};
13865     return DAG.getMemIntrinsicNode(
13866         PPCISD::LOAD_VEC_BE, dl, DAG.getVTList(VT, MVT::Other), LoadOps,
13867         LSBase->getMemoryVT(), LSBase->getMemOperand());
13868   }
13869 
13870   if (LSBase->getOpcode() == ISD::STORE) {
13871     SDLoc dl(LSBase);
13872     SDValue StoreOps[] = {LSBase->getChain(), SVN->getOperand(0),
13873                           LSBase->getBasePtr()};
13874     return DAG.getMemIntrinsicNode(
13875         PPCISD::STORE_VEC_BE, dl, DAG.getVTList(MVT::Other), StoreOps,
13876         LSBase->getMemoryVT(), LSBase->getMemOperand());
13877   }
13878 
13879   llvm_unreachable("Expected a load or store node here");
13880 }
13881 
13882 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
13883                                              DAGCombinerInfo &DCI) const {
13884   SelectionDAG &DAG = DCI.DAG;
13885   SDLoc dl(N);
13886   switch (N->getOpcode()) {
13887   default: break;
13888   case ISD::ADD:
13889     return combineADD(N, DCI);
13890   case ISD::SHL:
13891     return combineSHL(N, DCI);
13892   case ISD::SRA:
13893     return combineSRA(N, DCI);
13894   case ISD::SRL:
13895     return combineSRL(N, DCI);
13896   case ISD::MUL:
13897     return combineMUL(N, DCI);
13898   case PPCISD::SHL:
13899     if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
13900         return N->getOperand(0);
13901     break;
13902   case PPCISD::SRL:
13903     if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
13904         return N->getOperand(0);
13905     break;
13906   case PPCISD::SRA:
13907     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
13908       if (C->isNullValue() ||   //  0 >>s V -> 0.
13909           C->isAllOnesValue())    // -1 >>s V -> -1.
13910         return N->getOperand(0);
13911     }
13912     break;
13913   case ISD::SIGN_EXTEND:
13914   case ISD::ZERO_EXTEND:
13915   case ISD::ANY_EXTEND:
13916     return DAGCombineExtBoolTrunc(N, DCI);
13917   case ISD::TRUNCATE:
13918     return combineTRUNCATE(N, DCI);
13919   case ISD::SETCC:
13920     if (SDValue CSCC = combineSetCC(N, DCI))
13921       return CSCC;
13922     LLVM_FALLTHROUGH;
13923   case ISD::SELECT_CC:
13924     return DAGCombineTruncBoolExt(N, DCI);
13925   case ISD::SINT_TO_FP:
13926   case ISD::UINT_TO_FP:
13927     return combineFPToIntToFP(N, DCI);
13928   case ISD::VECTOR_SHUFFLE:
13929     if (ISD::isNormalLoad(N->getOperand(0).getNode())) {
13930       LSBaseSDNode* LSBase = cast<LSBaseSDNode>(N->getOperand(0));
13931       return combineVReverseMemOP(cast<ShuffleVectorSDNode>(N), LSBase, DCI);
13932     }
13933     break;
13934   case ISD::STORE: {
13935 
13936     EVT Op1VT = N->getOperand(1).getValueType();
13937     unsigned Opcode = N->getOperand(1).getOpcode();
13938 
13939     if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) {
13940       SDValue Val= combineStoreFPToInt(N, DCI);
13941       if (Val)
13942         return Val;
13943     }
13944 
13945     if (Opcode == ISD::VECTOR_SHUFFLE && ISD::isNormalStore(N)) {
13946       ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N->getOperand(1));
13947       SDValue Val= combineVReverseMemOP(SVN, cast<LSBaseSDNode>(N), DCI);
13948       if (Val)
13949         return Val;
13950     }
13951 
13952     // Turn STORE (BSWAP) -> sthbrx/stwbrx.
13953     if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&
13954         N->getOperand(1).getNode()->hasOneUse() &&
13955         (Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
13956          (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {
13957 
13958       // STBRX can only handle simple types and it makes no sense to store less
13959       // two bytes in byte-reversed order.
13960       EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
13961       if (mVT.isExtended() || mVT.getSizeInBits() < 16)
13962         break;
13963 
13964       SDValue BSwapOp = N->getOperand(1).getOperand(0);
13965       // Do an any-extend to 32-bits if this is a half-word input.
13966       if (BSwapOp.getValueType() == MVT::i16)
13967         BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
13968 
13969       // If the type of BSWAP operand is wider than stored memory width
13970       // it need to be shifted to the right side before STBRX.
13971       if (Op1VT.bitsGT(mVT)) {
13972         int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
13973         BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
13974                               DAG.getConstant(Shift, dl, MVT::i32));
13975         // Need to truncate if this is a bswap of i64 stored as i32/i16.
13976         if (Op1VT == MVT::i64)
13977           BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
13978       }
13979 
13980       SDValue Ops[] = {
13981         N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
13982       };
13983       return
13984         DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
13985                                 Ops, cast<StoreSDNode>(N)->getMemoryVT(),
13986                                 cast<StoreSDNode>(N)->getMemOperand());
13987     }
13988 
13989     // STORE Constant:i32<0>  ->  STORE<trunc to i32> Constant:i64<0>
13990     // So it can increase the chance of CSE constant construction.
13991     if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
13992         isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
13993       // Need to sign-extended to 64-bits to handle negative values.
13994       EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
13995       uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
13996                                     MemVT.getSizeInBits());
13997       SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);
13998 
13999       // DAG.getTruncStore() can't be used here because it doesn't accept
14000       // the general (base + offset) addressing mode.
14001       // So we use UpdateNodeOperands and setTruncatingStore instead.
14002       DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
14003                              N->getOperand(3));
14004       cast<StoreSDNode>(N)->setTruncatingStore(true);
14005       return SDValue(N, 0);
14006     }
14007 
14008     // For little endian, VSX stores require generating xxswapd/lxvd2x.
14009     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
14010     if (Op1VT.isSimple()) {
14011       MVT StoreVT = Op1VT.getSimpleVT();
14012       if (Subtarget.needsSwapsForVSXMemOps() &&
14013           (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
14014            StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
14015         return expandVSXStoreForLE(N, DCI);
14016     }
14017     break;
14018   }
14019   case ISD::LOAD: {
14020     LoadSDNode *LD = cast<LoadSDNode>(N);
14021     EVT VT = LD->getValueType(0);
14022 
14023     // For little endian, VSX loads require generating lxvd2x/xxswapd.
14024     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
14025     if (VT.isSimple()) {
14026       MVT LoadVT = VT.getSimpleVT();
14027       if (Subtarget.needsSwapsForVSXMemOps() &&
14028           (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
14029            LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
14030         return expandVSXLoadForLE(N, DCI);
14031     }
14032 
14033     // We sometimes end up with a 64-bit integer load, from which we extract
14034     // two single-precision floating-point numbers. This happens with
14035     // std::complex<float>, and other similar structures, because of the way we
14036     // canonicalize structure copies. However, if we lack direct moves,
14037     // then the final bitcasts from the extracted integer values to the
14038     // floating-point numbers turn into store/load pairs. Even with direct moves,
14039     // just loading the two floating-point numbers is likely better.
14040     auto ReplaceTwoFloatLoad = [&]() {
14041       if (VT != MVT::i64)
14042         return false;
14043 
14044       if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
14045           LD->isVolatile())
14046         return false;
14047 
14048       //  We're looking for a sequence like this:
14049       //  t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64
14050       //      t16: i64 = srl t13, Constant:i32<32>
14051       //    t17: i32 = truncate t16
14052       //  t18: f32 = bitcast t17
14053       //    t19: i32 = truncate t13
14054       //  t20: f32 = bitcast t19
14055 
14056       if (!LD->hasNUsesOfValue(2, 0))
14057         return false;
14058 
14059       auto UI = LD->use_begin();
14060       while (UI.getUse().getResNo() != 0) ++UI;
14061       SDNode *Trunc = *UI++;
14062       while (UI.getUse().getResNo() != 0) ++UI;
14063       SDNode *RightShift = *UI;
14064       if (Trunc->getOpcode() != ISD::TRUNCATE)
14065         std::swap(Trunc, RightShift);
14066 
14067       if (Trunc->getOpcode() != ISD::TRUNCATE ||
14068           Trunc->getValueType(0) != MVT::i32 ||
14069           !Trunc->hasOneUse())
14070         return false;
14071       if (RightShift->getOpcode() != ISD::SRL ||
14072           !isa<ConstantSDNode>(RightShift->getOperand(1)) ||
14073           RightShift->getConstantOperandVal(1) != 32 ||
14074           !RightShift->hasOneUse())
14075         return false;
14076 
14077       SDNode *Trunc2 = *RightShift->use_begin();
14078       if (Trunc2->getOpcode() != ISD::TRUNCATE ||
14079           Trunc2->getValueType(0) != MVT::i32 ||
14080           !Trunc2->hasOneUse())
14081         return false;
14082 
14083       SDNode *Bitcast = *Trunc->use_begin();
14084       SDNode *Bitcast2 = *Trunc2->use_begin();
14085 
14086       if (Bitcast->getOpcode() != ISD::BITCAST ||
14087           Bitcast->getValueType(0) != MVT::f32)
14088         return false;
14089       if (Bitcast2->getOpcode() != ISD::BITCAST ||
14090           Bitcast2->getValueType(0) != MVT::f32)
14091         return false;
14092 
14093       if (Subtarget.isLittleEndian())
14094         std::swap(Bitcast, Bitcast2);
14095 
14096       // Bitcast has the second float (in memory-layout order) and Bitcast2
14097       // has the first one.
14098 
14099       SDValue BasePtr = LD->getBasePtr();
14100       if (LD->isIndexed()) {
14101         assert(LD->getAddressingMode() == ISD::PRE_INC &&
14102                "Non-pre-inc AM on PPC?");
14103         BasePtr =
14104           DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
14105                       LD->getOffset());
14106       }
14107 
14108       auto MMOFlags =
14109           LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
14110       SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
14111                                       LD->getPointerInfo(), LD->getAlignment(),
14112                                       MMOFlags, LD->getAAInfo());
14113       SDValue AddPtr =
14114         DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
14115                     BasePtr, DAG.getIntPtrConstant(4, dl));
14116       SDValue FloatLoad2 = DAG.getLoad(
14117           MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
14118           LD->getPointerInfo().getWithOffset(4),
14119           MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo());
14120 
14121       if (LD->isIndexed()) {
14122         // Note that DAGCombine should re-form any pre-increment load(s) from
14123         // what is produced here if that makes sense.
14124         DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr);
14125       }
14126 
14127       DCI.CombineTo(Bitcast2, FloatLoad);
14128       DCI.CombineTo(Bitcast, FloatLoad2);
14129 
14130       DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 2 : 1),
14131                                     SDValue(FloatLoad2.getNode(), 1));
14132       return true;
14133     };
14134 
14135     if (ReplaceTwoFloatLoad())
14136       return SDValue(N, 0);
14137 
14138     EVT MemVT = LD->getMemoryVT();
14139     Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
14140     unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
14141     Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext());
14142     unsigned ScalarABIAlignment = DAG.getDataLayout().getABITypeAlignment(STy);
14143     if (LD->isUnindexed() && VT.isVector() &&
14144         ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
14145           // P8 and later hardware should just use LOAD.
14146           !Subtarget.hasP8Vector() && (VT == MVT::v16i8 || VT == MVT::v8i16 ||
14147                                        VT == MVT::v4i32 || VT == MVT::v4f32)) ||
14148          (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) &&
14149           LD->getAlignment() >= ScalarABIAlignment)) &&
14150         LD->getAlignment() < ABIAlignment) {
14151       // This is a type-legal unaligned Altivec or QPX load.
14152       SDValue Chain = LD->getChain();
14153       SDValue Ptr = LD->getBasePtr();
14154       bool isLittleEndian = Subtarget.isLittleEndian();
14155 
14156       // This implements the loading of unaligned vectors as described in
14157       // the venerable Apple Velocity Engine overview. Specifically:
14158       // https://developer.apple.com/hardwaredrivers/ve/alignment.html
14159       // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
14160       //
14161       // The general idea is to expand a sequence of one or more unaligned
14162       // loads into an alignment-based permutation-control instruction (lvsl
14163       // or lvsr), a series of regular vector loads (which always truncate
14164       // their input address to an aligned address), and a series of
14165       // permutations.  The results of these permutations are the requested
14166       // loaded values.  The trick is that the last "extra" load is not taken
14167       // from the address you might suspect (sizeof(vector) bytes after the
14168       // last requested load), but rather sizeof(vector) - 1 bytes after the
14169       // last requested vector. The point of this is to avoid a page fault if
14170       // the base address happened to be aligned. This works because if the
14171       // base address is aligned, then adding less than a full vector length
14172       // will cause the last vector in the sequence to be (re)loaded.
14173       // Otherwise, the next vector will be fetched as you might suspect was
14174       // necessary.
14175 
14176       // We might be able to reuse the permutation generation from
14177       // a different base address offset from this one by an aligned amount.
14178       // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
14179       // optimization later.
14180       Intrinsic::ID Intr, IntrLD, IntrPerm;
14181       MVT PermCntlTy, PermTy, LDTy;
14182       if (Subtarget.hasAltivec()) {
14183         Intr = isLittleEndian ?  Intrinsic::ppc_altivec_lvsr :
14184                                  Intrinsic::ppc_altivec_lvsl;
14185         IntrLD = Intrinsic::ppc_altivec_lvx;
14186         IntrPerm = Intrinsic::ppc_altivec_vperm;
14187         PermCntlTy = MVT::v16i8;
14188         PermTy = MVT::v4i32;
14189         LDTy = MVT::v4i32;
14190       } else {
14191         Intr =   MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld :
14192                                        Intrinsic::ppc_qpx_qvlpcls;
14193         IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd :
14194                                        Intrinsic::ppc_qpx_qvlfs;
14195         IntrPerm = Intrinsic::ppc_qpx_qvfperm;
14196         PermCntlTy = MVT::v4f64;
14197         PermTy = MVT::v4f64;
14198         LDTy = MemVT.getSimpleVT();
14199       }
14200 
14201       SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);
14202 
14203       // Create the new MMO for the new base load. It is like the original MMO,
14204       // but represents an area in memory almost twice the vector size centered
14205       // on the original address. If the address is unaligned, we might start
14206       // reading up to (sizeof(vector)-1) bytes below the address of the
14207       // original unaligned load.
14208       MachineFunction &MF = DAG.getMachineFunction();
14209       MachineMemOperand *BaseMMO =
14210         MF.getMachineMemOperand(LD->getMemOperand(),
14211                                 -(long)MemVT.getStoreSize()+1,
14212                                 2*MemVT.getStoreSize()-1);
14213 
14214       // Create the new base load.
14215       SDValue LDXIntID =
14216           DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout()));
14217       SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
14218       SDValue BaseLoad =
14219         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
14220                                 DAG.getVTList(PermTy, MVT::Other),
14221                                 BaseLoadOps, LDTy, BaseMMO);
14222 
14223       // Note that the value of IncOffset (which is provided to the next
14224       // load's pointer info offset value, and thus used to calculate the
14225       // alignment), and the value of IncValue (which is actually used to
14226       // increment the pointer value) are different! This is because we
14227       // require the next load to appear to be aligned, even though it
14228       // is actually offset from the base pointer by a lesser amount.
14229       int IncOffset = VT.getSizeInBits() / 8;
14230       int IncValue = IncOffset;
14231 
14232       // Walk (both up and down) the chain looking for another load at the real
14233       // (aligned) offset (the alignment of the other load does not matter in
14234       // this case). If found, then do not use the offset reduction trick, as
14235       // that will prevent the loads from being later combined (as they would
14236       // otherwise be duplicates).
14237       if (!findConsecutiveLoad(LD, DAG))
14238         --IncValue;
14239 
14240       SDValue Increment =
14241           DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout()));
14242       Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
14243 
14244       MachineMemOperand *ExtraMMO =
14245         MF.getMachineMemOperand(LD->getMemOperand(),
14246                                 1, 2*MemVT.getStoreSize()-1);
14247       SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
14248       SDValue ExtraLoad =
14249         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
14250                                 DAG.getVTList(PermTy, MVT::Other),
14251                                 ExtraLoadOps, LDTy, ExtraMMO);
14252 
14253       SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
14254         BaseLoad.getValue(1), ExtraLoad.getValue(1));
14255 
14256       // Because vperm has a big-endian bias, we must reverse the order
14257       // of the input vectors and complement the permute control vector
14258       // when generating little endian code.  We have already handled the
14259       // latter by using lvsr instead of lvsl, so just reverse BaseLoad
14260       // and ExtraLoad here.
14261       SDValue Perm;
14262       if (isLittleEndian)
14263         Perm = BuildIntrinsicOp(IntrPerm,
14264                                 ExtraLoad, BaseLoad, PermCntl, DAG, dl);
14265       else
14266         Perm = BuildIntrinsicOp(IntrPerm,
14267                                 BaseLoad, ExtraLoad, PermCntl, DAG, dl);
14268 
14269       if (VT != PermTy)
14270         Perm = Subtarget.hasAltivec() ?
14271                  DAG.getNode(ISD::BITCAST, dl, VT, Perm) :
14272                  DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX
14273                                DAG.getTargetConstant(1, dl, MVT::i64));
14274                                // second argument is 1 because this rounding
14275                                // is always exact.
14276 
14277       // The output of the permutation is our loaded result, the TokenFactor is
14278       // our new chain.
14279       DCI.CombineTo(N, Perm, TF);
14280       return SDValue(N, 0);
14281     }
14282     }
14283     break;
14284     case ISD::INTRINSIC_WO_CHAIN: {
14285       bool isLittleEndian = Subtarget.isLittleEndian();
14286       unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
14287       Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
14288                                            : Intrinsic::ppc_altivec_lvsl);
14289       if ((IID == Intr ||
14290            IID == Intrinsic::ppc_qpx_qvlpcld  ||
14291            IID == Intrinsic::ppc_qpx_qvlpcls) &&
14292         N->getOperand(1)->getOpcode() == ISD::ADD) {
14293         SDValue Add = N->getOperand(1);
14294 
14295         int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ?
14296                    5 /* 32 byte alignment */ : 4 /* 16 byte alignment */;
14297 
14298         if (DAG.MaskedValueIsZero(Add->getOperand(1),
14299                                   APInt::getAllOnesValue(Bits /* alignment */)
14300                                       .zext(Add.getScalarValueSizeInBits()))) {
14301           SDNode *BasePtr = Add->getOperand(0).getNode();
14302           for (SDNode::use_iterator UI = BasePtr->use_begin(),
14303                                     UE = BasePtr->use_end();
14304                UI != UE; ++UI) {
14305             if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
14306                 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) {
14307               // We've found another LVSL/LVSR, and this address is an aligned
14308               // multiple of that one. The results will be the same, so use the
14309               // one we've just found instead.
14310 
14311               return SDValue(*UI, 0);
14312             }
14313           }
14314         }
14315 
14316         if (isa<ConstantSDNode>(Add->getOperand(1))) {
14317           SDNode *BasePtr = Add->getOperand(0).getNode();
14318           for (SDNode::use_iterator UI = BasePtr->use_begin(),
14319                UE = BasePtr->use_end(); UI != UE; ++UI) {
14320             if (UI->getOpcode() == ISD::ADD &&
14321                 isa<ConstantSDNode>(UI->getOperand(1)) &&
14322                 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
14323                  cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
14324                 (1ULL << Bits) == 0) {
14325               SDNode *OtherAdd = *UI;
14326               for (SDNode::use_iterator VI = OtherAdd->use_begin(),
14327                    VE = OtherAdd->use_end(); VI != VE; ++VI) {
14328                 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
14329                     cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) {
14330                   return SDValue(*VI, 0);
14331                 }
14332               }
14333             }
14334           }
14335         }
14336       }
14337 
14338       // Combine vmaxsw/h/b(a, a's negation) to abs(a)
14339       // Expose the vabsduw/h/b opportunity for down stream
14340       if (!DCI.isAfterLegalizeDAG() && Subtarget.hasP9Altivec() &&
14341           (IID == Intrinsic::ppc_altivec_vmaxsw ||
14342            IID == Intrinsic::ppc_altivec_vmaxsh ||
14343            IID == Intrinsic::ppc_altivec_vmaxsb)) {
14344         SDValue V1 = N->getOperand(1);
14345         SDValue V2 = N->getOperand(2);
14346         if ((V1.getSimpleValueType() == MVT::v4i32 ||
14347              V1.getSimpleValueType() == MVT::v8i16 ||
14348              V1.getSimpleValueType() == MVT::v16i8) &&
14349             V1.getSimpleValueType() == V2.getSimpleValueType()) {
14350           // (0-a, a)
14351           if (V1.getOpcode() == ISD::SUB &&
14352               ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
14353               V1.getOperand(1) == V2) {
14354             return DAG.getNode(ISD::ABS, dl, V2.getValueType(), V2);
14355           }
14356           // (a, 0-a)
14357           if (V2.getOpcode() == ISD::SUB &&
14358               ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
14359               V2.getOperand(1) == V1) {
14360             return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
14361           }
14362           // (x-y, y-x)
14363           if (V1.getOpcode() == ISD::SUB && V2.getOpcode() == ISD::SUB &&
14364               V1.getOperand(0) == V2.getOperand(1) &&
14365               V1.getOperand(1) == V2.getOperand(0)) {
14366             return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
14367           }
14368         }
14369       }
14370     }
14371 
14372     break;
14373   case ISD::INTRINSIC_W_CHAIN:
14374     // For little endian, VSX loads require generating lxvd2x/xxswapd.
14375     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
14376     if (Subtarget.needsSwapsForVSXMemOps()) {
14377       switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
14378       default:
14379         break;
14380       case Intrinsic::ppc_vsx_lxvw4x:
14381       case Intrinsic::ppc_vsx_lxvd2x:
14382         return expandVSXLoadForLE(N, DCI);
14383       }
14384     }
14385     break;
14386   case ISD::INTRINSIC_VOID:
14387     // For little endian, VSX stores require generating xxswapd/stxvd2x.
14388     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
14389     if (Subtarget.needsSwapsForVSXMemOps()) {
14390       switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
14391       default:
14392         break;
14393       case Intrinsic::ppc_vsx_stxvw4x:
14394       case Intrinsic::ppc_vsx_stxvd2x:
14395         return expandVSXStoreForLE(N, DCI);
14396       }
14397     }
14398     break;
14399   case ISD::BSWAP:
14400     // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
14401     if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
14402         N->getOperand(0).hasOneUse() &&
14403         (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
14404          (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
14405           N->getValueType(0) == MVT::i64))) {
14406       SDValue Load = N->getOperand(0);
14407       LoadSDNode *LD = cast<LoadSDNode>(Load);
14408       // Create the byte-swapping load.
14409       SDValue Ops[] = {
14410         LD->getChain(),    // Chain
14411         LD->getBasePtr(),  // Ptr
14412         DAG.getValueType(N->getValueType(0)) // VT
14413       };
14414       SDValue BSLoad =
14415         DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
14416                                 DAG.getVTList(N->getValueType(0) == MVT::i64 ?
14417                                               MVT::i64 : MVT::i32, MVT::Other),
14418                                 Ops, LD->getMemoryVT(), LD->getMemOperand());
14419 
14420       // If this is an i16 load, insert the truncate.
14421       SDValue ResVal = BSLoad;
14422       if (N->getValueType(0) == MVT::i16)
14423         ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
14424 
14425       // First, combine the bswap away.  This makes the value produced by the
14426       // load dead.
14427       DCI.CombineTo(N, ResVal);
14428 
14429       // Next, combine the load away, we give it a bogus result value but a real
14430       // chain result.  The result value is dead because the bswap is dead.
14431       DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
14432 
14433       // Return N so it doesn't get rechecked!
14434       return SDValue(N, 0);
14435     }
14436     break;
14437   case PPCISD::VCMP:
14438     // If a VCMPo node already exists with exactly the same operands as this
14439     // node, use its result instead of this node (VCMPo computes both a CR6 and
14440     // a normal output).
14441     //
14442     if (!N->getOperand(0).hasOneUse() &&
14443         !N->getOperand(1).hasOneUse() &&
14444         !N->getOperand(2).hasOneUse()) {
14445 
14446       // Scan all of the users of the LHS, looking for VCMPo's that match.
14447       SDNode *VCMPoNode = nullptr;
14448 
14449       SDNode *LHSN = N->getOperand(0).getNode();
14450       for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
14451            UI != E; ++UI)
14452         if (UI->getOpcode() == PPCISD::VCMPo &&
14453             UI->getOperand(1) == N->getOperand(1) &&
14454             UI->getOperand(2) == N->getOperand(2) &&
14455             UI->getOperand(0) == N->getOperand(0)) {
14456           VCMPoNode = *UI;
14457           break;
14458         }
14459 
14460       // If there is no VCMPo node, or if the flag value has a single use, don't
14461       // transform this.
14462       if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
14463         break;
14464 
14465       // Look at the (necessarily single) use of the flag value.  If it has a
14466       // chain, this transformation is more complex.  Note that multiple things
14467       // could use the value result, which we should ignore.
14468       SDNode *FlagUser = nullptr;
14469       for (SDNode::use_iterator UI = VCMPoNode->use_begin();
14470            FlagUser == nullptr; ++UI) {
14471         assert(UI != VCMPoNode->use_end() && "Didn't find user!");
14472         SDNode *User = *UI;
14473         for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
14474           if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {
14475             FlagUser = User;
14476             break;
14477           }
14478         }
14479       }
14480 
14481       // If the user is a MFOCRF instruction, we know this is safe.
14482       // Otherwise we give up for right now.
14483       if (FlagUser->getOpcode() == PPCISD::MFOCRF)
14484         return SDValue(VCMPoNode, 0);
14485     }
14486     break;
14487   case ISD::BRCOND: {
14488     SDValue Cond = N->getOperand(1);
14489     SDValue Target = N->getOperand(2);
14490 
14491     if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
14492         cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
14493           Intrinsic::loop_decrement) {
14494 
14495       // We now need to make the intrinsic dead (it cannot be instruction
14496       // selected).
14497       DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0));
14498       assert(Cond.getNode()->hasOneUse() &&
14499              "Counter decrement has more than one use");
14500 
14501       return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other,
14502                          N->getOperand(0), Target);
14503     }
14504   }
14505   break;
14506   case ISD::BR_CC: {
14507     // If this is a branch on an altivec predicate comparison, lower this so
14508     // that we don't have to do a MFOCRF: instead, branch directly on CR6.  This
14509     // lowering is done pre-legalize, because the legalizer lowers the predicate
14510     // compare down to code that is difficult to reassemble.
14511     ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
14512     SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);
14513 
14514     // Sometimes the promoted value of the intrinsic is ANDed by some non-zero
14515     // value. If so, pass-through the AND to get to the intrinsic.
14516     if (LHS.getOpcode() == ISD::AND &&
14517         LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
14518         cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() ==
14519           Intrinsic::loop_decrement &&
14520         isa<ConstantSDNode>(LHS.getOperand(1)) &&
14521         !isNullConstant(LHS.getOperand(1)))
14522       LHS = LHS.getOperand(0);
14523 
14524     if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
14525         cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
14526           Intrinsic::loop_decrement &&
14527         isa<ConstantSDNode>(RHS)) {
14528       assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
14529              "Counter decrement comparison is not EQ or NE");
14530 
14531       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
14532       bool isBDNZ = (CC == ISD::SETEQ && Val) ||
14533                     (CC == ISD::SETNE && !Val);
14534 
14535       // We now need to make the intrinsic dead (it cannot be instruction
14536       // selected).
14537       DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
14538       assert(LHS.getNode()->hasOneUse() &&
14539              "Counter decrement has more than one use");
14540 
14541       return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
14542                          N->getOperand(0), N->getOperand(4));
14543     }
14544 
14545     int CompareOpc;
14546     bool isDot;
14547 
14548     if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
14549         isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
14550         getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
14551       assert(isDot && "Can't compare against a vector result!");
14552 
14553       // If this is a comparison against something other than 0/1, then we know
14554       // that the condition is never/always true.
14555       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
14556       if (Val != 0 && Val != 1) {
14557         if (CC == ISD::SETEQ)      // Cond never true, remove branch.
14558           return N->getOperand(0);
14559         // Always !=, turn it into an unconditional branch.
14560         return DAG.getNode(ISD::BR, dl, MVT::Other,
14561                            N->getOperand(0), N->getOperand(4));
14562       }
14563 
14564       bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
14565 
14566       // Create the PPCISD altivec 'dot' comparison node.
14567       SDValue Ops[] = {
14568         LHS.getOperand(2),  // LHS of compare
14569         LHS.getOperand(3),  // RHS of compare
14570         DAG.getConstant(CompareOpc, dl, MVT::i32)
14571       };
14572       EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
14573       SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);
14574 
14575       // Unpack the result based on how the target uses it.
14576       PPC::Predicate CompOpc;
14577       switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
14578       default:  // Can't happen, don't crash on invalid number though.
14579       case 0:   // Branch on the value of the EQ bit of CR6.
14580         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
14581         break;
14582       case 1:   // Branch on the inverted value of the EQ bit of CR6.
14583         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
14584         break;
14585       case 2:   // Branch on the value of the LT bit of CR6.
14586         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
14587         break;
14588       case 3:   // Branch on the inverted value of the LT bit of CR6.
14589         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
14590         break;
14591       }
14592 
14593       return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
14594                          DAG.getConstant(CompOpc, dl, MVT::i32),
14595                          DAG.getRegister(PPC::CR6, MVT::i32),
14596                          N->getOperand(4), CompNode.getValue(1));
14597     }
14598     break;
14599   }
14600   case ISD::BUILD_VECTOR:
14601     return DAGCombineBuildVector(N, DCI);
14602   case ISD::ABS:
14603     return combineABS(N, DCI);
14604   case ISD::VSELECT:
14605     return combineVSelect(N, DCI);
14606   }
14607 
14608   return SDValue();
14609 }
14610 
14611 SDValue
14612 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
14613                                  SelectionDAG &DAG,
14614                                  SmallVectorImpl<SDNode *> &Created) const {
14615   // fold (sdiv X, pow2)
14616   EVT VT = N->getValueType(0);
14617   if (VT == MVT::i64 && !Subtarget.isPPC64())
14618     return SDValue();
14619   if ((VT != MVT::i32 && VT != MVT::i64) ||
14620       !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
14621     return SDValue();
14622 
14623   SDLoc DL(N);
14624   SDValue N0 = N->getOperand(0);
14625 
14626   bool IsNegPow2 = (-Divisor).isPowerOf2();
14627   unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
14628   SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);
14629 
14630   SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
14631   Created.push_back(Op.getNode());
14632 
14633   if (IsNegPow2) {
14634     Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
14635     Created.push_back(Op.getNode());
14636   }
14637 
14638   return Op;
14639 }
14640 
14641 //===----------------------------------------------------------------------===//
14642 // Inline Assembly Support
14643 //===----------------------------------------------------------------------===//
14644 
14645 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
14646                                                       KnownBits &Known,
14647                                                       const APInt &DemandedElts,
14648                                                       const SelectionDAG &DAG,
14649                                                       unsigned Depth) const {
14650   Known.resetAll();
14651   switch (Op.getOpcode()) {
14652   default: break;
14653   case PPCISD::LBRX: {
14654     // lhbrx is known to have the top bits cleared out.
14655     if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
14656       Known.Zero = 0xFFFF0000;
14657     break;
14658   }
14659   case ISD::INTRINSIC_WO_CHAIN: {
14660     switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
14661     default: break;
14662     case Intrinsic::ppc_altivec_vcmpbfp_p:
14663     case Intrinsic::ppc_altivec_vcmpeqfp_p:
14664     case Intrinsic::ppc_altivec_vcmpequb_p:
14665     case Intrinsic::ppc_altivec_vcmpequh_p:
14666     case Intrinsic::ppc_altivec_vcmpequw_p:
14667     case Intrinsic::ppc_altivec_vcmpequd_p:
14668     case Intrinsic::ppc_altivec_vcmpgefp_p:
14669     case Intrinsic::ppc_altivec_vcmpgtfp_p:
14670     case Intrinsic::ppc_altivec_vcmpgtsb_p:
14671     case Intrinsic::ppc_altivec_vcmpgtsh_p:
14672     case Intrinsic::ppc_altivec_vcmpgtsw_p:
14673     case Intrinsic::ppc_altivec_vcmpgtsd_p:
14674     case Intrinsic::ppc_altivec_vcmpgtub_p:
14675     case Intrinsic::ppc_altivec_vcmpgtuh_p:
14676     case Intrinsic::ppc_altivec_vcmpgtuw_p:
14677     case Intrinsic::ppc_altivec_vcmpgtud_p:
14678       Known.Zero = ~1U;  // All bits but the low one are known to be zero.
14679       break;
14680     }
14681   }
14682   }
14683 }
14684 
14685 Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
14686   switch (Subtarget.getCPUDirective()) {
14687   default: break;
14688   case PPC::DIR_970:
14689   case PPC::DIR_PWR4:
14690   case PPC::DIR_PWR5:
14691   case PPC::DIR_PWR5X:
14692   case PPC::DIR_PWR6:
14693   case PPC::DIR_PWR6X:
14694   case PPC::DIR_PWR7:
14695   case PPC::DIR_PWR8:
14696   case PPC::DIR_PWR9:
14697   case PPC::DIR_PWR_FUTURE: {
14698     if (!ML)
14699       break;
14700 
14701     if (!DisableInnermostLoopAlign32) {
14702       // If the nested loop is an innermost loop, prefer to a 32-byte alignment,
14703       // so that we can decrease cache misses and branch-prediction misses.
14704       // Actual alignment of the loop will depend on the hotness check and other
14705       // logic in alignBlocks.
14706       if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
14707         return Align(32);
14708     }
14709 
14710     const PPCInstrInfo *TII = Subtarget.getInstrInfo();
14711 
14712     // For small loops (between 5 and 8 instructions), align to a 32-byte
14713     // boundary so that the entire loop fits in one instruction-cache line.
14714     uint64_t LoopSize = 0;
14715     for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
14716       for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
14717         LoopSize += TII->getInstSizeInBytes(*J);
14718         if (LoopSize > 32)
14719           break;
14720       }
14721 
14722     if (LoopSize > 16 && LoopSize <= 32)
14723       return Align(32);
14724 
14725     break;
14726   }
14727   }
14728 
14729   return TargetLowering::getPrefLoopAlignment(ML);
14730 }
14731 
14732 /// getConstraintType - Given a constraint, return the type of
14733 /// constraint it is for this target.
14734 PPCTargetLowering::ConstraintType
14735 PPCTargetLowering::getConstraintType(StringRef Constraint) const {
14736   if (Constraint.size() == 1) {
14737     switch (Constraint[0]) {
14738     default: break;
14739     case 'b':
14740     case 'r':
14741     case 'f':
14742     case 'd':
14743     case 'v':
14744     case 'y':
14745       return C_RegisterClass;
14746     case 'Z':
14747       // FIXME: While Z does indicate a memory constraint, it specifically
14748       // indicates an r+r address (used in conjunction with the 'y' modifier
14749       // in the replacement string). Currently, we're forcing the base
14750       // register to be r0 in the asm printer (which is interpreted as zero)
14751       // and forming the complete address in the second register. This is
14752       // suboptimal.
14753       return C_Memory;
14754     }
14755   } else if (Constraint == "wc") { // individual CR bits.
14756     return C_RegisterClass;
14757   } else if (Constraint == "wa" || Constraint == "wd" ||
14758              Constraint == "wf" || Constraint == "ws" ||
14759              Constraint == "wi" || Constraint == "ww") {
14760     return C_RegisterClass; // VSX registers.
14761   }
14762   return TargetLowering::getConstraintType(Constraint);
14763 }
14764 
14765 /// Examine constraint type and operand type and determine a weight value.
14766 /// This object must already have been set up with the operand type
14767 /// and the current alternative constraint selected.
14768 TargetLowering::ConstraintWeight
14769 PPCTargetLowering::getSingleConstraintMatchWeight(
14770     AsmOperandInfo &info, const char *constraint) const {
14771   ConstraintWeight weight = CW_Invalid;
14772   Value *CallOperandVal = info.CallOperandVal;
14773     // If we don't have a value, we can't do a match,
14774     // but allow it at the lowest weight.
14775   if (!CallOperandVal)
14776     return CW_Default;
14777   Type *type = CallOperandVal->getType();
14778 
14779   // Look at the constraint type.
14780   if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
14781     return CW_Register; // an individual CR bit.
14782   else if ((StringRef(constraint) == "wa" ||
14783             StringRef(constraint) == "wd" ||
14784             StringRef(constraint) == "wf") &&
14785            type->isVectorTy())
14786     return CW_Register;
14787   else if (StringRef(constraint) == "wi" && type->isIntegerTy(64))
14788     return CW_Register; // just hold 64-bit integers data.
14789   else if (StringRef(constraint) == "ws" && type->isDoubleTy())
14790     return CW_Register;
14791   else if (StringRef(constraint) == "ww" && type->isFloatTy())
14792     return CW_Register;
14793 
14794   switch (*constraint) {
14795   default:
14796     weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
14797     break;
14798   case 'b':
14799     if (type->isIntegerTy())
14800       weight = CW_Register;
14801     break;
14802   case 'f':
14803     if (type->isFloatTy())
14804       weight = CW_Register;
14805     break;
14806   case 'd':
14807     if (type->isDoubleTy())
14808       weight = CW_Register;
14809     break;
14810   case 'v':
14811     if (type->isVectorTy())
14812       weight = CW_Register;
14813     break;
14814   case 'y':
14815     weight = CW_Register;
14816     break;
14817   case 'Z':
14818     weight = CW_Memory;
14819     break;
14820   }
14821   return weight;
14822 }
14823 
14824 std::pair<unsigned, const TargetRegisterClass *>
14825 PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
14826                                                 StringRef Constraint,
14827                                                 MVT VT) const {
14828   if (Constraint.size() == 1) {
14829     // GCC RS6000 Constraint Letters
14830     switch (Constraint[0]) {
14831     case 'b':   // R1-R31
14832       if (VT == MVT::i64 && Subtarget.isPPC64())
14833         return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
14834       return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
14835     case 'r':   // R0-R31
14836       if (VT == MVT::i64 && Subtarget.isPPC64())
14837         return std::make_pair(0U, &PPC::G8RCRegClass);
14838       return std::make_pair(0U, &PPC::GPRCRegClass);
14839     // 'd' and 'f' constraints are both defined to be "the floating point
14840     // registers", where one is for 32-bit and the other for 64-bit. We don't
14841     // really care overly much here so just give them all the same reg classes.
14842     case 'd':
14843     case 'f':
14844       if (Subtarget.hasSPE()) {
14845         if (VT == MVT::f32 || VT == MVT::i32)
14846           return std::make_pair(0U, &PPC::GPRCRegClass);
14847         if (VT == MVT::f64 || VT == MVT::i64)
14848           return std::make_pair(0U, &PPC::SPERCRegClass);
14849       } else {
14850         if (VT == MVT::f32 || VT == MVT::i32)
14851           return std::make_pair(0U, &PPC::F4RCRegClass);
14852         if (VT == MVT::f64 || VT == MVT::i64)
14853           return std::make_pair(0U, &PPC::F8RCRegClass);
14854         if (VT == MVT::v4f64 && Subtarget.hasQPX())
14855           return std::make_pair(0U, &PPC::QFRCRegClass);
14856         if (VT == MVT::v4f32 && Subtarget.hasQPX())
14857           return std::make_pair(0U, &PPC::QSRCRegClass);
14858       }
14859       break;
14860     case 'v':
14861       if (VT == MVT::v4f64 && Subtarget.hasQPX())
14862         return std::make_pair(0U, &PPC::QFRCRegClass);
14863       if (VT == MVT::v4f32 && Subtarget.hasQPX())
14864         return std::make_pair(0U, &PPC::QSRCRegClass);
14865       if (Subtarget.hasAltivec())
14866         return std::make_pair(0U, &PPC::VRRCRegClass);
14867       break;
14868     case 'y':   // crrc
14869       return std::make_pair(0U, &PPC::CRRCRegClass);
14870     }
14871   } else if (Constraint == "wc" && Subtarget.useCRBits()) {
14872     // An individual CR bit.
14873     return std::make_pair(0U, &PPC::CRBITRCRegClass);
14874   } else if ((Constraint == "wa" || Constraint == "wd" ||
14875              Constraint == "wf" || Constraint == "wi") &&
14876              Subtarget.hasVSX()) {
14877     return std::make_pair(0U, &PPC::VSRCRegClass);
14878   } else if ((Constraint == "ws" || Constraint == "ww") && Subtarget.hasVSX()) {
14879     if (VT == MVT::f32 && Subtarget.hasP8Vector())
14880       return std::make_pair(0U, &PPC::VSSRCRegClass);
14881     else
14882       return std::make_pair(0U, &PPC::VSFRCRegClass);
14883   }
14884 
14885   // If we name a VSX register, we can't defer to the base class because it
14886   // will not recognize the correct register (their names will be VSL{0-31}
14887   // and V{0-31} so they won't match). So we match them here.
14888   if (Constraint.size() > 3 && Constraint[1] == 'v' && Constraint[2] == 's') {
14889     int VSNum = atoi(Constraint.data() + 3);
14890     assert(VSNum >= 0 && VSNum <= 63 &&
14891            "Attempted to access a vsr out of range");
14892     if (VSNum < 32)
14893       return std::make_pair(PPC::VSL0 + VSNum, &PPC::VSRCRegClass);
14894     return std::make_pair(PPC::V0 + VSNum - 32, &PPC::VSRCRegClass);
14895   }
14896   std::pair<unsigned, const TargetRegisterClass *> R =
14897       TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
14898 
14899   // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
14900   // (which we call X[0-9]+). If a 64-bit value has been requested, and a
14901   // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent
14902   // register.
14903   // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
14904   // the AsmName field from *RegisterInfo.td, then this would not be necessary.
14905   if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
14906       PPC::GPRCRegClass.contains(R.first))
14907     return std::make_pair(TRI->getMatchingSuperReg(R.first,
14908                             PPC::sub_32, &PPC::G8RCRegClass),
14909                           &PPC::G8RCRegClass);
14910 
14911   // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
14912   if (!R.second && StringRef("{cc}").equals_lower(Constraint)) {
14913     R.first = PPC::CR0;
14914     R.second = &PPC::CRRCRegClass;
14915   }
14916 
14917   return R;
14918 }
14919 
14920 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
14921 /// vector.  If it is invalid, don't add anything to Ops.
14922 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
14923                                                      std::string &Constraint,
14924                                                      std::vector<SDValue>&Ops,
14925                                                      SelectionDAG &DAG) const {
14926   SDValue Result;
14927 
14928   // Only support length 1 constraints.
14929   if (Constraint.length() > 1) return;
14930 
14931   char Letter = Constraint[0];
14932   switch (Letter) {
14933   default: break;
14934   case 'I':
14935   case 'J':
14936   case 'K':
14937   case 'L':
14938   case 'M':
14939   case 'N':
14940   case 'O':
14941   case 'P': {
14942     ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
14943     if (!CST) return; // Must be an immediate to match.
14944     SDLoc dl(Op);
14945     int64_t Value = CST->getSExtValue();
14946     EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
14947                          // numbers are printed as such.
14948     switch (Letter) {
14949     default: llvm_unreachable("Unknown constraint letter!");
14950     case 'I':  // "I" is a signed 16-bit constant.
14951       if (isInt<16>(Value))
14952         Result = DAG.getTargetConstant(Value, dl, TCVT);
14953       break;
14954     case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
14955       if (isShiftedUInt<16, 16>(Value))
14956         Result = DAG.getTargetConstant(Value, dl, TCVT);
14957       break;
14958     case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
14959       if (isShiftedInt<16, 16>(Value))
14960         Result = DAG.getTargetConstant(Value, dl, TCVT);
14961       break;
14962     case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
14963       if (isUInt<16>(Value))
14964         Result = DAG.getTargetConstant(Value, dl, TCVT);
14965       break;
14966     case 'M':  // "M" is a constant that is greater than 31.
14967       if (Value > 31)
14968         Result = DAG.getTargetConstant(Value, dl, TCVT);
14969       break;
14970     case 'N':  // "N" is a positive constant that is an exact power of two.
14971       if (Value > 0 && isPowerOf2_64(Value))
14972         Result = DAG.getTargetConstant(Value, dl, TCVT);
14973       break;
14974     case 'O':  // "O" is the constant zero.
14975       if (Value == 0)
14976         Result = DAG.getTargetConstant(Value, dl, TCVT);
14977       break;
14978     case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
14979       if (isInt<16>(-Value))
14980         Result = DAG.getTargetConstant(Value, dl, TCVT);
14981       break;
14982     }
14983     break;
14984   }
14985   }
14986 
14987   if (Result.getNode()) {
14988     Ops.push_back(Result);
14989     return;
14990   }
14991 
14992   // Handle standard constraint letters.
14993   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
14994 }
14995 
14996 // isLegalAddressingMode - Return true if the addressing mode represented
14997 // by AM is legal for this target, for a load/store of the specified type.
14998 bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
14999                                               const AddrMode &AM, Type *Ty,
15000                                               unsigned AS, Instruction *I) const {
15001   // PPC does not allow r+i addressing modes for vectors!
15002   if (Ty->isVectorTy() && AM.BaseOffs != 0)
15003     return false;
15004 
15005   // PPC allows a sign-extended 16-bit immediate field.
15006   if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
15007     return false;
15008 
15009   // No global is ever allowed as a base.
15010   if (AM.BaseGV)
15011     return false;
15012 
15013   // PPC only support r+r,
15014   switch (AM.Scale) {
15015   case 0:  // "r+i" or just "i", depending on HasBaseReg.
15016     break;
15017   case 1:
15018     if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
15019       return false;
15020     // Otherwise we have r+r or r+i.
15021     break;
15022   case 2:
15023     if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r  or  2*r+i is not allowed.
15024       return false;
15025     // Allow 2*r as r+r.
15026     break;
15027   default:
15028     // No other scales are supported.
15029     return false;
15030   }
15031 
15032   return true;
15033 }
15034 
15035 SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
15036                                            SelectionDAG &DAG) const {
15037   MachineFunction &MF = DAG.getMachineFunction();
15038   MachineFrameInfo &MFI = MF.getFrameInfo();
15039   MFI.setReturnAddressIsTaken(true);
15040 
15041   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
15042     return SDValue();
15043 
15044   SDLoc dl(Op);
15045   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
15046 
15047   // Make sure the function does not optimize away the store of the RA to
15048   // the stack.
15049   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
15050   FuncInfo->setLRStoreRequired();
15051   bool isPPC64 = Subtarget.isPPC64();
15052   auto PtrVT = getPointerTy(MF.getDataLayout());
15053 
15054   if (Depth > 0) {
15055     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
15056     SDValue Offset =
15057         DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
15058                         isPPC64 ? MVT::i64 : MVT::i32);
15059     return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
15060                        DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
15061                        MachinePointerInfo());
15062   }
15063 
15064   // Just load the return address off the stack.
15065   SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
15066   return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
15067                      MachinePointerInfo());
15068 }
15069 
15070 SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
15071                                           SelectionDAG &DAG) const {
15072   SDLoc dl(Op);
15073   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
15074 
15075   MachineFunction &MF = DAG.getMachineFunction();
15076   MachineFrameInfo &MFI = MF.getFrameInfo();
15077   MFI.setFrameAddressIsTaken(true);
15078 
15079   EVT PtrVT = getPointerTy(MF.getDataLayout());
15080   bool isPPC64 = PtrVT == MVT::i64;
15081 
15082   // Naked functions never have a frame pointer, and so we use r1. For all
15083   // other functions, this decision must be delayed until during PEI.
15084   unsigned FrameReg;
15085   if (MF.getFunction().hasFnAttribute(Attribute::Naked))
15086     FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
15087   else
15088     FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;
15089 
15090   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
15091                                          PtrVT);
15092   while (Depth--)
15093     FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
15094                             FrameAddr, MachinePointerInfo());
15095   return FrameAddr;
15096 }
15097 
15098 // FIXME? Maybe this could be a TableGen attribute on some registers and
15099 // this table could be generated automatically from RegInfo.
15100 Register PPCTargetLowering::getRegisterByName(const char* RegName, LLT VT,
15101                                               const MachineFunction &MF) const {
15102   bool isPPC64 = Subtarget.isPPC64();
15103 
15104   bool is64Bit = isPPC64 && VT == LLT::scalar(64);
15105   if (!is64Bit && VT != LLT::scalar(32))
15106     report_fatal_error("Invalid register global variable type");
15107 
15108   Register Reg = StringSwitch<Register>(RegName)
15109                      .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
15110                      .Case("r2", isPPC64 ? Register() : PPC::R2)
15111                      .Case("r13", (is64Bit ? PPC::X13 : PPC::R13))
15112                      .Default(Register());
15113 
15114   if (Reg)
15115     return Reg;
15116   report_fatal_error("Invalid register name global variable");
15117 }
15118 
15119 bool PPCTargetLowering::isAccessedAsGotIndirect(SDValue GA) const {
15120   // 32-bit SVR4 ABI access everything as got-indirect.
15121   if (Subtarget.is32BitELFABI())
15122     return true;
15123 
15124   // AIX accesses everything indirectly through the TOC, which is similar to
15125   // the GOT.
15126   if (Subtarget.isAIXABI())
15127     return true;
15128 
15129   CodeModel::Model CModel = getTargetMachine().getCodeModel();
15130   // If it is small or large code model, module locals are accessed
15131   // indirectly by loading their address from .toc/.got.
15132   if (CModel == CodeModel::Small || CModel == CodeModel::Large)
15133     return true;
15134 
15135   // JumpTable and BlockAddress are accessed as got-indirect.
15136   if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA))
15137     return true;
15138 
15139   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA))
15140     return Subtarget.isGVIndirectSymbol(G->getGlobal());
15141 
15142   return false;
15143 }
15144 
15145 bool
15146 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
15147   // The PowerPC target isn't yet aware of offsets.
15148   return false;
15149 }
15150 
15151 bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
15152                                            const CallInst &I,
15153                                            MachineFunction &MF,
15154                                            unsigned Intrinsic) const {
15155   switch (Intrinsic) {
15156   case Intrinsic::ppc_qpx_qvlfd:
15157   case Intrinsic::ppc_qpx_qvlfs:
15158   case Intrinsic::ppc_qpx_qvlfcd:
15159   case Intrinsic::ppc_qpx_qvlfcs:
15160   case Intrinsic::ppc_qpx_qvlfiwa:
15161   case Intrinsic::ppc_qpx_qvlfiwz:
15162   case Intrinsic::ppc_altivec_lvx:
15163   case Intrinsic::ppc_altivec_lvxl:
15164   case Intrinsic::ppc_altivec_lvebx:
15165   case Intrinsic::ppc_altivec_lvehx:
15166   case Intrinsic::ppc_altivec_lvewx:
15167   case Intrinsic::ppc_vsx_lxvd2x:
15168   case Intrinsic::ppc_vsx_lxvw4x: {
15169     EVT VT;
15170     switch (Intrinsic) {
15171     case Intrinsic::ppc_altivec_lvebx:
15172       VT = MVT::i8;
15173       break;
15174     case Intrinsic::ppc_altivec_lvehx:
15175       VT = MVT::i16;
15176       break;
15177     case Intrinsic::ppc_altivec_lvewx:
15178       VT = MVT::i32;
15179       break;
15180     case Intrinsic::ppc_vsx_lxvd2x:
15181       VT = MVT::v2f64;
15182       break;
15183     case Intrinsic::ppc_qpx_qvlfd:
15184       VT = MVT::v4f64;
15185       break;
15186     case Intrinsic::ppc_qpx_qvlfs:
15187       VT = MVT::v4f32;
15188       break;
15189     case Intrinsic::ppc_qpx_qvlfcd:
15190       VT = MVT::v2f64;
15191       break;
15192     case Intrinsic::ppc_qpx_qvlfcs:
15193       VT = MVT::v2f32;
15194       break;
15195     default:
15196       VT = MVT::v4i32;
15197       break;
15198     }
15199 
15200     Info.opc = ISD::INTRINSIC_W_CHAIN;
15201     Info.memVT = VT;
15202     Info.ptrVal = I.getArgOperand(0);
15203     Info.offset = -VT.getStoreSize()+1;
15204     Info.size = 2*VT.getStoreSize()-1;
15205     Info.align = Align(1);
15206     Info.flags = MachineMemOperand::MOLoad;
15207     return true;
15208   }
15209   case Intrinsic::ppc_qpx_qvlfda:
15210   case Intrinsic::ppc_qpx_qvlfsa:
15211   case Intrinsic::ppc_qpx_qvlfcda:
15212   case Intrinsic::ppc_qpx_qvlfcsa:
15213   case Intrinsic::ppc_qpx_qvlfiwaa:
15214   case Intrinsic::ppc_qpx_qvlfiwza: {
15215     EVT VT;
15216     switch (Intrinsic) {
15217     case Intrinsic::ppc_qpx_qvlfda:
15218       VT = MVT::v4f64;
15219       break;
15220     case Intrinsic::ppc_qpx_qvlfsa:
15221       VT = MVT::v4f32;
15222       break;
15223     case Intrinsic::ppc_qpx_qvlfcda:
15224       VT = MVT::v2f64;
15225       break;
15226     case Intrinsic::ppc_qpx_qvlfcsa:
15227       VT = MVT::v2f32;
15228       break;
15229     default:
15230       VT = MVT::v4i32;
15231       break;
15232     }
15233 
15234     Info.opc = ISD::INTRINSIC_W_CHAIN;
15235     Info.memVT = VT;
15236     Info.ptrVal = I.getArgOperand(0);
15237     Info.offset = 0;
15238     Info.size = VT.getStoreSize();
15239     Info.align = Align(1);
15240     Info.flags = MachineMemOperand::MOLoad;
15241     return true;
15242   }
15243   case Intrinsic::ppc_qpx_qvstfd:
15244   case Intrinsic::ppc_qpx_qvstfs:
15245   case Intrinsic::ppc_qpx_qvstfcd:
15246   case Intrinsic::ppc_qpx_qvstfcs:
15247   case Intrinsic::ppc_qpx_qvstfiw:
15248   case Intrinsic::ppc_altivec_stvx:
15249   case Intrinsic::ppc_altivec_stvxl:
15250   case Intrinsic::ppc_altivec_stvebx:
15251   case Intrinsic::ppc_altivec_stvehx:
15252   case Intrinsic::ppc_altivec_stvewx:
15253   case Intrinsic::ppc_vsx_stxvd2x:
15254   case Intrinsic::ppc_vsx_stxvw4x: {
15255     EVT VT;
15256     switch (Intrinsic) {
15257     case Intrinsic::ppc_altivec_stvebx:
15258       VT = MVT::i8;
15259       break;
15260     case Intrinsic::ppc_altivec_stvehx:
15261       VT = MVT::i16;
15262       break;
15263     case Intrinsic::ppc_altivec_stvewx:
15264       VT = MVT::i32;
15265       break;
15266     case Intrinsic::ppc_vsx_stxvd2x:
15267       VT = MVT::v2f64;
15268       break;
15269     case Intrinsic::ppc_qpx_qvstfd:
15270       VT = MVT::v4f64;
15271       break;
15272     case Intrinsic::ppc_qpx_qvstfs:
15273       VT = MVT::v4f32;
15274       break;
15275     case Intrinsic::ppc_qpx_qvstfcd:
15276       VT = MVT::v2f64;
15277       break;
15278     case Intrinsic::ppc_qpx_qvstfcs:
15279       VT = MVT::v2f32;
15280       break;
15281     default:
15282       VT = MVT::v4i32;
15283       break;
15284     }
15285 
15286     Info.opc = ISD::INTRINSIC_VOID;
15287     Info.memVT = VT;
15288     Info.ptrVal = I.getArgOperand(1);
15289     Info.offset = -VT.getStoreSize()+1;
15290     Info.size = 2*VT.getStoreSize()-1;
15291     Info.align = Align(1);
15292     Info.flags = MachineMemOperand::MOStore;
15293     return true;
15294   }
15295   case Intrinsic::ppc_qpx_qvstfda:
15296   case Intrinsic::ppc_qpx_qvstfsa:
15297   case Intrinsic::ppc_qpx_qvstfcda:
15298   case Intrinsic::ppc_qpx_qvstfcsa:
15299   case Intrinsic::ppc_qpx_qvstfiwa: {
15300     EVT VT;
15301     switch (Intrinsic) {
15302     case Intrinsic::ppc_qpx_qvstfda:
15303       VT = MVT::v4f64;
15304       break;
15305     case Intrinsic::ppc_qpx_qvstfsa:
15306       VT = MVT::v4f32;
15307       break;
15308     case Intrinsic::ppc_qpx_qvstfcda:
15309       VT = MVT::v2f64;
15310       break;
15311     case Intrinsic::ppc_qpx_qvstfcsa:
15312       VT = MVT::v2f32;
15313       break;
15314     default:
15315       VT = MVT::v4i32;
15316       break;
15317     }
15318 
15319     Info.opc = ISD::INTRINSIC_VOID;
15320     Info.memVT = VT;
15321     Info.ptrVal = I.getArgOperand(1);
15322     Info.offset = 0;
15323     Info.size = VT.getStoreSize();
15324     Info.align = Align(1);
15325     Info.flags = MachineMemOperand::MOStore;
15326     return true;
15327   }
15328   default:
15329     break;
15330   }
15331 
15332   return false;
15333 }
15334 
15335 /// It returns EVT::Other if the type should be determined using generic
15336 /// target-independent logic.
15337 EVT PPCTargetLowering::getOptimalMemOpType(
15338     const MemOp &Op, const AttributeList &FuncAttributes) const {
15339   if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
15340     // When expanding a memset, require at least two QPX instructions to cover
15341     // the cost of loading the value to be stored from the constant pool.
15342     if (Subtarget.hasQPX() && Op.size() >= 32 &&
15343         (Op.isMemcpy() || Op.size() >= 64) && Op.isAligned(Align(32)) &&
15344         !FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) {
15345       return MVT::v4f64;
15346     }
15347 
15348     // We should use Altivec/VSX loads and stores when available. For unaligned
15349     // addresses, unaligned VSX loads are only fast starting with the P8.
15350     if (Subtarget.hasAltivec() && Op.size() >= 16 &&
15351         (Op.isAligned(Align(16)) ||
15352          ((Op.isMemset() && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
15353       return MVT::v4i32;
15354   }
15355 
15356   if (Subtarget.isPPC64()) {
15357     return MVT::i64;
15358   }
15359 
15360   return MVT::i32;
15361 }
15362 
15363 /// Returns true if it is beneficial to convert a load of a constant
15364 /// to just the constant itself.
15365 bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
15366                                                           Type *Ty) const {
15367   assert(Ty->isIntegerTy());
15368 
15369   unsigned BitSize = Ty->getPrimitiveSizeInBits();
15370   return !(BitSize == 0 || BitSize > 64);
15371 }
15372 
15373 bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
15374   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
15375     return false;
15376   unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
15377   unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
15378   return NumBits1 == 64 && NumBits2 == 32;
15379 }
15380 
15381 bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
15382   if (!VT1.isInteger() || !VT2.isInteger())
15383     return false;
15384   unsigned NumBits1 = VT1.getSizeInBits();
15385   unsigned NumBits2 = VT2.getSizeInBits();
15386   return NumBits1 == 64 && NumBits2 == 32;
15387 }
15388 
15389 bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
15390   // Generally speaking, zexts are not free, but they are free when they can be
15391   // folded with other operations.
15392   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
15393     EVT MemVT = LD->getMemoryVT();
15394     if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
15395          (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
15396         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
15397          LD->getExtensionType() == ISD::ZEXTLOAD))
15398       return true;
15399   }
15400 
15401   // FIXME: Add other cases...
15402   //  - 32-bit shifts with a zext to i64
15403   //  - zext after ctlz, bswap, etc.
15404   //  - zext after and by a constant mask
15405 
15406   return TargetLowering::isZExtFree(Val, VT2);
15407 }
15408 
15409 bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
15410   assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
15411          "invalid fpext types");
15412   // Extending to float128 is not free.
15413   if (DestVT == MVT::f128)
15414     return false;
15415   return true;
15416 }
15417 
15418 bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
15419   return isInt<16>(Imm) || isUInt<16>(Imm);
15420 }
15421 
15422 bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
15423   return isInt<16>(Imm) || isUInt<16>(Imm);
15424 }
15425 
15426 bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
15427                                                        unsigned,
15428                                                        unsigned,
15429                                                        MachineMemOperand::Flags,
15430                                                        bool *Fast) const {
15431   if (DisablePPCUnaligned)
15432     return false;
15433 
15434   // PowerPC supports unaligned memory access for simple non-vector types.
15435   // Although accessing unaligned addresses is not as efficient as accessing
15436   // aligned addresses, it is generally more efficient than manual expansion,
15437   // and generally only traps for software emulation when crossing page
15438   // boundaries.
15439 
15440   if (!VT.isSimple())
15441     return false;
15442 
15443   if (VT.isFloatingPoint() && !Subtarget.allowsUnalignedFPAccess())
15444     return false;
15445 
15446   if (VT.getSimpleVT().isVector()) {
15447     if (Subtarget.hasVSX()) {
15448       if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
15449           VT != MVT::v4f32 && VT != MVT::v4i32)
15450         return false;
15451     } else {
15452       return false;
15453     }
15454   }
15455 
15456   if (VT == MVT::ppcf128)
15457     return false;
15458 
15459   if (Fast)
15460     *Fast = true;
15461 
15462   return true;
15463 }
15464 
15465 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
15466                                                    EVT VT) const {
15467   return isFMAFasterThanFMulAndFAdd(
15468       MF.getFunction(), VT.getTypeForEVT(MF.getFunction().getContext()));
15469 }
15470 
15471 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const Function &F,
15472                                                    Type *Ty) const {
15473   switch (Ty->getScalarType()->getTypeID()) {
15474   case Type::FloatTyID:
15475   case Type::DoubleTyID:
15476     return true;
15477   case Type::FP128TyID:
15478     return EnableQuadPrecision && Subtarget.hasP9Vector();
15479   default:
15480     return false;
15481   }
15482 }
15483 
15484 // Currently this is a copy from AArch64TargetLowering::isProfitableToHoist.
15485 // FIXME: add more patterns which are profitable to hoist.
15486 bool PPCTargetLowering::isProfitableToHoist(Instruction *I) const {
15487   if (I->getOpcode() != Instruction::FMul)
15488     return true;
15489 
15490   if (!I->hasOneUse())
15491     return true;
15492 
15493   Instruction *User = I->user_back();
15494   assert(User && "A single use instruction with no uses.");
15495 
15496   if (User->getOpcode() != Instruction::FSub &&
15497       User->getOpcode() != Instruction::FAdd)
15498     return true;
15499 
15500   const TargetOptions &Options = getTargetMachine().Options;
15501   const Function *F = I->getFunction();
15502   const DataLayout &DL = F->getParent()->getDataLayout();
15503   Type *Ty = User->getOperand(0)->getType();
15504 
15505   return !(
15506       isFMAFasterThanFMulAndFAdd(*F, Ty) &&
15507       isOperationLegalOrCustom(ISD::FMA, getValueType(DL, Ty)) &&
15508       (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath));
15509 }
15510 
15511 const MCPhysReg *
15512 PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
15513   // LR is a callee-save register, but we must treat it as clobbered by any call
15514   // site. Hence we include LR in the scratch registers, which are in turn added
15515   // as implicit-defs for stackmaps and patchpoints. The same reasoning applies
15516   // to CTR, which is used by any indirect call.
15517   static const MCPhysReg ScratchRegs[] = {
15518     PPC::X12, PPC::LR8, PPC::CTR8, 0
15519   };
15520 
15521   return ScratchRegs;
15522 }
15523 
15524 unsigned PPCTargetLowering::getExceptionPointerRegister(
15525     const Constant *PersonalityFn) const {
15526   return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
15527 }
15528 
15529 unsigned PPCTargetLowering::getExceptionSelectorRegister(
15530     const Constant *PersonalityFn) const {
15531   return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
15532 }
15533 
15534 bool
15535 PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
15536                      EVT VT , unsigned DefinedValues) const {
15537   if (VT == MVT::v2i64)
15538     return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves
15539 
15540   if (Subtarget.hasVSX() || Subtarget.hasQPX())
15541     return true;
15542 
15543   return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
15544 }
15545 
15546 Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
15547   if (DisableILPPref || Subtarget.enableMachineScheduler())
15548     return TargetLowering::getSchedulingPreference(N);
15549 
15550   return Sched::ILP;
15551 }
15552 
15553 // Create a fast isel object.
15554 FastISel *
15555 PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
15556                                   const TargetLibraryInfo *LibInfo) const {
15557   return PPC::createFastISel(FuncInfo, LibInfo);
15558 }
15559 
15560 void PPCTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
15561   if (!Subtarget.isPPC64()) return;
15562 
15563   // Update IsSplitCSR in PPCFunctionInfo
15564   PPCFunctionInfo *PFI = Entry->getParent()->getInfo<PPCFunctionInfo>();
15565   PFI->setIsSplitCSR(true);
15566 }
15567 
15568 void PPCTargetLowering::insertCopiesSplitCSR(
15569   MachineBasicBlock *Entry,
15570   const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
15571   const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
15572   const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
15573   if (!IStart)
15574     return;
15575 
15576   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
15577   MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
15578   MachineBasicBlock::iterator MBBI = Entry->begin();
15579   for (const MCPhysReg *I = IStart; *I; ++I) {
15580     const TargetRegisterClass *RC = nullptr;
15581     if (PPC::G8RCRegClass.contains(*I))
15582       RC = &PPC::G8RCRegClass;
15583     else if (PPC::F8RCRegClass.contains(*I))
15584       RC = &PPC::F8RCRegClass;
15585     else if (PPC::CRRCRegClass.contains(*I))
15586       RC = &PPC::CRRCRegClass;
15587     else if (PPC::VRRCRegClass.contains(*I))
15588       RC = &PPC::VRRCRegClass;
15589     else
15590       llvm_unreachable("Unexpected register class in CSRsViaCopy!");
15591 
15592     Register NewVR = MRI->createVirtualRegister(RC);
15593     // Create copy from CSR to a virtual register.
15594     // FIXME: this currently does not emit CFI pseudo-instructions, it works
15595     // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
15596     // nounwind. If we want to generalize this later, we may need to emit
15597     // CFI pseudo-instructions.
15598     assert(Entry->getParent()->getFunction().hasFnAttribute(
15599              Attribute::NoUnwind) &&
15600            "Function should be nounwind in insertCopiesSplitCSR!");
15601     Entry->addLiveIn(*I);
15602     BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
15603       .addReg(*I);
15604 
15605     // Insert the copy-back instructions right before the terminator.
15606     for (auto *Exit : Exits)
15607       BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
15608               TII->get(TargetOpcode::COPY), *I)
15609         .addReg(NewVR);
15610   }
15611 }
15612 
15613 // Override to enable LOAD_STACK_GUARD lowering on Linux.
15614 bool PPCTargetLowering::useLoadStackGuardNode() const {
15615   if (!Subtarget.isTargetLinux())
15616     return TargetLowering::useLoadStackGuardNode();
15617   return true;
15618 }
15619 
15620 // Override to disable global variable loading on Linux.
15621 void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
15622   if (!Subtarget.isTargetLinux())
15623     return TargetLowering::insertSSPDeclarations(M);
15624 }
15625 
15626 bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
15627                                      bool ForCodeSize) const {
15628   if (!VT.isSimple() || !Subtarget.hasVSX())
15629     return false;
15630 
15631   switch(VT.getSimpleVT().SimpleTy) {
15632   default:
15633     // For FP types that are currently not supported by PPC backend, return
15634     // false. Examples: f16, f80.
15635     return false;
15636   case MVT::f32:
15637   case MVT::f64:
15638   case MVT::ppcf128:
15639     return Imm.isPosZero();
15640   }
15641 }
15642 
15643 // For vector shift operation op, fold
15644 // (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
15645 static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
15646                                   SelectionDAG &DAG) {
15647   SDValue N0 = N->getOperand(0);
15648   SDValue N1 = N->getOperand(1);
15649   EVT VT = N0.getValueType();
15650   unsigned OpSizeInBits = VT.getScalarSizeInBits();
15651   unsigned Opcode = N->getOpcode();
15652   unsigned TargetOpcode;
15653 
15654   switch (Opcode) {
15655   default:
15656     llvm_unreachable("Unexpected shift operation");
15657   case ISD::SHL:
15658     TargetOpcode = PPCISD::SHL;
15659     break;
15660   case ISD::SRL:
15661     TargetOpcode = PPCISD::SRL;
15662     break;
15663   case ISD::SRA:
15664     TargetOpcode = PPCISD::SRA;
15665     break;
15666   }
15667 
15668   if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
15669       N1->getOpcode() == ISD::AND)
15670     if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
15671       if (Mask->getZExtValue() == OpSizeInBits - 1)
15672         return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));
15673 
15674   return SDValue();
15675 }
15676 
15677 SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
15678   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
15679     return Value;
15680 
15681   SDValue N0 = N->getOperand(0);
15682   ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
15683   if (!Subtarget.isISA3_0() ||
15684       N0.getOpcode() != ISD::SIGN_EXTEND ||
15685       N0.getOperand(0).getValueType() != MVT::i32 ||
15686       CN1 == nullptr || N->getValueType(0) != MVT::i64)
15687     return SDValue();
15688 
15689   // We can't save an operation here if the value is already extended, and
15690   // the existing shift is easier to combine.
15691   SDValue ExtsSrc = N0.getOperand(0);
15692   if (ExtsSrc.getOpcode() == ISD::TRUNCATE &&
15693       ExtsSrc.getOperand(0).getOpcode() == ISD::AssertSext)
15694     return SDValue();
15695 
15696   SDLoc DL(N0);
15697   SDValue ShiftBy = SDValue(CN1, 0);
15698   // We want the shift amount to be i32 on the extswli, but the shift could
15699   // have an i64.
15700   if (ShiftBy.getValueType() == MVT::i64)
15701     ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32);
15702 
15703   return DCI.DAG.getNode(PPCISD::EXTSWSLI, DL, MVT::i64, N0->getOperand(0),
15704                          ShiftBy);
15705 }
15706 
15707 SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
15708   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
15709     return Value;
15710 
15711   return SDValue();
15712 }
15713 
15714 SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
15715   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
15716     return Value;
15717 
15718   return SDValue();
15719 }
15720 
15721 // Transform (add X, (zext(setne Z, C))) -> (addze X, (addic (addi Z, -C), -1))
15722 // Transform (add X, (zext(sete  Z, C))) -> (addze X, (subfic (addi Z, -C), 0))
15723 // When C is zero, the equation (addi Z, -C) can be simplified to Z
15724 // Requirement: -C in [-32768, 32767], X and Z are MVT::i64 types
15725 static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG,
15726                                  const PPCSubtarget &Subtarget) {
15727   if (!Subtarget.isPPC64())
15728     return SDValue();
15729 
15730   SDValue LHS = N->getOperand(0);
15731   SDValue RHS = N->getOperand(1);
15732 
15733   auto isZextOfCompareWithConstant = [](SDValue Op) {
15734     if (Op.getOpcode() != ISD::ZERO_EXTEND || !Op.hasOneUse() ||
15735         Op.getValueType() != MVT::i64)
15736       return false;
15737 
15738     SDValue Cmp = Op.getOperand(0);
15739     if (Cmp.getOpcode() != ISD::SETCC || !Cmp.hasOneUse() ||
15740         Cmp.getOperand(0).getValueType() != MVT::i64)
15741       return false;
15742 
15743     if (auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1))) {
15744       int64_t NegConstant = 0 - Constant->getSExtValue();
15745       // Due to the limitations of the addi instruction,
15746       // -C is required to be [-32768, 32767].
15747       return isInt<16>(NegConstant);
15748     }
15749 
15750     return false;
15751   };
15752 
15753   bool LHSHasPattern = isZextOfCompareWithConstant(LHS);
15754   bool RHSHasPattern = isZextOfCompareWithConstant(RHS);
15755 
15756   // If there is a pattern, canonicalize a zext operand to the RHS.
15757   if (LHSHasPattern && !RHSHasPattern)
15758     std::swap(LHS, RHS);
15759   else if (!LHSHasPattern && !RHSHasPattern)
15760     return SDValue();
15761 
15762   SDLoc DL(N);
15763   SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Glue);
15764   SDValue Cmp = RHS.getOperand(0);
15765   SDValue Z = Cmp.getOperand(0);
15766   auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1));
15767 
15768   assert(Constant && "Constant Should not be a null pointer.");
15769   int64_t NegConstant = 0 - Constant->getSExtValue();
15770 
15771   switch(cast<CondCodeSDNode>(Cmp.getOperand(2))->get()) {
15772   default: break;
15773   case ISD::SETNE: {
15774     //                                 when C == 0
15775     //                             --> addze X, (addic Z, -1).carry
15776     //                            /
15777     // add X, (zext(setne Z, C))--
15778     //                            \    when -32768 <= -C <= 32767 && C != 0
15779     //                             --> addze X, (addic (addi Z, -C), -1).carry
15780     SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
15781                               DAG.getConstant(NegConstant, DL, MVT::i64));
15782     SDValue AddOrZ = NegConstant != 0 ? Add : Z;
15783     SDValue Addc = DAG.getNode(ISD::ADDC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
15784                                AddOrZ, DAG.getConstant(-1ULL, DL, MVT::i64));
15785     return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
15786                        SDValue(Addc.getNode(), 1));
15787     }
15788   case ISD::SETEQ: {
15789     //                                 when C == 0
15790     //                             --> addze X, (subfic Z, 0).carry
15791     //                            /
15792     // add X, (zext(sete  Z, C))--
15793     //                            \    when -32768 <= -C <= 32767 && C != 0
15794     //                             --> addze X, (subfic (addi Z, -C), 0).carry
15795     SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
15796                               DAG.getConstant(NegConstant, DL, MVT::i64));
15797     SDValue AddOrZ = NegConstant != 0 ? Add : Z;
15798     SDValue Subc = DAG.getNode(ISD::SUBC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
15799                                DAG.getConstant(0, DL, MVT::i64), AddOrZ);
15800     return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
15801                        SDValue(Subc.getNode(), 1));
15802     }
15803   }
15804 
15805   return SDValue();
15806 }
15807 
15808 SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {
15809   if (auto Value = combineADDToADDZE(N, DCI.DAG, Subtarget))
15810     return Value;
15811 
15812   return SDValue();
15813 }
15814 
15815 // Detect TRUNCATE operations on bitcasts of float128 values.
15816 // What we are looking for here is the situtation where we extract a subset
15817 // of bits from a 128 bit float.
15818 // This can be of two forms:
15819 // 1) BITCAST of f128 feeding TRUNCATE
15820 // 2) BITCAST of f128 feeding SRL (a shift) feeding TRUNCATE
15821 // The reason this is required is because we do not have a legal i128 type
15822 // and so we want to prevent having to store the f128 and then reload part
15823 // of it.
15824 SDValue PPCTargetLowering::combineTRUNCATE(SDNode *N,
15825                                            DAGCombinerInfo &DCI) const {
15826   // If we are using CRBits then try that first.
15827   if (Subtarget.useCRBits()) {
15828     // Check if CRBits did anything and return that if it did.
15829     if (SDValue CRTruncValue = DAGCombineTruncBoolExt(N, DCI))
15830       return CRTruncValue;
15831   }
15832 
15833   SDLoc dl(N);
15834   SDValue Op0 = N->getOperand(0);
15835 
15836   // Looking for a truncate of i128 to i64.
15837   if (Op0.getValueType() != MVT::i128 || N->getValueType(0) != MVT::i64)
15838     return SDValue();
15839 
15840   int EltToExtract = DCI.DAG.getDataLayout().isBigEndian() ? 1 : 0;
15841 
15842   // SRL feeding TRUNCATE.
15843   if (Op0.getOpcode() == ISD::SRL) {
15844     ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
15845     // The right shift has to be by 64 bits.
15846     if (!ConstNode || ConstNode->getZExtValue() != 64)
15847       return SDValue();
15848 
15849     // Switch the element number to extract.
15850     EltToExtract = EltToExtract ? 0 : 1;
15851     // Update Op0 past the SRL.
15852     Op0 = Op0.getOperand(0);
15853   }
15854 
15855   // BITCAST feeding a TRUNCATE possibly via SRL.
15856   if (Op0.getOpcode() == ISD::BITCAST &&
15857       Op0.getValueType() == MVT::i128 &&
15858       Op0.getOperand(0).getValueType() == MVT::f128) {
15859     SDValue Bitcast = DCI.DAG.getBitcast(MVT::v2i64, Op0.getOperand(0));
15860     return DCI.DAG.getNode(
15861         ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Bitcast,
15862         DCI.DAG.getTargetConstant(EltToExtract, dl, MVT::i32));
15863   }
15864   return SDValue();
15865 }
15866 
15867 SDValue PPCTargetLowering::combineMUL(SDNode *N, DAGCombinerInfo &DCI) const {
15868   SelectionDAG &DAG = DCI.DAG;
15869 
15870   ConstantSDNode *ConstOpOrElement = isConstOrConstSplat(N->getOperand(1));
15871   if (!ConstOpOrElement)
15872     return SDValue();
15873 
15874   // An imul is usually smaller than the alternative sequence for legal type.
15875   if (DAG.getMachineFunction().getFunction().hasMinSize() &&
15876       isOperationLegal(ISD::MUL, N->getValueType(0)))
15877     return SDValue();
15878 
15879   auto IsProfitable = [this](bool IsNeg, bool IsAddOne, EVT VT) -> bool {
15880     switch (this->Subtarget.getCPUDirective()) {
15881     default:
15882       // TODO: enhance the condition for subtarget before pwr8
15883       return false;
15884     case PPC::DIR_PWR8:
15885       //  type        mul     add    shl
15886       // scalar        4       1      1
15887       // vector        7       2      2
15888       return true;
15889     case PPC::DIR_PWR9:
15890     case PPC::DIR_PWR_FUTURE:
15891       //  type        mul     add    shl
15892       // scalar        5       2      2
15893       // vector        7       2      2
15894 
15895       // The cycle RATIO of related operations are showed as a table above.
15896       // Because mul is 5(scalar)/7(vector), add/sub/shl are all 2 for both
15897       // scalar and vector type. For 2 instrs patterns, add/sub + shl
15898       // are 4, it is always profitable; but for 3 instrs patterns
15899       // (mul x, -(2^N + 1)) => -(add (shl x, N), x), sub + add + shl are 6.
15900       // So we should only do it for vector type.
15901       return IsAddOne && IsNeg ? VT.isVector() : true;
15902     }
15903   };
15904 
15905   EVT VT = N->getValueType(0);
15906   SDLoc DL(N);
15907 
15908   const APInt &MulAmt = ConstOpOrElement->getAPIntValue();
15909   bool IsNeg = MulAmt.isNegative();
15910   APInt MulAmtAbs = MulAmt.abs();
15911 
15912   if ((MulAmtAbs - 1).isPowerOf2()) {
15913     // (mul x, 2^N + 1) => (add (shl x, N), x)
15914     // (mul x, -(2^N + 1)) => -(add (shl x, N), x)
15915 
15916     if (!IsProfitable(IsNeg, true, VT))
15917       return SDValue();
15918 
15919     SDValue Op0 = N->getOperand(0);
15920     SDValue Op1 =
15921         DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
15922                     DAG.getConstant((MulAmtAbs - 1).logBase2(), DL, VT));
15923     SDValue Res = DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);
15924 
15925     if (!IsNeg)
15926       return Res;
15927 
15928     return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
15929   } else if ((MulAmtAbs + 1).isPowerOf2()) {
15930     // (mul x, 2^N - 1) => (sub (shl x, N), x)
15931     // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
15932 
15933     if (!IsProfitable(IsNeg, false, VT))
15934       return SDValue();
15935 
15936     SDValue Op0 = N->getOperand(0);
15937     SDValue Op1 =
15938         DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
15939                     DAG.getConstant((MulAmtAbs + 1).logBase2(), DL, VT));
15940 
15941     if (!IsNeg)
15942       return DAG.getNode(ISD::SUB, DL, VT, Op1, Op0);
15943     else
15944       return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);
15945 
15946   } else {
15947     return SDValue();
15948   }
15949 }
15950 
15951 bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
15952   // Only duplicate to increase tail-calls for the 64bit SysV ABIs.
15953   if (!Subtarget.is64BitELFABI())
15954     return false;
15955 
15956   // If not a tail call then no need to proceed.
15957   if (!CI->isTailCall())
15958     return false;
15959 
15960   // If sibling calls have been disabled and tail-calls aren't guaranteed
15961   // there is no reason to duplicate.
15962   auto &TM = getTargetMachine();
15963   if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
15964     return false;
15965 
15966   // Can't tail call a function called indirectly, or if it has variadic args.
15967   const Function *Callee = CI->getCalledFunction();
15968   if (!Callee || Callee->isVarArg())
15969     return false;
15970 
15971   // Make sure the callee and caller calling conventions are eligible for tco.
15972   const Function *Caller = CI->getParent()->getParent();
15973   if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
15974                                            CI->getCallingConv()))
15975       return false;
15976 
15977   // If the function is local then we have a good chance at tail-calling it
15978   return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
15979 }
15980 
15981 bool PPCTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
15982   if (!Subtarget.hasVSX())
15983     return false;
15984   if (Subtarget.hasP9Vector() && VT == MVT::f128)
15985     return true;
15986   return VT == MVT::f32 || VT == MVT::f64 ||
15987     VT == MVT::v4f32 || VT == MVT::v2f64;
15988 }
15989 
15990 bool PPCTargetLowering::
15991 isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
15992   const Value *Mask = AndI.getOperand(1);
15993   // If the mask is suitable for andi. or andis. we should sink the and.
15994   if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
15995     // Can't handle constants wider than 64-bits.
15996     if (CI->getBitWidth() > 64)
15997       return false;
15998     int64_t ConstVal = CI->getZExtValue();
15999     return isUInt<16>(ConstVal) ||
16000       (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
16001   }
16002 
16003   // For non-constant masks, we can always use the record-form and.
16004   return true;
16005 }
16006 
16007 // Transform (abs (sub (zext a), (zext b))) to (vabsd a b 0)
16008 // Transform (abs (sub (zext a), (zext_invec b))) to (vabsd a b 0)
16009 // Transform (abs (sub (zext_invec a), (zext_invec b))) to (vabsd a b 0)
16010 // Transform (abs (sub (zext_invec a), (zext b))) to (vabsd a b 0)
16011 // Transform (abs (sub a, b) to (vabsd a b 1)) if a & b of type v4i32
16012 SDValue PPCTargetLowering::combineABS(SDNode *N, DAGCombinerInfo &DCI) const {
16013   assert((N->getOpcode() == ISD::ABS) && "Need ABS node here");
16014   assert(Subtarget.hasP9Altivec() &&
16015          "Only combine this when P9 altivec supported!");
16016   EVT VT = N->getValueType(0);
16017   if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
16018     return SDValue();
16019 
16020   SelectionDAG &DAG = DCI.DAG;
16021   SDLoc dl(N);
16022   if (N->getOperand(0).getOpcode() == ISD::SUB) {
16023     // Even for signed integers, if it's known to be positive (as signed
16024     // integer) due to zero-extended inputs.
16025     unsigned SubOpcd0 = N->getOperand(0)->getOperand(0).getOpcode();
16026     unsigned SubOpcd1 = N->getOperand(0)->getOperand(1).getOpcode();
16027     if ((SubOpcd0 == ISD::ZERO_EXTEND ||
16028          SubOpcd0 == ISD::ZERO_EXTEND_VECTOR_INREG) &&
16029         (SubOpcd1 == ISD::ZERO_EXTEND ||
16030          SubOpcd1 == ISD::ZERO_EXTEND_VECTOR_INREG)) {
16031       return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
16032                          N->getOperand(0)->getOperand(0),
16033                          N->getOperand(0)->getOperand(1),
16034                          DAG.getTargetConstant(0, dl, MVT::i32));
16035     }
16036 
16037     // For type v4i32, it can be optimized with xvnegsp + vabsduw
16038     if (N->getOperand(0).getValueType() == MVT::v4i32 &&
16039         N->getOperand(0).hasOneUse()) {
16040       return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
16041                          N->getOperand(0)->getOperand(0),
16042                          N->getOperand(0)->getOperand(1),
16043                          DAG.getTargetConstant(1, dl, MVT::i32));
16044     }
16045   }
16046 
16047   return SDValue();
16048 }
16049 
16050 // For type v4i32/v8ii16/v16i8, transform
16051 // from (vselect (setcc a, b, setugt), (sub a, b), (sub b, a)) to (vabsd a, b)
16052 // from (vselect (setcc a, b, setuge), (sub a, b), (sub b, a)) to (vabsd a, b)
16053 // from (vselect (setcc a, b, setult), (sub b, a), (sub a, b)) to (vabsd a, b)
16054 // from (vselect (setcc a, b, setule), (sub b, a), (sub a, b)) to (vabsd a, b)
16055 SDValue PPCTargetLowering::combineVSelect(SDNode *N,
16056                                           DAGCombinerInfo &DCI) const {
16057   assert((N->getOpcode() == ISD::VSELECT) && "Need VSELECT node here");
16058   assert(Subtarget.hasP9Altivec() &&
16059          "Only combine this when P9 altivec supported!");
16060 
16061   SelectionDAG &DAG = DCI.DAG;
16062   SDLoc dl(N);
16063   SDValue Cond = N->getOperand(0);
16064   SDValue TrueOpnd = N->getOperand(1);
16065   SDValue FalseOpnd = N->getOperand(2);
16066   EVT VT = N->getOperand(1).getValueType();
16067 
16068   if (Cond.getOpcode() != ISD::SETCC || TrueOpnd.getOpcode() != ISD::SUB ||
16069       FalseOpnd.getOpcode() != ISD::SUB)
16070     return SDValue();
16071 
16072   // ABSD only available for type v4i32/v8i16/v16i8
16073   if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
16074     return SDValue();
16075 
16076   // At least to save one more dependent computation
16077   if (!(Cond.hasOneUse() || TrueOpnd.hasOneUse() || FalseOpnd.hasOneUse()))
16078     return SDValue();
16079 
16080   ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
16081 
16082   // Can only handle unsigned comparison here
16083   switch (CC) {
16084   default:
16085     return SDValue();
16086   case ISD::SETUGT:
16087   case ISD::SETUGE:
16088     break;
16089   case ISD::SETULT:
16090   case ISD::SETULE:
16091     std::swap(TrueOpnd, FalseOpnd);
16092     break;
16093   }
16094 
16095   SDValue CmpOpnd1 = Cond.getOperand(0);
16096   SDValue CmpOpnd2 = Cond.getOperand(1);
16097 
16098   // SETCC CmpOpnd1 CmpOpnd2 cond
16099   // TrueOpnd = CmpOpnd1 - CmpOpnd2
16100   // FalseOpnd = CmpOpnd2 - CmpOpnd1
16101   if (TrueOpnd.getOperand(0) == CmpOpnd1 &&
16102       TrueOpnd.getOperand(1) == CmpOpnd2 &&
16103       FalseOpnd.getOperand(0) == CmpOpnd2 &&
16104       FalseOpnd.getOperand(1) == CmpOpnd1) {
16105     return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(1).getValueType(),
16106                        CmpOpnd1, CmpOpnd2,
16107                        DAG.getTargetConstant(0, dl, MVT::i32));
16108   }
16109 
16110   return SDValue();
16111 }
16112