//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCCCState.h"
#include "PPCCallingConv.h"
#include "PPCFrameLowering.h"
#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCRegisterInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSymbolXCOFF.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "ppc-lowering"

static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
    cl::desc("disable preincrement load/store generation on PPC"),
    cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
    cl::desc("disable setting the node scheduling preference to ILP on PPC"),
    cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
    cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableSCO("disable-ppc-sco",
    cl::desc("disable sibling call optimization on ppc"), cl::Hidden);

static cl::opt<bool> DisableInnermostLoopAlign32(
    "disable-ppc-innermost-loop-align32",
    cl::desc("don't always align innermost loop to 32 bytes on ppc"),
    cl::Hidden);

static cl::opt<bool> UseAbsoluteJumpTables("ppc-use-absolute-jumptables",
    cl::desc("use absolute jump tables on ppc"), cl::Hidden);

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumSiblingCalls, "Number of sibling calls");
STATISTIC(ShufflesHandledWithVPERM, "Number of shuffles lowered to a VPERM");
STATISTIC(NumDynamicAllocaProbed, "Number of dynamic stack allocations probed");

static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int);

static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl);

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
                                     const PPCSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4));

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  if (!useSoftFloat()) {
    if (hasSPE()) {
      addRegisterClass(MVT::f32, &PPC::GPRCRegClass);
      addRegisterClass(MVT::f64, &PPC::SPERCRegClass);
    } else {
      addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
      addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
    }
  }

  // Match BITREVERSE to the customized fast code sequence in the .td file.
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);

  // Sub-word ATOMIC_CMP_SWAP needs to ensure that the input is zero-extended.
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
  }
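  // For example, a sign-extending i8 load has no single PPC instruction, so
  // the Expand action typically yields a zero-extending load followed by an
  // explicit sign-extension (a rough sketch of the legalized sequence):
  //   lbz r3, 0(r4)   ; zero-extending byte load
  //   extsb r3, r3    ; sign-extend the loaded byte in-register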

  if (Subtarget.isISA3_0()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Legal);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Legal);
    setTruncStoreAction(MVT::f64, MVT::f16, Legal);
    setTruncStoreAction(MVT::f32, MVT::f16, Legal);
  } else {
    // No extending loads from f16 and no HW conversions back and forth.
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
    setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  }

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  if (!Subtarget.hasSPE()) {
    setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
  }
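  // Pre-increment memory ops select the "update" instruction forms, which
  // write the computed effective address back into the base register. A
  // rough sketch of what this enables:
  //   lwzu r3, 4(r9)   ; r3 = *(r9 + 4), then r9 += 4
  //   stwu r3, -8(r1)  ; *(r1 - 8) = r3, then r1 -= 8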

  // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }
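  // These nodes map onto the carry-propagating arithmetic instructions, which
  // chain through the CA bit in the XER. For example, a 64-bit add on a
  // 32-bit target legalizes to roughly:
  //   addc r3, r5, r7  ; add low halves, setting CA
  //   adde r4, r6, r8  ; add high halves plus CA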

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType (ISD::SINT_TO_FP, MVT::i1,
                         isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // Expand ppcf128 to i32 by hand for the benefit of the llvm-gcc bootstrap
  // on PPC (the libcall is not available).
  setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL,  MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT,  MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions unless we are on P9, where we may
  // use a hardware instruction to compute the remainder. When the results of
  // both the remainder and the division are required, it is more efficient to
  // compute the remainder from the result of the division rather than use the
  // remainder instruction. The instructions are legalized directly because
  // the DivRemPairsPass performs the transformation at the IR level.
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::SREM, MVT::i32, Legal);
    setOperationAction(ISD::UREM, MVT::i32, Legal);
    setOperationAction(ISD::SREM, MVT::i64, Legal);
    setOperationAction(ISD::UREM, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::SREM, MVT::i32, Expand);
    setOperationAction(ISD::UREM, MVT::i32, Expand);
    setOperationAction(ISD::SREM, MVT::i64, Expand);
    setOperationAction(ISD::UREM, MVT::i64, Expand);
  }
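  // On ISA 3.0 the remainder maps directly onto the modulo instructions
  // (modsw/moduw for i32, modsd/modud for i64). On older cores the Expand
  // action recomputes it from the quotient, roughly:
  //   divw r5, r3, r4    ; q = a / b
  //   mullw r5, r5, r4   ; q * b
  //   subf r5, r5, r3    ; a - q * b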

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // Handle constrained floating-point operations for scalar types.
  // TODO: Handle SPE-specific operations.
  setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FMA, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);

  setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FMA, MVT::f64, Legal);
  if (Subtarget.hasVSX())
    setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f64, Legal);

  if (Subtarget.hasFSQRT()) {
    setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::STRICT_FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FCEIL,  MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::f32, Legal);

    setOperationAction(ISD::STRICT_FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FCEIL,  MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::f64, Legal);
  }

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FPOW , MVT::f64, Expand);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);
  setOperationAction(ISD::FPOW , MVT::f32, Expand);
  if (Subtarget.hasSPE()) {
    setOperationAction(ISD::FMA  , MVT::f64, Expand);
    setOperationAction(ISD::FMA  , MVT::f32, Expand);
  } else {
    setOperationAction(ISD::FMA  , MVT::f64, Legal);
    setOperationAction(ISD::FMA  , MVT::f32, Legal);
  }

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }
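  // With FPRND these map directly onto the "round to integer" instructions
  // (a sketch; frin rounds to nearest with ties away from zero, matching
  // FROUND's semantics):
  //   frim f1, f2  ; floor: round toward -infinity
  //   frip f1, f2  ; ceil:  round toward +infinity
  //   friz f1, f2  ; trunc: round toward zero
  //   frin f1, f2  ; round: to nearest, ties away from zero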

  // PowerPC does not have BSWAP, but we can use the vector BSWAP instruction
  // xxbrd to speed up scalar BSWAP64.
  // CTPOP and CTTZ were introduced in P8 and P9, respectively.
  setOperationAction(ISD::BSWAP, MVT::i32  , Expand);
  if (Subtarget.hasP9Vector())
    setOperationAction(ISD::BSWAP, MVT::i64  , Custom);
  else
    setOperationAction(ISD::BSWAP, MVT::i64  , Expand);
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::CTTZ , MVT::i32  , Legal);
    setOperationAction(ISD::CTTZ , MVT::i64  , Legal);
  } else {
    setOperationAction(ISD::CTTZ , MVT::i32  , Expand);
    setOperationAction(ISD::CTTZ , MVT::i64  , Expand);
  }

  if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
    setOperationAction(ISD::CTPOP, MVT::i32  , Legal);
    setOperationAction(ISD::CTPOP, MVT::i64  , Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32  , Expand);
    setOperationAction(ISD::CTPOP, MVT::i64  , Expand);
  }
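  // Cores with fast popcount use the native instructions, e.g.:
  //   popcntw r3, r4  ; population count of each word
  //   popcntd r3, r4  ; population count of the doubleword
  // Otherwise CTPOP expands into the generic shift/mask/add sequence.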

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32   , Expand);
  setOperationAction(ISD::ROTR, MVT::i64   , Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have SELECT.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
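  // fsel computes "FRT = (FRA >= 0.0) ? FRC : FRB", so a select_cc such as
  // (a >= 0.0) ? b : c can become a single instruction (a sketch):
  //   fsel f1, f2, f3, f4  ; f1 = (f2 >= 0.0) ? f3 : f4
  // Because fsel mishandles NaNs and -0.0, the custom lowering only emits it
  // when the comparison semantics permit.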

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires a SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT,  MVT::Other, Expand);

  if (Subtarget.hasSPE()) {
    // SPE has built-in conversions.
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
  } else {
    // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

    // PowerPC does not have [U|S]INT_TO_FP.
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
  }
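  // The custom FP_TO_SINT lowering rounds with fctiwz and then moves the
  // integer result through memory (on cores without direct moves), roughly:
  //   fctiwz f0, f1   ; f64 -> i32, rounding toward zero
  //   stfd f0, 8(r1)  ; spill the FP register to a stack slot
  //   lwz r3, 12(r1)  ; reload the low word (big-endian layout)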

  if (Subtarget.hasDirectMove() && isPPC64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i64, Legal);
    setOperationAction(ISD::BITCAST, MVT::f64, Legal);
    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::LRINT, MVT::f64, Legal);
      setOperationAction(ISD::LRINT, MVT::f32, Legal);
      setOperationAction(ISD::LLRINT, MVT::f64, Legal);
      setOperationAction(ISD::LLRINT, MVT::f32, Legal);
      setOperationAction(ISD::LROUND, MVT::f64, Legal);
      setOperationAction(ISD::LROUND, MVT::f32, Legal);
      setOperationAction(ISD::LLROUND, MVT::f64, Legal);
      setOperationAction(ISD::LLROUND, MVT::f32, Legal);
    }
  } else {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
  }
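  // With direct moves, these bitcasts stay in registers instead of bouncing
  // through a stack slot; e.g. for i64 <-> f64:
  //   mtvsrd vs0, r3  ; GPR -> VSR
  //   mfvsrd r3, vs0  ; VSR -> GPR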

  // We cannot sextinreg(i1).  Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling but a light-weight setjmp/longjmp replacement to
  // support continuation, user-level threading, etc. As a result, no other
  // SjLj exception interfaces are implemented, so please don't build your
  // own exception handling based on them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress,  MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i32, Custom);
  setOperationAction(ISD::JumpTable,     MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress,  MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i64, Custom);
  setOperationAction(ISD::JumpTable,     MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART           , MVT::Other, Custom);

  if (Subtarget.is64BitELFABI()) {
    // VAARG always uses double-word chunks, so promote anything smaller.
    setOperationAction(ISD::VAARG, MVT::i1, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i8, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i16, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i32, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
  } else if (Subtarget.is32BitELFABI()) {
    // VAARG is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VAARG, MVT::i64, Custom);
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  // VACOPY is custom lowered with the 32-bit SVR4 ABI.
  if (Subtarget.is32BitELFABI())
    setOperationAction(ISD::VACOPY            , MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY            , MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE      , MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64  , Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  // Comparisons that require checking two conditions.
  if (Subtarget.hasSPE()) {
    setCondCodeAction(ISD::SETO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::f64, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
  }
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);
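  // Each of these predicates needs two bits of the CR field set by fcmpu, so
  // Expand produces a compare plus CR logic; e.g. SETUEQ (unordered or equal)
  // is roughly:
  //   fcmpu cr0, f1, f2
  //   cror 2, 2, 3  ; EQ |= UN, combining the "equal" and "unordered" bits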

  if (Subtarget.has64BitSupport()) {
    // 64-bit implementations also have instructions for converting between
    // i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    if (Subtarget.hasSPE()) {
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
    } else
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }
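  // FPCVT rounds out the conversion set; the relevant instructions are:
  //   fctidz / fctiduz  ; fp -> signed/unsigned i64, round toward zero
  //   fctiwz / fctiwuz  ; fp -> signed/unsigned i32, round toward zero
  //   fcfid  / fcfidu   ; signed/unsigned i64 -> f64
  //   fcfids / fcfidus  ; signed/unsigned i64 -> f32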

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget.hasVSX()) {
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
  }

  if (Subtarget.hasAltivec()) {
    for (MVT VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
      setOperationAction(ISD::SADDSAT, VT, Legal);
      setOperationAction(ISD::SSUBSAT, VT, Legal);
      setOperationAction(ISD::UADDSAT, VT, Legal);
      setOperationAction(ISD::USUBSAT, VT, Legal);
    }
    // First set the operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
      // add/sub are legal for all supported vector VTs.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // For v2i64, these are only valid with P8Vector. This is corrected
      // after the loop.
      if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) {
        setOperationAction(ISD::SMAX, VT, Legal);
        setOperationAction(ISD::SMIN, VT, Legal);
        setOperationAction(ISD::UMAX, VT, Legal);
        setOperationAction(ISD::UMIN, VT, Legal);
      } else {
        setOperationAction(ISD::SMAX, VT, Expand);
        setOperationAction(ISD::SMIN, VT, Expand);
        setOperationAction(ISD::UMAX, VT, Expand);
        setOperationAction(ISD::UMIN, VT, Expand);
      }

      if (Subtarget.hasVSX()) {
        setOperationAction(ISD::FMAXNUM, VT, Legal);
        setOperationAction(ISD::FMINNUM, VT, Legal);
      }

      // Vector instructions introduced in P8.
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      } else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // Vector instructions introduced in P9.
      if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
        setOperationAction(ISD::CTTZ, VT, Legal);
      else
        setOperationAction(ISD::CTTZ, VT, Expand);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);
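      // vperm selects arbitrary bytes from two source vectors under a byte
      // permute-control vector, so any fixed shuffle can be expressed once
      // the operands are reinterpreted as v16i8; the VPERM lowering encodes
      // the shuffle mask as that control vector (counted by the
      // ShufflesHandledWithVPERM statistic above).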

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND   , VT, Promote);
      AddPromotedToType (ISD::AND   , VT, MVT::v4i32);
      setOperationAction(ISD::OR    , VT, Promote);
      AddPromotedToType (ISD::OR    , VT, MVT::v4i32);
      setOperationAction(ISD::XOR   , VT, Promote);
      AddPromotedToType (ISD::XOR   , VT, MVT::v4i32);
      setOperationAction(ISD::LOAD  , VT, Promote);
      AddPromotedToType (ISD::LOAD  , VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType (ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType (ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL , VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL,  VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT,  VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }
    setOperationAction(ISD::SELECT_CC, MVT::v4i32, Expand);
    if (!Subtarget.hasP8Vector()) {
      setOperationAction(ISD::SMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::SMIN, MVT::v2i64, Expand);
      setOperationAction(ISD::UMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::UMIN, MVT::v2i64, Expand);
    }

    for (auto VT : {MVT::v2i64, MVT::v4i32, MVT::v8i16, MVT::v16i8})
      setOperationAction(ISD::ABS, VT, Custom);

    // We can custom expand all VECTOR_SHUFFLEs to VPERM; others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    // Vector truncates to sub-word integers that fit in an Altivec/VSX
    // register are cheap, so handle them before they get expanded to scalar.
    setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);

    setOperationAction(ISD::AND   , MVT::v4i32, Legal);
    setOperationAction(ISD::OR    , MVT::v4i32, Legal);
    setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE , MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    // Without hasP8Altivec set, v2i64 SMAX isn't available.
    // But ABS custom lowering requires SMAX support.
    if (!Subtarget.hasP8Altivec())
      setOperationAction(ISD::ABS, MVT::v2i64, Expand);

    // Custom lower ROTL of v1i128 to a VECTOR_SHUFFLE of v16i8.
    setOperationAction(ISD::ROTL, MVT::v1i128, Custom);
    // With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w).
    if (Subtarget.hasAltivec())
      for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})
        setOperationAction(ISD::ROTL, VT, Legal);
    // With hasP8Altivec set, we can lower ISD::ROTL to vrld.
    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::ROTL, MVT::v2i64, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO,   MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
      }
      if (Subtarget.hasDirectMove() && isPPC64) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
      }
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      // The nearbyint variants are not allowed to raise the inexact exception
      // so we can only code-gen them with unsafe math.
      if (TM.Options.UnsafeFPMath) {
        setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
        setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
      }

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::f64, Legal);
      setOperationAction(ISD::FRINT, MVT::f64, Legal);

      setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
      setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::f32, Legal);
      setOperationAction(ISD::FRINT, MVT::f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO,   MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        // 128-bit shifts can be accomplished via 3 instructions for SHL and
        // SRL, but not for SRA, because of the available instructions
        // VS{RL} and VS{RL}O. However, due to direct-move costs, it's not
        // worth doing.
        setOperationAction(ISD::SHL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRA, MVT::v1i128, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      } else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64);
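      // Promoting v2i64 memory ops to v2f64 lets both share the VSX
      // doubleword vector load/store (e.g. lxvd2x/stxvd2x); only the node's
      // value type changes, not the bytes moved.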

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Custom handling for partial vectors of integers converted to
      // floating point. We already have optimal handling for v2i32 through
      // the DAG combine, so those aren't necessary.
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);

      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v2f64, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Legal);

      if (Subtarget.hasDirectMove())
        setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

      // Handle constrained floating-point operations for vector types. The
      // predicate is hasVSX because Altivec instructions do not raise
      // floating-point exceptions, while VSX vector instructions do.
      setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMA, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMAXNUM, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMINNUM, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FCEIL,  MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMA, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMAXNUM, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMINNUM, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FCEIL,  MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FROUND, MVT::v2f64, Legal);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }

    if (Subtarget.hasP9Vector()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

      // 128-bit shifts can be accomplished via 3 instructions for SHL and
      // SRL, but not for SRA, because of the available instructions
      // VS{RL} and VS{RL}O.
      setOperationAction(ISD::SHL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRA, MVT::v1i128, Expand);

      addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
      setOperationAction(ISD::FADD, MVT::f128, Legal);
      setOperationAction(ISD::FSUB, MVT::f128, Legal);
      setOperationAction(ISD::FDIV, MVT::f128, Legal);
      setOperationAction(ISD::FMUL, MVT::f128, Legal);
      setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
      // No extending loads to f128 on PPC.
      for (MVT FPT : MVT::fp_valuetypes())
        setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
      setOperationAction(ISD::FMA, MVT::f128, Legal);
      setCondCodeAction(ISD::SETULT, MVT::f128, Expand);
      setCondCodeAction(ISD::SETUGT, MVT::f128, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand);
      setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
      setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
      setCondCodeAction(ISD::SETONE, MVT::f128, Expand);

      setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
      setOperationAction(ISD::FRINT, MVT::f128, Legal);
      setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
      setOperationAction(ISD::FCEIL, MVT::f128, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
      setOperationAction(ISD::FROUND, MVT::f128, Legal);

      setOperationAction(ISD::SELECT, MVT::f128, Expand);
      setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
      setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
      setTruncStoreAction(MVT::f128, MVT::f64, Expand);
      setTruncStoreAction(MVT::f128, MVT::f32, Expand);
      setOperationAction(ISD::BITCAST, MVT::i128, Custom);
      // No implementation for these ops on PowerPC.
      setOperationAction(ISD::FSIN, MVT::f128, Expand);
      setOperationAction(ISD::FCOS, MVT::f128, Expand);
      setOperationAction(ISD::FPOW, MVT::f128, Expand);
      setOperationAction(ISD::FPOWI, MVT::f128, Expand);
      setOperationAction(ISD::FREM, MVT::f128, Expand);

      // Handle constrained floating-point operations for fp128.
      setOperationAction(ISD::STRICT_FADD, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FSUB, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FMUL, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FDIV, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FMA, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FSQRT, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Legal);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
      setOperationAction(ISD::STRICT_FRINT, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FCEIL, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FROUND, MVT::f128, Legal);
      setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
      setOperationAction(ISD::BSWAP, MVT::v8i16, Legal);
      setOperationAction(ISD::BSWAP, MVT::v4i32, Legal);
      setOperationAction(ISD::BSWAP, MVT::v2i64, Legal);
      setOperationAction(ISD::BSWAP, MVT::v1i128, Legal);
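      // These vector BSWAPs map onto the ISA 3.0 byte-reverse instructions,
      // one per element width:
      //   xxbrh  ; v8i16
      //   xxbrw  ; v4i32
      //   xxbrd  ; v2i64
      //   xxbrq  ; v1i128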
    }

    if (Subtarget.hasP9Altivec()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);

      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8,  Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8,  Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
    }
  }

  if (Subtarget.hasQPX()) {
    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FREM, MVT::v4f64, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);

    setOperationAction(ISD::LOAD  , MVT::v4f64, Custom);
    setOperationAction(ISD::STORE , MVT::v4f64, Custom);

    setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f64, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f64, Expand);
    setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f64, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f64, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f64, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);

    setOperationAction(ISD::FP_TO_SINT , MVT::v4f64, Legal);
    setOperationAction(ISD::FP_TO_UINT , MVT::v4f64, Expand);

    setOperationAction(ISD::FP_ROUND , MVT::v4f32, Legal);
    setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);

    setOperationAction(ISD::FNEG , MVT::v4f64, Legal);
    setOperationAction(ISD::FABS , MVT::v4f64, Legal);
    setOperationAction(ISD::FSIN , MVT::v4f64, Expand);
    setOperationAction(ISD::FCOS , MVT::v4f64, Expand);
    setOperationAction(ISD::FPOW , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG2 , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG10 , MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP , MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP2 , MVT::v4f64, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);

    addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FREM, MVT::v4f32, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);

    setOperationAction(ISD::LOAD  , MVT::v4f32, Custom);
    setOperationAction(ISD::STORE , MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f32, Expand);
    setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f32, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f32, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    setOperationAction(ISD::FP_TO_SINT , MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT , MVT::v4f32, Expand);

    setOperationAction(ISD::FNEG , MVT::v4f32, Legal);
    setOperationAction(ISD::FABS , MVT::v4f32, Legal);
    setOperationAction(ISD::FSIN , MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS , MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2 , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10 , MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP , MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2 , MVT::v4f32, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);

    setOperationAction(ISD::AND , MVT::v4i1, Legal);
    setOperationAction(ISD::OR , MVT::v4i1, Legal);
    setOperationAction(ISD::XOR , MVT::v4i1, Legal);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);

    setOperationAction(ISD::LOAD  , MVT::v4i1, Custom);
    setOperationAction(ISD::STORE , MVT::v4i1, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4i1, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4i1, Expand);
    setOperationAction(ISD::CONCAT_VECTORS , MVT::v4i1, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4i1, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4i1, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);

    addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);

    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL,  MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL,  MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);

    // These need to set FE_INEXACT, and so cannot be vectorized here.
    setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);

    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    } else {
      setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);

      setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    }

    // TODO: Handle constrained floating-point operations for v4f64.
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD,  MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasAltivec()) {
    // Altivec instructions set fields to all zeros or all ones.
1251     setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
1252   }
1253 
1254   if (!isPPC64) {
1255     // These libcalls are not available in 32-bit.
1256     setLibcallName(RTLIB::SHL_I128, nullptr);
1257     setLibcallName(RTLIB::SRL_I128, nullptr);
1258     setLibcallName(RTLIB::SRA_I128, nullptr);
1259   }
1260 
1261   setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);
1262 
1263   // We have target-specific dag combine patterns for the following nodes:
1264   setTargetDAGCombine(ISD::ADD);
1265   setTargetDAGCombine(ISD::SHL);
1266   setTargetDAGCombine(ISD::SRA);
1267   setTargetDAGCombine(ISD::SRL);
1268   setTargetDAGCombine(ISD::MUL);
1269   setTargetDAGCombine(ISD::FMA);
1270   setTargetDAGCombine(ISD::SINT_TO_FP);
1271   setTargetDAGCombine(ISD::BUILD_VECTOR);
1272   if (Subtarget.hasFPCVT())
1273     setTargetDAGCombine(ISD::UINT_TO_FP);
1274   setTargetDAGCombine(ISD::LOAD);
1275   setTargetDAGCombine(ISD::STORE);
1276   setTargetDAGCombine(ISD::BR_CC);
1277   if (Subtarget.useCRBits())
1278     setTargetDAGCombine(ISD::BRCOND);
1279   setTargetDAGCombine(ISD::BSWAP);
1280   setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1281   setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
1282   setTargetDAGCombine(ISD::INTRINSIC_VOID);
1283 
1284   setTargetDAGCombine(ISD::SIGN_EXTEND);
1285   setTargetDAGCombine(ISD::ZERO_EXTEND);
1286   setTargetDAGCombine(ISD::ANY_EXTEND);
1287 
1288   setTargetDAGCombine(ISD::TRUNCATE);
1289   setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1290 
1291 
1292   if (Subtarget.useCRBits()) {
1293     setTargetDAGCombine(ISD::TRUNCATE);
1294     setTargetDAGCombine(ISD::SETCC);
1295     setTargetDAGCombine(ISD::SELECT_CC);
1296   }
1297 
1298   // Use reciprocal estimates.
1299   if (TM.Options.UnsafeFPMath) {
1300     setTargetDAGCombine(ISD::FDIV);
1301     setTargetDAGCombine(ISD::FSQRT);
1302   }
1303 
1304   if (Subtarget.hasP9Altivec()) {
1305     setTargetDAGCombine(ISD::ABS);
1306     setTargetDAGCombine(ISD::VSELECT);
1307   }
1308 
1309   setLibcallName(RTLIB::LOG_F128, "logf128");
1310   setLibcallName(RTLIB::LOG2_F128, "log2f128");
1311   setLibcallName(RTLIB::LOG10_F128, "log10f128");
1312   setLibcallName(RTLIB::EXP_F128, "expf128");
1313   setLibcallName(RTLIB::EXP2_F128, "exp2f128");
1314   setLibcallName(RTLIB::SIN_F128, "sinf128");
1315   setLibcallName(RTLIB::COS_F128, "cosf128");
1316   setLibcallName(RTLIB::POW_F128, "powf128");
1317   setLibcallName(RTLIB::FMIN_F128, "fminf128");
1318   setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
1319   setLibcallName(RTLIB::POWI_F128, "__powikf2");
1320   setLibcallName(RTLIB::REM_F128, "fmodf128");
1321 
1322   // With 32 condition bits, we don't need to sink (and duplicate) compares
1323   // aggressively in CodeGenPrep.
1324   if (Subtarget.useCRBits()) {
1325     setHasMultipleConditionRegisters();
1326     setJumpIsExpensive();
1327   }
1328 
1329   setMinFunctionAlignment(Align(4));
1330 
1331   switch (Subtarget.getCPUDirective()) {
1332   default: break;
1333   case PPC::DIR_970:
1334   case PPC::DIR_A2:
1335   case PPC::DIR_E500:
1336   case PPC::DIR_E500mc:
1337   case PPC::DIR_E5500:
1338   case PPC::DIR_PWR4:
1339   case PPC::DIR_PWR5:
1340   case PPC::DIR_PWR5X:
1341   case PPC::DIR_PWR6:
1342   case PPC::DIR_PWR6X:
1343   case PPC::DIR_PWR7:
1344   case PPC::DIR_PWR8:
1345   case PPC::DIR_PWR9:
1346   case PPC::DIR_PWR10:
1347   case PPC::DIR_PWR_FUTURE:
1348     setPrefLoopAlignment(Align(16));
1349     setPrefFunctionAlignment(Align(16));
1350     break;
1351   }
1352 
1353   if (Subtarget.enableMachineScheduler())
1354     setSchedulingPreference(Sched::Source);
1355   else
1356     setSchedulingPreference(Sched::Hybrid);
1357 
1358   computeRegisterProperties(STI.getRegisterInfo());
1359 
1360   // The Freescale cores do better with aggressive inlining of memcpy and
1361   // friends. GCC uses same threshold of 128 bytes (= 32 word stores).
1362   if (Subtarget.getCPUDirective() == PPC::DIR_E500mc ||
1363       Subtarget.getCPUDirective() == PPC::DIR_E5500) {
1364     MaxStoresPerMemset = 32;
1365     MaxStoresPerMemsetOptSize = 16;
1366     MaxStoresPerMemcpy = 32;
1367     MaxStoresPerMemcpyOptSize = 8;
1368     MaxStoresPerMemmove = 32;
1369     MaxStoresPerMemmoveOptSize = 8;
1370   } else if (Subtarget.getCPUDirective() == PPC::DIR_A2) {
1371     // The A2 also benefits from (very) aggressive inlining of memcpy and
1372     // friends. The overhead of a the function call, even when warm, can be
1373     // over one hundred cycles.
1374     MaxStoresPerMemset = 128;
1375     MaxStoresPerMemcpy = 128;
1376     MaxStoresPerMemmove = 128;
1377     MaxLoadsPerMemcmp = 128;
1378   } else {
1379     MaxLoadsPerMemcmp = 8;
1380     MaxLoadsPerMemcmpOptSize = 4;
1381   }
1382 
1383   // Let the subtarget (CPU) decide if a predictable select is more expensive
1384   // than the corresponding branch. This information is used in CGP to decide
1385   // when to convert selects into branches.
1386   PredictableSelectIsExpensive = Subtarget.isPredictableSelectIsExpensive();
1387 }
1388 
1389 /// getMaxByValAlign - Helper for getByValTypeAlignment to determine
1390 /// the desired ByVal argument alignment.
1391 static void getMaxByValAlign(Type *Ty, Align &MaxAlign, Align MaxMaxAlign) {
1392   if (MaxAlign == MaxMaxAlign)
1393     return;
1394   if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1395     if (MaxMaxAlign >= 32 &&
1396         VTy->getPrimitiveSizeInBits().getFixedSize() >= 256)
1397       MaxAlign = Align(32);
1398     else if (VTy->getPrimitiveSizeInBits().getFixedSize() >= 128 &&
1399              MaxAlign < 16)
1400       MaxAlign = Align(16);
1401   } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1402     Align EltAlign;
1403     getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
1404     if (EltAlign > MaxAlign)
1405       MaxAlign = EltAlign;
1406   } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1407     for (auto *EltTy : STy->elements()) {
1408       Align EltAlign;
1409       getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
1410       if (EltAlign > MaxAlign)
1411         MaxAlign = EltAlign;
1412       if (MaxAlign == MaxMaxAlign)
1413         break;
1414     }
1415   }
1416 }
1417 
1418 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1419 /// function arguments in the caller parameter area.
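/// For illustration: on PPC64 an aggregate with no vector members gets the
/// default 8-byte alignment; with Altivec, a member of at least 128 bits
/// (e.g. <4 x float>) raises this to 16 bytes, and with QPX a member of at
/// least 256 bits (e.g. <4 x double>) raises it to 32 bytes.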
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {
  // 16-byte and wider vectors are passed on a 16-byte boundary.
  // The rest are passed on an 8-byte boundary on PPC64 and a 4-byte
  // boundary on PPC32.
  Align Alignment = Subtarget.isPPC64() ? Align(8) : Align(4);
  if (Subtarget.hasAltivec() || Subtarget.hasQPX())
    getMaxByValAlign(Ty, Alignment, Subtarget.hasQPX() ? Align(32) : Align(16));
  return Alignment.value();
}

bool PPCTargetLowering::useSoftFloat() const {
  return Subtarget.useSoftFloat();
}

bool PPCTargetLowering::hasSPE() const {
  return Subtarget.hasSPE();
}

bool PPCTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
  return VT.isScalarInteger();
}

/// isMulhCheaperThanMulShift - Return true if a mulh[s|u] node for a specific
/// type is cheaper than a multiply followed by a shift.
/// This is true for words and doublewords on 64-bit PowerPC.
bool PPCTargetLowering::isMulhCheaperThanMulShift(EVT Type) const {
  if (Subtarget.isPPC64() && (isOperationLegal(ISD::MULHS, Type) ||
                              isOperationLegal(ISD::MULHU, Type)))
    return true;
  return TargetLowering::isMulhCheaperThanMulShift(Type);
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((PPCISD::NodeType)Opcode) {
  case PPCISD::FIRST_NUMBER:    break;
  case PPCISD::FSEL:            return "PPCISD::FSEL";
  case PPCISD::XSMAXCDP:        return "PPCISD::XSMAXCDP";
  case PPCISD::XSMINCDP:        return "PPCISD::XSMINCDP";
  case PPCISD::FCFID:           return "PPCISD::FCFID";
  case PPCISD::FCFIDU:          return "PPCISD::FCFIDU";
  case PPCISD::FCFIDS:          return "PPCISD::FCFIDS";
  case PPCISD::FCFIDUS:         return "PPCISD::FCFIDUS";
  case PPCISD::FCTIDZ:          return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ:          return "PPCISD::FCTIWZ";
  case PPCISD::FCTIDUZ:         return "PPCISD::FCTIDUZ";
  case PPCISD::FCTIWUZ:         return "PPCISD::FCTIWUZ";
  case PPCISD::FP_TO_UINT_IN_VSR:
                                return "PPCISD::FP_TO_UINT_IN_VSR";
  case PPCISD::FP_TO_SINT_IN_VSR:
                                return "PPCISD::FP_TO_SINT_IN_VSR";
  case PPCISD::FRE:             return "PPCISD::FRE";
  case PPCISD::FRSQRTE:         return "PPCISD::FRSQRTE";
  case PPCISD::STFIWX:          return "PPCISD::STFIWX";
  case PPCISD::VPERM:           return "PPCISD::VPERM";
  case PPCISD::XXSPLT:          return "PPCISD::XXSPLT";
  case PPCISD::XXSPLTI_SP_TO_DP:
    return "PPCISD::XXSPLTI_SP_TO_DP";
  case PPCISD::XXSPLTI32DX:
    return "PPCISD::XXSPLTI32DX";
  case PPCISD::VECINSERT:       return "PPCISD::VECINSERT";
  case PPCISD::XXPERMDI:        return "PPCISD::XXPERMDI";
  case PPCISD::VECSHL:          return "PPCISD::VECSHL";
  case PPCISD::CMPB:            return "PPCISD::CMPB";
  case PPCISD::Hi:              return "PPCISD::Hi";
  case PPCISD::Lo:              return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY:       return "PPCISD::TOC_ENTRY";
  case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
  case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
  case PPCISD::DYNALLOC:        return "PPCISD::DYNALLOC";
  case PPCISD::DYNAREAOFFSET:   return "PPCISD::DYNAREAOFFSET";
  case PPCISD::PROBED_ALLOCA:   return "PPCISD::PROBED_ALLOCA";
  case PPCISD::GlobalBaseReg:   return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL:             return "PPCISD::SRL";
  case PPCISD::SRA:             return "PPCISD::SRA";
  case PPCISD::SHL:             return "PPCISD::SHL";
  case PPCISD::SRA_ADDZE:       return "PPCISD::SRA_ADDZE";
  case PPCISD::CALL:            return "PPCISD::CALL";
  case PPCISD::CALL_NOP:        return "PPCISD::CALL_NOP";
  case PPCISD::CALL_NOTOC:      return "PPCISD::CALL_NOTOC";
  case PPCISD::MTCTR:           return "PPCISD::MTCTR";
  case PPCISD::BCTRL:           return "PPCISD::BCTRL";
  case PPCISD::BCTRL_LOAD_TOC:  return "PPCISD::BCTRL_LOAD_TOC";
  case PPCISD::RET_FLAG:        return "PPCISD::RET_FLAG";
  case PPCISD::READ_TIME_BASE:  return "PPCISD::READ_TIME_BASE";
  case PPCISD::EH_SJLJ_SETJMP:  return "PPCISD::EH_SJLJ_SETJMP";
  case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
  case PPCISD::MFOCRF:          return "PPCISD::MFOCRF";
  case PPCISD::MFVSR:           return "PPCISD::MFVSR";
  case PPCISD::MTVSRA:          return "PPCISD::MTVSRA";
  case PPCISD::MTVSRZ:          return "PPCISD::MTVSRZ";
  case PPCISD::SINT_VEC_TO_FP:  return "PPCISD::SINT_VEC_TO_FP";
  case PPCISD::UINT_VEC_TO_FP:  return "PPCISD::UINT_VEC_TO_FP";
  case PPCISD::SCALAR_TO_VECTOR_PERMUTED:
    return "PPCISD::SCALAR_TO_VECTOR_PERMUTED";
  case PPCISD::ANDI_rec_1_EQ_BIT:
    return "PPCISD::ANDI_rec_1_EQ_BIT";
  case PPCISD::ANDI_rec_1_GT_BIT:
    return "PPCISD::ANDI_rec_1_GT_BIT";
  case PPCISD::VCMP:            return "PPCISD::VCMP";
  case PPCISD::VCMPo:           return "PPCISD::VCMPo";
  case PPCISD::LBRX:            return "PPCISD::LBRX";
  case PPCISD::STBRX:           return "PPCISD::STBRX";
  case PPCISD::LFIWAX:          return "PPCISD::LFIWAX";
  case PPCISD::LFIWZX:          return "PPCISD::LFIWZX";
  case PPCISD::LXSIZX:          return "PPCISD::LXSIZX";
  case PPCISD::STXSIX:          return "PPCISD::STXSIX";
  case PPCISD::VEXTS:           return "PPCISD::VEXTS";
  case PPCISD::LXVD2X:          return "PPCISD::LXVD2X";
  case PPCISD::STXVD2X:         return "PPCISD::STXVD2X";
  case PPCISD::LOAD_VEC_BE:     return "PPCISD::LOAD_VEC_BE";
  case PPCISD::STORE_VEC_BE:    return "PPCISD::STORE_VEC_BE";
  case PPCISD::ST_VSR_SCAL_INT:
                                return "PPCISD::ST_VSR_SCAL_INT";
  case PPCISD::COND_BRANCH:     return "PPCISD::COND_BRANCH";
  case PPCISD::BDNZ:            return "PPCISD::BDNZ";
  case PPCISD::BDZ:             return "PPCISD::BDZ";
  case PPCISD::MFFS:            return "PPCISD::MFFS";
  case PPCISD::FADDRTZ:         return "PPCISD::FADDRTZ";
  case PPCISD::TC_RETURN:       return "PPCISD::TC_RETURN";
  case PPCISD::CR6SET:          return "PPCISD::CR6SET";
  case PPCISD::CR6UNSET:        return "PPCISD::CR6UNSET";
  case PPCISD::PPC32_GOT:       return "PPCISD::PPC32_GOT";
  case PPCISD::PPC32_PICGOT:    return "PPCISD::PPC32_PICGOT";
  case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
  case PPCISD::LD_GOT_TPREL_L:  return "PPCISD::LD_GOT_TPREL_L";
  case PPCISD::ADD_TLS:         return "PPCISD::ADD_TLS";
  case PPCISD::ADDIS_TLSGD_HA:  return "PPCISD::ADDIS_TLSGD_HA";
  case PPCISD::ADDI_TLSGD_L:    return "PPCISD::ADDI_TLSGD_L";
  case PPCISD::GET_TLS_ADDR:    return "PPCISD::GET_TLS_ADDR";
  case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
  case PPCISD::ADDIS_TLSLD_HA:  return "PPCISD::ADDIS_TLSLD_HA";
  case PPCISD::ADDI_TLSLD_L:    return "PPCISD::ADDI_TLSLD_L";
  case PPCISD::GET_TLSLD_ADDR:  return "PPCISD::GET_TLSLD_ADDR";
  case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
  case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
  case PPCISD::ADDI_DTPREL_L:   return "PPCISD::ADDI_DTPREL_L";
  case PPCISD::VADD_SPLAT:      return "PPCISD::VADD_SPLAT";
  case PPCISD::SC:              return "PPCISD::SC";
  case PPCISD::CLRBHRB:         return "PPCISD::CLRBHRB";
  case PPCISD::MFBHRBE:         return "PPCISD::MFBHRBE";
  case PPCISD::RFEBB:           return "PPCISD::RFEBB";
  case PPCISD::XXSWAPD:         return "PPCISD::XXSWAPD";
  case PPCISD::SWAP_NO_CHAIN:   return "PPCISD::SWAP_NO_CHAIN";
  case PPCISD::VABSD:           return "PPCISD::VABSD";
  case PPCISD::QVFPERM:         return "PPCISD::QVFPERM";
  case PPCISD::QVGPCI:          return "PPCISD::QVGPCI";
  case PPCISD::QVALIGNI:        return "PPCISD::QVALIGNI";
  case PPCISD::QVESPLATI:       return "PPCISD::QVESPLATI";
  case PPCISD::QBFLT:           return "PPCISD::QBFLT";
  case PPCISD::QVLFSb:          return "PPCISD::QVLFSb";
  case PPCISD::BUILD_FP128:     return "PPCISD::BUILD_FP128";
  case PPCISD::BUILD_SPE64:     return "PPCISD::BUILD_SPE64";
  case PPCISD::EXTRACT_SPE:     return "PPCISD::EXTRACT_SPE";
  case PPCISD::EXTSWSLI:        return "PPCISD::EXTSWSLI";
  case PPCISD::LD_VSX_LH:       return "PPCISD::LD_VSX_LH";
  case PPCISD::FP_EXTEND_HALF:  return "PPCISD::FP_EXTEND_HALF";
  case PPCISD::MAT_PCREL_ADDR:  return "PPCISD::MAT_PCREL_ADDR";
  case PPCISD::LD_SPLAT:        return "PPCISD::LD_SPLAT";
  case PPCISD::FNMSUB:          return "PPCISD::FNMSUB";
  }
  return nullptr;
}

EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
                                          EVT VT) const {
  if (!VT.isVector())
    return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;

  if (Subtarget.hasQPX())
    return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());

  return VT.changeVectorElementTypeToInteger();
}

bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
  return true;
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
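/// For example, with ShuffleKind 0 on a big-endian target, the expected mask
/// selects the odd byte of each halfword from both inputs:
///   <1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31>.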
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 1;
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i),    i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j))
        return false;
  }
  return true;
}

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
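/// For example, with ShuffleKind 0 on a big-endian target, the expected mask
/// selects the low halfword (bytes 2 and 3) of each word from both inputs:
///   <2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31>.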
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+3))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 2;
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1))
        return false;
  }
  return true;
}

/// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
/// current subtarget.
///
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
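/// For example, with ShuffleKind 0 on a big-endian target, the expected mask
/// selects the low word (bytes 4-7) of each doubleword from both inputs:
///   <4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31>.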
bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  const PPCSubtarget& Subtarget =
      static_cast<const PPCSubtarget&>(DAG.getSubtarget());
  if (!Subtarget.hasP8Vector())
    return false;

  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+4) ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+5) ||
          !isConstantOrUndef(N->getMaskElt(i+2),  i*2+6) ||
          !isConstantOrUndef(N->getMaskElt(i+3),  i*2+7))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2),  i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3),  i*2+3))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 4;
    for (unsigned i = 0; i != 8; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2),  i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3),  i*2+j+3) ||
          !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
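/// For example, a big-endian vmrglb with two different inputs corresponds to
/// isVMerge(N, 1, 8, 24), i.e. the byte mask
///   <8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31>.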
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2).  For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
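/// For example, a big-endian vmrglw with two different inputs (ShuffleKind 0)
/// is matched as isVMerge(N, 4, 8, 24), i.e. the byte mask
///   <8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31>.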
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  }
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2).  For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
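/// For example, a big-endian vmrghw with two different inputs (ShuffleKind 0)
/// is matched as isVMerge(N, 4, 0, 16), i.e. the byte mask
///   <0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23>.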
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  }
}

/**
 * Common function used to match vmrgew and vmrgow shuffles
 *
 * The indexOffset determines whether to look for even or odd words in
 * the shuffle mask. This is based on the endianness of the target
 * machine.
 *   - Little Endian:
 *     - Use offset of 0 to check for odd elements
 *     - Use offset of 4 to check for even elements
 *   - Big Endian:
 *     - Use offset of 0 to check for even elements
 *     - Use offset of 4 to check for odd elements
 * A detailed description of the vector element ordering for little endian and
 * big endian can be found at
 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
 * Targeting your applications - what little endian and big endian IBM XL C/C++
 * compiler differences mean to you
 *
 * The mask to the shuffle vector instruction specifies the indices of the
 * elements from the two input vectors to place in the result. The elements are
 * numbered in array-access order, starting with the first vector. These
 * vectors are always of type v16i8, so each vector contains 16 byte-sized
 * elements. More info on the shuffle vector can be found in the
 * http://llvm.org/docs/LangRef.html#shufflevector-instruction
 * Language Reference.
 *
 * The RHSStartValue indicates whether the same input vectors are used (unary)
 * or two different input vectors are used, based on the following:
 *   - If the instruction uses the same vector for both inputs, the range of the
 *     indices will be 0 to 15. In this case, the RHSStart value passed should
 *     be 0.
 *   - If the instruction has two different vectors then the range of the
 *     indices will be 0 to 31. In this case, the RHSStart value passed should
 *     be 16 (indices 0-15 specify elements in the first vector while indices 16
 *     to 31 specify elements in the second vector).
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] IndexOffset Specifies whether to look for even or odd elements
 * \param[in] RHSStartValue Specifies the starting index for the righthand input
 * vector to the shuffle_vector instruction
 * \return true iff this shuffle vector represents an even or odd word merge
 */
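// For example, a big-endian vmrgew with two different inputs corresponds to
// isVMerge(N, 0, 16), i.e. the byte mask
//   <0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27>
// (the even words of the two inputs, interleaved).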
static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
                     unsigned RHSStartValue) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;

  for (unsigned i = 0; i < 2; ++i)
    for (unsigned j = 0; j < 4; ++j)
      if (!isConstantOrUndef(N->getMaskElt(i*4+j),
                             i*RHSStartValue+j+IndexOffset) ||
          !isConstantOrUndef(N->getMaskElt(i*4+j+8),
                             i*RHSStartValue+j+IndexOffset+8))
        return false;
  return true;
}

/**
 * Determine if the specified shuffle mask is suitable for the vmrgew or
 * vmrgow instructions.
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
 * \param[in] ShuffleKind Identify the type of merge:
 *   - 0 = big-endian merge with two different inputs;
 *   - 1 = either-endian merge with two identical inputs;
 *   - 2 = little-endian merge with two different inputs (inputs are swapped for
 *     little-endian merges).
 * \param[in] DAG The current SelectionDAG
 * \return true iff this shuffle mask is suitable for a vmrgew or vmrgow
 * instruction
 */
bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                              unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    unsigned indexOffset = CheckEven ? 4 : 0;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  }
  else {
    unsigned indexOffset = CheckEven ? 0 : 4;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 0) // Normal
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  }
  return false;
}

/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
/// The ShuffleKind distinguishes between big-endian operations with two
/// different inputs (0), either-endian operations with two identical inputs
/// (1), and little-endian operations with two different inputs (2).  For the
/// latter, the input operands are swapped (see PPCInstrAltivec.td).
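/// For example, on a big-endian target with two different inputs
/// (ShuffleKind 0), the consecutive mask <3, 4, ..., 18> is a vsldoi by 3
/// bytes and this returns 3; on little-endian targets the returned amount is
/// 16 minus the raw shift.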
int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
                             SelectionDAG &DAG) {
  if (N->getValueType(0) != MVT::v16i8)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 16) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;

  ShiftAmt -= i;
  bool isLE = DAG.getDataLayout().isLittleEndian();

  if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
        return -1;
  } else if (ShuffleKind == 1) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
        return -1;
  } else
    return -1;

  if (isLE)
    ShiftAmt = 16 - ShiftAmt;

  return ShiftAmt;
}

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// one of the splat operations (VSPLTB/VSPLTH/VSPLTW/XXSPLTW/LXVDSX/etc.).
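/// For example, with EltSize == 4, a splat of word element 2 appears as the
/// byte mask <8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11>.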
bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
  assert(N->getValueType(0) == MVT::v16i8 && isPowerOf2_32(EltSize) &&
         EltSize <= 8 && "Can only handle 1,2,4,8 byte element sizes");

  // The consecutive indices need to specify an element, not part of two
  // different elements.  So abandon ship early if this isn't the case.
  if (N->getMaskElt(0) % EltSize != 0)
    return false;

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = N->getMaskElt(0);

  // FIXME: Handle UNDEF elements too!
  if (ElementBase >= 16)
    return false;

  // Check that the indices are consecutive, in the case of a multi-byte element
  // splatted with a v16i8 mask.
  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
      return false;

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
        return false;
  }
  return true;
}

/// Check that the mask is shuffling N byte elements. Within each N byte
/// element of the mask, the indices could be either in increasing or
/// decreasing order as long as they are consecutive.
/// \param[in] N the shuffle vector SD Node to analyze
/// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/
/// Word/DoubleWord/QuadWord).
/// \param[in] StepLen the index delta between adjacent entries within each
/// element: 1 if the mask is in increasing order, -1 if decreasing.
/// \return true iff the mask is shuffling N byte elements.
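/// For example, with Width == 2 and StepLen == -1, the byte-reversed-halfword
/// mask <1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14> (as matched by
/// isXXBRHShuffleMask below) is accepted.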
static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
                                   int StepLen) {
  assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
         "Unexpected element width.");
  assert((StepLen == 1 || StepLen == -1) && "Unexpected step length.");

  unsigned NumOfElem = 16 / Width;
  unsigned MaskVal[16]; //  Width is never greater than 16
  for (unsigned i = 0; i < NumOfElem; ++i) {
    MaskVal[0] = N->getMaskElt(i * Width);
    if ((StepLen == 1) && (MaskVal[0] % Width)) {
      return false;
    } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
      return false;
    }

    for (unsigned int j = 1; j < Width; ++j) {
      MaskVal[j] = N->getMaskElt(i * Width + j);
      if (MaskVal[j] != MaskVal[j-1] + StepLen) {
        return false;
      }
    }
  }

  return true;
}

bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                          unsigned &InsertAtByte, bool &Swap, bool IsLE) {
  if (!isNByteElemShuffleMask(N, 4, 1))
    return false;

  // Now we look at mask elements 0,4,8,12
  unsigned M0 = N->getMaskElt(0) / 4;
  unsigned M1 = N->getMaskElt(4) / 4;
  unsigned M2 = N->getMaskElt(8) / 4;
  unsigned M3 = N->getMaskElt(12) / 4;
  unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
  unsigned BigEndianShifts[] = { 3, 0, 1, 2 };

  // Below, let H and L be arbitrary elements of the shuffle mask
  // where H is in the range [4,7] and L is in the range [0,3].
  // H, 1, 2, 3 or L, 5, 6, 7
  if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
      (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
    InsertAtByte = IsLE ? 12 : 0;
    Swap = M0 < 4;
    return true;
  }
  // 0, H, 2, 3 or 4, L, 6, 7
  if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
      (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
    InsertAtByte = IsLE ? 8 : 4;
    Swap = M1 < 4;
    return true;
  }
  // 0, 1, H, 3 or 4, 5, L, 7
  if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
      (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
    InsertAtByte = IsLE ? 4 : 8;
    Swap = M2 < 4;
    return true;
  }
  // 0, 1, 2, H or 4, 5, 6, L
  if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
      (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
    ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
    InsertAtByte = IsLE ? 0 : 12;
    Swap = M3 < 4;
    return true;
  }

  // If both vector operands for the shuffle are the same vector, the mask will
  // contain only elements from the first one and the second one will be undef.
  if (N->getOperand(1).isUndef()) {
    ShiftElts = 0;
    Swap = true;
    unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
    if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 12 : 0;
      return true;
    }
    if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 8 : 4;
      return true;
    }
    if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
      InsertAtByte = IsLE ? 4 : 8;
      return true;
    }
    if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
      InsertAtByte = IsLE ? 0 : 12;
      return true;
    }
  }

  return false;
}

bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                               bool &Swap, bool IsLE) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
  // Ensure each byte index of the word is consecutive.
  if (!isNByteElemShuffleMask(N, 4, 1))
    return false;

  // Now we look at mask elements 0,4,8,12, which are the beginning of words.
  unsigned M0 = N->getMaskElt(0) / 4;
  unsigned M1 = N->getMaskElt(4) / 4;
  unsigned M2 = N->getMaskElt(8) / 4;
  unsigned M3 = N->getMaskElt(12) / 4;

  // If both vector operands for the shuffle are the same vector, the mask will
  // contain only elements from the first one and the second one will be undef.
  if (N->getOperand(1).isUndef()) {
    assert(M0 < 4 && "Indexing into an undef vector?");
    if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
      return false;

    ShiftElts = IsLE ? (4 - M0) % 4 : M0;
    Swap = false;
    return true;
  }

  // Ensure each word index of the ShuffleVector Mask is consecutive.
  if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)
    return false;

  if (IsLE) {
    if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
      // Input vectors don't need to be swapped if the leading element
      // of the result is one of the 3 left elements of the second vector
      // (or if there is no shift to be done at all).
      Swap = false;
      ShiftElts = (8 - M0) % 8;
    } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
      // Input vectors need to be swapped if the leading element
      // of the result is one of the 3 left elements of the first vector
      // (or if we're shifting by 4 - thereby simply swapping the vectors).
      Swap = true;
      ShiftElts = (4 - M0) % 4;
    }

    return true;
  } else {                                          // BE
    if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
      // Input vectors don't need to be swapped if the leading element
      // of the result is one of the 4 elements of the first vector.
      Swap = false;
      ShiftElts = M0;
    } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
      // Input vectors need to be swapped if the leading element
      // of the result is one of the 4 elements of the right vector.
      Swap = true;
      ShiftElts = M0 - 4;
    }

    return true;
  }
}

static bool isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  if (!isNByteElemShuffleMask(N, Width, -1))
    return false;

  for (int i = 0; i < 16; i += Width)
    if (N->getMaskElt(i) != i + Width - 1)
      return false;

  return true;
}

bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 2);
}

bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 4);
}

bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 8);
}

bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 16);
}

/// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap
/// if the inputs to the instruction should be swapped and set \p DM to the
/// value for the immediate.
/// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI
/// AND element 0 of the result comes from the first input (LE) or second input
/// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered.
/// \return true iff the given mask of shuffle node \p N is a XXPERMDI shuffle
/// mask.
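/// For example, on a big-endian target the byte mask
/// <0, 1, ..., 7, 24, 25, ..., 31> (doubleword 0 of the first input followed
/// by doubleword 1 of the second) gives M0 = 0 and M1 = 3, so Swap is set to
/// false and DM to 0b01 = 1.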
bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
                                bool &Swap, bool IsLE) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  // Ensure each byte index of the double word is consecutive.
  if (!isNByteElemShuffleMask(N, 8, 1))
    return false;

  unsigned M0 = N->getMaskElt(0) / 8;
  unsigned M1 = N->getMaskElt(8) / 8;
  assert(((M0 | M1) < 4) && "A mask element out of bounds?");

  // If both vector operands for the shuffle are the same vector, the mask will
  // contain only elements from the first one and the second one will be undef.
  if (N->getOperand(1).isUndef()) {
    if ((M0 | M1) < 2) {
      DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
      Swap = false;
      return true;
    } else
      return false;
  }

  if (IsLE) {
    if (M0 > 1 && M1 < 2) {
      Swap = false;
    } else if (M0 < 2 && M1 > 1) {
      M0 = (M0 + 2) % 4;
      M1 = (M1 + 2) % 4;
      Swap = true;
    } else
      return false;

    // Note: if control flow comes here that means Swap is already set above
    DM = (((~M1) & 1) << 1) + ((~M0) & 1);
    return true;
  } else { // BE
    if (M0 < 2 && M1 > 1) {
      Swap = false;
    } else if (M0 > 1 && M1 < 2) {
      M0 = (M0 + 2) % 4;
      M1 = (M1 + 2) % 4;
      Swap = true;
    } else
      return false;

    // Note: if control flow comes here that means Swap is already set above
    DM = (M0 << 1) + (M1 & 1);
    return true;
  }
}

/// getSplatIdxForPPCMnemonics - Return the splat index as a value that is
/// appropriate for PPC mnemonics (which have a big endian bias - namely
/// elements are counted from the left of the vector register).
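/// For example, for a v4i32 splat whose v16i8 mask starts at byte 8 (word
/// element 2), this returns 2 on big-endian targets and 4 - 1 - 2 = 1 on
/// little-endian targets.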
unsigned PPC::getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
                                         SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  assert(isSplatShuffleMask(SVOp, EltSize));
  if (DAG.getDataLayout().isLittleEndian())
    return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
  else
    return SVOp->getMaskElt(0) / EltSize;
}

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted.  The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
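/// For example, a v8i16 build_vector of eight 3's queried with ByteSize == 2
/// yields the constant 3, suitable for "vspltish 3".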
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDValue OpVal(nullptr, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
    SDValue UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).isUndef()) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();

      if (!UniquedVals[i&(Multiple-1)].getNode())
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDValue();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk.  See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1.  If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (!UniquedVals[i].getNode()) continue;  // Must have been undefs.

      LeadingZero &= isNullConstant(UniquedVals[i]);
      LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(0, SDLoc(N), MVT::i32);  // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
      if (Val < 16)                                   // 0,0,0,4 -> vspltisw(4)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }
    if (LeadingOnes) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
      if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }

    return SDValue();
  }

  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).isUndef()) continue;
    if (!OpVal.getNode())
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDValue();
  }

  if (!OpVal.getNode()) return SDValue();  // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = EltSize;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getZExtValue();
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat.  The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDValue();

  // If the element value is larger than the splat value, check if it consists
  // of a repeated bit pattern of size ByteSize.
  if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
    return SDValue();

  // Properly sign extend the value.
  int MaskVal = SignExtend32(Value, ByteSize * 8);

  // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDValue();

  // Finally, if this value fits in a 5 bit sext field, return it
  if (SignExtend32<5>(MaskVal) == MaskVal)
    return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
  return SDValue();
}

/// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift
/// amount, otherwise return -1.
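/// For example, the v4f64 mask <2, 3, 4, 5> selects consecutive elements
/// starting at index 2 of the concatenated inputs, so this returns a shift
/// amount of 2.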
int PPC::isQVALIGNIShuffleMask(SDNode *N) {
  EVT VT = N->getValueType(0);
  if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 4) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  // Check the rest of the elements to see if they are consecutive.
  for (++i; i != 4; ++i)
    if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
      return -1;

  return ShiftAmt;
}

//===----------------------------------------------------------------------===//
//  Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value.  If so, this returns true and sets Imm
/// to the immediate.
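/// For example, the constant 32000 fits and yields Imm == 32000, while 40000
/// truncates to a different 16-bit value and is rejected.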
bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
  if (!isa<ConstantSDNode>(N))
    return false;

  Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}
bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
  return isIntS16Immediate(Op.getNode(), Imm);
}

/// SelectAddressEVXRegReg - Given the specified address, check to see if it can
/// be represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base,
                                               SDValue &Index,
                                               SelectionDAG &DAG) const {
  for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
      UI != E; ++UI) {
    if (MemSDNode *Memop = dyn_cast<MemSDNode>(*UI)) {
      if (Memop->getMemoryVT() == MVT::f64) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }
  return false;
}

/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation.  Returns false if it
/// can be more efficiently represented as [r+imm]. If \p EncodingAlignment is
/// non-zero and N can be represented by a base register plus a signed 16-bit
/// displacement, make a more precise judgement by checking (displacement % \p
/// EncodingAlignment).
2441 bool PPCTargetLowering::SelectAddressRegReg(
2442     SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG,
2443     MaybeAlign EncodingAlignment) const {
2444   // If we have a PC Relative target flag don't select as [reg+reg]. It will be
2445   // a [pc+imm].
2446   if (SelectAddressPCRel(N, Base))
2447     return false;
2448 
2449   int16_t Imm = 0;
2450   if (N.getOpcode() == ISD::ADD) {
    // SPE f64 loads/stores cannot encode a 16-bit offset (they only support
    // 8-bit offsets), so prefer the EVX [r+r] form for them.
    if (hasSPE() && SelectAddressEVXRegReg(N, Base, Index, DAG))
      return true;
2455     if (isIntS16Immediate(N.getOperand(1), Imm) &&
2456         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
2457       return false; // r+i
2458     if (N.getOperand(1).getOpcode() == PPCISD::Lo)
2459       return false;    // r+i
2460 
2461     Base = N.getOperand(0);
2462     Index = N.getOperand(1);
2463     return true;
2464   } else if (N.getOpcode() == ISD::OR) {
2465     if (isIntS16Immediate(N.getOperand(1), Imm) &&
2466         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
      return false; // r+i; fold the immediate if we can.
2468 
2469     // If this is an or of disjoint bitfields, we can codegen this as an add
2470     // (for better address arithmetic) if the LHS and RHS of the OR are provably
2471     // disjoint.
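    // For example, in (or (shl X, 4), 3) every bit is known zero in at least
    // one operand, so the OR can safely be treated as an ADD.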
2472     KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2473 
2474     if (LHSKnown.Zero.getBoolValue()) {
2475       KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
2476       // If all of the bits are known zero on the LHS or RHS, the add won't
2477       // carry.
2478       if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
2479         Base = N.getOperand(0);
2480         Index = N.getOperand(1);
2481         return true;
2482       }
2483     }
2484   }
2485 
2486   return false;
2487 }
2488 
2489 // If we happen to be doing an i64 load or store into a stack slot that has
2490 // less than a 4-byte alignment, then the frame-index elimination may need to
2491 // use an indexed load or store instruction (because the offset may not be a
2492 // multiple of 4). The extra register needed to hold the offset comes from the
2493 // register scavenger, and it is possible that the scavenger will need to use
2494 // an emergency spill slot. As a result, we need to make sure that a spill slot
2495 // is allocated when doing an i64 load/store into a less-than-4-byte-aligned
2496 // stack slot.
2497 static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
2498   // FIXME: This does not handle the LWA case.
2499   if (VT != MVT::i64)
2500     return;
2501 
2502   // NOTE: We'll exclude negative FIs here, which come from argument
2503   // lowering, because there are no known test cases triggering this problem
2504   // using packed structures (or similar). We can remove this exclusion if
  // we find such a test case. The reason this is so test-case driven is that
  // this entire 'fixup' exists only to prevent crashes (from the
2507   // register scavenger) on not-really-valid inputs. For example, if we have:
2508   //   %a = alloca i1
2509   //   %b = bitcast i1* %a to i64*
  //   store i64 %v, i64* %b
2511   // then the store should really be marked as 'align 1', but is not. If it
2512   // were marked as 'align 1' then the indexed form would have been
2513   // instruction-selected initially, and the problem this 'fixup' is preventing
2514   // won't happen regardless.
2515   if (FrameIdx < 0)
2516     return;
2517 
2518   MachineFunction &MF = DAG.getMachineFunction();
2519   MachineFrameInfo &MFI = MF.getFrameInfo();
2520 
2521   if (MFI.getObjectAlign(FrameIdx) >= Align(4))
2522     return;
2523 
2524   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2525   FuncInfo->setHasNonRISpills();
2526 }
2527 
2528 /// Returns true if the address N can be represented by a base register plus
2529 /// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg.  If \p EncodingAlignment is provided, only accept
/// displacements that are multiples of that value.
2532 bool PPCTargetLowering::SelectAddressRegImm(
2533     SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG,
2534     MaybeAlign EncodingAlignment) const {
2535   // FIXME dl should come from parent load or store, not from address
2536   SDLoc dl(N);
2537 
2538   // If we have a PC Relative target flag don't select as [reg+imm]. It will be
2539   // a [pc+imm].
2540   if (SelectAddressPCRel(N, Base))
2541     return false;
2542 
2543   // If this can be more profitably realized as r+r, fail.
2544   if (SelectAddressRegReg(N, Disp, Base, DAG, EncodingAlignment))
2545     return false;
2546 
2547   if (N.getOpcode() == ISD::ADD) {
2548     int16_t imm = 0;
2549     if (isIntS16Immediate(N.getOperand(1), imm) &&
2550         (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
2551       Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2552       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2553         Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2554         fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2555       } else {
2556         Base = N.getOperand(0);
2557       }
2558       return true; // [r+i]
2559     } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
2560       // Match LOAD (ADD (X, Lo(G))).
2561       assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
2562              && "Cannot handle constant offsets yet!");
2563       Disp = N.getOperand(1).getOperand(0);  // The global address.
2564       assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
2565              Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
2566              Disp.getOpcode() == ISD::TargetConstantPool ||
2567              Disp.getOpcode() == ISD::TargetJumpTable);
2568       Base = N.getOperand(0);
2569       return true;  // [&g+r]
2570     }
2571   } else if (N.getOpcode() == ISD::OR) {
2572     int16_t imm = 0;
2573     if (isIntS16Immediate(N.getOperand(1), imm) &&
2574         (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
2575       // If this is an or of disjoint bitfields, we can codegen this as an add
2576       // (for better address arithmetic) if the LHS and RHS of the OR are
2577       // provably disjoint.
2578       KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2579 
      if ((LHSKnown.Zero.getZExtValue() | ~(uint64_t)imm) == ~0ULL) {
2581         // If all of the bits are known zero on the LHS or RHS, the add won't
2582         // carry.
2583         if (FrameIndexSDNode *FI =
2584               dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2585           Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2586           fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2587         } else {
2588           Base = N.getOperand(0);
2589         }
2590         Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2591         return true;
2592       }
2593     }
2594   } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
2595     // Loading from a constant address.
2596 
2597     // If this address fits entirely in a 16-bit sext immediate field, codegen
2598     // this as "d, 0"
2599     int16_t Imm;
2600     if (isIntS16Immediate(CN, Imm) &&
2601         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm))) {
2602       Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
2603       Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2604                              CN->getValueType(0));
2605       return true;
2606     }
2607 
2608     // Handle 32-bit sext immediates with LIS + addr mode.
2609     if ((CN->getValueType(0) == MVT::i32 ||
2610          (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
2611         (!EncodingAlignment ||
2612          isAligned(*EncodingAlignment, CN->getZExtValue()))) {
2613       int Addr = (int)CN->getZExtValue();
2614 
2615       // Otherwise, break this down into an LIS + disp.
2616       Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);
2617 
2618       Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
2619                                    MVT::i32);
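      // For example, Addr == 0x12348000 yields Disp == -32768 and an LIS
      // immediate of 0x1235: subtracting the sign-extended displacement
      // before shifting compensates for the borrow it introduces.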
2620       unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
2621       Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
2622       return true;
2623     }
2624   }
2625 
2626   Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
2627   if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
2628     Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2629     fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2630   } else
2631     Base = N;
2632   return true;      // [r+0]
2633 }
2634 
/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
2637 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
2638                                                 SDValue &Index,
2639                                                 SelectionDAG &DAG) const {
2640   // Check to see if we can easily represent this as an [r+r] address.  This
2641   // will fail if it thinks that the address is more profitably represented as
2642   // reg+imm, e.g. where imm = 0.
2643   if (SelectAddressRegReg(N, Base, Index, DAG))
2644     return true;
2645 
  // If the address is the result of an add, we will utilize the fact that the
  // address calculation includes an implicit add.  However, we can reduce
  // register pressure if we do not materialize a constant just for use as the
  // index register.  We only fold away the add when it is not an add of a
  // value and a 16-bit signed constant where both operands have a single use.
2651   int16_t imm = 0;
2652   if (N.getOpcode() == ISD::ADD &&
2653       (!isIntS16Immediate(N.getOperand(1), imm) ||
2654        !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
2655     Base = N.getOperand(0);
2656     Index = N.getOperand(1);
2657     return true;
2658   }
2659 
2660   // Otherwise, do it the hard way, using R0 as the base register.
2661   Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2662                          N.getValueType());
2663   Index = N;
2664   return true;
2665 }
2666 
2667 template <typename Ty> static bool isValidPCRelNode(SDValue N) {
2668   Ty *PCRelCand = dyn_cast<Ty>(N);
2669   return PCRelCand && (PCRelCand->getTargetFlags() & PPCII::MO_PCREL_FLAG);
2670 }
2671 
2672 /// Returns true if this address is a PC Relative address.
2673 /// PC Relative addresses are marked with the flag PPCII::MO_PCREL_FLAG
2674 /// or if the node opcode is PPCISD::MAT_PCREL_ADDR.
2675 bool PPCTargetLowering::SelectAddressPCRel(SDValue N, SDValue &Base) const {
2676   // This is a materialize PC Relative node. Always select this as PC Relative.
2677   Base = N;
2678   if (N.getOpcode() == PPCISD::MAT_PCREL_ADDR)
2679     return true;
2680   if (isValidPCRelNode<ConstantPoolSDNode>(N) ||
2681       isValidPCRelNode<GlobalAddressSDNode>(N) ||
2682       isValidPCRelNode<JumpTableSDNode>(N) ||
2683       isValidPCRelNode<BlockAddressSDNode>(N))
2684     return true;
2685   return false;
2686 }
2687 
/// Returns true if we should use a direct load-into-vector instruction
/// (such as lxsd or lfd) instead of a load-into-GPR plus direct-move
/// sequence.
static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget &ST) {
2691 
  // If the loaded value has any uses other than scalar_to_vector nodes, keep
  // it as a scalar load -> direct move pattern to prevent multiple loads.
2695   LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
2696   if (!LD)
2697     return false;
2698 
2699   EVT MemVT = LD->getMemoryVT();
2700   if (!MemVT.isSimple())
2701     return false;
2702   switch(MemVT.getSimpleVT().SimpleTy) {
2703   case MVT::i64:
2704     break;
2705   case MVT::i32:
2706     if (!ST.hasP8Vector())
2707       return false;
2708     break;
2709   case MVT::i16:
2710   case MVT::i8:
2711     if (!ST.hasP9Vector())
2712       return false;
2713     break;
2714   default:
2715     return false;
2716   }
2717 
2718   SDValue LoadedVal(N, 0);
2719   if (!LoadedVal.hasOneUse())
2720     return false;
2721 
2722   for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end();
2723        UI != UE; ++UI)
2724     if (UI.getUse().get().getResNo() == 0 &&
2725         UI->getOpcode() != ISD::SCALAR_TO_VECTOR &&
2726         UI->getOpcode() != PPCISD::SCALAR_TO_VECTOR_PERMUTED)
2727       return false;
2728 
2729   return true;
2730 }
2731 
/// getPreIndexedAddressParts - Returns true, and sets the base pointer,
/// offset pointer, and addressing mode by reference, if the node's address
/// can be legally represented as a pre-indexed load / store address.
2735 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
2736                                                   SDValue &Offset,
2737                                                   ISD::MemIndexedMode &AM,
2738                                                   SelectionDAG &DAG) const {
2739   if (DisablePPCPreinc) return false;
2740 
2741   bool isLoad = true;
2742   SDValue Ptr;
2743   EVT VT;
2744   unsigned Alignment;
2745   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2746     Ptr = LD->getBasePtr();
2747     VT = LD->getMemoryVT();
2748     Alignment = LD->getAlignment();
2749   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
2750     Ptr = ST->getBasePtr();
2751     VT  = ST->getMemoryVT();
2752     Alignment = ST->getAlignment();
2753     isLoad = false;
2754   } else
2755     return false;
2756 
  // Do not generate pre-inc forms for specific loads that feed
  // scalar_to_vector instructions, because we can fold these into a more
  // efficient instruction (such as LXSD) instead.
2760   if (isLoad && usePartialVectorLoads(N, Subtarget)) {
2761     return false;
2762   }
2763 
2764   // PowerPC doesn't have preinc load/store instructions for vectors (except
2765   // for QPX, which does have preinc r+r forms).
2766   if (VT.isVector()) {
2767     if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) {
2768       return false;
2769     } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) {
2770       AM = ISD::PRE_INC;
2771       return true;
2772     }
2773   }
2774 
2775   if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
2776     // Common code will reject creating a pre-inc form if the base pointer
2777     // is a frame index, or if N is a store and the base pointer is either
2778     // the same as or a predecessor of the value being stored.  Check for
2779     // those situations here, and try with swapped Base/Offset instead.
2780     bool Swap = false;
2781 
2782     if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
2783       Swap = true;
2784     else if (!isLoad) {
2785       SDValue Val = cast<StoreSDNode>(N)->getValue();
2786       if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
2787         Swap = true;
2788     }
2789 
2790     if (Swap)
2791       std::swap(Base, Offset);
2792 
2793     AM = ISD::PRE_INC;
2794     return true;
2795   }
2796 
  // The DS-form LDU/STDU used for i64 can only encode immediates that are a
  // multiple of 4; other types can take any signed 16-bit displacement.
2798   if (VT != MVT::i64) {
2799     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, None))
2800       return false;
2801   } else {
2802     // LDU/STU need an address with at least 4-byte alignment.
2803     if (Alignment < 4)
2804       return false;
2805 
2806     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, Align(4)))
2807       return false;
2808   }
2809 
2810   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2811     // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
2812     // sext i32 to i64 when addr mode is r+i.
2813     if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
2814         LD->getExtensionType() == ISD::SEXTLOAD &&
2815         isa<ConstantSDNode>(Offset))
2816       return false;
2817   }
2818 
2819   AM = ISD::PRE_INC;
2820   return true;
2821 }
2822 
2823 //===----------------------------------------------------------------------===//
2824 //  LowerOperation implementation
2825 //===----------------------------------------------------------------------===//
2826 
/// Set the HiOpFlags and LoOpFlags to the target MO flags for a label
/// reference, adding the PIC flag when compiling position-independent code.
2829 static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
2830                                unsigned &HiOpFlags, unsigned &LoOpFlags,
2831                                const GlobalValue *GV = nullptr) {
2832   HiOpFlags = PPCII::MO_HA;
2833   LoOpFlags = PPCII::MO_LO;
2834 
2835   // Don't use the pic base if not in PIC relocation model.
2836   if (IsPIC) {
2837     HiOpFlags |= PPCII::MO_PIC_FLAG;
2838     LoOpFlags |= PPCII::MO_PIC_FLAG;
2839   }
2840 }
2841 
2842 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
2843                              SelectionDAG &DAG) {
2844   SDLoc DL(HiPart);
2845   EVT PtrVT = HiPart.getValueType();
2846   SDValue Zero = DAG.getConstant(0, DL, PtrVT);
2847 
2848   SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
2849   SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);
2850 
2851   // With PIC, the first instruction is actually "GR+hi(&G)".
2852   if (isPIC)
2853     Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
2854                      DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);
2855 
2856   // Generate non-pic code that has direct accesses to the constant pool.
2857   // The address of the global is just (hi(&g)+lo(&g)).
2858   return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
2859 }
2860 
2861 static void setUsesTOCBasePtr(MachineFunction &MF) {
2862   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2863   FuncInfo->setUsesTOCBasePtr();
2864 }
2865 
2866 static void setUsesTOCBasePtr(SelectionDAG &DAG) {
2867   setUsesTOCBasePtr(DAG.getMachineFunction());
2868 }
2869 
2870 SDValue PPCTargetLowering::getTOCEntry(SelectionDAG &DAG, const SDLoc &dl,
2871                                        SDValue GA) const {
2872   const bool Is64Bit = Subtarget.isPPC64();
2873   EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
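  // The TOC base pointer is X2 on 64-bit targets and R2 on 32-bit AIX; the
  // 32-bit ELF PIC path materializes the global base register instead.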
2874   SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT)
2875                         : Subtarget.isAIXABI()
2876                               ? DAG.getRegister(PPC::R2, VT)
2877                               : DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);
2878   SDValue Ops[] = { GA, Reg };
2879   return DAG.getMemIntrinsicNode(
2880       PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
2881       MachinePointerInfo::getGOT(DAG.getMachineFunction()), None,
2882       MachineMemOperand::MOLoad);
2883 }
2884 
2885 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
2886                                              SelectionDAG &DAG) const {
2887   EVT PtrVT = Op.getValueType();
2888   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
2889   const Constant *C = CP->getConstVal();
2890 
2891   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
2892   // The actual address of the GlobalValue is stored in the TOC.
2893   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
2894     if (Subtarget.isUsingPCRelativeCalls()) {
2895       SDLoc DL(CP);
2896       EVT Ty = getPointerTy(DAG.getDataLayout());
2897       SDValue ConstPool = DAG.getTargetConstantPool(
2898           C, Ty, CP->getAlign(), CP->getOffset(), PPCII::MO_PCREL_FLAG);
2899       return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, ConstPool);
2900     }
2901     setUsesTOCBasePtr(DAG);
2902     SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0);
2903     return getTOCEntry(DAG, SDLoc(CP), GA);
2904   }
2905 
2906   unsigned MOHiFlag, MOLoFlag;
2907   bool IsPIC = isPositionIndependent();
2908   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2909 
2910   if (IsPIC && Subtarget.isSVR4ABI()) {
2911     SDValue GA =
2912         DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), PPCII::MO_PIC_FLAG);
2913     return getTOCEntry(DAG, SDLoc(CP), GA);
2914   }
2915 
2916   SDValue CPIHi =
2917       DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOHiFlag);
2918   SDValue CPILo =
2919       DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOLoFlag);
2920   return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG);
2921 }
2922 
2923 // For 64-bit PowerPC, prefer the more compact relative encodings.
2924 // This trades 32 bits per jump table entry for one or two instructions
// at the jump site.
2926 unsigned PPCTargetLowering::getJumpTableEncoding() const {
2927   if (isJumpTableRelative())
2928     return MachineJumpTableInfo::EK_LabelDifference32;
2929 
2930   return TargetLowering::getJumpTableEncoding();
2931 }
2932 
2933 bool PPCTargetLowering::isJumpTableRelative() const {
2934   if (UseAbsoluteJumpTables)
2935     return false;
2936   if (Subtarget.isPPC64() || Subtarget.isAIXABI())
2937     return true;
2938   return TargetLowering::isJumpTableRelative();
2939 }
2940 
2941 SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table,
2942                                                     SelectionDAG &DAG) const {
2943   if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
2944     return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
2945 
2946   switch (getTargetMachine().getCodeModel()) {
2947   case CodeModel::Small:
2948   case CodeModel::Medium:
2949     return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
2950   default:
2951     return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(),
2952                        getPointerTy(DAG.getDataLayout()));
2953   }
2954 }
2955 
2956 const MCExpr *
2957 PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
2958                                                 unsigned JTI,
2959                                                 MCContext &Ctx) const {
2960   if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
2961     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2962 
2963   switch (getTargetMachine().getCodeModel()) {
2964   case CodeModel::Small:
2965   case CodeModel::Medium:
2966     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2967   default:
2968     return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
2969   }
2970 }
2971 
2972 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
2973   EVT PtrVT = Op.getValueType();
2974   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
2975 
2976   // isUsingPCRelativeCalls() returns true when PCRelative is enabled
2977   if (Subtarget.isUsingPCRelativeCalls()) {
2978     SDLoc DL(JT);
2979     EVT Ty = getPointerTy(DAG.getDataLayout());
2980     SDValue GA =
2981         DAG.getTargetJumpTable(JT->getIndex(), Ty, PPCII::MO_PCREL_FLAG);
2982     SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
2983     return MatAddr;
2984   }
2985 
2986   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
2987   // The actual address of the GlobalValue is stored in the TOC.
2988   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
2989     setUsesTOCBasePtr(DAG);
2990     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
2991     return getTOCEntry(DAG, SDLoc(JT), GA);
2992   }
2993 
2994   unsigned MOHiFlag, MOLoFlag;
2995   bool IsPIC = isPositionIndependent();
2996   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2997 
2998   if (IsPIC && Subtarget.isSVR4ABI()) {
2999     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
3000                                         PPCII::MO_PIC_FLAG);
3001     return getTOCEntry(DAG, SDLoc(GA), GA);
3002   }
3003 
3004   SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
3005   SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
3006   return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG);
3007 }
3008 
3009 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
3010                                              SelectionDAG &DAG) const {
3011   EVT PtrVT = Op.getValueType();
3012   BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
3013   const BlockAddress *BA = BASDN->getBlockAddress();
3014 
3015   // isUsingPCRelativeCalls() returns true when PCRelative is enabled
3016   if (Subtarget.isUsingPCRelativeCalls()) {
3017     SDLoc DL(BASDN);
3018     EVT Ty = getPointerTy(DAG.getDataLayout());
3019     SDValue GA = DAG.getTargetBlockAddress(BA, Ty, BASDN->getOffset(),
3020                                            PPCII::MO_PCREL_FLAG);
3021     SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3022     return MatAddr;
3023   }
3024 
3025   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
3026   // The actual BlockAddress is stored in the TOC.
3027   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3028     setUsesTOCBasePtr(DAG);
3029     SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
3030     return getTOCEntry(DAG, SDLoc(BASDN), GA);
3031   }
3032 
3033   // 32-bit position-independent ELF stores the BlockAddress in the .got.
3034   if (Subtarget.is32BitELFABI() && isPositionIndependent())
3035     return getTOCEntry(
3036         DAG, SDLoc(BASDN),
3037         DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()));
3038 
3039   unsigned MOHiFlag, MOLoFlag;
3040   bool IsPIC = isPositionIndependent();
3041   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
3042   SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
3043   SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
3044   return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG);
3045 }
3046 
3047 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
3048                                               SelectionDAG &DAG) const {
3049   // FIXME: TLS addresses currently use medium model code sequences,
3050   // which is the most useful form.  Eventually support for small and
3051   // large models could be added if users need it, at the cost of
3052   // additional complexity.
3053   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
3054   if (DAG.getTarget().useEmulatedTLS())
3055     return LowerToTLSEmulatedModel(GA, DAG);
3056 
3057   SDLoc dl(GA);
3058   const GlobalValue *GV = GA->getGlobal();
3059   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3060   bool is64bit = Subtarget.isPPC64();
3061   const Module *M = DAG.getMachineFunction().getFunction().getParent();
3062   PICLevel::Level picLevel = M->getPICLevel();
3063 
3064   const TargetMachine &TM = getTargetMachine();
3065   TLSModel::Model Model = TM.getTLSModel(GV);
3066 
3067   if (Model == TLSModel::LocalExec) {
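    // The thread-pointer-relative offset is known at static link time, so it
    // is materialized directly against the thread pointer (X13 on 64-bit,
    // R2 on 32-bit).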
3068     SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3069                                                PPCII::MO_TPREL_HA);
3070     SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3071                                                PPCII::MO_TPREL_LO);
3072     SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64)
3073                              : DAG.getRegister(PPC::R2, MVT::i32);
3074 
3075     SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
3076     return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
3077   }
3078 
3079   if (Model == TLSModel::InitialExec) {
3080     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
3081     SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3082                                                 PPCII::MO_TLS);
3083     SDValue GOTPtr;
3084     if (is64bit) {
3085       setUsesTOCBasePtr(DAG);
3086       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3087       GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl,
3088                            PtrVT, GOTReg, TGA);
3089     } else {
3090       if (!TM.isPositionIndependent())
3091         GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT);
3092       else if (picLevel == PICLevel::SmallPIC)
3093         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3094       else
3095         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3096     }
3097     SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl,
3098                                    PtrVT, TGA, GOTPtr);
3099     return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
3100   }
3101 
3102   if (Model == TLSModel::GeneralDynamic) {
3103     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
3104     SDValue GOTPtr;
3105     if (is64bit) {
3106       setUsesTOCBasePtr(DAG);
3107       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
      GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
                           GOTReg, TGA);
3110     } else {
3111       if (picLevel == PICLevel::SmallPIC)
3112         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3113       else
3114         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3115     }
3116     return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT,
3117                        GOTPtr, TGA, TGA);
3118   }
3119 
3120   if (Model == TLSModel::LocalDynamic) {
3121     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
3122     SDValue GOTPtr;
3123     if (is64bit) {
3124       setUsesTOCBasePtr(DAG);
3125       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3126       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
3127                            GOTReg, TGA);
3128     } else {
3129       if (picLevel == PICLevel::SmallPIC)
3130         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3131       else
3132         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3133     }
3134     SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
3135                                   PtrVT, GOTPtr, TGA, TGA);
3136     SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
3137                                       PtrVT, TLSAddr, TGA);
3138     return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
3139   }
3140 
3141   llvm_unreachable("Unknown TLS model!");
3142 }
3143 
3144 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
3145                                               SelectionDAG &DAG) const {
3146   EVT PtrVT = Op.getValueType();
3147   GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
3148   SDLoc DL(GSDN);
3149   const GlobalValue *GV = GSDN->getGlobal();
3150 
3151   // 64-bit SVR4 ABI & AIX ABI code is always position-independent.
3152   // The actual address of the GlobalValue is stored in the TOC.
3153   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3154     if (Subtarget.isUsingPCRelativeCalls()) {
3155       EVT Ty = getPointerTy(DAG.getDataLayout());
3156       if (isAccessedAsGotIndirect(Op)) {
3157         SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
3158                                                 PPCII::MO_PCREL_FLAG |
3159                                                     PPCII::MO_GOT_FLAG);
3160         SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3161         SDValue Load = DAG.getLoad(MVT::i64, DL, DAG.getEntryNode(), MatPCRel,
3162                                    MachinePointerInfo());
3163         return Load;
3164       } else {
3165         SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
3166                                                 PPCII::MO_PCREL_FLAG);
3167         return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3168       }
3169     }
3170     setUsesTOCBasePtr(DAG);
3171     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
3172     return getTOCEntry(DAG, DL, GA);
3173   }
3174 
3175   unsigned MOHiFlag, MOLoFlag;
3176   bool IsPIC = isPositionIndependent();
3177   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV);
3178 
3179   if (IsPIC && Subtarget.isSVR4ABI()) {
3180     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
3181                                             GSDN->getOffset(),
3182                                             PPCII::MO_PIC_FLAG);
3183     return getTOCEntry(DAG, DL, GA);
3184   }
3185 
3186   SDValue GAHi =
3187     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
3188   SDValue GALo =
3189     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);
3190 
3191   return LowerLabelRef(GAHi, GALo, IsPIC, DAG);
3192 }
3193 
3194 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
3195   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
3196   SDLoc dl(Op);
3197 
3198   if (Op.getValueType() == MVT::v2i64) {
3199     // When the operands themselves are v2i64 values, we need to do something
3200     // special because VSX has no underlying comparison operations for these.
3201     if (Op.getOperand(0).getValueType() == MVT::v2i64) {
3202       // Equality can be handled by casting to the legal type for Altivec
3203       // comparisons, everything else needs to be expanded.
3204       if (CC == ISD::SETEQ || CC == ISD::SETNE) {
3205         return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
3206                  DAG.getSetCC(dl, MVT::v4i32,
3207                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)),
3208                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)),
3209                    CC));
3210       }
3211 
3212       return SDValue();
3213     }
3214 
3215     // We handle most of these in the usual way.
3216     return Op;
3217   }
3218 
3219   // If we're comparing for equality to zero, expose the fact that this is
3220   // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
3221   // fold the new nodes.
3222   if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG))
3223     return V;
3224 
3225   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
3226     // Leave comparisons against 0 and -1 alone for now, since they're usually
3227     // optimized.  FIXME: revisit this when we can custom lower all setcc
3228     // optimizations.
3229     if (C->isAllOnesValue() || C->isNullValue())
3230       return SDValue();
3231   }
3232 
3233   // If we have an integer seteq/setne, turn it into a compare against zero
3234   // by xor'ing the rhs with the lhs, which is faster than setting a
3235   // condition register, reading it back out, and masking the correct bit.  The
3236   // normal approach here uses sub to do this instead of xor.  Using xor exposes
3237   // the result to other bit-twiddling opportunities.
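  // For example, (seteq %x, %y) becomes (seteq (xor %x, %y), 0).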
3238   EVT LHSVT = Op.getOperand(0).getValueType();
3239   if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
3240     EVT VT = Op.getValueType();
3241     SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
3242                                 Op.getOperand(1));
3243     return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
3244   }
3245   return SDValue();
3246 }
3247 
3248 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
3249   SDNode *Node = Op.getNode();
3250   EVT VT = Node->getValueType(0);
3251   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3252   SDValue InChain = Node->getOperand(0);
3253   SDValue VAListPtr = Node->getOperand(1);
3254   const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
3255   SDLoc dl(Node);
3256 
3257   assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
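  // 32-bit SVR4 va_list layout (mirrored in LowerVASTART): byte 0 holds the
  // gpr index, byte 1 the fpr index, offset 4 the overflow_arg_area pointer,
  // and offset 8 the reg_save_area pointer.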
3258 
3259   // gpr_index
3260   SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3261                                     VAListPtr, MachinePointerInfo(SV), MVT::i8);
3262   InChain = GprIndex.getValue(1);
3263 
3264   if (VT == MVT::i64) {
3265     // Check if GprIndex is even
3266     SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
3267                                  DAG.getConstant(1, dl, MVT::i32));
3268     SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
3269                                 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
3270     SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
3271                                           DAG.getConstant(1, dl, MVT::i32));
3272     // Align GprIndex to be even if it isn't
3273     GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
3274                            GprIndex);
3275   }
3276 
3277   // fpr index is 1 byte after gpr
3278   SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3279                                DAG.getConstant(1, dl, MVT::i32));
3280 
3281   // fpr
3282   SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3283                                     FprPtr, MachinePointerInfo(SV), MVT::i8);
3284   InChain = FprIndex.getValue(1);
3285 
3286   SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3287                                        DAG.getConstant(8, dl, MVT::i32));
3288 
3289   SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3290                                         DAG.getConstant(4, dl, MVT::i32));
3291 
3292   // areas
3293   SDValue OverflowArea =
3294       DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
3295   InChain = OverflowArea.getValue(1);
3296 
3297   SDValue RegSaveArea =
3298       DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
3299   InChain = RegSaveArea.getValue(1);
3300 
  // select overflow_area if index >= 8
3302   SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
3303                             DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);
3304 
3305   // adjustment constant gpr_index * 4/8
3306   SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
3307                                     VT.isInteger() ? GprIndex : FprIndex,
3308                                     DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
3309                                                     MVT::i32));
3310 
3311   // OurReg = RegSaveArea + RegConstant
3312   SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
3313                                RegConstant);
3314 
3315   // Floating types are 32 bytes into RegSaveArea
3316   if (VT.isFloatingPoint())
3317     OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
3318                          DAG.getConstant(32, dl, MVT::i32));
3319 
3320   // increase {f,g}pr_index by 1 (or 2 if VT is i64)
3321   SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3322                                    VT.isInteger() ? GprIndex : FprIndex,
3323                                    DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
3324                                                    MVT::i32));
3325 
3326   InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
3327                               VT.isInteger() ? VAListPtr : FprPtr,
3328                               MachinePointerInfo(SV), MVT::i8);
3329 
3330   // determine if we should load from reg_save_area or overflow_area
  SDValue Result =
      DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
3332 
  // increase overflow_area by 4/8 if gpr/fpr >= 8
3334   SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
3335                                           DAG.getConstant(VT.isInteger() ? 4 : 8,
3336                                           dl, MVT::i32));
3337 
3338   OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
3339                              OverflowAreaPlusN);
3340 
3341   InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
3342                               MachinePointerInfo(), MVT::i32);
3343 
3344   return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
3345 }
3346 
3347 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
3348   assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
3349 
  // We have to copy the entire va_list struct:
  // 2*sizeof(char) + 2 bytes of padding + 2*sizeof(char*) = 12 bytes
3352   return DAG.getMemcpy(Op.getOperand(0), Op, Op.getOperand(1), Op.getOperand(2),
3353                        DAG.getConstant(12, SDLoc(Op), MVT::i32), Align(8),
3354                        false, true, false, MachinePointerInfo(),
3355                        MachinePointerInfo());
3356 }
3357 
3358 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
3359                                                   SelectionDAG &DAG) const {
3360   if (Subtarget.isAIXABI())
3361     report_fatal_error("ADJUST_TRAMPOLINE operation is not supported on AIX.");
3362 
3363   return Op.getOperand(0);
3364 }
3365 
3366 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
3367                                                 SelectionDAG &DAG) const {
3368   if (Subtarget.isAIXABI())
3369     report_fatal_error("INIT_TRAMPOLINE operation is not supported on AIX.");
3370 
3371   SDValue Chain = Op.getOperand(0);
3372   SDValue Trmp = Op.getOperand(1); // trampoline
3373   SDValue FPtr = Op.getOperand(2); // nested function
3374   SDValue Nest = Op.getOperand(3); // 'nest' parameter value
3375   SDLoc dl(Op);
3376 
3377   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3378   bool isPPC64 = (PtrVT == MVT::i64);
3379   Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
3380 
3381   TargetLowering::ArgListTy Args;
3382   TargetLowering::ArgListEntry Entry;
3383 
3384   Entry.Ty = IntPtrTy;
3385   Entry.Node = Trmp; Args.push_back(Entry);
3386 
3387   // TrampSize == (isPPC64 ? 48 : 40);
3388   Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
3389                                isPPC64 ? MVT::i64 : MVT::i32);
3390   Args.push_back(Entry);
3391 
3392   Entry.Node = FPtr; Args.push_back(Entry);
3393   Entry.Node = Nest; Args.push_back(Entry);
3394 
3395   // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
3396   TargetLowering::CallLoweringInfo CLI(DAG);
3397   CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
3398       CallingConv::C, Type::getVoidTy(*DAG.getContext()),
3399       DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args));
3400 
3401   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
3402   return CallResult.second;
3403 }
3404 
3405 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3406   MachineFunction &MF = DAG.getMachineFunction();
3407   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3408   EVT PtrVT = getPointerTy(MF.getDataLayout());
3409 
3410   SDLoc dl(Op);
3411 
3412   if (Subtarget.isPPC64() || Subtarget.isAIXABI()) {
3413     // vastart just stores the address of the VarArgsFrameIndex slot into the
3414     // memory location argument.
3415     SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3416     const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3417     return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
3418                         MachinePointerInfo(SV));
3419   }
3420 
3421   // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
3422   // We suppose the given va_list is already allocated.
3423   //
3424   // typedef struct {
3425   //  char gpr;     /* index into the array of 8 GPRs
3426   //                 * stored in the register save area
3427   //                 * gpr=0 corresponds to r3,
3428   //                 * gpr=1 to r4, etc.
3429   //                 */
3430   //  char fpr;     /* index into the array of 8 FPRs
3431   //                 * stored in the register save area
3432   //                 * fpr=0 corresponds to f1,
3433   //                 * fpr=1 to f2, etc.
3434   //                 */
3435   //  char *overflow_arg_area;
3436   //                /* location on stack that holds
3437   //                 * the next overflow argument
3438   //                 */
3439   //  char *reg_save_area;
3440   //               /* where r3:r10 and f1:f8 (if saved)
3441   //                * are stored
3442   //                */
3443   // } va_list[1];
3444 
3445   SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
3446   SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);
3447   SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
3448                                             PtrVT);
3449   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3450                                  PtrVT);
3451 
3452   uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
3453   SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT);
3454 
3455   uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
3456   SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT);
3457 
3458   uint64_t FPROffset = 1;
3459   SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT);
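  // Taken together these constants advance nextOffset through bytes 1, 4,
  // and 8 of the va_list as the stores below are emitted.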
3460 
3461   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3462 
3463   // Store first byte : number of int regs
3464   SDValue firstStore =
3465       DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1),
3466                         MachinePointerInfo(SV), MVT::i8);
3467   uint64_t nextOffset = FPROffset;
3468   SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
3469                                   ConstFPROffset);
3470 
3471   // Store second byte : number of float regs
3472   SDValue secondStore =
3473       DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
3474                         MachinePointerInfo(SV, nextOffset), MVT::i8);
3475   nextOffset += StackOffset;
3476   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
3477 
3478   // Store second word : arguments given on stack
3479   SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
3480                                     MachinePointerInfo(SV, nextOffset));
3481   nextOffset += FrameOffset;
3482   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
3483 
3484   // Store third word : arguments given in registers
3485   return DAG.getStore(thirdStore, dl, FR, nextPtr,
3486                       MachinePointerInfo(SV, nextOffset));
3487 }
3488 
3489 /// FPR - The set of FP registers that should be allocated for arguments
3490 /// on Darwin and AIX.
3491 static const MCPhysReg FPR[] = {PPC::F1,  PPC::F2,  PPC::F3, PPC::F4, PPC::F5,
3492                                 PPC::F6,  PPC::F7,  PPC::F8, PPC::F9, PPC::F10,
3493                                 PPC::F11, PPC::F12, PPC::F13};
3494 
3495 /// QFPR - The set of QPX registers that should be allocated for arguments.
3496 static const MCPhysReg QFPR[] = {
3497     PPC::QF1, PPC::QF2, PPC::QF3,  PPC::QF4,  PPC::QF5,  PPC::QF6, PPC::QF7,
3498     PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13};
3499 
3500 /// CalculateStackSlotSize - Calculates the size reserved for this argument on
3501 /// the stack.
3502 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
3503                                        unsigned PtrByteSize) {
3504   unsigned ArgSize = ArgVT.getStoreSize();
3505   if (Flags.isByVal())
3506     ArgSize = Flags.getByValSize();
3507 
3508   // Round up to multiples of the pointer size, except for array members,
3509   // which are always packed.
3510   if (!Flags.isInConsecutiveRegs())
3511     ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
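  // For example, with PtrByteSize == 8 a 4-byte argument still reserves a
  // full 8-byte slot unless it is a packed array member.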
3512 
3513   return ArgSize;
3514 }
3515 
3516 /// CalculateStackSlotAlignment - Calculates the alignment of this argument
3517 /// on the stack.
3518 static Align CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
3519                                          ISD::ArgFlagsTy Flags,
3520                                          unsigned PtrByteSize) {
3521   Align Alignment(PtrByteSize);
3522 
3523   // Altivec parameters are padded to a 16 byte boundary.
3524   if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3525       ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3526       ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3527       ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3528     Alignment = Align(16);
3529   // QPX vector types stored in double-precision are padded to a 32 byte
3530   // boundary.
3531   else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1)
3532     Alignment = Align(32);
3533 
3534   // ByVal parameters are aligned as requested.
3535   if (Flags.isByVal()) {
3536     auto BVAlign = Flags.getNonZeroByValAlign();
3537     if (BVAlign > PtrByteSize) {
3538       if (BVAlign.value() % PtrByteSize != 0)
3539         llvm_unreachable(
3540             "ByVal alignment is not a multiple of the pointer size");
3541 
3542       Alignment = BVAlign;
3543     }
3544   }
3545 
3546   // Array members are always packed to their original alignment.
3547   if (Flags.isInConsecutiveRegs()) {
3548     // If the array member was split into multiple registers, the first
3549     // needs to be aligned to the size of the full type.  (Except for
3550     // ppcf128, which is only aligned as its f64 components.)
3551     if (Flags.isSplit() && OrigVT != MVT::ppcf128)
3552       Alignment = Align(OrigVT.getStoreSize());
3553     else
3554       Alignment = Align(ArgVT.getStoreSize());
3555   }
3556 
3557   return Alignment;
3558 }
3559 
3560 /// CalculateStackSlotUsed - Return whether this argument will use its
3561 /// stack slot (instead of being passed in registers).  ArgOffset,
3562 /// AvailableFPRs, and AvailableVRs must hold the current argument
3563 /// position, and will be updated to account for this argument.
3564 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT,
3565                                    ISD::ArgFlagsTy Flags,
3566                                    unsigned PtrByteSize,
3567                                    unsigned LinkageSize,
3568                                    unsigned ParamAreaSize,
3569                                    unsigned &ArgOffset,
3570                                    unsigned &AvailableFPRs,
3571                                    unsigned &AvailableVRs, bool HasQPX) {
3572   bool UseMemory = false;
3573 
3574   // Respect alignment of argument on the stack.
3575   Align Alignment =
3576       CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
3577   ArgOffset = alignTo(ArgOffset, Alignment);
3578   // If there's no space left in the argument save area, we must
3579   // use memory (this check also catches zero-sized arguments).
3580   if (ArgOffset >= LinkageSize + ParamAreaSize)
3581     UseMemory = true;
3582 
3583   // Allocate argument on the stack.
3584   ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
3585   if (Flags.isInConsecutiveRegsLast())
3586     ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3587   // If we overran the argument save area, we must use memory
3588   // (this check catches arguments passed partially in memory)
3589   if (ArgOffset > LinkageSize + ParamAreaSize)
3590     UseMemory = true;
3591 
3592   // However, if the argument is actually passed in an FPR or a VR,
3593   // we don't use memory after all.
3594   if (!Flags.isByVal()) {
3595     if (ArgVT == MVT::f32 || ArgVT == MVT::f64 ||
3596         // QPX registers overlap with the scalar FP registers.
3597         (HasQPX && (ArgVT == MVT::v4f32 ||
3598                     ArgVT == MVT::v4f64 ||
3599                     ArgVT == MVT::v4i1)))
3600       if (AvailableFPRs > 0) {
3601         --AvailableFPRs;
3602         return false;
3603       }
3604     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3605         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3606         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3607         ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3608       if (AvailableVRs > 0) {
3609         --AvailableVRs;
3610         return false;
3611       }
3612   }
3613 
3614   return UseMemory;
3615 }
3616 
/// EnsureStackAlignment - Round the stack frame size up from NumBytes to
/// ensure the minimum alignment required for the target.
3619 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
3620                                      unsigned NumBytes) {
3621   return alignTo(NumBytes, Lowering->getStackAlign());
3622 }
3623 
3624 SDValue PPCTargetLowering::LowerFormalArguments(
3625     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3626     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3627     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3628   if (Subtarget.isAIXABI())
3629     return LowerFormalArguments_AIX(Chain, CallConv, isVarArg, Ins, dl, DAG,
3630                                     InVals);
3631   if (Subtarget.is64BitELFABI())
3632     return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3633                                        InVals);
3634   if (Subtarget.is32BitELFABI())
3635     return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3636                                        InVals);
3637 
3638   return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, dl, DAG,
3639                                      InVals);
3640 }
3641 
3642 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
3643     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3644     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3645     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3646 
3647   // 32-bit SVR4 ABI Stack Frame Layout:
3648   //              +-----------------------------------+
3649   //        +-->  |            Back chain             |
3650   //        |     +-----------------------------------+
3651   //        |     | Floating-point register save area |
3652   //        |     +-----------------------------------+
3653   //        |     |    General register save area     |
3654   //        |     +-----------------------------------+
3655   //        |     |          CR save word             |
3656   //        |     +-----------------------------------+
3657   //        |     |         VRSAVE save word          |
3658   //        |     +-----------------------------------+
3659   //        |     |         Alignment padding         |
3660   //        |     +-----------------------------------+
3661   //        |     |     Vector register save area     |
3662   //        |     +-----------------------------------+
3663   //        |     |       Local variable space        |
3664   //        |     +-----------------------------------+
3665   //        |     |        Parameter list area        |
3666   //        |     +-----------------------------------+
3667   //        |     |           LR save word            |
3668   //        |     +-----------------------------------+
3669   // SP-->  +---  |            Back chain             |
3670   //              +-----------------------------------+
3671   //
3672   // Specifications:
3673   //   System V Application Binary Interface PowerPC Processor Supplement
3674   //   AltiVec Technology Programming Interface Manual
3675 
3676   MachineFunction &MF = DAG.getMachineFunction();
3677   MachineFrameInfo &MFI = MF.getFrameInfo();
3678   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3679 
3680   EVT PtrVT = getPointerTy(MF.getDataLayout());
3681   // Potential tail calls could cause overwriting of argument stack slots.
3682   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3683                        (CallConv == CallingConv::Fast));
3684   const Align PtrAlign(4);
3685 
3686   // Assign locations to all of the incoming arguments.
3687   SmallVector<CCValAssign, 16> ArgLocs;
3688   PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
3689                  *DAG.getContext());
3690 
3691   // Reserve space for the linkage area on the stack.
3692   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3693   CCInfo.AllocateStack(LinkageSize, PtrAlign);
3694   if (useSoftFloat())
3695     CCInfo.PreAnalyzeFormalArguments(Ins);
3696 
3697   CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
3698   CCInfo.clearWasPPCF128();
3699 
3700   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3701     CCValAssign &VA = ArgLocs[i];
3702 
3703     // Arguments stored in registers.
3704     if (VA.isRegLoc()) {
3705       const TargetRegisterClass *RC;
3706       EVT ValVT = VA.getValVT();
3707 
3708       switch (ValVT.getSimpleVT().SimpleTy) {
3709         default:
3710           llvm_unreachable("ValVT not supported by formal arguments Lowering");
3711         case MVT::i1:
3712         case MVT::i32:
3713           RC = &PPC::GPRCRegClass;
3714           break;
3715         case MVT::f32:
3716           if (Subtarget.hasP8Vector())
3717             RC = &PPC::VSSRCRegClass;
3718           else if (Subtarget.hasSPE())
3719             RC = &PPC::GPRCRegClass;
3720           else
3721             RC = &PPC::F4RCRegClass;
3722           break;
3723         case MVT::f64:
3724           if (Subtarget.hasVSX())
3725             RC = &PPC::VSFRCRegClass;
3726           else if (Subtarget.hasSPE())
3727             // SPE passes doubles in GPR pairs.
3728             RC = &PPC::GPRCRegClass;
3729           else
3730             RC = &PPC::F8RCRegClass;
3731           break;
3732         case MVT::v16i8:
3733         case MVT::v8i16:
3734         case MVT::v4i32:
3735           RC = &PPC::VRRCRegClass;
3736           break;
3737         case MVT::v4f32:
3738           RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass;
3739           break;
3740         case MVT::v2f64:
3741         case MVT::v2i64:
3742           RC = &PPC::VRRCRegClass;
3743           break;
3744         case MVT::v4f64:
3745           RC = &PPC::QFRCRegClass;
3746           break;
3747         case MVT::v4i1:
3748           RC = &PPC::QBRCRegClass;
3749           break;
3750       }
3751 
3752       SDValue ArgValue;
3753       // Transform the arguments stored in physical registers into
3754       // virtual ones.
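      // For SPE, an f64 argument arrives split across a pair of 32-bit GPRs.
      // As an illustrative example, such an argument occupies two consecutive
      // GPRs, with the most-significant word in the first (lower-numbered)
      // register on big-endian targets; BUILD_SPE64 below reassembles the two
      // halves into a single f64.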
3755       if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) {
3756         assert(i + 1 < e && "No second half of double precision argument");
3757         unsigned RegLo = MF.addLiveIn(VA.getLocReg(), RC);
3758         unsigned RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC);
3759         SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32);
3760         SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32);
3761         if (!Subtarget.isLittleEndian())
          std::swap(ArgValueLo, ArgValueHi);
3763         ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo,
3764                                ArgValueHi);
3765       } else {
3766         unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3767         ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
3768                                       ValVT == MVT::i1 ? MVT::i32 : ValVT);
3769         if (ValVT == MVT::i1)
3770           ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);
3771       }
3772 
3773       InVals.push_back(ArgValue);
3774     } else {
3775       // Argument stored in memory.
3776       assert(VA.isMemLoc());
3777 
      // Get the extended size of the argument's stack slot.
      unsigned ArgSize = VA.getLocVT().getStoreSize();
      // Get the actual size of the argument type.
      unsigned ObjSize = VA.getValVT().getStoreSize();
3782       unsigned ArgOffset = VA.getLocMemOffset();
3783       // Stack objects in PPC32 are right justified.
3784       ArgOffset += ArgSize - ObjSize;
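      // For example, a 4-byte value occupying an 8-byte slot lives in the
      // slot's upper (higher-address) half, so the load below begins 4 bytes
      // into the slot.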
3785       int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, isImmutable);
3786 
3787       // Create load nodes to retrieve arguments from the stack.
3788       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3789       InVals.push_back(
3790           DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo()));
3791     }
3792   }
3793 
3794   // Assign locations to all of the incoming aggregate by value arguments.
3795   // Aggregates passed by value are stored in the local variable space of the
3796   // caller's stack frame, right above the parameter list area.
3797   SmallVector<CCValAssign, 16> ByValArgLocs;
3798   CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
3799                       ByValArgLocs, *DAG.getContext());
3800 
3801   // Reserve stack space for the allocations in CCInfo.
3802   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
3803 
3804   CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);
3805 
3806   // Area that is at least reserved in the caller of this function.
3807   unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
3808   MinReservedArea = std::max(MinReservedArea, LinkageSize);
3809 
3810   // Set the size that is at least reserved in caller of this function.  Tail
3811   // call optimized function's reserved stack space needs to be aligned so that
3812   // taking the difference between two stack areas will result in an aligned
3813   // stack.
3814   MinReservedArea =
3815       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
3816   FuncInfo->setMinReservedArea(MinReservedArea);
3817 
3818   SmallVector<SDValue, 8> MemOps;
3819 
  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
3822   if (isVarArg) {
3823     static const MCPhysReg GPArgRegs[] = {
3824       PPC::R3, PPC::R4, PPC::R5, PPC::R6,
3825       PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3826     };
3827     const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
3828 
3829     static const MCPhysReg FPArgRegs[] = {
3830       PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
3831       PPC::F8
3832     };
3833     unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
3834 
3835     if (useSoftFloat() || hasSPE())
      NumFPArgRegs = 0;
3837 
3838     FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
3839     FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));
3840 
3841     // Make room for NumGPArgRegs and NumFPArgRegs.
3842     int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
3843                 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;
3844 
3845     FuncInfo->setVarArgsStackOffset(
3846       MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
3847                             CCInfo.getNextStackOffset(), true));
3848 
3849     FuncInfo->setVarArgsFrameIndex(
3850         MFI.CreateStackObject(Depth, Align(8), false));
3851     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3852 
3853     // The fixed integer arguments of a variadic function are stored to the
3854     // VarArgsFrameIndex on the stack so that they may be loaded by
3855     // dereferencing the result of va_next.
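    // As an illustrative sketch: for 'int sum(int n, ...)' all eight GPR
    // candidates r3..r10 are spilled to consecutive 4-byte slots starting at
    // FIN, so the varargs machinery can walk them as ordinary memory.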
3856     for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
3857       // Get an existing live-in vreg, or add a new one.
3858       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
3859       if (!VReg)
3860         VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
3861 
3862       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3863       SDValue Store =
3864           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3865       MemOps.push_back(Store);
3866       // Increment the address by four for the next argument to store
3867       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
3868       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3869     }
3870 
3871     // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
3872     // is set.
3873     // The double arguments are stored to the VarArgsFrameIndex
3874     // on the stack.
3875     for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
3876       // Get an existing live-in vreg, or add a new one.
3877       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
3878       if (!VReg)
3879         VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
3880 
3881       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
3882       SDValue Store =
3883           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3884       MemOps.push_back(Store);
3885       // Increment the address by eight for the next argument to store
      SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
                                       PtrVT);
3888       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3889     }
3890   }
3891 
3892   if (!MemOps.empty())
3893     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3894 
3895   return Chain;
3896 }
3897 
3898 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
3899 // value to MVT::i64 and then truncate to the correct register size.
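// For example, an i32 argument marked 'signext' arrives in an i64 register;
// it is wrapped in AssertSext(i64, i32) so later passes know the upper bits
// are sign bits, and then truncated back to i32.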
3900 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
3901                                              EVT ObjectVT, SelectionDAG &DAG,
3902                                              SDValue ArgVal,
3903                                              const SDLoc &dl) const {
3904   if (Flags.isSExt())
3905     ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
3906                          DAG.getValueType(ObjectVT));
3907   else if (Flags.isZExt())
3908     ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
3909                          DAG.getValueType(ObjectVT));
3910 
3911   return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
3912 }
3913 
3914 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
3915     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3916     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3917     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3918   // TODO: add description of PPC stack frame format, or at least some docs.
3919   //
3920   bool isELFv2ABI = Subtarget.isELFv2ABI();
3921   bool isLittleEndian = Subtarget.isLittleEndian();
3922   MachineFunction &MF = DAG.getMachineFunction();
3923   MachineFrameInfo &MFI = MF.getFrameInfo();
3924   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3925 
3926   assert(!(CallConv == CallingConv::Fast && isVarArg) &&
3927          "fastcc not supported on varargs functions");
3928 
3929   EVT PtrVT = getPointerTy(MF.getDataLayout());
3930   // Potential tail calls could cause overwriting of argument stack slots.
3931   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3932                        (CallConv == CallingConv::Fast));
3933   unsigned PtrByteSize = 8;
3934   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3935 
3936   static const MCPhysReg GPR[] = {
3937     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
3938     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
3939   };
3940   static const MCPhysReg VR[] = {
3941     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
3942     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
3943   };
3944 
3945   const unsigned Num_GPR_Regs = array_lengthof(GPR);
3946   const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
3947   const unsigned Num_VR_Regs  = array_lengthof(VR);
3948   const unsigned Num_QFPR_Regs = Num_FPR_Regs;
3949 
3950   // Do a first pass over the arguments to determine whether the ABI
3951   // guarantees that our caller has allocated the parameter save area
3952   // on its stack frame.  In the ELFv1 ABI, this is always the case;
3953   // in the ELFv2 ABI, it is true if this is a vararg function or if
3954   // any parameter is located in a stack slot.
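  //
  // As a sketch: under ELFv2, 'long f(long a)' passes entirely in r3 and the
  // caller may omit the parameter save area, whereas a variadic callee or one
  // with an argument assigned to a stack slot forces the area to exist.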
3955 
3956   bool HasParameterArea = !isELFv2ABI || isVarArg;
3957   unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
3958   unsigned NumBytes = LinkageSize;
3959   unsigned AvailableFPRs = Num_FPR_Regs;
3960   unsigned AvailableVRs = Num_VR_Regs;
3961   for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3962     if (Ins[i].Flags.isNest())
3963       continue;
3964 
3965     if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
3966                                PtrByteSize, LinkageSize, ParamAreaSize,
3967                                NumBytes, AvailableFPRs, AvailableVRs,
3968                                Subtarget.hasQPX()))
3969       HasParameterArea = true;
3970   }
3971 
3972   // Add DAG nodes to load the arguments or copy them out of registers.  On
3973   // entry to a function on PPC, the arguments start after the linkage area,
3974   // although the first ones are often in registers.
3975 
3976   unsigned ArgOffset = LinkageSize;
3977   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
3978   unsigned &QFPR_idx = FPR_idx;
3979   SmallVector<SDValue, 8> MemOps;
3980   Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
3981   unsigned CurArgIdx = 0;
3982   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
3983     SDValue ArgVal;
3984     bool needsLoad = false;
3985     EVT ObjectVT = Ins[ArgNo].VT;
3986     EVT OrigVT = Ins[ArgNo].ArgVT;
3987     unsigned ObjSize = ObjectVT.getStoreSize();
3988     unsigned ArgSize = ObjSize;
3989     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
3990     if (Ins[ArgNo].isOrigArg()) {
3991       std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
3992       CurArgIdx = Ins[ArgNo].getOrigArgIndex();
3993     }
3994     // We re-align the argument offset for each argument, except when using the
3995     // fast calling convention, when we need to make sure we do that only when
3996     // we'll actually use a stack slot.
3997     unsigned CurArgOffset;
3998     Align Alignment;
3999     auto ComputeArgOffset = [&]() {
4000       /* Respect alignment of argument on the stack.  */
4001       Alignment =
4002           CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
4003       ArgOffset = alignTo(ArgOffset, Alignment);
4004       CurArgOffset = ArgOffset;
4005     };
4006 
4007     if (CallConv != CallingConv::Fast) {
4008       ComputeArgOffset();
4009 
4010       /* Compute GPR index associated with argument offset.  */
4011       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4012       GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
4013     }
4014 
4015     // FIXME the codegen can be much improved in some cases.
4016     // We do not have to keep everything in memory.
4017     if (Flags.isByVal()) {
4018       assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
4019 
4020       if (CallConv == CallingConv::Fast)
4021         ComputeArgOffset();
4022 
      // ObjSize is the true size; ArgSize is ObjSize rounded up to a
      // multiple of registers.
4024       ObjSize = Flags.getByValSize();
4025       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4026       // Empty aggregate parameters do not take up registers.  Examples:
4027       //   struct { } a;
4028       //   union  { } b;
4029       //   int c[0];
4030       // etc.  However, we have to provide a place-holder in InVals, so
4031       // pretend we have an 8-byte item at the current address for that
4032       // purpose.
4033       if (!ObjSize) {
4034         int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
4035         SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4036         InVals.push_back(FIN);
4037         continue;
4038       }
4039 
4040       // Create a stack object covering all stack doublewords occupied
4041       // by the argument.  If the argument is (fully or partially) on
4042       // the stack, or if the argument is fully in registers but the
4043       // caller has allocated the parameter save anyway, we can refer
4044       // directly to the caller's stack frame.  Otherwise, create a
4045       // local copy in our own frame.
4046       int FI;
4047       if (HasParameterArea ||
4048           ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
4049         FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true);
4050       else
4051         FI = MFI.CreateStackObject(ArgSize, Alignment, false);
4052       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4053 
4054       // Handle aggregates smaller than 8 bytes.
4055       if (ObjSize < PtrByteSize) {
4056         // The value of the object is its address, which differs from the
4057         // address of the enclosing doubleword on big-endian systems.
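        // For example, a 3-byte struct stored in a big-endian doubleword slot
        // begins PtrByteSize - 3 = 5 bytes into that slot, which is exactly
        // the adjustment applied below.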
4058         SDValue Arg = FIN;
4059         if (!isLittleEndian) {
4060           SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
4061           Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
4062         }
4063         InVals.push_back(Arg);
4064 
4065         if (GPR_idx != Num_GPR_Regs) {
4066           unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4067           FuncInfo->addLiveInAttr(VReg, Flags);
4068           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4069           SDValue Store;
4070 
4071           if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
4072             EVT ObjType = (ObjSize == 1 ? MVT::i8 :
4073                            (ObjSize == 2 ? MVT::i16 : MVT::i32));
4074             Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
4075                                       MachinePointerInfo(&*FuncArg), ObjType);
4076           } else {
4077             // For sizes that don't fit a truncating store (3, 5, 6, 7),
4078             // store the whole register as-is to the parameter save area
4079             // slot.
4080             Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
4081                                  MachinePointerInfo(&*FuncArg));
4082           }
4083 
4084           MemOps.push_back(Store);
4085         }
4086         // Whether we copied from a register or not, advance the offset
4087         // into the parameter save area by a full doubleword.
4088         ArgOffset += PtrByteSize;
4089         continue;
4090       }
4091 
4092       // The value of the object is its address, which is the address of
4093       // its first stack doubleword.
4094       InVals.push_back(FIN);
4095 
4096       // Store whatever pieces of the object are in registers to memory.
4097       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
4098         if (GPR_idx == Num_GPR_Regs)
4099           break;
4100 
4101         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4102         FuncInfo->addLiveInAttr(VReg, Flags);
4103         SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4104         SDValue Addr = FIN;
4105         if (j) {
4106           SDValue Off = DAG.getConstant(j, dl, PtrVT);
4107           Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
4108         }
4109         SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
4110                                      MachinePointerInfo(&*FuncArg, j));
4111         MemOps.push_back(Store);
4112         ++GPR_idx;
4113       }
4114       ArgOffset += ArgSize;
4115       continue;
4116     }
4117 
4118     switch (ObjectVT.getSimpleVT().SimpleTy) {
4119     default: llvm_unreachable("Unhandled argument type!");
4120     case MVT::i1:
4121     case MVT::i32:
4122     case MVT::i64:
4123       if (Flags.isNest()) {
4124         // The 'nest' parameter, if any, is passed in R11.
4125         unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
4126         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4127 
4128         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4129           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4130 
4131         break;
4132       }
4133 
4134       // These can be scalar arguments or elements of an integer array type
4135       // passed directly.  Clang may use those instead of "byval" aggregate
4136       // types to avoid forcing arguments to memory unnecessarily.
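      // As a hypothetical example, a small aggregate may be lowered to an
      // [N x i64] array of direct elements instead of a byval copy; each
      // element then consumes one GPR here.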
4137       if (GPR_idx != Num_GPR_Regs) {
4138         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4139         FuncInfo->addLiveInAttr(VReg, Flags);
4140         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4141 
4142         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4143           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4144           // value to MVT::i64 and then truncate to the correct register size.
4145           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4146       } else {
4147         if (CallConv == CallingConv::Fast)
4148           ComputeArgOffset();
4149 
4150         needsLoad = true;
4151         ArgSize = PtrByteSize;
4152       }
4153       if (CallConv != CallingConv::Fast || needsLoad)
4154         ArgOffset += 8;
4155       break;
4156 
4157     case MVT::f32:
4158     case MVT::f64:
4159       // These can be scalar arguments or elements of a float array type
4160       // passed directly.  The latter are used to implement ELFv2 homogenous
4161       // float aggregates.
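      // For instance, under ELFv2 a homogeneous aggregate such as
      //   struct { float x, y, z; }
      // may be passed as three consecutive f32 elements, each consuming one
      // FPR.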
4162       if (FPR_idx != Num_FPR_Regs) {
4163         unsigned VReg;
4164 
4165         if (ObjectVT == MVT::f32)
4166           VReg = MF.addLiveIn(FPR[FPR_idx],
4167                               Subtarget.hasP8Vector()
4168                                   ? &PPC::VSSRCRegClass
4169                                   : &PPC::F4RCRegClass);
4170         else
4171           VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
4172                                                 ? &PPC::VSFRCRegClass
4173                                                 : &PPC::F8RCRegClass);
4174 
4175         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4176         ++FPR_idx;
4177       } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
4178         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
4179         // once we support fp <-> gpr moves.
4180 
4181         // This can only ever happen in the presence of f32 array types,
4182         // since otherwise we never run out of FPRs before running out
4183         // of GPRs.
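        // Sketch of the extraction below: the f32 element occupies either the
        // high or the low 32-bit half of the GPR, depending on endianness and
        // the element's offset, so we may shift right by 32 before truncating
        // to i32 and bitcasting to f32.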
4184         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4185         FuncInfo->addLiveInAttr(VReg, Flags);
4186         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4187 
4188         if (ObjectVT == MVT::f32) {
4189           if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
4190             ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
4191                                  DAG.getConstant(32, dl, MVT::i32));
4192           ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
4193         }
4194 
4195         ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
4196       } else {
4197         if (CallConv == CallingConv::Fast)
4198           ComputeArgOffset();
4199 
4200         needsLoad = true;
4201       }
4202 
4203       // When passing an array of floats, the array occupies consecutive
4204       // space in the argument area; only round up to the next doubleword
4205       // at the end of the array.  Otherwise, each float takes 8 bytes.
4206       if (CallConv != CallingConv::Fast || needsLoad) {
4207         ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
4208         ArgOffset += ArgSize;
4209         if (Flags.isInConsecutiveRegsLast())
4210           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4211       }
4212       break;
4213     case MVT::v4f32:
4214     case MVT::v4i32:
4215     case MVT::v8i16:
4216     case MVT::v16i8:
4217     case MVT::v2f64:
4218     case MVT::v2i64:
4219     case MVT::v1i128:
4220     case MVT::f128:
4221       if (!Subtarget.hasQPX()) {
4222         // These can be scalar arguments or elements of a vector array type
4223         // passed directly.  The latter are used to implement ELFv2 homogenous
4224         // vector aggregates.
4225         if (VR_idx != Num_VR_Regs) {
4226           unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
4227           ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4228           ++VR_idx;
4229         } else {
4230           if (CallConv == CallingConv::Fast)
4231             ComputeArgOffset();
4232           needsLoad = true;
4233         }
4234         if (CallConv != CallingConv::Fast || needsLoad)
4235           ArgOffset += 16;
4236         break;
4237       } // not QPX
4238 
4239       assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 &&
4240              "Invalid QPX parameter type");
4241       LLVM_FALLTHROUGH;
4242 
4243     case MVT::v4f64:
4244     case MVT::v4i1:
4245       // QPX vectors are treated like their scalar floating-point subregisters
4246       // (except that they're larger).
4247       unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32;
4248       if (QFPR_idx != Num_QFPR_Regs) {
4249         const TargetRegisterClass *RC;
4250         switch (ObjectVT.getSimpleVT().SimpleTy) {
4251         case MVT::v4f64: RC = &PPC::QFRCRegClass; break;
4252         case MVT::v4f32: RC = &PPC::QSRCRegClass; break;
4253         default:         RC = &PPC::QBRCRegClass; break;
4254         }
4255 
4256         unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC);
4257         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4258         ++QFPR_idx;
4259       } else {
4260         if (CallConv == CallingConv::Fast)
4261           ComputeArgOffset();
4262         needsLoad = true;
4263       }
4264       if (CallConv != CallingConv::Fast || needsLoad)
4265         ArgOffset += Sz;
4266       break;
4267     }
4268 
4269     // We need to load the argument to a virtual register if we determined
4270     // above that we ran out of physical registers of the appropriate type.
4271     if (needsLoad) {
4272       if (ObjSize < ArgSize && !isLittleEndian)
4273         CurArgOffset += ArgSize - ObjSize;
4274       int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
4275       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4276       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4277     }
4278 
4279     InVals.push_back(ArgVal);
4280   }
4281 
4282   // Area that is at least reserved in the caller of this function.
4283   unsigned MinReservedArea;
4284   if (HasParameterArea)
4285     MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
4286   else
4287     MinReservedArea = LinkageSize;
4288 
4289   // Set the size that is at least reserved in caller of this function.  Tail
4290   // call optimized functions' reserved stack space needs to be aligned so that
4291   // taking the difference between two stack areas will result in an aligned
4292   // stack.
4293   MinReservedArea =
4294       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4295   FuncInfo->setMinReservedArea(MinReservedArea);
4296 
  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  // The ELFv2 ABI spec says:
  //   C programs that are intended to be *portable* across different
  //   compilers and architectures must use the header file <stdarg.h> to
  //   deal with variable argument lists.
4303   if (isVarArg && MFI.hasVAStart()) {
4304     int Depth = ArgOffset;
4305 
4306     FuncInfo->setVarArgsFrameIndex(
4307       MFI.CreateFixedObject(PtrByteSize, Depth, true));
4308     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4309 
4310     // If this function is vararg, store any remaining integer argument regs
4311     // to their spots on the stack so that they may be loaded by dereferencing
4312     // the result of va_next.
4313     for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4314          GPR_idx < Num_GPR_Regs; ++GPR_idx) {
4315       unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4316       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4317       SDValue Store =
4318           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4319       MemOps.push_back(Store);
      // Increment the address by eight for the next argument to store
4321       SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
4322       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4323     }
4324   }
4325 
4326   if (!MemOps.empty())
4327     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4328 
4329   return Chain;
4330 }
4331 
4332 SDValue PPCTargetLowering::LowerFormalArguments_Darwin(
4333     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
4334     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4335     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4336   // TODO: add description of PPC stack frame format, or at least some docs.
4337   //
4338   MachineFunction &MF = DAG.getMachineFunction();
4339   MachineFrameInfo &MFI = MF.getFrameInfo();
4340   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
4341 
4342   EVT PtrVT = getPointerTy(MF.getDataLayout());
4343   bool isPPC64 = PtrVT == MVT::i64;
4344   // Potential tail calls could cause overwriting of argument stack slots.
4345   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
4346                        (CallConv == CallingConv::Fast));
4347   unsigned PtrByteSize = isPPC64 ? 8 : 4;
4348   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4349   unsigned ArgOffset = LinkageSize;
4350   // Area that is at least reserved in caller of this function.
4351   unsigned MinReservedArea = ArgOffset;
4352 
4353   static const MCPhysReg GPR_32[] = {           // 32-bit registers.
4354     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
4355     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
4356   };
4357   static const MCPhysReg GPR_64[] = {           // 64-bit registers.
4358     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4359     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4360   };
4361   static const MCPhysReg VR[] = {
4362     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4363     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4364   };
4365 
4366   const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
4367   const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
  const unsigned Num_VR_Regs  = array_lengthof(VR);
4369 
4370   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
4371 
4372   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
4373 
4374   // In 32-bit non-varargs functions, the stack space for vectors is after the
4375   // stack space for non-vectors.  We do not use this space unless we have
4376   // too many vectors to fit in registers, something that only occurs in
  // constructed examples, but we have to walk the argument list to figure
4378   // that out...for the pathological case, compute VecArgOffset as the
4379   // start of the vector parameter area.  Computing VecArgOffset is the
4380   // entire point of the following loop.
4381   unsigned VecArgOffset = ArgOffset;
4382   if (!isVarArg && !isPPC64) {
4383     for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
4384          ++ArgNo) {
4385       EVT ObjectVT = Ins[ArgNo].VT;
4386       ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4387 
4388       if (Flags.isByVal()) {
        // ObjSize is the true size; ArgSize is ObjSize rounded up to a
        // multiple of regs.
4390         unsigned ObjSize = Flags.getByValSize();
4391         unsigned ArgSize =
4392                 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4393         VecArgOffset += ArgSize;
4394         continue;
4395       }
4396 
4397       switch(ObjectVT.getSimpleVT().SimpleTy) {
4398       default: llvm_unreachable("Unhandled argument type!");
4399       case MVT::i1:
4400       case MVT::i32:
4401       case MVT::f32:
4402         VecArgOffset += 4;
4403         break;
4404       case MVT::i64:  // PPC64
4405       case MVT::f64:
4406         // FIXME: We are guaranteed to be !isPPC64 at this point.
4407         // Does MVT::i64 apply?
4408         VecArgOffset += 8;
4409         break;
4410       case MVT::v4f32:
4411       case MVT::v4i32:
4412       case MVT::v8i16:
4413       case MVT::v16i8:
        // Nothing to do; we're only looking at non-vector args here.
4415         break;
4416       }
4417     }
4418   }
4419   // We've found where the vector parameter area in memory is.  Skip the
4420   // first 12 parameters; these don't use that memory.
4421   VecArgOffset = ((VecArgOffset+15)/16)*16;
4422   VecArgOffset += 12*16;
4423 
4424   // Add DAG nodes to load the arguments or copy them out of registers.  On
4425   // entry to a function on PPC, the arguments start after the linkage area,
4426   // although the first ones are often in registers.
4427 
4428   SmallVector<SDValue, 8> MemOps;
4429   unsigned nAltivecParamsAtEnd = 0;
4430   Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
4431   unsigned CurArgIdx = 0;
4432   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
4433     SDValue ArgVal;
4434     bool needsLoad = false;
4435     EVT ObjectVT = Ins[ArgNo].VT;
4436     unsigned ObjSize = ObjectVT.getSizeInBits()/8;
4437     unsigned ArgSize = ObjSize;
4438     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4439     if (Ins[ArgNo].isOrigArg()) {
4440       std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
4441       CurArgIdx = Ins[ArgNo].getOrigArgIndex();
4442     }
4443     unsigned CurArgOffset = ArgOffset;
4444 
    // Varargs or 64-bit Altivec parameters are padded to a 16-byte boundary.
4446     if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
4447         ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
4448       if (isVarArg || isPPC64) {
4449         MinReservedArea = ((MinReservedArea+15)/16)*16;
4450         MinReservedArea += CalculateStackSlotSize(ObjectVT,
4451                                                   Flags,
4452                                                   PtrByteSize);
      } else
        nAltivecParamsAtEnd++;
4454     } else
4455       // Calculate min reserved area.
4456       MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
4457                                                 Flags,
4458                                                 PtrByteSize);
4459 
4460     // FIXME the codegen can be much improved in some cases.
4461     // We do not have to keep everything in memory.
4462     if (Flags.isByVal()) {
4463       assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
4464 
      // ObjSize is the true size; ArgSize is ObjSize rounded up to a
      // multiple of registers.
4466       ObjSize = Flags.getByValSize();
4467       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4468       // Objects of size 1 and 2 are right justified, everything else is
4469       // left justified.  This means the memory address is adjusted forwards.
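      // For example, a 1-byte object is addressed 4 - 1 = 3 bytes into its
      // 4-byte slot.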
4470       if (ObjSize==1 || ObjSize==2) {
4471         CurArgOffset = CurArgOffset + (4 - ObjSize);
4472       }
4473       // The value of the object is its address.
4474       int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, false, true);
4475       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4476       InVals.push_back(FIN);
4477       if (ObjSize==1 || ObjSize==2) {
4478         if (GPR_idx != Num_GPR_Regs) {
4479           unsigned VReg;
4480           if (isPPC64)
4481             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4482           else
4483             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4484           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4485           EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16;
4486           SDValue Store =
4487               DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
4488                                 MachinePointerInfo(&*FuncArg), ObjType);
4489           MemOps.push_back(Store);
4490           ++GPR_idx;
4491         }
4492 
4493         ArgOffset += PtrByteSize;
4494 
4495         continue;
4496       }
4497       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
4498         // Store whatever pieces of the object are in registers
4499         // to memory.  ArgOffset will be the address of the beginning
4500         // of the object.
4501         if (GPR_idx != Num_GPR_Regs) {
4502           unsigned VReg;
4503           if (isPPC64)
4504             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4505           else
4506             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4507           int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
4508           SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4509           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4510           SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
4511                                        MachinePointerInfo(&*FuncArg, j));
4512           MemOps.push_back(Store);
4513           ++GPR_idx;
4514           ArgOffset += PtrByteSize;
4515         } else {
4516           ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
4517           break;
4518         }
4519       }
4520       continue;
4521     }
4522 
4523     switch (ObjectVT.getSimpleVT().SimpleTy) {
4524     default: llvm_unreachable("Unhandled argument type!");
4525     case MVT::i1:
4526     case MVT::i32:
4527       if (!isPPC64) {
4528         if (GPR_idx != Num_GPR_Regs) {
4529           unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4530           ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
4531 
4532           if (ObjectVT == MVT::i1)
4533             ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal);
4534 
4535           ++GPR_idx;
4536         } else {
4537           needsLoad = true;
4538           ArgSize = PtrByteSize;
4539         }
4540         // All int arguments reserve stack space in the Darwin ABI.
4541         ArgOffset += PtrByteSize;
4542         break;
4543       }
4544       LLVM_FALLTHROUGH;
4545     case MVT::i64:  // PPC64
4546       if (GPR_idx != Num_GPR_Regs) {
4547         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4548         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4549 
4550         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4551           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4552           // value to MVT::i64 and then truncate to the correct register size.
4553           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4554 
4555         ++GPR_idx;
4556       } else {
4557         needsLoad = true;
4558         ArgSize = PtrByteSize;
4559       }
4560       // All int arguments reserve stack space in the Darwin ABI.
4561       ArgOffset += 8;
4562       break;
4563 
4564     case MVT::f32:
4565     case MVT::f64:
4566       // Every 4 bytes of argument space consumes one of the GPRs available for
4567       // argument passing.
4568       if (GPR_idx != Num_GPR_Regs) {
4569         ++GPR_idx;
4570         if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
4571           ++GPR_idx;
4572       }
4573       if (FPR_idx != Num_FPR_Regs) {
4574         unsigned VReg;
4575 
4576         if (ObjectVT == MVT::f32)
4577           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
4578         else
4579           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);
4580 
4581         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4582         ++FPR_idx;
4583       } else {
4584         needsLoad = true;
4585       }
4586 
4587       // All FP arguments reserve stack space in the Darwin ABI.
4588       ArgOffset += isPPC64 ? 8 : ObjSize;
4589       break;
4590     case MVT::v4f32:
4591     case MVT::v4i32:
4592     case MVT::v8i16:
4593     case MVT::v16i8:
4594       // Note that vector arguments in registers don't reserve stack space,
4595       // except in varargs functions.
4596       if (VR_idx != Num_VR_Regs) {
4597         unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
4598         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4599         if (isVarArg) {
4600           while ((ArgOffset % 16) != 0) {
4601             ArgOffset += PtrByteSize;
4602             if (GPR_idx != Num_GPR_Regs)
4603               GPR_idx++;
4604           }
4605           ArgOffset += 16;
4606           GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
4607         }
4608         ++VR_idx;
4609       } else {
4610         if (!isVarArg && !isPPC64) {
4611           // Vectors go after all the nonvectors.
4612           CurArgOffset = VecArgOffset;
4613           VecArgOffset += 16;
4614         } else {
4615           // Vectors are aligned.
4616           ArgOffset = ((ArgOffset+15)/16)*16;
4617           CurArgOffset = ArgOffset;
4618           ArgOffset += 16;
4619         }
4620         needsLoad = true;
4621       }
4622       break;
4623     }
4624 
4625     // We need to load the argument to a virtual register if we determined above
4626     // that we ran out of physical registers of the appropriate type.
4627     if (needsLoad) {
4628       int FI = MFI.CreateFixedObject(ObjSize,
4629                                      CurArgOffset + (ArgSize - ObjSize),
4630                                      isImmutable);
4631       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4632       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4633     }
4634 
4635     InVals.push_back(ArgVal);
4636   }
4637 
4638   // Allow for Altivec parameters at the end, if needed.
4639   if (nAltivecParamsAtEnd) {
4640     MinReservedArea = ((MinReservedArea+15)/16)*16;
4641     MinReservedArea += 16*nAltivecParamsAtEnd;
4642   }
4643 
4644   // Area that is at least reserved in the caller of this function.
4645   MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);
4646 
4647   // Set the size that is at least reserved in caller of this function.  Tail
4648   // call optimized functions' reserved stack space needs to be aligned so that
4649   // taking the difference between two stack areas will result in an aligned
4650   // stack.
4651   MinReservedArea =
4652       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4653   FuncInfo->setMinReservedArea(MinReservedArea);
4654 
  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
4657   if (isVarArg) {
4658     int Depth = ArgOffset;
4659 
4660     FuncInfo->setVarArgsFrameIndex(
4661       MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
4662                             Depth, true));
4663     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4664 
4665     // If this function is vararg, store any remaining integer argument regs
4666     // to their spots on the stack so that they may be loaded by dereferencing
4667     // the result of va_next.
4668     for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
4669       unsigned VReg;
4670 
4671       if (isPPC64)
4672         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4673       else
4674         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4675 
4676       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4677       SDValue Store =
4678           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4679       MemOps.push_back(Store);
      // Increment the address by the pointer size for the next argument to
      // store.
4681       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
4682       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4683     }
4684   }
4685 
4686   if (!MemOps.empty())
4687     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4688 
4689   return Chain;
4690 }
4691 
4692 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
4693 /// adjusted to accommodate the arguments for the tailcall.
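/// As a worked example: if the caller reserved 112 bytes of argument space
/// but the tail callee only needs 64, SPDiff is +48; a negative SPDiff means
/// the callee needs more space than the caller reserved, and the smallest
/// (most negative) value seen is recorded as the tail call SP delta.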
4694 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
4695                                    unsigned ParamSize) {
4696 
4697   if (!isTailCall) return 0;
4698 
4699   PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
4700   unsigned CallerMinReservedArea = FI->getMinReservedArea();
4701   int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
  // Remember only if the new adjustment is bigger, i.e. more negative.
4703   if (SPDiff < FI->getTailCallSPDelta())
4704     FI->setTailCallSPDelta(SPDiff);
4705 
4706   return SPDiff;
4707 }
4708 
4709 static bool isFunctionGlobalAddress(SDValue Callee);
4710 
4711 static bool callsShareTOCBase(const Function *Caller, SDValue Callee,
4712                               const TargetMachine &TM) {
4713   // It does not make sense to call callsShareTOCBase() with a caller that
4714   // is PC Relative since PC Relative callers do not have a TOC.
4715 #ifndef NDEBUG
4716   const PPCSubtarget *STICaller = &TM.getSubtarget<PPCSubtarget>(*Caller);
4717   assert(!STICaller->isUsingPCRelativeCalls() &&
4718          "PC Relative callers do not have a TOC and cannot share a TOC Base");
4719 #endif
4720 
4721   // Callee is either a GlobalAddress or an ExternalSymbol. ExternalSymbols
4722   // don't have enough information to determine if the caller and callee share
  // the same TOC base, so we have to pessimistically assume they don't for
4724   // correctness.
4725   GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4726   if (!G)
4727     return false;
4728 
4729   const GlobalValue *GV = G->getGlobal();
4730 
4731   // If the callee is preemptable, then the static linker will use a plt-stub
4732   // which saves the toc to the stack, and needs a nop after the call
4733   // instruction to convert to a toc-restore.
4734   if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
4735     return false;
4736 
4737   // Functions with PC Relative enabled may clobber the TOC in the same DSO.
4738   // We may need a TOC restore in the situation where the caller requires a
4739   // valid TOC but the callee is PC Relative and does not.
4740   const Function *F = dyn_cast<Function>(GV);
4741   const GlobalAlias *Alias = dyn_cast<GlobalAlias>(GV);
4742 
4743   // If we have an Alias we can try to get the function from there.
4744   if (Alias) {
4745     const GlobalObject *GlobalObj = Alias->getBaseObject();
4746     F = dyn_cast<Function>(GlobalObj);
4747   }
4748 
4749   // If we still have no valid function pointer we do not have enough
4750   // information to determine if the callee uses PC Relative calls so we must
4751   // assume that it does.
4752   if (!F)
4753     return false;
4754 
4755   // If the callee uses PC Relative we cannot guarantee that the callee won't
4756   // clobber the TOC of the caller and so we must assume that the two
4757   // functions do not share a TOC base.
4758   const PPCSubtarget *STICallee = &TM.getSubtarget<PPCSubtarget>(*F);
4759   if (STICallee->isUsingPCRelativeCalls())
4760     return false;
4761 
4762   // The medium and large code models are expected to provide a sufficiently
4763   // large TOC to provide all data addressing needs of a module with a
4764   // single TOC.
4765   if (CodeModel::Medium == TM.getCodeModel() ||
4766       CodeModel::Large == TM.getCodeModel())
4767     return true;
4768 
4769   // Otherwise we need to ensure callee and caller are in the same section,
4770   // since the linker may allocate multiple TOCs, and we don't know which
4771   // sections will belong to the same TOC base.
4772   if (!GV->isStrongDefinitionForLinker())
4773     return false;
4774 
4775   // Any explicitly-specified sections and section prefixes must also match.
4776   // Also, if we're using -ffunction-sections, then each function is always in
4777   // a different section (the same is true for COMDAT functions).
4778   if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
4779       GV->getSection() != Caller->getSection())
4780     return false;
4781   if (const auto *F = dyn_cast<Function>(GV)) {
4782     if (F->getSectionPrefix() != Caller->getSectionPrefix())
4783       return false;
4784   }
4785 
4786   return true;
4787 }
4788 
4789 static bool
4790 needStackSlotPassParameters(const PPCSubtarget &Subtarget,
4791                             const SmallVectorImpl<ISD::OutputArg> &Outs) {
4792   assert(Subtarget.is64BitELFABI());
4793 
4794   const unsigned PtrByteSize = 8;
4795   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4796 
4797   static const MCPhysReg GPR[] = {
4798     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4799     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4800   };
4801   static const MCPhysReg VR[] = {
4802     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4803     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4804   };
4805 
4806   const unsigned NumGPRs = array_lengthof(GPR);
4807   const unsigned NumFPRs = 13;
4808   const unsigned NumVRs = array_lengthof(VR);
4809   const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
4810 
4811   unsigned NumBytes = LinkageSize;
4812   unsigned AvailableFPRs = NumFPRs;
4813   unsigned AvailableVRs = NumVRs;
4814 
4815   for (const ISD::OutputArg& Param : Outs) {
4816     if (Param.Flags.isNest()) continue;
4817 
4818     if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags,
4819                                PtrByteSize, LinkageSize, ParamAreaSize,
4820                                NumBytes, AvailableFPRs, AvailableVRs,
4821                                Subtarget.hasQPX()))
4822       return true;
4823   }
4824   return false;
4825 }
4826 
4827 static bool hasSameArgumentList(const Function *CallerFn, const CallBase &CB) {
4828   if (CB.arg_size() != CallerFn->arg_size())
4829     return false;
4830 
4831   auto CalleeArgIter = CB.arg_begin();
4832   auto CalleeArgEnd = CB.arg_end();
4833   Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();
4834 
4835   for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
4836     const Value* CalleeArg = *CalleeArgIter;
4837     const Value* CallerArg = &(*CallerArgIter);
4838     if (CalleeArg == CallerArg)
4839       continue;
4840 
4841     // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
4842     //        tail call @callee([4 x i64] undef, [4 x i64] %b)
4843     //      }
4844     // 1st argument of callee is undef and has the same type as caller.
4845     if (CalleeArg->getType() == CallerArg->getType() &&
4846         isa<UndefValue>(CalleeArg))
4847       continue;
4848 
4849     return false;
4850   }
4851 
4852   return true;
4853 }
4854 
// Returns true if TCO is possible between the caller's and callee's
// calling conventions.
4857 static bool
4858 areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
4859                                     CallingConv::ID CalleeCC) {
4860   // Tail calls are possible with fastcc and ccc.
  auto isTailCallableCC = [](CallingConv::ID CC) {
4862       return  CC == CallingConv::C || CC == CallingConv::Fast;
4863   };
4864   if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
4865     return false;
4866 
4867   // We can safely tail call both fastcc and ccc callees from a c calling
4868   // convention caller. If the caller is fastcc, we may have less stack space
4869   // than a non-fastcc caller with the same signature so disable tail-calls in
4870   // that case.
4871   return CallerCC == CallingConv::C || CallerCC == CalleeCC;
4872 }
4873 
4874 bool PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
4875     SDValue Callee, CallingConv::ID CalleeCC, const CallBase *CB, bool isVarArg,
4876     const SmallVectorImpl<ISD::OutputArg> &Outs,
4877     const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
4878   bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
4879 
4880   if (DisableSCO && !TailCallOpt) return false;
4881 
4882   // Variadic argument functions are not supported.
4883   if (isVarArg) return false;
4884 
4885   auto &Caller = DAG.getMachineFunction().getFunction();
  // Check that the calling conventions are compatible for TCO.
4887   if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
4888     return false;
4889 
  // Callers with any byval parameters are not supported.
4891   if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
4892     return false;
4893 
  // Callees with any byval parameters are not supported either.
  // Note: This is a quick workaround, because in some cases, e.g. when the
  // caller's stack size > the callee's stack size, we are still able to apply
  // sibling call optimization. For example, gcc is able to do SCO for caller1
  // in the following example, but not for caller2.
4899   //   struct test {
4900   //     long int a;
4901   //     char ary[56];
4902   //   } gTest;
4903   //   __attribute__((noinline)) int callee(struct test v, struct test *b) {
4904   //     b->a = v.a;
4905   //     return 0;
4906   //   }
4907   //   void caller1(struct test a, struct test c, struct test *b) {
4908   //     callee(gTest, b); }
4909   //   void caller2(struct test *b) { callee(gTest, b); }
4910   if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
4911     return false;
4912 
4913   // If callee and caller use different calling conventions, we cannot pass
4914   // parameters on stack since offsets for the parameter area may be different.
4915   if (Caller.getCallingConv() != CalleeCC &&
4916       needStackSlotPassParameters(Subtarget, Outs))
4917     return false;
4918 
4919   // All variants of 64-bit ELF ABIs without PC-Relative addressing require that
4920   // the caller and callee share the same TOC for TCO/SCO. If the caller and
4921   // callee potentially have different TOC bases then we cannot tail call since
4922   // we need to restore the TOC pointer after the call.
4923   // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
4924   // We cannot guarantee this for indirect calls or calls to external functions.
4925   // When PC-Relative addressing is used, the concept of the TOC is no longer
4926   // applicable so this check is not required.
4927   // Check first for indirect calls.
4928   if (!Subtarget.isUsingPCRelativeCalls() &&
4929       !isFunctionGlobalAddress(Callee) && !isa<ExternalSymbolSDNode>(Callee))
4930     return false;
4931 
4932   // Check if we share the TOC base.
4933   if (!Subtarget.isUsingPCRelativeCalls() &&
4934       !callsShareTOCBase(&Caller, Callee, getTargetMachine()))
4935     return false;
4936 
4937   // TCO allows altering callee ABI, so we don't have to check further.
4938   if (CalleeCC == CallingConv::Fast && TailCallOpt)
4939     return true;
4940 
4941   if (DisableSCO) return false;
4942 
  // If the callee uses the same argument list as the caller, then we can
  // apply SCO in this case. If it does not, then we need to check whether the
  // callee needs stack for passing arguments.
  // PC Relative tail calls may not have a CallBase.
  // If there is no CallBase we cannot verify if we have the same argument
  // list, so assume that we don't have the same argument list.
  if (CB && !hasSameArgumentList(&Caller, *CB) &&
      needStackSlotPassParameters(Subtarget, Outs))
    return false;
  if (!CB && needStackSlotPassParameters(Subtarget, Outs))
    return false;
4954 
4955   return true;
4956 }
4957 
4958 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
4959 /// for tail call optimization. Targets which want to do tail call
4960 /// optimization should implement this function.
4961 bool
4962 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
4963                                                      CallingConv::ID CalleeCC,
4964                                                      bool isVarArg,
4965                                       const SmallVectorImpl<ISD::InputArg> &Ins,
4966                                                      SelectionDAG& DAG) const {
4967   if (!getTargetMachine().Options.GuaranteedTailCallOpt)
4968     return false;
4969 
4970   // Variable argument functions are not supported.
4971   if (isVarArg)
4972     return false;
4973 
4974   MachineFunction &MF = DAG.getMachineFunction();
4975   CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
4976   if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
    // Functions containing by val parameters are not supported.
    for (const ISD::InputArg &In : Ins)
      if (In.Flags.isByVal())
        return false;
4982 
4983     // Non-PIC/GOT tail calls are supported.
4984     if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
4985       return true;
4986 
4987     // At the moment we can only do local tail calls (in same module, hidden
4988     // or protected) if we are generating PIC.
4989     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
      return G->getGlobal()->hasHiddenVisibility() ||
             G->getGlobal()->hasProtectedVisibility();
4992   }
4993 
4994   return false;
4995 }
4996 
/// isBLACompatibleAddress - Return the immediate to use if the specified
/// 32-bit value is representable in the immediate field of a BxA instruction.
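/// For example (the values below are illustrative only):
///   0x01FFFFFC is representable: its low 2 bits are zero and
///   SignExtend32<26>(0x01FFFFFC) == 0x01FFFFFC, so the encoded immediate is
///   the address shifted right by 2.
///   0x00000002 is not: its low 2 bits are nonzero.
///   0x02000000 is not: bit 25 is the sign bit of the 26-bit field, so the
///   value does not survive sign extension.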
4999 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
5000   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
5001   if (!C) return nullptr;
5002 
5003   int Addr = C->getZExtValue();
5004   if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
5005       SignExtend32<26>(Addr) != Addr)
5006     return nullptr;  // Top 6 bits have to be sext of immediate.
5007 
5008   return DAG
5009       .getConstant(
5010           (int)C->getZExtValue() >> 2, SDLoc(Op),
5011           DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
5012       .getNode();
5013 }
5014 
5015 namespace {
5016 
5017 struct TailCallArgumentInfo {
5018   SDValue Arg;
5019   SDValue FrameIdxOp;
5020   int FrameIdx = 0;
5021 
5022   TailCallArgumentInfo() = default;
5023 };
5024 
5025 } // end anonymous namespace
5026 
5027 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
5028 static void StoreTailCallArgumentsToStackSlot(
5029     SelectionDAG &DAG, SDValue Chain,
5030     const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
5031     SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
5032   for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
5033     SDValue Arg = TailCallArgs[i].Arg;
5034     SDValue FIN = TailCallArgs[i].FrameIdxOp;
5035     int FI = TailCallArgs[i].FrameIdx;
    // Store relative to the frame pointer.
5037     MemOpChains.push_back(DAG.getStore(
5038         Chain, dl, Arg, FIN,
5039         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
5040   }
5041 }
5042 
/// EmitTailCallStoreFPAndRetAddr - Move the return address to the appropriate
/// stack slot for the tail call optimized function call. (Despite the name,
/// only the return address is stored here; the OldFP operand is unused.)
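/// A hedged sketch of why this is needed: with a non-zero SPDiff the frame is
/// resized for the tail call, so the return address that lived at
/// getReturnSaveOffset() relative to the old SP must be re-stored at
/// SPDiff + getReturnSaveOffset(), which is where the resized frame (and thus
/// the tail-called function) expects to find it.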
5045 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
5046                                              SDValue OldRetAddr, SDValue OldFP,
5047                                              int SPDiff, const SDLoc &dl) {
5048   if (SPDiff) {
5049     // Calculate the new stack slot for the return address.
5050     MachineFunction &MF = DAG.getMachineFunction();
5051     const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
5052     const PPCFrameLowering *FL = Subtarget.getFrameLowering();
5053     bool isPPC64 = Subtarget.isPPC64();
5054     int SlotSize = isPPC64 ? 8 : 4;
5055     int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
5056     int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize,
5057                                                          NewRetAddrLoc, true);
5058     EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
5059     SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
5060     Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
5061                          MachinePointerInfo::getFixedStack(MF, NewRetAddr));
5062   }
5063   return Chain;
5064 }
5065 
/// CalculateTailCallArgDest - Remember the argument for later processing and
/// calculate the position of the argument.
5068 static void
5069 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
5070                          SDValue Arg, int SPDiff, unsigned ArgOffset,
5071                      SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
5072   int Offset = ArgOffset + SPDiff;
5073   uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
5074   int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
5075   EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
5076   SDValue FIN = DAG.getFrameIndex(FI, VT);
5077   TailCallArgumentInfo Info;
5078   Info.Arg = Arg;
5079   Info.FrameIdxOp = FIN;
5080   Info.FrameIdx = FI;
5081   TailCallArguments.push_back(Info);
5082 }
5083 
/// EmitTailCallLoadFPAndRetAddr - Emit a load of the return address stack slot
/// (and, where needed, the frame pointer). Returns the chain as result, with
/// the loaded values in LROpOut/FPOpOut. Used when tail calling.
5087 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
5088     SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
5089     SDValue &FPOpOut, const SDLoc &dl) const {
5090   if (SPDiff) {
    // Load the LR stack slot for later adjusting.
5092     EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
5093     LROpOut = getReturnAddrFrameIndex(DAG);
5094     LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
5095     Chain = SDValue(LROpOut.getNode(), 1);
5096   }
5097   return Chain;
5098 }
5099 
5100 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
5101 /// by "Src" to address "Dst" of size "Size".  Alignment information is
5102 /// specified by the specific parameter attribute. The copy will be passed as
5103 /// a byval function parameter.
5104 /// Sometimes what we are copying is the end of a larger object, the part that
5105 /// does not fit in registers.
5106 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
5107                                          SDValue Chain, ISD::ArgFlagsTy Flags,
5108                                          SelectionDAG &DAG, const SDLoc &dl) {
5109   SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
5110   return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode,
5111                        Flags.getNonZeroByValAlign(), false, false, false,
5112                        MachinePointerInfo(), MachinePointerInfo());
5113 }
5114 
5115 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
5116 /// tail calls.
5117 static void LowerMemOpCallTo(
5118     SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
5119     SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
5120     bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
5121     SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
5122   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
5123   if (!isTailCall) {
5124     if (isVector) {
5125       SDValue StackPtr;
5126       if (isPPC64)
5127         StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
5128       else
5129         StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5130       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
5131                            DAG.getConstant(ArgOffset, dl, PtrVT));
5132     }
    MemOpChains.push_back(
        DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
  } else {
    // Calculate and remember argument location.
    CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
                             TailCallArguments);
  }
5138 }
5139 
5140 static void
5141 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
5142                 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp,
5143                 SDValue FPOp,
5144                 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
5145   // Emit a sequence of copyto/copyfrom virtual registers for arguments that
5146   // might overwrite each other in case of tail call optimization.
5147   SmallVector<SDValue, 8> MemOpChains2;
5148   // Do not flag preceding copytoreg stuff together with the following stuff.
5149   InFlag = SDValue();
5150   StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
5151                                     MemOpChains2, dl);
5152   if (!MemOpChains2.empty())
5153     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
5154 
5155   // Store the return address to the appropriate stack slot.
5156   Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl);
5157 
5158   // Emit callseq_end just before tailcall node.
5159   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
5160                              DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
5161   InFlag = Chain.getValue(1);
5162 }
5163 
// Is this global address that of a function that can be called by name (as
// opposed to something that must hold a descriptor for an indirect call)?
5166 static bool isFunctionGlobalAddress(SDValue Callee) {
5167   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
5168     if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
5169         Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
5170       return false;
5171 
5172     return G->getGlobal()->getValueType()->isFunctionTy();
5173   }
5174 
5175   return false;
5176 }
5177 
5178 SDValue PPCTargetLowering::LowerCallResult(
5179     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
5180     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5181     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
5182   SmallVector<CCValAssign, 16> RVLocs;
5183   CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
5184                     *DAG.getContext());
5185 
5186   CCRetInfo.AnalyzeCallResult(
5187       Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
5188                ? RetCC_PPC_Cold
5189                : RetCC_PPC);
5190 
5191   // Copy all of the result registers out of their specified physreg.
5192   for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
5193     CCValAssign &VA = RVLocs[i];
5194     assert(VA.isRegLoc() && "Can only return in registers!");
5195 
5196     SDValue Val;
5197 
5198     if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
5199       SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
5200                                       InFlag);
5201       Chain = Lo.getValue(1);
5202       InFlag = Lo.getValue(2);
5203       VA = RVLocs[++i]; // skip ahead to next loc
5204       SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
5205                                       InFlag);
5206       Chain = Hi.getValue(1);
5207       InFlag = Hi.getValue(2);
5208       if (!Subtarget.isLittleEndian())
5209         std::swap (Lo, Hi);
5210       Val = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, Lo, Hi);
5211     } else {
5212       Val = DAG.getCopyFromReg(Chain, dl,
5213                                VA.getLocReg(), VA.getLocVT(), InFlag);
5214       Chain = Val.getValue(1);
5215       InFlag = Val.getValue(2);
5216     }
5217 
5218     switch (VA.getLocInfo()) {
5219     default: llvm_unreachable("Unknown loc info!");
5220     case CCValAssign::Full: break;
5221     case CCValAssign::AExt:
5222       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5223       break;
5224     case CCValAssign::ZExt:
5225       Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
5226                         DAG.getValueType(VA.getValVT()));
5227       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5228       break;
5229     case CCValAssign::SExt:
5230       Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
5231                         DAG.getValueType(VA.getValVT()));
5232       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5233       break;
5234     }
5235 
5236     InVals.push_back(Val);
5237   }
5238 
5239   return Chain;
5240 }
5241 
5242 static bool isIndirectCall(const SDValue &Callee, SelectionDAG &DAG,
5243                            const PPCSubtarget &Subtarget, bool isPatchPoint) {
5244   // PatchPoint calls are not indirect.
5245   if (isPatchPoint)
5246     return false;
5247 
  if (isFunctionGlobalAddress(Callee) || isa<ExternalSymbolSDNode>(Callee))
5249     return false;
5250 
  // Darwin and 32-bit ELF can use a BLA. The descriptor-based ABIs cannot
  // because the immediate function pointer points to a descriptor instead of
5253   // a function entry point. The ELFv2 ABI cannot use a BLA because the function
5254   // pointer immediate points to the global entry point, while the BLA would
5255   // need to jump to the local entry point (see rL211174).
5256   if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI() &&
5257       isBLACompatibleAddress(Callee, DAG))
5258     return false;
5259 
5260   return true;
5261 }
5262 
5263 // AIX and 64-bit ELF ABIs w/o PCRel require a TOC save/restore around calls.
5264 static inline bool isTOCSaveRestoreRequired(const PPCSubtarget &Subtarget) {
5265   return Subtarget.isAIXABI() ||
5266          (Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls());
5267 }
5268 
5269 static unsigned getCallOpcode(PPCTargetLowering::CallFlags CFlags,
5270                               const Function &Caller,
5271                               const SDValue &Callee,
5272                               const PPCSubtarget &Subtarget,
5273                               const TargetMachine &TM) {
5274   if (CFlags.IsTailCall)
5275     return PPCISD::TC_RETURN;
5276 
5277   // This is a call through a function pointer.
5278   if (CFlags.IsIndirect) {
    // AIX and the 64-bit ELF ABIs need to maintain the TOC pointer across
    // indirect calls. The save of the caller's TOC pointer to the stack will
    // be inserted into the DAG as part of call lowering. The restore of the
    // TOC pointer is modeled by using a pseudo instruction for the call opcode
    // that represents the 2 instruction sequence of an indirect branch and
    // link, immediately followed by a load of the TOC pointer from the stack
    // save slot into gpr2. For the 64-bit ELFv2 ABI with PCRel, do not restore
    // the TOC as it is not saved or used.
5287     return isTOCSaveRestoreRequired(Subtarget) ? PPCISD::BCTRL_LOAD_TOC
5288                                                : PPCISD::BCTRL;
5289   }
5290 
5291   if (Subtarget.isUsingPCRelativeCalls()) {
5292     assert(Subtarget.is64BitELFABI() && "PC Relative is only on ELF ABI.");
5293     return PPCISD::CALL_NOTOC;
5294   }
5295 
  // The ABIs that maintain a TOC pointer across calls need to have a nop
  // immediately following the call instruction if the caller and callee may
  // have different TOC bases. At link time, if the linker determines the calls
  // may not share a TOC base, the call is redirected to a trampoline inserted
  // by the linker. The trampoline will (among other things) save the caller's
  // TOC pointer at an ABI designated offset in the linkage area and the linker
  // will rewrite the nop to be a load of the TOC pointer from the linkage area
  // into gpr2.
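  //
  // Schematically (a hedged sketch, not the exact emitted assembly):
  //   bl <callee>
  //   nop          # the linker may rewrite this to: ld r2, <TOC save>(r1)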
5304   if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI())
5305     return callsShareTOCBase(&Caller, Callee, TM) ? PPCISD::CALL
5306                                                   : PPCISD::CALL_NOP;
5307 
5308   return PPCISD::CALL;
5309 }
5310 
5311 static SDValue transformCallee(const SDValue &Callee, SelectionDAG &DAG,
5312                                const SDLoc &dl, const PPCSubtarget &Subtarget) {
5313   if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI())
5314     if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
5315       return SDValue(Dest, 0);
5316 
5317   // Returns true if the callee is local, and false otherwise.
5318   auto isLocalCallee = [&]() {
5319     const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
5320     const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
5321     const GlobalValue *GV = G ? G->getGlobal() : nullptr;
5322 
    return DAG.getTarget().shouldAssumeDSOLocal(*Mod, GV) &&
           !isa_and_nonnull<GlobalIFunc>(GV);
5325   };
5326 
5327   // The PLT is only used in 32-bit ELF PIC mode.  Attempting to use the PLT in
5328   // a static relocation model causes some versions of GNU LD (2.17.50, at
5329   // least) to force BSS-PLT, instead of secure-PLT, even if all objects are
5330   // built with secure-PLT.
5331   bool UsePlt =
5332       Subtarget.is32BitELFABI() && !isLocalCallee() &&
5333       Subtarget.getTargetMachine().getRelocationModel() == Reloc::PIC_;
5334 
5335   // On AIX, direct function calls reference the symbol for the function's
5336   // entry point, which is named by prepending a "." before the function's
5337   // C-linkage name.
5338   const auto getFunctionEntryPointSymbol = [&](StringRef SymName) {
5339     auto &Context = DAG.getMachineFunction().getMMI().getContext();
5340     return cast<MCSymbolXCOFF>(
5341         Context.getOrCreateSymbol(Twine(".") + Twine(SymName)));
5342   };
5343 
5344   const auto getAIXFuncEntryPointSymbolSDNode =
5345       [&](StringRef FuncName, bool IsDeclaration,
5346           const XCOFF::StorageClass &SC) {
5347         MCSymbolXCOFF *S = getFunctionEntryPointSymbol(FuncName);
5348 
5349         auto &Context = DAG.getMachineFunction().getMMI().getContext();
5350 
5351         if (IsDeclaration && !S->hasRepresentedCsectSet()) {
5352           // On AIX, an undefined symbol needs to be associated with a
5353           // MCSectionXCOFF to get the correct storage mapping class.
5354           // In this case, XCOFF::XMC_PR.
5355           MCSectionXCOFF *Sec = Context.getXCOFFSection(
5356               S->getSymbolTableName(), XCOFF::XMC_PR, XCOFF::XTY_ER, SC,
5357               SectionKind::getMetadata());
5358           S->setRepresentedCsect(Sec);
5359         }
5360 
5361         MVT PtrVT =
5362             DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
5363         return DAG.getMCSymbol(S, PtrVT);
5364       };
5365 
5366   if (isFunctionGlobalAddress(Callee)) {
5367     const GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee);
5368     const GlobalValue *GV = G->getGlobal();
5369 
5370     if (!Subtarget.isAIXABI())
5371       return DAG.getTargetGlobalAddress(GV, dl, Callee.getValueType(), 0,
5372                                         UsePlt ? PPCII::MO_PLT : 0);
5373 
5374     assert(!isa<GlobalIFunc>(GV) && "IFunc is not supported on AIX.");
5375     const GlobalObject *GO = cast<GlobalObject>(GV);
5376     const XCOFF::StorageClass SC =
5377         TargetLoweringObjectFileXCOFF::getStorageClassForGlobal(GO);
5378     return getAIXFuncEntryPointSymbolSDNode(GO->getName(), GO->isDeclaration(),
5379                                             SC);
5380   }
5381 
5382   if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
5383     const char *SymName = S->getSymbol();
5384     if (Subtarget.isAIXABI()) {
5385       // If there exists a user-declared function whose name is the same as the
5386       // ExternalSymbol's, then we pick up the user-declared version.
5387       const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
5388       if (const Function *F =
5389               dyn_cast_or_null<Function>(Mod->getNamedValue(SymName))) {
5390         const XCOFF::StorageClass SC =
5391             TargetLoweringObjectFileXCOFF::getStorageClassForGlobal(F);
5392         return getAIXFuncEntryPointSymbolSDNode(F->getName(),
5393                                                 F->isDeclaration(), SC);
5394       }
5395       SymName = getFunctionEntryPointSymbol(SymName)->getName().data();
5396     }
5397     return DAG.getTargetExternalSymbol(SymName, Callee.getValueType(),
5398                                        UsePlt ? PPCII::MO_PLT : 0);
5399   }
5400 
5401   // No transformation needed.
5402   assert(Callee.getNode() && "What no callee?");
5403   return Callee;
5404 }
5405 
5406 static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart) {
  assert(CallSeqStart.getOpcode() == ISD::CALLSEQ_START &&
         "Expected a CALLSEQ_START SDNode.");
5409 
  // The last result value is the chain, except when the node has glue. If the
  // node has glue, then the last value is the glue, and the chain is the
  // second to last value.
5413   SDValue LastValue = CallSeqStart.getValue(CallSeqStart->getNumValues() - 1);
5414   if (LastValue.getValueType() != MVT::Glue)
5415     return LastValue;
5416 
5417   return CallSeqStart.getValue(CallSeqStart->getNumValues() - 2);
5418 }
5419 
// Creates the node that moves a function's address into the count register
5421 // to prepare for an indirect call instruction.
5422 static void prepareIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5423                                 SDValue &Glue, SDValue &Chain,
5424                                 const SDLoc &dl) {
5425   SDValue MTCTROps[] = {Chain, Callee, Glue};
5426   EVT ReturnTypes[] = {MVT::Other, MVT::Glue};
5427   Chain = DAG.getNode(PPCISD::MTCTR, dl, makeArrayRef(ReturnTypes, 2),
5428                       makeArrayRef(MTCTROps, Glue.getNode() ? 3 : 2));
5429   // The glue is the second value produced.
5430   Glue = Chain.getValue(1);
5431 }
5432 
5433 static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5434                                           SDValue &Glue, SDValue &Chain,
5435                                           SDValue CallSeqStart,
5436                                           const CallBase *CB, const SDLoc &dl,
5437                                           bool hasNest,
5438                                           const PPCSubtarget &Subtarget) {
5439   // Function pointers in the 64-bit SVR4 ABI do not point to the function
5440   // entry point, but to the function descriptor (the function entry point
5441   // address is part of the function descriptor though).
5442   // The function descriptor is a three doubleword structure with the
5443   // following fields: function entry point, TOC base address and
5444   // environment pointer.
5445   // Thus for a call through a function pointer, the following actions need
5446   // to be performed:
5447   //   1. Save the TOC of the caller in the TOC save area of its stack
5448   //      frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
5449   //   2. Load the address of the function entry point from the function
5450   //      descriptor.
5451   //   3. Load the TOC of the callee from the function descriptor into r2.
5452   //   4. Load the environment pointer from the function descriptor into
5453   //      r11.
5454   //   5. Branch to the function entry point address.
5455   //   6. On return of the callee, the TOC of the caller needs to be
5456   //      restored (this is done in FinishCall()).
5457   //
5458   // The loads are scheduled at the beginning of the call sequence, and the
5459   // register copies are flagged together to ensure that no other
5460   // operations can be scheduled in between. E.g. without flagging the
5461   // copies together, a TOC access in the caller could be scheduled between
5462   // the assignment of the callee TOC and the branch to the callee, which leads
5463   // to incorrect code.
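  //
  // A minimal sketch of the 64-bit ELFv1 descriptor layout assumed here (the
  // struct and field names are illustrative, not LLVM API):
  //   struct FunctionDescriptor {
  //     void *EntryPoint; // offset 0:  address of the first instruction
  //     void *TOCBase;    // offset 8:  copied into r2 before the branch
  //     void *EnvPtr;     // offset 16: copied into r11 (unless 'nest' is used)
  //   };
  // The offsets used below come from descriptorTOCAnchorOffset() and
  // descriptorEnvironmentPointerOffset() rather than being hard-coded.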
5464 
5465   // Start by loading the function address from the descriptor.
5466   SDValue LDChain = getOutputChainFromCallSeq(CallSeqStart);
5467   auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
5468                       ? (MachineMemOperand::MODereferenceable |
5469                          MachineMemOperand::MOInvariant)
5470                       : MachineMemOperand::MONone;
5471 
5472   MachinePointerInfo MPI(CB ? CB->getCalledOperand() : nullptr);
5473 
5474   // Registers used in building the DAG.
5475   const MCRegister EnvPtrReg = Subtarget.getEnvironmentPointerRegister();
5476   const MCRegister TOCReg = Subtarget.getTOCPointerRegister();
5477 
5478   // Offsets of descriptor members.
5479   const unsigned TOCAnchorOffset = Subtarget.descriptorTOCAnchorOffset();
5480   const unsigned EnvPtrOffset = Subtarget.descriptorEnvironmentPointerOffset();
5481 
5482   const MVT RegVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
5483   const unsigned Alignment = Subtarget.isPPC64() ? 8 : 4;
5484 
  // One load for the function's entry point address.
5486   SDValue LoadFuncPtr = DAG.getLoad(RegVT, dl, LDChain, Callee, MPI,
5487                                     Alignment, MMOFlags);
5488 
5489   // One for loading the TOC anchor for the module that contains the called
5490   // function.
5491   SDValue TOCOff = DAG.getIntPtrConstant(TOCAnchorOffset, dl);
5492   SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, Callee, TOCOff);
5493   SDValue TOCPtr =
5494       DAG.getLoad(RegVT, dl, LDChain, AddTOC,
5495                   MPI.getWithOffset(TOCAnchorOffset), Alignment, MMOFlags);
5496 
5497   // One for loading the environment pointer.
5498   SDValue PtrOff = DAG.getIntPtrConstant(EnvPtrOffset, dl);
5499   SDValue AddPtr = DAG.getNode(ISD::ADD, dl, RegVT, Callee, PtrOff);
5500   SDValue LoadEnvPtr =
5501       DAG.getLoad(RegVT, dl, LDChain, AddPtr,
5502                   MPI.getWithOffset(EnvPtrOffset), Alignment, MMOFlags);
5503 
5504 
5505   // Then copy the newly loaded TOC anchor to the TOC pointer.
5506   SDValue TOCVal = DAG.getCopyToReg(Chain, dl, TOCReg, TOCPtr, Glue);
5507   Chain = TOCVal.getValue(0);
5508   Glue = TOCVal.getValue(1);
5509 
5510   // If the function call has an explicit 'nest' parameter, it takes the
5511   // place of the environment pointer.
5512   assert((!hasNest || !Subtarget.isAIXABI()) &&
5513          "Nest parameter is not supported on AIX.");
5514   if (!hasNest) {
5515     SDValue EnvVal = DAG.getCopyToReg(Chain, dl, EnvPtrReg, LoadEnvPtr, Glue);
5516     Chain = EnvVal.getValue(0);
5517     Glue = EnvVal.getValue(1);
5518   }
5519 
5520   // The rest of the indirect call sequence is the same as the non-descriptor
5521   // DAG.
5522   prepareIndirectCall(DAG, LoadFuncPtr, Glue, Chain, dl);
5523 }
5524 
5525 static void
5526 buildCallOperands(SmallVectorImpl<SDValue> &Ops,
5527                   PPCTargetLowering::CallFlags CFlags, const SDLoc &dl,
5528                   SelectionDAG &DAG,
5529                   SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
5530                   SDValue Glue, SDValue Chain, SDValue &Callee, int SPDiff,
5531                   const PPCSubtarget &Subtarget) {
5532   const bool IsPPC64 = Subtarget.isPPC64();
5533   // MVT for a general purpose register.
5534   const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
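
  // A hedged overview of the operand order assembled below: chain; callee (or,
  // for indirect calls, the TOC-restore address when required); optional
  // environment-pointer and CTR registers; SPDiff for tail calls; the argument
  // registers; optional TOC and CR1EQ registers; the register mask; and
  // finally the glue, if present.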
5535 
5536   // First operand is always the chain.
5537   Ops.push_back(Chain);
5538 
5539   // If it's a direct call pass the callee as the second operand.
5540   if (!CFlags.IsIndirect)
5541     Ops.push_back(Callee);
5542   else {
5543     assert(!CFlags.IsPatchPoint && "Patch point calls are not indirect.");
5544 
5545     // For the TOC based ABIs, we have saved the TOC pointer to the linkage area
5546     // on the stack (this would have been done in `LowerCall_64SVR4` or
5547     // `LowerCall_AIX`). The call instruction is a pseudo instruction that
5548     // represents both the indirect branch and a load that restores the TOC
5549     // pointer from the linkage area. The operand for the TOC restore is an add
5550     // of the TOC save offset to the stack pointer. This must be the second
5551     // operand: after the chain input but before any other variadic arguments.
5552     // For 64-bit ELFv2 ABI with PCRel, do not restore the TOC as it is not
5553     // saved or used.
5554     if (isTOCSaveRestoreRequired(Subtarget)) {
5555       const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
5556 
5557       SDValue StackPtr = DAG.getRegister(StackPtrReg, RegVT);
5558       unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
5559       SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
5560       SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, StackPtr, TOCOff);
5561       Ops.push_back(AddTOC);
5562     }
5563 
5564     // Add the register used for the environment pointer.
5565     if (Subtarget.usesFunctionDescriptors() && !CFlags.HasNest)
5566       Ops.push_back(DAG.getRegister(Subtarget.getEnvironmentPointerRegister(),
5567                                     RegVT));
5568 
5569 
5570     // Add CTR register as callee so a bctr can be emitted later.
5571     if (CFlags.IsTailCall)
5572       Ops.push_back(DAG.getRegister(IsPPC64 ? PPC::CTR8 : PPC::CTR, RegVT));
5573   }
5574 
5575   // If this is a tail call add stack pointer delta.
5576   if (CFlags.IsTailCall)
5577     Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32));
5578 
5579   // Add argument registers to the end of the list so that they are known live
5580   // into the call.
5581   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
5582     Ops.push_back(DAG.getRegister(RegsToPass[i].first,
5583                                   RegsToPass[i].second.getValueType()));
5584 
5585   // We cannot add R2/X2 as an operand here for PATCHPOINT, because there is
5586   // no way to mark dependencies as implicit here.
5587   // We will add the R2/X2 dependency in EmitInstrWithCustomInserter.
5588   if ((Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) &&
5589        !CFlags.IsPatchPoint && !Subtarget.isUsingPCRelativeCalls())
5590     Ops.push_back(DAG.getRegister(Subtarget.getTOCPointerRegister(), RegVT));
5591 
5592   // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls
5593   if (CFlags.IsVarArg && Subtarget.is32BitELFABI())
5594     Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));
5595 
5596   // Add a register mask operand representing the call-preserved registers.
5597   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
5598   const uint32_t *Mask =
5599       TRI->getCallPreservedMask(DAG.getMachineFunction(), CFlags.CallConv);
5600   assert(Mask && "Missing call preserved mask for calling convention");
5601   Ops.push_back(DAG.getRegisterMask(Mask));
5602 
5603   // If the glue is valid, it is the last operand.
5604   if (Glue.getNode())
5605     Ops.push_back(Glue);
5606 }
5607 
5608 SDValue PPCTargetLowering::FinishCall(
5609     CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG,
5610     SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue Glue,
5611     SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
5612     unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
5613     SmallVectorImpl<SDValue> &InVals, const CallBase *CB) const {
5614 
5615   if ((Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls()) ||
5616       Subtarget.isAIXABI())
5617     setUsesTOCBasePtr(DAG);
5618 
5619   unsigned CallOpc =
5620       getCallOpcode(CFlags, DAG.getMachineFunction().getFunction(), Callee,
5621                     Subtarget, DAG.getTarget());
5622 
5623   if (!CFlags.IsIndirect)
5624     Callee = transformCallee(Callee, DAG, dl, Subtarget);
5625   else if (Subtarget.usesFunctionDescriptors())
5626     prepareDescriptorIndirectCall(DAG, Callee, Glue, Chain, CallSeqStart, CB,
5627                                   dl, CFlags.HasNest, Subtarget);
5628   else
5629     prepareIndirectCall(DAG, Callee, Glue, Chain, dl);
5630 
5631   // Build the operand list for the call instruction.
5632   SmallVector<SDValue, 8> Ops;
5633   buildCallOperands(Ops, CFlags, dl, DAG, RegsToPass, Glue, Chain, Callee,
5634                     SPDiff, Subtarget);
5635 
5636   // Emit tail call.
5637   if (CFlags.IsTailCall) {
    // Indirect tail calls, when using PC Relative calls, do not have the same
    // constraints.
5640     assert(((Callee.getOpcode() == ISD::Register &&
5641              cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
5642             Callee.getOpcode() == ISD::TargetExternalSymbol ||
5643             Callee.getOpcode() == ISD::TargetGlobalAddress ||
5644             isa<ConstantSDNode>(Callee) ||
5645             (CFlags.IsIndirect && Subtarget.isUsingPCRelativeCalls())) &&
5646            "Expecting a global address, external symbol, absolute value, "
5647            "register or an indirect tail call when PC Relative calls are "
5648            "used.");
5649     // PC Relative calls also use TC_RETURN as the way to mark tail calls.
5650     assert(CallOpc == PPCISD::TC_RETURN &&
5651            "Unexpected call opcode for a tail call.");
5652     DAG.getMachineFunction().getFrameInfo().setHasTailCall();
5653     return DAG.getNode(CallOpc, dl, MVT::Other, Ops);
5654   }
5655 
5656   std::array<EVT, 2> ReturnTypes = {{MVT::Other, MVT::Glue}};
5657   Chain = DAG.getNode(CallOpc, dl, ReturnTypes, Ops);
5658   DAG.addNoMergeSiteInfo(Chain.getNode(), CFlags.NoMerge);
5659   Glue = Chain.getValue(1);
5660 
5661   // When performing tail call optimization the callee pops its arguments off
5662   // the stack. Account for this here so these bytes can be pushed back on in
5663   // PPCFrameLowering::eliminateCallFramePseudoInstr.
5664   int BytesCalleePops = (CFlags.CallConv == CallingConv::Fast &&
5665                          getTargetMachine().Options.GuaranteedTailCallOpt)
5666                             ? NumBytes
5667                             : 0;
5668 
5669   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
5670                              DAG.getIntPtrConstant(BytesCalleePops, dl, true),
5671                              Glue, dl);
5672   Glue = Chain.getValue(1);
5673 
5674   return LowerCallResult(Chain, Glue, CFlags.CallConv, CFlags.IsVarArg, Ins, dl,
5675                          DAG, InVals);
5676 }
5677 
5678 SDValue
5679 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
5680                              SmallVectorImpl<SDValue> &InVals) const {
5681   SelectionDAG &DAG                     = CLI.DAG;
5682   SDLoc &dl                             = CLI.DL;
5683   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
5684   SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
5685   SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
5686   SDValue Chain                         = CLI.Chain;
5687   SDValue Callee                        = CLI.Callee;
5688   bool &isTailCall                      = CLI.IsTailCall;
5689   CallingConv::ID CallConv              = CLI.CallConv;
5690   bool isVarArg                         = CLI.IsVarArg;
5691   bool isPatchPoint                     = CLI.IsPatchPoint;
5692   const CallBase *CB                    = CLI.CB;
5693 
5694   if (isTailCall) {
5695     if (Subtarget.useLongCalls() && !(CB && CB->isMustTailCall()))
5696       isTailCall = false;
5697     else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
5698       isTailCall = IsEligibleForTailCallOptimization_64SVR4(
5699           Callee, CallConv, CB, isVarArg, Outs, Ins, DAG);
5700     else
5701       isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
5702                                                      Ins, DAG);
5703     if (isTailCall) {
5704       ++NumTailCalls;
5705       if (!getTargetMachine().Options.GuaranteedTailCallOpt)
5706         ++NumSiblingCalls;
5707 
5708       // PC Relative calls no longer guarantee that the callee is a Global
5709       // Address Node. The callee could be an indirect tail call in which
5710       // case the SDValue for the callee could be a load (to load the address
5711       // of a function pointer) or it may be a register copy (to move the
5712       // address of the callee from a function parameter into a virtual
      // register). It may also be an ExternalSymbolSDNode (e.g. memcpy).
5714       assert((Subtarget.isUsingPCRelativeCalls() ||
5715               isa<GlobalAddressSDNode>(Callee)) &&
5716              "Callee should be an llvm::Function object.");
5717 
5718       LLVM_DEBUG(dbgs() << "TCO caller: " << DAG.getMachineFunction().getName()
5719                         << "\nTCO callee: ");
5720       LLVM_DEBUG(Callee.dump());
5721     }
5722   }
5723 
5724   if (!isTailCall && CB && CB->isMustTailCall())
5725     report_fatal_error("failed to perform tail call elimination on a call "
5726                        "site marked musttail");
5727 
  // When long calls (i.e. indirect calls) are always used, calls are always
  // made via a function pointer. If we have a function name, first translate
  // it into a pointer.
5731   if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
5732       !isTailCall)
5733     Callee = LowerGlobalAddress(Callee, DAG);
5734 
5735   CallFlags CFlags(
5736       CallConv, isTailCall, isVarArg, isPatchPoint,
5737       isIndirectCall(Callee, DAG, Subtarget, isPatchPoint),
5738       // hasNest
5739       Subtarget.is64BitELFABI() &&
5740           any_of(Outs, [](ISD::OutputArg Arg) { return Arg.Flags.isNest(); }),
5741       CLI.NoMerge);
5742 
5743   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
5744     return LowerCall_64SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5745                             InVals, CB);
5746 
5747   if (Subtarget.isSVR4ABI())
5748     return LowerCall_32SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5749                             InVals, CB);
5750 
5751   if (Subtarget.isAIXABI())
5752     return LowerCall_AIX(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5753                          InVals, CB);
5754 
5755   return LowerCall_Darwin(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5756                           InVals, CB);
5757 }
5758 
5759 SDValue PPCTargetLowering::LowerCall_32SVR4(
5760     SDValue Chain, SDValue Callee, CallFlags CFlags,
5761     const SmallVectorImpl<ISD::OutputArg> &Outs,
5762     const SmallVectorImpl<SDValue> &OutVals,
5763     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5764     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5765     const CallBase *CB) const {
5766   // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
5767   // of the 32-bit SVR4 ABI stack frame layout.
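  //
  // As a hedged summary of that layout (the referenced comment is
  // authoritative):
  //   0(SP):  back chain to the caller's frame
  //   4(SP):  LR save word (used by the callee if it saves LR)
  //   8(SP)+: parameter list area, then local variable space including the
  //           byval aggregate copies made below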
5768 
5769   const CallingConv::ID CallConv = CFlags.CallConv;
5770   const bool IsVarArg = CFlags.IsVarArg;
5771   const bool IsTailCall = CFlags.IsTailCall;
5772 
5773   assert((CallConv == CallingConv::C ||
5774           CallConv == CallingConv::Cold ||
5775           CallConv == CallingConv::Fast) && "Unknown calling convention!");
5776 
5777   const Align PtrAlign(4);
5778 
5779   MachineFunction &MF = DAG.getMachineFunction();
5780 
  // Mark this function as potentially containing a function that contains a
  // tail call. As a consequence the frame pointer will be used for dynamic
  // allocation and for restoring the caller's stack pointer in this function's
  // epilogue. This is done because the tail-called function might overwrite
  // the value in this function's (MF) stack pointer stack slot 0(SP).
5786   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5787       CallConv == CallingConv::Fast)
5788     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5789 
5790   // Count how many bytes are to be pushed on the stack, including the linkage
5791   // area, parameter list area and the part of the local variable space which
5792   // contains copies of aggregates which are passed by value.
5793 
5794   // Assign locations to all of the outgoing arguments.
5795   SmallVector<CCValAssign, 16> ArgLocs;
5796   PPCCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
5797 
5798   // Reserve space for the linkage area on the stack.
5799   CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
5800                        PtrAlign);
5801   if (useSoftFloat())
5802     CCInfo.PreAnalyzeCallOperands(Outs);
5803 
5804   if (IsVarArg) {
5805     // Handle fixed and variable vector arguments differently.
5806     // Fixed vector arguments go into registers as long as registers are
5807     // available. Variable vector arguments always go into memory.
5808     unsigned NumArgs = Outs.size();
5809 
5810     for (unsigned i = 0; i != NumArgs; ++i) {
5811       MVT ArgVT = Outs[i].VT;
5812       ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
5813       bool Result;
5814 
5815       if (Outs[i].IsFixed) {
5816         Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
5817                                CCInfo);
5818       } else {
5819         Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
5820                                       ArgFlags, CCInfo);
5821       }
5822 
5823       if (Result) {
5824 #ifndef NDEBUG
        errs() << "Call operand #" << i << " has unhandled type "
               << EVT(ArgVT).getEVTString() << "\n";
5827 #endif
5828         llvm_unreachable(nullptr);
5829       }
5830     }
5831   } else {
5832     // All arguments are treated the same.
5833     CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
5834   }
5835   CCInfo.clearWasPPCF128();
5836 
5837   // Assign locations to all of the outgoing aggregate by value arguments.
5838   SmallVector<CCValAssign, 16> ByValArgLocs;
5839   CCState CCByValInfo(CallConv, IsVarArg, MF, ByValArgLocs, *DAG.getContext());
5840 
5841   // Reserve stack space for the allocations in CCInfo.
5842   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
5843 
5844   CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
5845 
  // Size of the linkage area, parameter list area and the part of the local
  // variable space where copies of aggregates which are passed by value are
  // stored.
5849   unsigned NumBytes = CCByValInfo.getNextStackOffset();
5850 
5851   // Calculate by how many bytes the stack has to be adjusted in case of tail
5852   // call optimization.
5853   int SPDiff = CalculateTailCallSPDiff(DAG, IsTailCall, NumBytes);
5854 
5855   // Adjust the stack pointer for the new arguments...
5856   // These operations are automatically eliminated by the prolog/epilog pass
5857   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5858   SDValue CallSeqStart = Chain;
5859 
  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
5862   SDValue LROp, FPOp;
5863   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5864 
5865   // Set up a copy of the stack pointer for use loading and storing any
5866   // arguments that may not fit in the registers available for argument
5867   // passing.
5868   SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5869 
5870   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5871   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5872   SmallVector<SDValue, 8> MemOpChains;
5873 
5874   bool seenFloatArg = false;
5875   // Walk the register/memloc assignments, inserting copies/loads.
5876   // i - Tracks the index into the list of registers allocated for the call
5877   // RealArgIdx - Tracks the index into the list of actual function arguments
5878   // j - Tracks the index into the list of byval arguments
5879   for (unsigned i = 0, RealArgIdx = 0, j = 0, e = ArgLocs.size();
5880        i != e;
5881        ++i, ++RealArgIdx) {
5882     CCValAssign &VA = ArgLocs[i];
5883     SDValue Arg = OutVals[RealArgIdx];
5884     ISD::ArgFlagsTy Flags = Outs[RealArgIdx].Flags;
5885 
5886     if (Flags.isByVal()) {
5887       // Argument is an aggregate which is passed by value, thus we need to
5888       // create a copy of it in the local variable space of the current stack
5889       // frame (which is the stack frame of the caller) and pass the address of
5890       // this copy to the callee.
5891       assert((j < ByValArgLocs.size()) && "Index out of bounds!");
5892       CCValAssign &ByValVA = ByValArgLocs[j++];
5893       assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
5894 
      // Memory reserved in the local variable space of the caller's stack
      // frame.
5896       unsigned LocMemOffset = ByValVA.getLocMemOffset();
5897 
5898       SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5899       PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5900                            StackPtr, PtrOff);
5901 
5902       // Create a copy of the argument in the local area of the current
5903       // stack frame.
5904       SDValue MemcpyCall =
5905         CreateCopyOfByValArgument(Arg, PtrOff,
5906                                   CallSeqStart.getNode()->getOperand(0),
5907                                   Flags, DAG, dl);
5908 
5909       // This must go outside the CALLSEQ_START..END.
5910       SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0,
5911                                                      SDLoc(MemcpyCall));
5912       DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5913                              NewCallSeqStart.getNode());
5914       Chain = CallSeqStart = NewCallSeqStart;
5915 
5916       // Pass the address of the aggregate copy on the stack either in a
5917       // physical register or in the parameter list area of the current stack
5918       // frame to the callee.
5919       Arg = PtrOff;
5920     }
5921 
    // When useCRBits() is true, there can be i1 arguments.
    // This is because getRegisterType(MVT::i1) => MVT::i1,
    // while for other integer types getRegisterType() => MVT::i32.
    // Extend i1 and ensure the callee will get i32.
5926     if (Arg.getValueType() == MVT::i1)
5927       Arg = DAG.getNode(Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
5928                         dl, MVT::i32, Arg);
5929 
5930     if (VA.isRegLoc()) {
5931       seenFloatArg |= VA.getLocVT().isFloatingPoint();
5932       // Put argument in a physical register.
5933       if (Subtarget.hasSPE() && Arg.getValueType() == MVT::f64) {
5934         bool IsLE = Subtarget.isLittleEndian();
5935         SDValue SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5936                         DAG.getIntPtrConstant(IsLE ? 0 : 1, dl));
5937         RegsToPass.push_back(std::make_pair(VA.getLocReg(), SVal.getValue(0)));
5938         SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5939                            DAG.getIntPtrConstant(IsLE ? 1 : 0, dl));
5940         RegsToPass.push_back(std::make_pair(ArgLocs[++i].getLocReg(),
5941                              SVal.getValue(0)));
5942       } else
5943         RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
5944     } else {
5945       // Put argument in the parameter list area of the current stack frame.
5946       assert(VA.isMemLoc());
5947       unsigned LocMemOffset = VA.getLocMemOffset();
5948 
5949       if (!IsTailCall) {
5950         SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5951         PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5952                              StackPtr, PtrOff);
5953 
5954         MemOpChains.push_back(
5955             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
5956       } else {
5957         // Calculate and remember argument location.
5958         CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
5959                                  TailCallArguments);
5960       }
5961     }
5962   }
5963 
5964   if (!MemOpChains.empty())
5965     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
5966 
5967   // Build a sequence of copy-to-reg nodes chained together with token chain
5968   // and flag operands which copy the outgoing args into the appropriate regs.
5969   SDValue InFlag;
5970   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
5971     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
5972                              RegsToPass[i].second, InFlag);
5973     InFlag = Chain.getValue(1);
5974   }
5975 
5976   // Set CR bit 6 to true if this is a vararg call with floating args passed in
5977   // registers.
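  // (Under the 32-bit SVR4 ABI, CR bit 6 tells a varargs callee whether any
  // floating-point arguments were passed in registers, so its prologue can
  // avoid saving the FPRs when none were used.)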
5978   if (IsVarArg) {
5979     SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
5980     SDValue Ops[] = { Chain, InFlag };
5981 
5982     Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
5983                         dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));
5984 
5985     InFlag = Chain.getValue(1);
5986   }
5987 
5988   if (IsTailCall)
5989     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
5990                     TailCallArguments);
5991 
5992   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
5993                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
5994 }
5995 
5996 // Copy an argument into memory, being careful to do this outside the
5997 // call sequence for the call to which the argument belongs.
5998 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
5999     SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
6000     SelectionDAG &DAG, const SDLoc &dl) const {
6001   SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
6002                         CallSeqStart.getNode()->getOperand(0),
6003                         Flags, DAG, dl);
6004   // The MEMCPY must go outside the CALLSEQ_START..END.
6005   int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
6006   SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
6007                                                  SDLoc(MemcpyCall));
6008   DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
6009                          NewCallSeqStart.getNode());
6010   return NewCallSeqStart;
6011 }
6012 
6013 SDValue PPCTargetLowering::LowerCall_64SVR4(
6014     SDValue Chain, SDValue Callee, CallFlags CFlags,
6015     const SmallVectorImpl<ISD::OutputArg> &Outs,
6016     const SmallVectorImpl<SDValue> &OutVals,
6017     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6018     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
6019     const CallBase *CB) const {
6020   bool isELFv2ABI = Subtarget.isELFv2ABI();
6021   bool isLittleEndian = Subtarget.isLittleEndian();
6022   unsigned NumOps = Outs.size();
6023   bool IsSibCall = false;
6024   bool IsFastCall = CFlags.CallConv == CallingConv::Fast;
6025 
6026   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6027   unsigned PtrByteSize = 8;
6028 
6029   MachineFunction &MF = DAG.getMachineFunction();
6030 
6031   if (CFlags.IsTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
6032     IsSibCall = true;
6033 
  // Mark this function as potentially containing a function that contains a
  // tail call. As a consequence the frame pointer will be used for dynamic
  // allocation and for restoring the caller's stack pointer in this function's
  // epilogue. This is done because the tail-called function might overwrite
  // the value in this function's (MF) stack pointer stack slot 0(SP).
6039   if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
6040     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
6041 
6042   assert(!(IsFastCall && CFlags.IsVarArg) &&
6043          "fastcc not supported on varargs functions");
6044 
6045   // Count how many bytes are to be pushed on the stack, including the linkage
6046   // area, and parameter passing area.  On ELFv1, the linkage area is 48 bytes
6047   // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
6048   // area is 32 bytes reserved space for [SP][CR][LR][TOC].
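  //
  // Spelled out as offsets (illustrative; they follow from the layout above):
  //   ELFv1: 0(SP) back chain, 8(SP) CR save, 16(SP) LR save,
  //          24(SP)-39(SP) reserved, 40(SP) TOC save.
  //   ELFv2: 0(SP) back chain, 8(SP) CR save, 16(SP) LR save, 24(SP) TOC save.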
6049   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6050   unsigned NumBytes = LinkageSize;
6051   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
6052   unsigned &QFPR_idx = FPR_idx;
6053 
6054   static const MCPhysReg GPR[] = {
6055     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6056     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
6057   };
6058   static const MCPhysReg VR[] = {
6059     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
6060     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
6061   };
6062 
6063   const unsigned NumGPRs = array_lengthof(GPR);
6064   const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
6065   const unsigned NumVRs  = array_lengthof(VR);
6066   const unsigned NumQFPRs = NumFPRs;
6067 
6068   // On ELFv2, we can avoid allocating the parameter area if all the arguments
6069   // can be passed to the callee in registers.
6070   // For the fast calling convention, there is another check below.
  // Note: this should be kept consistent with LowerFormalArguments_64SVR4().
6072   bool HasParameterArea = !isELFv2ABI || CFlags.IsVarArg || IsFastCall;
6073   if (!HasParameterArea) {
6074     unsigned ParamAreaSize = NumGPRs * PtrByteSize;
6075     unsigned AvailableFPRs = NumFPRs;
6076     unsigned AvailableVRs = NumVRs;
6077     unsigned NumBytesTmp = NumBytes;
6078     for (unsigned i = 0; i != NumOps; ++i) {
6079       if (Outs[i].Flags.isNest()) continue;
6080       if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags,
6081                                 PtrByteSize, LinkageSize, ParamAreaSize,
6082                                 NumBytesTmp, AvailableFPRs, AvailableVRs,
6083                                 Subtarget.hasQPX()))
6084         HasParameterArea = true;
6085     }
6086   }
6087 
6088   // When using the fast calling convention, we don't provide backing for
6089   // arguments that will be in registers.
6090   unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
6091 
  // Avoid allocating the parameter area for fastcc functions if all the
  // arguments can be passed in registers.
6094   if (IsFastCall)
6095     HasParameterArea = false;
6096 
6097   // Add up all the space actually used.
6098   for (unsigned i = 0; i != NumOps; ++i) {
6099     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6100     EVT ArgVT = Outs[i].VT;
6101     EVT OrigVT = Outs[i].ArgVT;
6102 
6103     if (Flags.isNest())
6104       continue;
6105 
6106     if (IsFastCall) {
6107       if (Flags.isByVal()) {
6108         NumGPRsUsed += (Flags.getByValSize()+7)/8;
6109         if (NumGPRsUsed > NumGPRs)
6110           HasParameterArea = true;
6111       } else {
6112         switch (ArgVT.getSimpleVT().SimpleTy) {
6113         default: llvm_unreachable("Unexpected ValueType for argument!");
6114         case MVT::i1:
6115         case MVT::i32:
6116         case MVT::i64:
6117           if (++NumGPRsUsed <= NumGPRs)
6118             continue;
6119           break;
6120         case MVT::v4i32:
6121         case MVT::v8i16:
6122         case MVT::v16i8:
6123         case MVT::v2f64:
6124         case MVT::v2i64:
6125         case MVT::v1i128:
6126         case MVT::f128:
6127           if (++NumVRsUsed <= NumVRs)
6128             continue;
6129           break;
6130         case MVT::v4f32:
6131           // When using QPX, this is handled like a FP register, otherwise, it
6132           // is an Altivec register.
6133           if (Subtarget.hasQPX()) {
6134             if (++NumFPRsUsed <= NumFPRs)
6135               continue;
6136           } else {
6137             if (++NumVRsUsed <= NumVRs)
6138               continue;
6139           }
6140           break;
6141         case MVT::f32:
6142         case MVT::f64:
6143         case MVT::v4f64: // QPX
6144         case MVT::v4i1:  // QPX
6145           if (++NumFPRsUsed <= NumFPRs)
6146             continue;
6147           break;
6148         }
6149         HasParameterArea = true;
6150       }
6151     }
6152 
    /* Respect alignment of argument on the stack.  */
    auto Alignment =
        CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
    NumBytes = alignTo(NumBytes, Alignment);
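    // E.g. (illustrative): a 16-byte-aligned vector argument arriving when
    // NumBytes == 56 bumps NumBytes to 64 before its slot size is added.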
6157 
6158     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
6159     if (Flags.isInConsecutiveRegsLast())
6160       NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
6161   }
6162 
6163   unsigned NumBytesActuallyUsed = NumBytes;
6164 
  // In the old ELFv1 ABI, the prologue code of the callee may store up to 8
  // GPR argument registers to the stack, allowing va_start to index over them
  // in memory if it is varargs.
  // Because we cannot tell if this is needed on the caller side, we have to
  // conservatively assume that it is needed.  As such, make sure we have at
  // least enough stack space for the caller to store the 8 GPRs.
  // In the ELFv2 ABI, we allocate the parameter area iff a callee
  // really requires memory operands, e.g. a vararg function.
6173   if (HasParameterArea)
6174     NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
6175   else
6176     NumBytes = LinkageSize;
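  // E.g., with the 32-byte ELFv2 linkage area and 8-byte pointers, a call
  // that needs a parameter area reserves at least 32 + 8 * 8 = 96 bytes.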
6177 
6178   // Tail call needs the stack to be aligned.
6179   if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
6180     NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
6181 
6182   int SPDiff = 0;
6183 
6184   // Calculate by how many bytes the stack has to be adjusted in case of tail
6185   // call optimization.
6186   if (!IsSibCall)
6187     SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);
6188 
6189   // To protect arguments on the stack from being clobbered in a tail call,
6190   // force all the loads to happen before doing any other lowering.
6191   if (CFlags.IsTailCall)
6192     Chain = DAG.getStackArgumentTokenFactor(Chain);
6193 
6194   // Adjust the stack pointer for the new arguments...
6195   // These operations are automatically eliminated by the prolog/epilog pass
6196   if (!IsSibCall)
6197     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6198   SDValue CallSeqStart = Chain;
6199 
  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
6202   SDValue LROp, FPOp;
6203   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
6204 
  // Set up a copy of the stack pointer for use in loading and storing any
6206   // arguments that may not fit in the registers available for argument
6207   // passing.
6208   SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
6209 
6210   // Figure out which arguments are going to go in registers, and which in
  // memory.  Also, if this is a vararg function, floating point arguments
6212   // must be stored to our stack, and loaded into integer regs as well, if
6213   // any integer regs are available for argument passing.
6214   unsigned ArgOffset = LinkageSize;
6215 
6216   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6217   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
6218 
6219   SmallVector<SDValue, 8> MemOpChains;
6220   for (unsigned i = 0; i != NumOps; ++i) {
6221     SDValue Arg = OutVals[i];
6222     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6223     EVT ArgVT = Outs[i].VT;
6224     EVT OrigVT = Outs[i].ArgVT;
6225 
6226     // PtrOff will be used to store the current argument to the stack if a
6227     // register cannot be found for it.
6228     SDValue PtrOff;
6229 
    // We re-align the argument offset for each argument, except when using
    // the fast calling convention, where we need to make sure we do so only
    // when we'll actually use a stack slot.
6233     auto ComputePtrOff = [&]() {
6234       /* Respect alignment of argument on the stack.  */
6235       auto Alignment =
6236           CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
6237       ArgOffset = alignTo(ArgOffset, Alignment);
6238 
6239       PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
6240 
6241       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6242     };
6243 
6244     if (!IsFastCall) {
6245       ComputePtrOff();
6246 
6247       /* Compute GPR index associated with argument offset.  */
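      // E.g., on ELFv2 with 8-byte pointers, an argument at LinkageSize + 16
      // maps to GPR index 2, i.e. the third argument register (X5).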
6248       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
6249       GPR_idx = std::min(GPR_idx, NumGPRs);
6250     }
6251 
6252     // Promote integers to 64-bit values.
6253     if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
6254       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
6255       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6256       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
6257     }
6258 
6259     // FIXME memcpy is used way more than necessary.  Correctness first.
6260     // Note: "by value" is code for passing a structure by value, not
6261     // basic types.
6262     if (Flags.isByVal()) {
6263       // Note: Size includes alignment padding, so
6264       //   struct x { short a; char b; }
6265       // will have Size = 4.  With #pragma pack(1), it will have Size = 3.
6266       // These are the proper values we need for right-justifying the
6267       // aggregate in a parameter register.
6268       unsigned Size = Flags.getByValSize();
6269 
6270       // An empty aggregate parameter takes up no storage and no
6271       // registers.
6272       if (Size == 0)
6273         continue;
6274 
6275       if (IsFastCall)
6276         ComputePtrOff();
6277 
6278       // All aggregates smaller than 8 bytes must be passed right-justified.
6279       if (Size==1 || Size==2 || Size==4) {
6280         EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
6281         if (GPR_idx != NumGPRs) {
6282           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
6283                                         MachinePointerInfo(), VT);
6284           MemOpChains.push_back(Load.getValue(1));
6285           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6286 
6287           ArgOffset += PtrByteSize;
6288           continue;
6289         }
6290       }
6291 
6292       if (GPR_idx == NumGPRs && Size < 8) {
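        // No register is available: memcpy the aggregate into its stack
        // slot, right-justified on big-endian targets; e.g., a 3-byte
        // aggregate is copied to PtrOff + (8 - 3) within its doubleword.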
6293         SDValue AddPtr = PtrOff;
6294         if (!isLittleEndian) {
6295           SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
6296                                           PtrOff.getValueType());
6297           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6298         }
6299         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6300                                                           CallSeqStart,
6301                                                           Flags, DAG, dl);
6302         ArgOffset += PtrByteSize;
6303         continue;
6304       }
6305       // Copy entire object into memory.  There are cases where gcc-generated
6306       // code assumes it is there, even if it could be put entirely into
6307       // registers.  (This is not what the doc says.)
6308 
6309       // FIXME: The above statement is likely due to a misunderstanding of the
6310       // documents.  All arguments must be copied into the parameter area BY
6311       // THE CALLEE in the event that the callee takes the address of any
6312       // formal argument.  That has not yet been implemented.  However, it is
6313       // reasonable to use the stack area as a staging area for the register
6314       // load.
6315 
6316       // Skip this for small aggregates, as we will use the same slot for a
6317       // right-justified copy, below.
6318       if (Size >= 8)
6319         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
6320                                                           CallSeqStart,
6321                                                           Flags, DAG, dl);
6322 
6323       // When a register is available, pass a small aggregate right-justified.
6324       if (Size < 8 && GPR_idx != NumGPRs) {
6325         // The easiest way to get this right-justified in a register
6326         // is to copy the structure into the rightmost portion of a
6327         // local variable slot, then load the whole slot into the
6328         // register.
6329         // FIXME: The memcpy seems to produce pretty awful code for
6330         // small aggregates, particularly for packed ones.
6331         // FIXME: It would be preferable to use the slot in the
6332         // parameter save area instead of a new local variable.
6333         SDValue AddPtr = PtrOff;
6334         if (!isLittleEndian) {
6335           SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType());
6336           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6337         }
6338         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6339                                                           CallSeqStart,
6340                                                           Flags, DAG, dl);
6341 
6342         // Load the slot into the register.
6343         SDValue Load =
6344             DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo());
6345         MemOpChains.push_back(Load.getValue(1));
6346         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6347 
6348         // Done with this argument.
6349         ArgOffset += PtrByteSize;
6350         continue;
6351       }
6352 
6353       // For aggregates larger than PtrByteSize, copy the pieces of the
6354       // object that fit into registers from the parameter save area.
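      // E.g., a 20-byte aggregate is loaded as three doubleword pieces
      // (j = 0, 8, 16) while GPRs remain, advancing ArgOffset by 24 in all.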
6355       for (unsigned j=0; j<Size; j+=PtrByteSize) {
6356         SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6357         SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6358         if (GPR_idx != NumGPRs) {
6359           SDValue Load =
6360               DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
6361           MemOpChains.push_back(Load.getValue(1));
6362           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6363           ArgOffset += PtrByteSize;
6364         } else {
6365           ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
6366           break;
6367         }
6368       }
6369       continue;
6370     }
6371 
6372     switch (Arg.getSimpleValueType().SimpleTy) {
6373     default: llvm_unreachable("Unexpected ValueType for argument!");
6374     case MVT::i1:
6375     case MVT::i32:
6376     case MVT::i64:
6377       if (Flags.isNest()) {
6378         // The 'nest' parameter, if any, is passed in R11.
6379         RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
6380         break;
6381       }
6382 
6383       // These can be scalar arguments or elements of an integer array type
6384       // passed directly.  Clang may use those instead of "byval" aggregate
6385       // types to avoid forcing arguments to memory unnecessarily.
6386       if (GPR_idx != NumGPRs) {
6387         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
6388       } else {
6389         if (IsFastCall)
6390           ComputePtrOff();
6391 
6392         assert(HasParameterArea &&
6393                "Parameter area must exist to pass an argument in memory.");
6394         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6395                          true, CFlags.IsTailCall, false, MemOpChains,
6396                          TailCallArguments, dl);
6397         if (IsFastCall)
6398           ArgOffset += PtrByteSize;
6399       }
6400       if (!IsFastCall)
6401         ArgOffset += PtrByteSize;
6402       break;
6403     case MVT::f32:
6404     case MVT::f64: {
6405       // These can be scalar arguments or elements of a float array type
      // passed directly.  The latter are used to implement ELFv2 homogeneous
6407       // float aggregates.
6408 
6409       // Named arguments go into FPRs first, and once they overflow, the
6410       // remaining arguments go into GPRs and then the parameter save area.
6411       // Unnamed arguments for vararg functions always go to GPRs and
      // then the parameter save area.  For now, we always put arguments to
      // vararg routines in both locations (FPR *and* GPR or stack slot).
6414       bool NeedGPROrStack = CFlags.IsVarArg || FPR_idx == NumFPRs;
6415       bool NeededLoad = false;
6416 
6417       // First load the argument into the next available FPR.
6418       if (FPR_idx != NumFPRs)
6419         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6420 
6421       // Next, load the argument into GPR or stack slot if needed.
6422       if (!NeedGPROrStack)
6423         ;
6424       else if (GPR_idx != NumGPRs && !IsFastCall) {
6425         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
6426         // once we support fp <-> gpr moves.
6427 
6428         // In the non-vararg case, this can only ever happen in the
6429         // presence of f32 array types, since otherwise we never run
6430         // out of FPRs before running out of GPRs.
6431         SDValue ArgVal;
6432 
6433         // Double values are always passed in a single GPR.
6434         if (Arg.getValueType() != MVT::f32) {
6435           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
6436 
6437         // Non-array float values are extended and passed in a GPR.
6438         } else if (!Flags.isInConsecutiveRegs()) {
6439           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6440           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6441 
6442         // If we have an array of floats, we collect every odd element
6443         // together with its predecessor into one GPR.
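        // E.g., two consecutive f32 elements are bitcast to i32 and combined
        // into one i64 with BUILD_PAIR; on big-endian targets the element at
        // the lower offset lands in the most-significant word.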
6444         } else if (ArgOffset % PtrByteSize != 0) {
6445           SDValue Lo, Hi;
6446           Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
6447           Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6448           if (!isLittleEndian)
6449             std::swap(Lo, Hi);
6450           ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
6451 
6452         // The final element, if even, goes into the first half of a GPR.
6453         } else if (Flags.isInConsecutiveRegsLast()) {
6454           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6455           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6456           if (!isLittleEndian)
6457             ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
6458                                  DAG.getConstant(32, dl, MVT::i32));
6459 
        // Non-final even elements are skipped; they will be handled
        // together with the subsequent argument on the next go-around.
6462         } else
6463           ArgVal = SDValue();
6464 
6465         if (ArgVal.getNode())
6466           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
6467       } else {
6468         if (IsFastCall)
6469           ComputePtrOff();
6470 
6471         // Single-precision floating-point values are mapped to the
6472         // second (rightmost) word of the stack doubleword.
6473         if (Arg.getValueType() == MVT::f32 &&
6474             !isLittleEndian && !Flags.isInConsecutiveRegs()) {
6475           SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6476           PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6477         }
6478 
6479         assert(HasParameterArea &&
6480                "Parameter area must exist to pass an argument in memory.");
6481         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6482                          true, CFlags.IsTailCall, false, MemOpChains,
6483                          TailCallArguments, dl);
6484 
6485         NeededLoad = true;
6486       }
6487       // When passing an array of floats, the array occupies consecutive
6488       // space in the argument area; only round up to the next doubleword
6489       // at the end of the array.  Otherwise, each float takes 8 bytes.
6490       if (!IsFastCall || NeededLoad) {
6491         ArgOffset += (Arg.getValueType() == MVT::f32 &&
6492                       Flags.isInConsecutiveRegs()) ? 4 : 8;
6493         if (Flags.isInConsecutiveRegsLast())
6494           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
6495       }
6496       break;
6497     }
6498     case MVT::v4f32:
6499     case MVT::v4i32:
6500     case MVT::v8i16:
6501     case MVT::v16i8:
6502     case MVT::v2f64:
6503     case MVT::v2i64:
6504     case MVT::v1i128:
6505     case MVT::f128:
6506       if (!Subtarget.hasQPX()) {
6507       // These can be scalar arguments or elements of a vector array type
      // passed directly.  The latter are used to implement ELFv2 homogeneous
6509       // vector aggregates.
6510 
6511       // For a varargs call, named arguments go into VRs or on the stack as
6512       // usual; unnamed arguments always go to the stack or the corresponding
6513       // GPRs when within range.  For now, we always put the value in both
6514       // locations (or even all three).
6515       if (CFlags.IsVarArg) {
6516         assert(HasParameterArea &&
6517                "Parameter area must exist if we have a varargs call.");
6518         // We could elide this store in the case where the object fits
6519         // entirely in R registers.  Maybe later.
6520         SDValue Store =
6521             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6522         MemOpChains.push_back(Store);
6523         if (VR_idx != NumVRs) {
6524           SDValue Load =
6525               DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6526           MemOpChains.push_back(Load.getValue(1));
6527           RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6528         }
6529         ArgOffset += 16;
6530         for (unsigned i=0; i<16; i+=PtrByteSize) {
6531           if (GPR_idx == NumGPRs)
6532             break;
6533           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6534                                    DAG.getConstant(i, dl, PtrVT));
6535           SDValue Load =
6536               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6537           MemOpChains.push_back(Load.getValue(1));
6538           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6539         }
6540         break;
6541       }
6542 
6543       // Non-varargs Altivec params go into VRs or on the stack.
6544       if (VR_idx != NumVRs) {
6545         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6546       } else {
6547         if (IsFastCall)
6548           ComputePtrOff();
6549 
6550         assert(HasParameterArea &&
6551                "Parameter area must exist to pass an argument in memory.");
6552         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6553                          true, CFlags.IsTailCall, true, MemOpChains,
6554                          TailCallArguments, dl);
6555         if (IsFastCall)
6556           ArgOffset += 16;
6557       }
6558 
6559       if (!IsFastCall)
6560         ArgOffset += 16;
6561       break;
6562       } // not QPX
6563 
6564       assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 &&
6565              "Invalid QPX parameter type");
6566 
6567       LLVM_FALLTHROUGH;
6568     case MVT::v4f64:
6569     case MVT::v4i1: {
6570       bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32;
6571       if (CFlags.IsVarArg) {
6572         assert(HasParameterArea &&
6573                "Parameter area must exist if we have a varargs call.");
6574         // We could elide this store in the case where the object fits
6575         // entirely in R registers.  Maybe later.
6576         SDValue Store =
6577             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6578         MemOpChains.push_back(Store);
6579         if (QFPR_idx != NumQFPRs) {
6580           SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, Store,
6581                                      PtrOff, MachinePointerInfo());
6582           MemOpChains.push_back(Load.getValue(1));
6583           RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load));
6584         }
6585         ArgOffset += (IsF32 ? 16 : 32);
6586         for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) {
6587           if (GPR_idx == NumGPRs)
6588             break;
6589           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6590                                    DAG.getConstant(i, dl, PtrVT));
6591           SDValue Load =
6592               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6593           MemOpChains.push_back(Load.getValue(1));
6594           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6595         }
6596         break;
6597       }
6598 
6599       // Non-varargs QPX params go into registers or on the stack.
6600       if (QFPR_idx != NumQFPRs) {
6601         RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg));
6602       } else {
6603         if (IsFastCall)
6604           ComputePtrOff();
6605 
6606         assert(HasParameterArea &&
6607                "Parameter area must exist to pass an argument in memory.");
6608         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6609                          true, CFlags.IsTailCall, true, MemOpChains,
6610                          TailCallArguments, dl);
6611         if (IsFastCall)
6612           ArgOffset += (IsF32 ? 16 : 32);
6613       }
6614 
6615       if (!IsFastCall)
6616         ArgOffset += (IsF32 ? 16 : 32);
6617       break;
6618       }
6619     }
6620   }
6621 
6622   assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
6623          "mismatch in size of parameter area");
6624   (void)NumBytesActuallyUsed;
6625 
6626   if (!MemOpChains.empty())
6627     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6628 
6629   // Check if this is an indirect call (MTCTR/BCTRL).
6630   // See prepareDescriptorIndirectCall and buildCallOperands for more
6631   // information about calls through function pointers in the 64-bit SVR4 ABI.
6632   if (CFlags.IsIndirect) {
6633     // For 64-bit ELFv2 ABI with PCRel, do not save the TOC of the
6634     // caller in the TOC save area.
6635     if (isTOCSaveRestoreRequired(Subtarget)) {
      assert(!CFlags.IsTailCall && "Indirect tail calls not supported");
6637       // Load r2 into a virtual register and store it to the TOC save area.
6638       setUsesTOCBasePtr(DAG);
6639       SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
6640       // TOC save area offset.
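      // (Typically 40 bytes into the linkage area under ELFv1 and 24 bytes
      // under ELFv2.)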
6641       unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
6642       SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
6643       SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6644       Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr,
6645                            MachinePointerInfo::getStack(
6646                                DAG.getMachineFunction(), TOCSaveOffset));
6647     }
6648     // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
6649     // This does not mean the MTCTR instruction must use R12; it's easier
6650     // to model this as an extra parameter, so do that.
6651     if (isELFv2ABI && !CFlags.IsPatchPoint)
6652       RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
6653   }
6654 
6655   // Build a sequence of copy-to-reg nodes chained together with token chain
6656   // and flag operands which copy the outgoing args into the appropriate regs.
6657   SDValue InFlag;
6658   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6659     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6660                              RegsToPass[i].second, InFlag);
6661     InFlag = Chain.getValue(1);
6662   }
6663 
6664   if (CFlags.IsTailCall && !IsSibCall)
6665     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6666                     TailCallArguments);
6667 
6668   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
6669                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
6670 }
6671 
6672 SDValue PPCTargetLowering::LowerCall_Darwin(
6673     SDValue Chain, SDValue Callee, CallFlags CFlags,
6674     const SmallVectorImpl<ISD::OutputArg> &Outs,
6675     const SmallVectorImpl<SDValue> &OutVals,
6676     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6677     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
6678     const CallBase *CB) const {
6679   unsigned NumOps = Outs.size();
6680 
6681   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6682   bool isPPC64 = PtrVT == MVT::i64;
6683   unsigned PtrByteSize = isPPC64 ? 8 : 4;
6684 
6685   MachineFunction &MF = DAG.getMachineFunction();
6686 
  // Mark this function as potentially containing a function that contains a
  // tail call. As a consequence the frame pointer will be used for dynamic
  // alloca and for restoring the caller's stack pointer in this function's
  // epilog. This is done because, by tail calling, the called function might
  // overwrite the value in this function's (MF) stack pointer stack slot
  // 0(SP).
6692   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6693       CFlags.CallConv == CallingConv::Fast)
6694     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
6695 
6696   // Count how many bytes are to be pushed on the stack, including the linkage
6697   // area, and parameter passing area.  We start with 24/48 bytes, which is
6698   // prereserved space for [SP][CR][LR][3 x unused].
6699   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6700   unsigned NumBytes = LinkageSize;
6701 
6702   // Add up all the space actually used.
6703   // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
6704   // they all go in registers, but we must reserve stack space for them for
6705   // possible use by the caller.  In varargs or 64-bit calls, parameters are
6706   // assigned stack space in order, with padding so Altivec parameters are
6707   // 16-byte aligned.
6708   unsigned nAltivecParamsAtEnd = 0;
6709   for (unsigned i = 0; i != NumOps; ++i) {
6710     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6711     EVT ArgVT = Outs[i].VT;
    // Varargs Altivec parameters are padded to a 16-byte boundary.
6713     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
6714         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
6715         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
6716       if (!CFlags.IsVarArg && !isPPC64) {
6717         // Non-varargs Altivec parameters go after all the non-Altivec
6718         // parameters; handle those later so we know how much padding we need.
6719         nAltivecParamsAtEnd++;
6720         continue;
6721       }
      // Varargs and 64-bit Altivec parameters are padded to a 16-byte
      // boundary.
6723       NumBytes = ((NumBytes+15)/16)*16;
6724     }
6725     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
6726   }
6727 
6728   // Allow for Altivec parameters at the end, if needed.
6729   if (nAltivecParamsAtEnd) {
6730     NumBytes = ((NumBytes+15)/16)*16;
6731     NumBytes += 16*nAltivecParamsAtEnd;
6732   }
6733 
6734   // The prolog code of the callee may store up to 8 GPR argument registers to
  // the stack, allowing va_start to index over them in memory if the callee
  // is varargs.
6736   // Because we cannot tell if this is needed on the caller side, we have to
6737   // conservatively assume that it is needed.  As such, make sure we have at
6738   // least enough stack space for the caller to store the 8 GPRs.
6739   NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
6740 
6741   // Tail call needs the stack to be aligned.
6742   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6743       CFlags.CallConv == CallingConv::Fast)
6744     NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
6745 
6746   // Calculate by how many bytes the stack has to be adjusted in case of tail
6747   // call optimization.
6748   int SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);
6749 
6750   // To protect arguments on the stack from being clobbered in a tail call,
6751   // force all the loads to happen before doing any other lowering.
6752   if (CFlags.IsTailCall)
6753     Chain = DAG.getStackArgumentTokenFactor(Chain);
6754 
6755   // Adjust the stack pointer for the new arguments...
6756   // These operations are automatically eliminated by the prolog/epilog pass
6757   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6758   SDValue CallSeqStart = Chain;
6759 
  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
6762   SDValue LROp, FPOp;
6763   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
6764 
  // Set up a copy of the stack pointer for use in loading and storing any
6766   // arguments that may not fit in the registers available for argument
6767   // passing.
6768   SDValue StackPtr;
6769   if (isPPC64)
6770     StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
6771   else
6772     StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
6773 
6774   // Figure out which arguments are going to go in registers, and which in
  // memory.  Also, if this is a vararg function, floating point arguments
6776   // must be stored to our stack, and loaded into integer regs as well, if
6777   // any integer regs are available for argument passing.
6778   unsigned ArgOffset = LinkageSize;
6779   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
6780 
6781   static const MCPhysReg GPR_32[] = {           // 32-bit registers.
6782     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6783     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
6784   };
6785   static const MCPhysReg GPR_64[] = {           // 64-bit registers.
6786     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6787     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
6788   };
6789   static const MCPhysReg VR[] = {
6790     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
6791     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
6792   };
6793   const unsigned NumGPRs = array_lengthof(GPR_32);
6794   const unsigned NumFPRs = 13;
6795   const unsigned NumVRs  = array_lengthof(VR);
6796 
6797   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
6798 
6799   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6800   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
6801 
6802   SmallVector<SDValue, 8> MemOpChains;
6803   for (unsigned i = 0; i != NumOps; ++i) {
6804     SDValue Arg = OutVals[i];
6805     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6806 
6807     // PtrOff will be used to store the current argument to the stack if a
6808     // register cannot be found for it.
6809     SDValue PtrOff;
6810 
6811     PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
6812 
6813     PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6814 
6815     // On PPC64, promote integers to 64-bit values.
6816     if (isPPC64 && Arg.getValueType() == MVT::i32) {
6817       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
6818       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6819       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
6820     }
6821 
6822     // FIXME memcpy is used way more than necessary.  Correctness first.
6823     // Note: "by value" is code for passing a structure by value, not
6824     // basic types.
6825     if (Flags.isByVal()) {
6826       unsigned Size = Flags.getByValSize();
6827       // Very small objects are passed right-justified.  Everything else is
6828       // passed left-justified.
6829       if (Size==1 || Size==2) {
6830         EVT VT = (Size==1) ? MVT::i8 : MVT::i16;
6831         if (GPR_idx != NumGPRs) {
6832           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
6833                                         MachinePointerInfo(), VT);
6834           MemOpChains.push_back(Load.getValue(1));
6835           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6836 
6837           ArgOffset += PtrByteSize;
6838         } else {
6839           SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
6840                                           PtrOff.getValueType());
6841           SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6842           Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6843                                                             CallSeqStart,
6844                                                             Flags, DAG, dl);
6845           ArgOffset += PtrByteSize;
6846         }
6847         continue;
6848       }
6849       // Copy entire object into memory.  There are cases where gcc-generated
6850       // code assumes it is there, even if it could be put entirely into
6851       // registers.  (This is not what the doc says.)
6852       Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
6853                                                         CallSeqStart,
6854                                                         Flags, DAG, dl);
6855 
6856       // For small aggregates (Darwin only) and aggregates >= PtrByteSize,
6857       // copy the pieces of the object that fit into registers from the
6858       // parameter save area.
6859       for (unsigned j=0; j<Size; j+=PtrByteSize) {
6860         SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6861         SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6862         if (GPR_idx != NumGPRs) {
6863           SDValue Load =
6864               DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
6865           MemOpChains.push_back(Load.getValue(1));
6866           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6867           ArgOffset += PtrByteSize;
6868         } else {
6869           ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
6870           break;
6871         }
6872       }
6873       continue;
6874     }
6875 
6876     switch (Arg.getSimpleValueType().SimpleTy) {
6877     default: llvm_unreachable("Unexpected ValueType for argument!");
6878     case MVT::i1:
6879     case MVT::i32:
6880     case MVT::i64:
6881       if (GPR_idx != NumGPRs) {
6882         if (Arg.getValueType() == MVT::i1)
6883           Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg);
6884 
6885         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
6886       } else {
6887         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6888                          isPPC64, CFlags.IsTailCall, false, MemOpChains,
6889                          TailCallArguments, dl);
6890       }
6891       ArgOffset += PtrByteSize;
6892       break;
6893     case MVT::f32:
6894     case MVT::f64:
6895       if (FPR_idx != NumFPRs) {
6896         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6897 
6898         if (CFlags.IsVarArg) {
6899           SDValue Store =
6900               DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6901           MemOpChains.push_back(Store);
6902 
6903           // Float varargs are always shadowed in available integer registers
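          // (E.g., on 32-bit Darwin an f64 vararg is stored to the stack and
          // reloaded as two i32 words into consecutive GPRs.)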
6904           if (GPR_idx != NumGPRs) {
6905             SDValue Load =
6906                 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
6907             MemOpChains.push_back(Load.getValue(1));
6908             RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6909           }
6910           if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
6911             SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6912             PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6913             SDValue Load =
6914                 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
6915             MemOpChains.push_back(Load.getValue(1));
6916             RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6917           }
6918         } else {
6919           // If we have any FPRs remaining, we may also have GPRs remaining.
6920           // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
6921           // GPRs.
6922           if (GPR_idx != NumGPRs)
6923             ++GPR_idx;
          if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
              !isPPC64)  // PPC64 has 64-bit GPRs, obviously :)
6926             ++GPR_idx;
6927         }
6928       } else
6929         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6930                          isPPC64, CFlags.IsTailCall, false, MemOpChains,
6931                          TailCallArguments, dl);
6932       if (isPPC64)
6933         ArgOffset += 8;
6934       else
6935         ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
6936       break;
6937     case MVT::v4f32:
6938     case MVT::v4i32:
6939     case MVT::v8i16:
6940     case MVT::v16i8:
6941       if (CFlags.IsVarArg) {
6942         // These go aligned on the stack, or in the corresponding R registers
6943         // when within range.  The Darwin PPC ABI doc claims they also go in
6944         // V registers; in fact gcc does this only for arguments that are
6945         // prototyped, not for those that match the ...  We do it for all
        // arguments; this seems to work.
        while (ArgOffset % 16 != 0) {
6948           ArgOffset += PtrByteSize;
6949           if (GPR_idx != NumGPRs)
6950             GPR_idx++;
6951         }
6952         // We could elide this store in the case where the object fits
6953         // entirely in R registers.  Maybe later.
6954         PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
6955                              DAG.getConstant(ArgOffset, dl, PtrVT));
6956         SDValue Store =
6957             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6958         MemOpChains.push_back(Store);
6959         if (VR_idx != NumVRs) {
6960           SDValue Load =
6961               DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6962           MemOpChains.push_back(Load.getValue(1));
6963           RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6964         }
6965         ArgOffset += 16;
6966         for (unsigned i=0; i<16; i+=PtrByteSize) {
6967           if (GPR_idx == NumGPRs)
6968             break;
6969           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6970                                    DAG.getConstant(i, dl, PtrVT));
6971           SDValue Load =
6972               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6973           MemOpChains.push_back(Load.getValue(1));
6974           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6975         }
6976         break;
6977       }
6978 
6979       // Non-varargs Altivec params generally go in registers, but have
6980       // stack space allocated at the end.
6981       if (VR_idx != NumVRs) {
6982         // Doesn't have GPR space allocated.
6983         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6984       } else if (nAltivecParamsAtEnd==0) {
6985         // We are emitting Altivec params in order.
6986         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6987                          isPPC64, CFlags.IsTailCall, true, MemOpChains,
6988                          TailCallArguments, dl);
6989         ArgOffset += 16;
6990       }
6991       break;
6992     }
6993   }
6994   // If all Altivec parameters fit in registers, as they usually do,
6995   // they get stack space following the non-Altivec parameters.  We
6996   // don't track this here because nobody below needs it.
  // If there are more Altivec parameters than fit in registers, emit
  // the stores here.
6999   if (!CFlags.IsVarArg && nAltivecParamsAtEnd > NumVRs) {
7000     unsigned j = 0;
    // Offset is aligned; skip the first 12 params, which go in V registers.
7002     ArgOffset = ((ArgOffset+15)/16)*16;
7003     ArgOffset += 12*16;
7004     for (unsigned i = 0; i != NumOps; ++i) {
7005       SDValue Arg = OutVals[i];
7006       EVT ArgType = Outs[i].VT;
7007       if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
7008           ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
7009         if (++j > NumVRs) {
7010           SDValue PtrOff;
7011           // We are emitting Altivec params in order.
7012           LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
7013                            isPPC64, CFlags.IsTailCall, true, MemOpChains,
7014                            TailCallArguments, dl);
7015           ArgOffset += 16;
7016         }
7017       }
7018     }
7019   }
7020 
7021   if (!MemOpChains.empty())
7022     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
7023 
7024   // On Darwin, R12 must contain the address of an indirect callee.  This does
7025   // not mean the MTCTR instruction must use R12; it's easier to model this as
7026   // an extra parameter, so do that.
7027   if (CFlags.IsIndirect) {
7028     assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
7029     RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 :
7030                                                    PPC::R12), Callee));
7031   }
7032 
7033   // Build a sequence of copy-to-reg nodes chained together with token chain
7034   // and flag operands which copy the outgoing args into the appropriate regs.
7035   SDValue InFlag;
7036   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
7037     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
7038                              RegsToPass[i].second, InFlag);
7039     InFlag = Chain.getValue(1);
7040   }
7041 
7042   if (CFlags.IsTailCall)
7043     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
7044                     TailCallArguments);
7045 
7046   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
7047                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
7048 }
7049 
7050 static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
7051                    CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
7052                    CCState &State) {
7053 
7054   const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>(
7055       State.getMachineFunction().getSubtarget());
7056   const bool IsPPC64 = Subtarget.isPPC64();
7057   const Align PtrAlign = IsPPC64 ? Align(8) : Align(4);
7058   const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
7059 
7060   assert((!ValVT.isInteger() ||
7061           (ValVT.getSizeInBits() <= RegVT.getSizeInBits())) &&
7062          "Integer argument exceeds register size: should have been legalized");
7063 
7064   if (ValVT == MVT::f128)
7065     report_fatal_error("f128 is unimplemented on AIX.");
7066 
7067   if (ArgFlags.isNest())
7068     report_fatal_error("Nest arguments are unimplemented.");
7069 
7070   if (ValVT.isVector() || LocVT.isVector())
7071     report_fatal_error("Vector arguments are unimplemented on AIX.");
7072 
7073   static const MCPhysReg GPR_32[] = {// 32-bit registers.
7074                                      PPC::R3, PPC::R4, PPC::R5, PPC::R6,
7075                                      PPC::R7, PPC::R8, PPC::R9, PPC::R10};
7076   static const MCPhysReg GPR_64[] = {// 64-bit registers.
7077                                      PPC::X3, PPC::X4, PPC::X5, PPC::X6,
7078                                      PPC::X7, PPC::X8, PPC::X9, PPC::X10};
7079 
7080   if (ArgFlags.isByVal()) {
7081     if (ArgFlags.getNonZeroByValAlign() > PtrAlign)
7082       report_fatal_error("Pass-by-value arguments with alignment greater than "
7083                          "register width are not supported.");
7084 
7085     const unsigned ByValSize = ArgFlags.getByValSize();
7086 
7087     // An empty aggregate parameter takes up no storage and no registers,
    // but needs a MemLoc for a stack slot on the formal-arguments side.
7089     if (ByValSize == 0) {
7090       State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
7091                                        State.getNextStackOffset(), RegVT,
7092                                        LocInfo));
7093       return false;
7094     }
7095 
7096     const unsigned StackSize = alignTo(ByValSize, PtrAlign);
7097     unsigned Offset = State.AllocateStack(StackSize, PtrAlign);
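    // E.g., on PPC64 a 12-byte byval rounds up to a 16-byte stack allocation
    // and, while argument GPRs remain, claims two GPRs to cover it.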
7098     for (const unsigned E = Offset + StackSize; Offset < E;
7099          Offset += PtrAlign.value()) {
7100       if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
7101         State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
7102       else {
7103         State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
7104                                          Offset, MVT::INVALID_SIMPLE_VALUE_TYPE,
7105                                          LocInfo));
7106         break;
7107       }
7108     }
7109     return false;
7110   }
7111 
  // Arguments always reserve space in the parameter save area.
7113   switch (ValVT.SimpleTy) {
7114   default:
7115     report_fatal_error("Unhandled value type for argument.");
7116   case MVT::i64:
7117     // i64 arguments should have been split to i32 for PPC32.
7118     assert(IsPPC64 && "PPC32 should have split i64 values.");
7119     LLVM_FALLTHROUGH;
7120   case MVT::i1:
7121   case MVT::i32: {
7122     const unsigned Offset = State.AllocateStack(PtrAlign.value(), PtrAlign);
    // AIX integer arguments are always passed in the full register width.
7124     if (ValVT.getSizeInBits() < RegVT.getSizeInBits())
7125       LocInfo = ArgFlags.isSExt() ? CCValAssign::LocInfo::SExt
7126                                   : CCValAssign::LocInfo::ZExt;
7127     if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
7128       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
7129     else
7130       State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, RegVT, LocInfo));
7131 
7132     return false;
7133   }
7134   case MVT::f32:
7135   case MVT::f64: {
    // The parameter save area (PSA) is reserved even if the float is passed
    // in an FPR.
7137     const unsigned StoreSize = LocVT.getStoreSize();
7138     // Floats are always 4-byte aligned in the PSA on AIX.
7139     // This includes f64 in 64-bit mode for ABI compatibility.
7140     const unsigned Offset =
7141         State.AllocateStack(IsPPC64 ? 8 : StoreSize, Align(4));
7142     unsigned FReg = State.AllocateReg(FPR);
7143     if (FReg)
7144       State.addLoc(CCValAssign::getReg(ValNo, ValVT, FReg, LocVT, LocInfo));
7145 
7146     // Reserve and initialize GPRs or initialize the PSA as required.
7147     for (unsigned I = 0; I < StoreSize; I += PtrAlign.value()) {
7148       if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) {
7149         assert(FReg && "An FPR should be available when a GPR is reserved.");
7150         if (State.isVarArg()) {
7151           // Successfully reserved GPRs are only initialized for vararg calls.
7152           // Custom handling is required for:
7153           //   f64 in PPC32 needs to be split into 2 GPRs.
7154           //   f32 in PPC64 needs to occupy only lower 32 bits of 64-bit GPR.
7155           State.addLoc(
7156               CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo));
7157         }
7158       } else {
7159         // If there are insufficient GPRs, the PSA needs to be initialized.
        // For compatibility with the AIX XL compiler, initialization occurs
        // even if an FPR was already initialized. The full memory for the
        // argument will be initialized even if a prior word is saved in a
        // GPR. A custom MemLoc is used when the argument is also passed in
        // an FPR, so that the callee handling can skip over it easily.
7165         State.addLoc(
7166             FReg ? CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT,
7167                                              LocInfo)
7168                  : CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
7169         break;
7170       }
7171     }
7172 
7173     return false;
7174   }
7175   }
7176   return true;
7177 }
7178 
7179 static const TargetRegisterClass *getRegClassForSVT(MVT::SimpleValueType SVT,
7180                                                     bool IsPPC64) {
7181   assert((IsPPC64 || SVT != MVT::i64) &&
7182          "i64 should have been split for 32-bit codegen.");
7183 
7184   switch (SVT) {
7185   default:
7186     report_fatal_error("Unexpected value type for formal argument");
7187   case MVT::i1:
7188   case MVT::i32:
7189   case MVT::i64:
7190     return IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
7191   case MVT::f32:
7192     return &PPC::F4RCRegClass;
7193   case MVT::f64:
7194     return &PPC::F8RCRegClass;
7195   }
7196 }
7197 
7198 static SDValue truncateScalarIntegerArg(ISD::ArgFlagsTy Flags, EVT ValVT,
7199                                         SelectionDAG &DAG, SDValue ArgValue,
7200                                         MVT LocVT, const SDLoc &dl) {
7201   assert(ValVT.isScalarInteger() && LocVT.isScalarInteger());
7202   assert(ValVT.getSizeInBits() < LocVT.getSizeInBits());
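  // E.g., an i32 received in a 64-bit GPR is returned as
  // (truncate (AssertSext/AssertZext i64:ArgValue, i32)), recording that the
  // caller already extended the value.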
7203 
7204   if (Flags.isSExt())
7205     ArgValue = DAG.getNode(ISD::AssertSext, dl, LocVT, ArgValue,
7206                            DAG.getValueType(ValVT));
7207   else if (Flags.isZExt())
7208     ArgValue = DAG.getNode(ISD::AssertZext, dl, LocVT, ArgValue,
7209                            DAG.getValueType(ValVT));
7210 
7211   return DAG.getNode(ISD::TRUNCATE, dl, ValVT, ArgValue);
7212 }
7213 
7214 static unsigned mapArgRegToOffsetAIX(unsigned Reg, const PPCFrameLowering *FL) {
7215   const unsigned LASize = FL->getLinkageSize();
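  // Each argument register shadows a slot in the parameter save area, which
  // starts immediately after the linkage area: e.g., R4 maps to LASize + 4
  // and X4 maps to LASize + 8.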
7216 
7217   if (PPC::GPRCRegClass.contains(Reg)) {
7218     assert(Reg >= PPC::R3 && Reg <= PPC::R10 &&
7219            "Reg must be a valid argument register!");
7220     return LASize + 4 * (Reg - PPC::R3);
7221   }
7222 
7223   if (PPC::G8RCRegClass.contains(Reg)) {
7224     assert(Reg >= PPC::X3 && Reg <= PPC::X10 &&
7225            "Reg must be a valid argument register!");
7226     return LASize + 8 * (Reg - PPC::X3);
7227   }
7228 
7229   llvm_unreachable("Only general purpose registers expected.");
7230 }
7231 
7232 //   AIX ABI Stack Frame Layout:
7233 //
7234 //   Low Memory +--------------------------------------------+
7235 //   SP   +---> | Back chain                                 | ---+
7236 //        |     +--------------------------------------------+    |
7237 //        |     | Saved Condition Register                   |    |
7238 //        |     +--------------------------------------------+    |
7239 //        |     | Saved Linkage Register                     |    |
7240 //        |     +--------------------------------------------+    | Linkage Area
7241 //        |     | Reserved for compilers                     |    |
7242 //        |     +--------------------------------------------+    |
7243 //        |     | Reserved for binders                       |    |
7244 //        |     +--------------------------------------------+    |
7245 //        |     | Saved TOC pointer                          | ---+
7246 //        |     +--------------------------------------------+
7247 //        |     | Parameter save area                        |
7248 //        |     +--------------------------------------------+
7249 //        |     | Alloca space                               |
7250 //        |     +--------------------------------------------+
7251 //        |     | Local variable space                       |
7252 //        |     +--------------------------------------------+
7253 //        |     | Float/int conversion temporary             |
7254 //        |     +--------------------------------------------+
7255 //        |     | Save area for AltiVec registers            |
7256 //        |     +--------------------------------------------+
7257 //        |     | AltiVec alignment padding                  |
7258 //        |     +--------------------------------------------+
7259 //        |     | Save area for VRSAVE register              |
7260 //        |     +--------------------------------------------+
7261 //        |     | Save area for General Purpose registers    |
7262 //        |     +--------------------------------------------+
7263 //        |     | Save area for Floating Point registers     |
7264 //        |     +--------------------------------------------+
7265 //        +---- | Back chain                                 |
7266 // High Memory  +--------------------------------------------+
7267 //
7268 //  Specifications:
7269 //  AIX 7.2 Assembler Language Reference
7270 //  Subroutine linkage convention
7271 
7272 SDValue PPCTargetLowering::LowerFormalArguments_AIX(
7273     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
7274     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
7275     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
7276 
7277   assert((CallConv == CallingConv::C || CallConv == CallingConv::Cold ||
7278           CallConv == CallingConv::Fast) &&
7279          "Unexpected calling convention!");
7280 
7281   if (getTargetMachine().Options.GuaranteedTailCallOpt)
7282     report_fatal_error("Tail call support is unimplemented on AIX.");
7283 
7284   if (useSoftFloat())
7285     report_fatal_error("Soft float support is unimplemented on AIX.");
7286 
7287   const PPCSubtarget &Subtarget =
7288       static_cast<const PPCSubtarget &>(DAG.getSubtarget());
7289   if (Subtarget.hasQPX())
7290     report_fatal_error("QPX support is not supported on AIX.");
7291 
7292   const bool IsPPC64 = Subtarget.isPPC64();
7293   const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
7294 
7295   // Assign locations to all of the incoming arguments.
7296   SmallVector<CCValAssign, 16> ArgLocs;
7297   MachineFunction &MF = DAG.getMachineFunction();
7298   MachineFrameInfo &MFI = MF.getFrameInfo();
7299   CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
7300 
7301   const EVT PtrVT = getPointerTy(MF.getDataLayout());
7302   // Reserve space for the linkage area on the stack.
7303   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
7304   CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
7305   CCInfo.AnalyzeFormalArguments(Ins, CC_AIX);
7306 
7307   SmallVector<SDValue, 8> MemOps;
7308 
7309   for (size_t I = 0, End = ArgLocs.size(); I != End; /* No increment here */) {
7310     CCValAssign &VA = ArgLocs[I++];
7311     MVT LocVT = VA.getLocVT();
7312     ISD::ArgFlagsTy Flags = Ins[VA.getValNo()].Flags;
7313 
    // For compatibility with the AIX XL compiler, the float args in the
    // parameter save area are initialized even if the argument is available
    // in a register.  The caller is required to initialize both the register
    // and memory; however, the callee can choose to expect it in either.
    // The MemLoc is dismissed here because the argument is retrieved from
    // the register.
7320     if (VA.isMemLoc() && VA.needsCustom())
7321       continue;
7322 
7323     if (Flags.isByVal() && VA.isMemLoc()) {
7324       const unsigned Size =
7325           alignTo(Flags.getByValSize() ? Flags.getByValSize() : PtrByteSize,
7326                   PtrByteSize);
7327       const int FI = MF.getFrameInfo().CreateFixedObject(
7328           Size, VA.getLocMemOffset(), /* IsImmutable */ false,
7329           /* IsAliased */ true);
7330       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7331       InVals.push_back(FIN);
7332 
7333       continue;
7334     }
7335 
7336     if (Flags.isByVal()) {
7337       assert(VA.isRegLoc() && "MemLocs should already be handled.");
7338 
7339       const MCPhysReg ArgReg = VA.getLocReg();
7340       const PPCFrameLowering *FL = Subtarget.getFrameLowering();
7341 
7342       if (Flags.getNonZeroByValAlign() > PtrByteSize)
7343         report_fatal_error("Over aligned byvals not supported yet.");
7344 
7345       const unsigned StackSize = alignTo(Flags.getByValSize(), PtrByteSize);
7346       const int FI = MF.getFrameInfo().CreateFixedObject(
7347           StackSize, mapArgRegToOffsetAIX(ArgReg, FL), /* IsImmutable */ false,
7348           /* IsAliased */ true);
7349       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7350       InVals.push_back(FIN);
7351 
7352       // Add live ins for all the RegLocs for the same ByVal.
7353       const TargetRegisterClass *RegClass =
7354           IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
7355 
7356       auto HandleRegLoc = [&, RegClass, LocVT](const MCPhysReg PhysReg,
7357                                                unsigned Offset) {
7358         const unsigned VReg = MF.addLiveIn(PhysReg, RegClass);
        // Since the caller's side has left-justified the aggregate in the
        // register, we can simply store the entire register into the stack
        // slot.
7362         SDValue CopyFrom = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
        // The store to the fixed-stack object is needed because accessing a
        // field of the ByVal will use a GEP and load. Ideally we would
        // extract the value from the register directly and elide the stores
        // when the argument's address is not taken, but that remains future
        // work.
7368         SDValue Store =
7369             DAG.getStore(CopyFrom.getValue(1), dl, CopyFrom,
7370                          DAG.getObjectPtrOffset(dl, FIN, Offset),
7371                          MachinePointerInfo::getFixedStack(MF, FI, Offset));
7372 
7373         MemOps.push_back(Store);
7374       };
7375 
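      // Copy the first GPR, then walk any further GPRs assigned to this
      // ByVal. For illustration: a 20-byte byval on 64-bit AIX has
      // StackSize == 24, so it can span up to three GPRs; each full
      // register is stored to consecutive pointer-sized slots of the fixed
      // object created above.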
7376       unsigned Offset = 0;
7377       HandleRegLoc(VA.getLocReg(), Offset);
7378       Offset += PtrByteSize;
7379       for (; Offset != StackSize && ArgLocs[I].isRegLoc();
7380            Offset += PtrByteSize) {
7381         assert(ArgLocs[I].getValNo() == VA.getValNo() &&
7382                "RegLocs should be for ByVal argument.");
7383 
7384         const CCValAssign RL = ArgLocs[I++];
7385         HandleRegLoc(RL.getLocReg(), Offset);
7386       }
7387 
7388       if (Offset != StackSize) {
7389         assert(ArgLocs[I].getValNo() == VA.getValNo() &&
7390                "Expected MemLoc for remaining bytes.");
7391         assert(ArgLocs[I].isMemLoc() && "Expected MemLoc for remaining bytes.");
        // Consume the MemLoc. The InVal has already been emitted, so nothing
        // more needs to be done.
7394         ++I;
7395       }
7396 
7397       continue;
7398     }
7399 
7400     EVT ValVT = VA.getValVT();
7401     if (VA.isRegLoc() && !VA.needsCustom()) {
7402       MVT::SimpleValueType SVT = ValVT.getSimpleVT().SimpleTy;
7403       unsigned VReg =
7404           MF.addLiveIn(VA.getLocReg(), getRegClassForSVT(SVT, IsPPC64));
7405       SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
7406       if (ValVT.isScalarInteger() &&
7407           (ValVT.getSizeInBits() < LocVT.getSizeInBits())) {
7408         ArgValue =
7409             truncateScalarIntegerArg(Flags, ValVT, DAG, ArgValue, LocVT, dl);
7410       }
7411       InVals.push_back(ArgValue);
7412       continue;
7413     }
7414     if (VA.isMemLoc()) {
7415       const unsigned LocSize = LocVT.getStoreSize();
7416       const unsigned ValSize = ValVT.getStoreSize();
7417       assert((ValSize <= LocSize) &&
7418              "Object size is larger than size of MemLoc");
7419       int CurArgOffset = VA.getLocMemOffset();
7420       // Objects are right-justified because AIX is big-endian.
7421       if (LocSize > ValSize)
7422         CurArgOffset += LocSize - ValSize;
7423       // Potential tail calls could cause overwriting of argument stack slots.
7424       const bool IsImmutable =
7425           !(getTargetMachine().Options.GuaranteedTailCallOpt &&
7426             (CallConv == CallingConv::Fast));
7427       int FI = MFI.CreateFixedObject(ValSize, CurArgOffset, IsImmutable);
7428       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7429       SDValue ArgValue =
7430           DAG.getLoad(ValVT, dl, Chain, FIN, MachinePointerInfo());
7431       InVals.push_back(ArgValue);
7432       continue;
7433     }
7434   }
7435 
  // On AIX a minimum of 8 words is reserved in the parameter save area.
7437   const unsigned MinParameterSaveArea = 8 * PtrByteSize;
7438   // Area that is at least reserved in the caller of this function.
7439   unsigned CallerReservedArea =
7440       std::max(CCInfo.getNextStackOffset(), LinkageSize + MinParameterSaveArea);
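  // With the AIX linkage-area sizes (24 bytes on PPC32, 48 bytes on PPC64),
  // this floor works out to 24 + 32 = 56 bytes on 32-bit AIX and
  // 48 + 64 = 112 bytes on 64-bit AIX.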
7441 
  // Set the size that is at least reserved in the caller of this function.
  // A tail-call-optimized function's reserved stack space needs to be
  // aligned so that taking the difference between two stack areas will
  // result in an aligned stack.
7446   CallerReservedArea =
7447       EnsureStackAlignment(Subtarget.getFrameLowering(), CallerReservedArea);
7448   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
7449   FuncInfo->setMinReservedArea(CallerReservedArea);
7450 
7451   if (isVarArg) {
7452     FuncInfo->setVarArgsFrameIndex(
7453         MFI.CreateFixedObject(PtrByteSize, CCInfo.getNextStackOffset(), true));
7454     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
7455 
7456     static const MCPhysReg GPR_32[] = {PPC::R3, PPC::R4, PPC::R5, PPC::R6,
7457                                        PPC::R7, PPC::R8, PPC::R9, PPC::R10};
7458 
7459     static const MCPhysReg GPR_64[] = {PPC::X3, PPC::X4, PPC::X5, PPC::X6,
7460                                        PPC::X7, PPC::X8, PPC::X9, PPC::X10};
7461     const unsigned NumGPArgRegs = array_lengthof(IsPPC64 ? GPR_64 : GPR_32);
7462 
7463     // The fixed integer arguments of a variadic function are stored to the
7464     // VarArgsFrameIndex on the stack so that they may be loaded by
7465     // dereferencing the result of va_next.
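    // For illustration: if word-sized fixed arguments consumed the first
    // three GPRs, (NextStackOffset - LinkageSize) / PtrByteSize == 3, so
    // only R6..R10 (X6..X10 on 64-bit) are spilled here.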
7466     for (unsigned GPRIndex =
7467              (CCInfo.getNextStackOffset() - LinkageSize) / PtrByteSize;
7468          GPRIndex < NumGPArgRegs; ++GPRIndex) {
7469 
7470       const unsigned VReg =
7471           IsPPC64 ? MF.addLiveIn(GPR_64[GPRIndex], &PPC::G8RCRegClass)
7472                   : MF.addLiveIn(GPR_32[GPRIndex], &PPC::GPRCRegClass);
7473 
7474       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
7475       SDValue Store =
7476           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
7477       MemOps.push_back(Store);
7478       // Increment the address for the next argument to store.
7479       SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
7480       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
7481     }
7482   }
7483 
7484   if (!MemOps.empty())
7485     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
7486 
7487   return Chain;
7488 }
7489 
7490 SDValue PPCTargetLowering::LowerCall_AIX(
7491     SDValue Chain, SDValue Callee, CallFlags CFlags,
7492     const SmallVectorImpl<ISD::OutputArg> &Outs,
7493     const SmallVectorImpl<SDValue> &OutVals,
7494     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
7495     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
7496     const CallBase *CB) const {
7497   // See PPCTargetLowering::LowerFormalArguments_AIX() for a description of the
7498   // AIX ABI stack frame layout.
7499 
7500   assert((CFlags.CallConv == CallingConv::C ||
7501           CFlags.CallConv == CallingConv::Cold ||
7502           CFlags.CallConv == CallingConv::Fast) &&
7503          "Unexpected calling convention!");
7504 
7505   if (CFlags.IsPatchPoint)
7506     report_fatal_error("This call type is unimplemented on AIX.");
7507 
  const PPCSubtarget &Subtarget =
      static_cast<const PPCSubtarget &>(DAG.getSubtarget());
7510   if (Subtarget.hasQPX())
7511     report_fatal_error("QPX is not supported on AIX.");
7512   if (Subtarget.hasAltivec())
7513     report_fatal_error("Altivec support is unimplemented on AIX.");
7514 
7515   MachineFunction &MF = DAG.getMachineFunction();
7516   SmallVector<CCValAssign, 16> ArgLocs;
7517   CCState CCInfo(CFlags.CallConv, CFlags.IsVarArg, MF, ArgLocs,
7518                  *DAG.getContext());
7519 
7520   // Reserve space for the linkage save area (LSA) on the stack.
7521   // In both PPC32 and PPC64 there are 6 reserved slots in the LSA:
7522   //   [SP][CR][LR][2 x reserved][TOC].
7523   // The LSA is 24 bytes (6x4) in PPC32 and 48 bytes (6x8) in PPC64.
7524   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
7525   const bool IsPPC64 = Subtarget.isPPC64();
7526   const EVT PtrVT = getPointerTy(DAG.getDataLayout());
7527   const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
7528   CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
7529   CCInfo.AnalyzeCallOperands(Outs, CC_AIX);
7530 
7531   // The prolog code of the callee may store up to 8 GPR argument registers to
7532   // the stack, allowing va_start to index over them in memory if the callee
7533   // is variadic.
7534   // Because we cannot tell if this is needed on the caller side, we have to
7535   // conservatively assume that it is needed.  As such, make sure we have at
7536   // least enough stack space for the caller to store the 8 GPRs.
7537   const unsigned MinParameterSaveAreaSize = 8 * PtrByteSize;
7538   const unsigned NumBytes = std::max(LinkageSize + MinParameterSaveAreaSize,
7539                                      CCInfo.getNextStackOffset());
7540 
7541   // Adjust the stack pointer for the new arguments...
7542   // These operations are automatically eliminated by the prolog/epilog pass.
7543   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
7544   SDValue CallSeqStart = Chain;
7545 
7546   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
7547   SmallVector<SDValue, 8> MemOpChains;
7548 
7549   // Set up a copy of the stack pointer for loading and storing any
7550   // arguments that may not fit in the registers available for argument
7551   // passing.
7552   const SDValue StackPtr = IsPPC64 ? DAG.getRegister(PPC::X1, MVT::i64)
7553                                    : DAG.getRegister(PPC::R1, MVT::i32);
7554 
7555   for (unsigned I = 0, E = ArgLocs.size(); I != E;) {
7556     const unsigned ValNo = ArgLocs[I].getValNo();
7557     SDValue Arg = OutVals[ValNo];
7558     ISD::ArgFlagsTy Flags = Outs[ValNo].Flags;
7559 
7560     if (Flags.isByVal()) {
7561       const unsigned ByValSize = Flags.getByValSize();
7562 
7563       // Nothing to do for zero-sized ByVals on the caller side.
7564       if (!ByValSize) {
7565         ++I;
7566         continue;
7567       }
7568 
7569       auto GetLoad = [&](EVT VT, unsigned LoadOffset) {
7570         return DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain,
7571                               (LoadOffset != 0)
7572                                   ? DAG.getObjectPtrOffset(dl, Arg, LoadOffset)
7573                                   : Arg,
7574                               MachinePointerInfo(), VT);
7575       };
7576 
7577       unsigned LoadOffset = 0;
7578 
      // Initialize the registers that are fully occupied by the by-val
      // argument.
7580       while (LoadOffset + PtrByteSize <= ByValSize && ArgLocs[I].isRegLoc()) {
7581         SDValue Load = GetLoad(PtrVT, LoadOffset);
7582         MemOpChains.push_back(Load.getValue(1));
7583         LoadOffset += PtrByteSize;
7584         const CCValAssign &ByValVA = ArgLocs[I++];
7585         assert(ByValVA.getValNo() == ValNo &&
7586                "Unexpected location for pass-by-value argument.");
7587         RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), Load));
7588       }
7589 
7590       if (LoadOffset == ByValSize)
7591         continue;
7592 
7593       // There must be one more loc to handle the remainder.
7594       assert(ArgLocs[I].getValNo() == ValNo &&
7595              "Expected additional location for by-value argument.");
7596 
7597       if (ArgLocs[I].isMemLoc()) {
7598         assert(LoadOffset < ByValSize && "Unexpected memloc for by-val arg.");
7599         const CCValAssign &ByValVA = ArgLocs[I++];
7600         ISD::ArgFlagsTy MemcpyFlags = Flags;
        // Only memcpy the bytes that aren't passed in registers.
7602         MemcpyFlags.setByValSize(ByValSize - LoadOffset);
7603         Chain = CallSeqStart = createMemcpyOutsideCallSeq(
7604             (LoadOffset != 0) ? DAG.getObjectPtrOffset(dl, Arg, LoadOffset)
7605                               : Arg,
7606             DAG.getObjectPtrOffset(dl, StackPtr, ByValVA.getLocMemOffset()),
7607             CallSeqStart, MemcpyFlags, DAG, dl);
7608         continue;
7609       }
7610 
      // Initialize the final register residue.
      // Any residue that occupies the final by-val arg register must be
      // left-justified on AIX. Loads must be a power-of-2 size and cannot be
      // larger than the remaining bytes. For example: a 7-byte by-val arg
      // requires 4-, 2- and 1-byte loads.
7616       const unsigned ResidueBytes = ByValSize % PtrByteSize;
7617       assert(ResidueBytes != 0 && LoadOffset + PtrByteSize > ByValSize &&
7618              "Unexpected register residue for by-value argument.");
7619       SDValue ResidueVal;
7620       for (unsigned Bytes = 0; Bytes != ResidueBytes;) {
7621         const unsigned N = PowerOf2Floor(ResidueBytes - Bytes);
7622         const MVT VT =
7623             N == 1 ? MVT::i8
7624                    : ((N == 2) ? MVT::i16 : (N == 4 ? MVT::i32 : MVT::i64));
7625         SDValue Load = GetLoad(VT, LoadOffset);
7626         MemOpChains.push_back(Load.getValue(1));
7627         LoadOffset += N;
7628         Bytes += N;
7629 
        // By-val arguments are passed left-justified in registers.
7631         // Every load here needs to be shifted, otherwise a full register load
7632         // should have been used.
7633         assert(PtrVT.getSimpleVT().getSizeInBits() > (Bytes * 8) &&
7634                "Unexpected load emitted during handling of pass-by-value "
7635                "argument.");
7636         unsigned NumSHLBits = PtrVT.getSimpleVT().getSizeInBits() - (Bytes * 8);
7637         EVT ShiftAmountTy =
7638             getShiftAmountTy(Load->getValueType(0), DAG.getDataLayout());
7639         SDValue SHLAmt = DAG.getConstant(NumSHLBits, dl, ShiftAmountTy);
7640         SDValue ShiftedLoad =
7641             DAG.getNode(ISD::SHL, dl, Load.getValueType(), Load, SHLAmt);
7642         ResidueVal = ResidueVal ? DAG.getNode(ISD::OR, dl, PtrVT, ResidueVal,
7643                                               ShiftedLoad)
7644                                 : ShiftedLoad;
7645       }
7646 
7647       const CCValAssign &ByValVA = ArgLocs[I++];
7648       RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), ResidueVal));
7649       continue;
7650     }
7651 
7652     CCValAssign &VA = ArgLocs[I++];
7653     const MVT LocVT = VA.getLocVT();
7654     const MVT ValVT = VA.getValVT();
7655 
7656     switch (VA.getLocInfo()) {
7657     default:
7658       report_fatal_error("Unexpected argument extension type.");
7659     case CCValAssign::Full:
7660       break;
7661     case CCValAssign::ZExt:
7662       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
7663       break;
7664     case CCValAssign::SExt:
7665       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
7666       break;
7667     }
7668 
7669     if (VA.isRegLoc() && !VA.needsCustom()) {
7670       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
7671       continue;
7672     }
7673 
7674     if (VA.isMemLoc()) {
7675       SDValue PtrOff =
7676           DAG.getConstant(VA.getLocMemOffset(), dl, StackPtr.getValueType());
7677       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7678       MemOpChains.push_back(
7679           DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
7680 
7681       continue;
7682     }
7683 
7684     // Custom handling is used for GPR initializations for vararg float
7685     // arguments.
7686     assert(VA.isRegLoc() && VA.needsCustom() && CFlags.IsVarArg &&
7687            ValVT.isFloatingPoint() && LocVT.isInteger() &&
7688            "Unexpected register handling for calling convention.");
7689 
7690     SDValue ArgAsInt =
7691         DAG.getBitcast(MVT::getIntegerVT(ValVT.getSizeInBits()), Arg);
7692 
7693     if (Arg.getValueType().getStoreSize() == LocVT.getStoreSize())
7694       // f32 in 32-bit GPR
7695       // f64 in 64-bit GPR
7696       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgAsInt));
7697     else if (Arg.getValueType().getSizeInBits() < LocVT.getSizeInBits())
7698       // f32 in 64-bit GPR.
7699       RegsToPass.push_back(std::make_pair(
7700           VA.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, LocVT)));
7701     else {
7702       // f64 in two 32-bit GPRs
7703       // The 2 GPRs are marked custom and expected to be adjacent in ArgLocs.
7704       assert(Arg.getValueType() == MVT::f64 && CFlags.IsVarArg && !IsPPC64 &&
7705              "Unexpected custom register for argument!");
7706       CCValAssign &GPR1 = VA;
7707       SDValue MSWAsI64 = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgAsInt,
7708                                      DAG.getConstant(32, dl, MVT::i8));
7709       RegsToPass.push_back(std::make_pair(
7710           GPR1.getLocReg(), DAG.getZExtOrTrunc(MSWAsI64, dl, MVT::i32)));
7711 
7712       if (I != E) {
        // If only one GPR was available, there will be a single custom GPR
        // and the rest of the argument is passed in memory.
7715         CCValAssign &PeekArg = ArgLocs[I];
        if (PeekArg.isRegLoc() && PeekArg.getValNo() == ValNo) {
7717           assert(PeekArg.needsCustom() && "A second custom GPR is expected.");
7718           CCValAssign &GPR2 = ArgLocs[I++];
7719           RegsToPass.push_back(std::make_pair(
7720               GPR2.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, MVT::i32)));
7721         }
7722       }
7723     }
7724   }
7725 
7726   if (!MemOpChains.empty())
7727     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
7728 
7729   // For indirect calls, we need to save the TOC base to the stack for
7730   // restoration after the call.
7731   if (CFlags.IsIndirect) {
7732     assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
7733     const MCRegister TOCBaseReg = Subtarget.getTOCPointerRegister();
7734     const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
7735     const MVT PtrVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
7736     const unsigned TOCSaveOffset =
7737         Subtarget.getFrameLowering()->getTOCSaveOffset();
7738 
7739     setUsesTOCBasePtr(DAG);
7740     SDValue Val = DAG.getCopyFromReg(Chain, dl, TOCBaseReg, PtrVT);
7741     SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
7742     SDValue StackPtr = DAG.getRegister(StackPtrReg, PtrVT);
7743     SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7744     Chain = DAG.getStore(
7745         Val.getValue(1), dl, Val, AddPtr,
7746         MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
7747   }
7748 
7749   // Build a sequence of copy-to-reg nodes chained together with token chain
7750   // and flag operands which copy the outgoing args into the appropriate regs.
7751   SDValue InFlag;
7752   for (auto Reg : RegsToPass) {
7753     Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, InFlag);
7754     InFlag = Chain.getValue(1);
7755   }
7756 
7757   const int SPDiff = 0;
7758   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
7759                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
7760 }
7761 
7762 bool
7763 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
7764                                   MachineFunction &MF, bool isVarArg,
7765                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
7766                                   LLVMContext &Context) const {
7767   SmallVector<CCValAssign, 16> RVLocs;
7768   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
7769   return CCInfo.CheckReturn(
7770       Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7771                 ? RetCC_PPC_Cold
7772                 : RetCC_PPC);
7773 }
7774 
7775 SDValue
7776 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
7777                                bool isVarArg,
7778                                const SmallVectorImpl<ISD::OutputArg> &Outs,
7779                                const SmallVectorImpl<SDValue> &OutVals,
7780                                const SDLoc &dl, SelectionDAG &DAG) const {
7781   SmallVector<CCValAssign, 16> RVLocs;
7782   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
7783                  *DAG.getContext());
7784   CCInfo.AnalyzeReturn(Outs,
7785                        (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7786                            ? RetCC_PPC_Cold
7787                            : RetCC_PPC);
7788 
7789   SDValue Flag;
7790   SmallVector<SDValue, 4> RetOps(1, Chain);
7791 
7792   // Copy the result values into the output registers.
7793   for (unsigned i = 0, RealResIdx = 0; i != RVLocs.size(); ++i, ++RealResIdx) {
7794     CCValAssign &VA = RVLocs[i];
7795     assert(VA.isRegLoc() && "Can only return in registers!");
7796 
7797     SDValue Arg = OutVals[RealResIdx];
7798 
7799     switch (VA.getLocInfo()) {
7800     default: llvm_unreachable("Unknown loc info!");
7801     case CCValAssign::Full: break;
7802     case CCValAssign::AExt:
7803       Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
7804       break;
7805     case CCValAssign::ZExt:
7806       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
7807       break;
7808     case CCValAssign::SExt:
7809       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
7810       break;
7811     }
7812     if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
7813       bool isLittleEndian = Subtarget.isLittleEndian();
7814       // Legalize ret f64 -> ret 2 x i32.
7815       SDValue SVal =
7816           DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7817                       DAG.getIntPtrConstant(isLittleEndian ? 0 : 1, dl));
7818       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7819       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7820       SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7821                          DAG.getIntPtrConstant(isLittleEndian ? 1 : 0, dl));
7822       Flag = Chain.getValue(1);
7823       VA = RVLocs[++i]; // skip ahead to next loc
7824       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7825     } else
7826       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
7827     Flag = Chain.getValue(1);
7828     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7829   }
7830 
7831   RetOps[0] = Chain;  // Update chain.
7832 
7833   // Add the flag if we have it.
7834   if (Flag.getNode())
7835     RetOps.push_back(Flag);
7836 
7837   return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
7838 }
7839 
7840 SDValue
7841 PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
7842                                                 SelectionDAG &DAG) const {
7843   SDLoc dl(Op);
7844 
7845   // Get the correct type for integers.
7846   EVT IntVT = Op.getValueType();
7847 
7848   // Get the inputs.
7849   SDValue Chain = Op.getOperand(0);
7850   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7851   // Build a DYNAREAOFFSET node.
7852   SDValue Ops[2] = {Chain, FPSIdx};
7853   SDVTList VTs = DAG.getVTList(IntVT);
7854   return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
7855 }
7856 
7857 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
7858                                              SelectionDAG &DAG) const {
7859   // When we pop the dynamic allocation we need to restore the SP link.
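  // The back chain lives at offset 0 from the stack pointer: load the old
  // back chain through the current SP, switch SP to the saved value, then
  // store the back chain through the new SP.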
7860   SDLoc dl(Op);
7861 
7862   // Get the correct type for pointers.
7863   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7864 
7865   // Construct the stack pointer operand.
7866   bool isPPC64 = Subtarget.isPPC64();
7867   unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
7868   SDValue StackPtr = DAG.getRegister(SP, PtrVT);
7869 
7870   // Get the operands for the STACKRESTORE.
7871   SDValue Chain = Op.getOperand(0);
7872   SDValue SaveSP = Op.getOperand(1);
7873 
7874   // Load the old link SP.
7875   SDValue LoadLinkSP =
7876       DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());
7877 
7878   // Restore the stack pointer.
7879   Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
7880 
7881   // Store the old link SP.
7882   return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
7883 }
7884 
7885 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
7886   MachineFunction &MF = DAG.getMachineFunction();
7887   bool isPPC64 = Subtarget.isPPC64();
7888   EVT PtrVT = getPointerTy(MF.getDataLayout());
7889 
  // Get the current return address save index.
7892   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7893   int RASI = FI->getReturnAddrSaveIndex();
7894 
  // If the return address save index hasn't been defined yet.
  if (!RASI) {
    // Find out the fixed offset of the return address save area.
    int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
    // Allocate the frame index for the return address save area.
    RASI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, LROffset,
                                               false);
7901     // Save the result.
7902     FI->setReturnAddrSaveIndex(RASI);
7903   }
7904   return DAG.getFrameIndex(RASI, PtrVT);
7905 }
7906 
7907 SDValue
PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG &DAG) const {
7909   MachineFunction &MF = DAG.getMachineFunction();
7910   bool isPPC64 = Subtarget.isPPC64();
7911   EVT PtrVT = getPointerTy(MF.getDataLayout());
7912 
7913   // Get current frame pointer save index.  The users of this index will be
7914   // primarily DYNALLOC instructions.
7915   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7916   int FPSI = FI->getFramePointerSaveIndex();
7917 
  // If the frame pointer save index hasn't been defined yet.
  if (!FPSI) {
    // Find out the fixed offset of the frame pointer save area.
    int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
    // Allocate the frame index for the frame pointer save area.
    FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, FPOffset,
                                               true);
7924     // Save the result.
7925     FI->setFramePointerSaveIndex(FPSI);
7926   }
7927   return DAG.getFrameIndex(FPSI, PtrVT);
7928 }
7929 
7930 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
7931                                                    SelectionDAG &DAG) const {
7932   MachineFunction &MF = DAG.getMachineFunction();
7933   // Get the inputs.
7934   SDValue Chain = Op.getOperand(0);
7935   SDValue Size  = Op.getOperand(1);
7936   SDLoc dl(Op);
7937 
7938   // Get the correct type for pointers.
7939   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7940   // Negate the size.
7941   SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
7942                                 DAG.getConstant(0, dl, PtrVT), Size);
7943   // Construct a node for the frame pointer save index.
7944   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7945   SDValue Ops[3] = { Chain, NegSize, FPSIdx };
7946   SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
7947   if (hasInlineStackProbe(MF))
7948     return DAG.getNode(PPCISD::PROBED_ALLOCA, dl, VTs, Ops);
7949   return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
7950 }
7951 
SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
                                             SelectionDAG &DAG) const {
7954   MachineFunction &MF = DAG.getMachineFunction();
7955 
7956   bool isPPC64 = Subtarget.isPPC64();
7957   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7958 
7959   int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false);
7960   return DAG.getFrameIndex(FI, PtrVT);
7961 }
7962 
7963 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
7964                                                SelectionDAG &DAG) const {
7965   SDLoc DL(Op);
7966   return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
7967                      DAG.getVTList(MVT::i32, MVT::Other),
7968                      Op.getOperand(0), Op.getOperand(1));
7969 }
7970 
7971 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
7972                                                 SelectionDAG &DAG) const {
7973   SDLoc DL(Op);
7974   return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
7975                      Op.getOperand(0), Op.getOperand(1));
7976 }
7977 
7978 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
7979   if (Op.getValueType().isVector())
7980     return LowerVectorLoad(Op, DAG);
7981 
7982   assert(Op.getValueType() == MVT::i1 &&
7983          "Custom lowering only for i1 loads");
7984 
7985   // First, load 8 bits into 32 bits, then truncate to 1 bit.
7986 
7987   SDLoc dl(Op);
7988   LoadSDNode *LD = cast<LoadSDNode>(Op);
7989 
7990   SDValue Chain = LD->getChain();
7991   SDValue BasePtr = LD->getBasePtr();
7992   MachineMemOperand *MMO = LD->getMemOperand();
7993 
7994   SDValue NewLD =
7995       DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain,
7996                      BasePtr, MVT::i8, MMO);
7997   SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);
7998 
7999   SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
8000   return DAG.getMergeValues(Ops, dl);
8001 }
8002 
8003 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
8004   if (Op.getOperand(1).getValueType().isVector())
8005     return LowerVectorStore(Op, DAG);
8006 
8007   assert(Op.getOperand(1).getValueType() == MVT::i1 &&
8008          "Custom lowering only for i1 stores");
8009 
8010   // First, zero extend to 32 bits, then use a truncating store to 8 bits.
8011 
8012   SDLoc dl(Op);
8013   StoreSDNode *ST = cast<StoreSDNode>(Op);
8014 
8015   SDValue Chain = ST->getChain();
8016   SDValue BasePtr = ST->getBasePtr();
8017   SDValue Value = ST->getValue();
8018   MachineMemOperand *MMO = ST->getMemOperand();
8019 
8020   Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()),
8021                       Value);
8022   return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
8023 }
8024 
8025 // FIXME: Remove this once the ANDI glue bug is fixed:
8026 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
8027   assert(Op.getValueType() == MVT::i1 &&
8028          "Custom lowering only for i1 results");
8029 
8030   SDLoc DL(Op);
8031   return DAG.getNode(PPCISD::ANDI_rec_1_GT_BIT, DL, MVT::i1, Op.getOperand(0));
8032 }
8033 
8034 SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op,
8035                                                SelectionDAG &DAG) const {
8036 
8037   // Implements a vector truncate that fits in a vector register as a shuffle.
8038   // We want to legalize vector truncates down to where the source fits in
8039   // a vector register (and target is therefore smaller than vector register
8040   // size).  At that point legalization will try to custom lower the sub-legal
8041   // result and get here - where we can contain the truncate as a single target
8042   // operation.
8043 
8044   // For example a trunc <2 x i16> to <2 x i8> could be visualized as follows:
8045   //   <MSB1|LSB1, MSB2|LSB2> to <LSB1, LSB2>
8046   //
8047   // We will implement it for big-endian ordering as this (where x denotes
8048   // undefined):
8049   //   < MSB1|LSB1, MSB2|LSB2, uu, uu, uu, uu, uu, uu> to
8050   //   < LSB1, LSB2, u, u, u, u, u, u, u, u, u, u, u, u, u, u>
8051   //
8052   // The same operation in little-endian ordering will be:
8053   //   <uu, uu, uu, uu, uu, uu, LSB2|MSB2, LSB1|MSB1> to
8054   //   <u, u, u, u, u, u, u, u, u, u, u, u, u, u, LSB2, LSB1>
8055 
8056   assert(Op.getValueType().isVector() && "Vector type expected.");
8057 
8058   SDLoc DL(Op);
8059   SDValue N1 = Op.getOperand(0);
8060   unsigned SrcSize = N1.getValueType().getSizeInBits();
8061   assert(SrcSize <= 128 && "Source must fit in an Altivec/VSX vector");
8062   SDValue WideSrc = SrcSize == 128 ? N1 : widenVec(DAG, N1, DL);
8063 
8064   EVT TrgVT = Op.getValueType();
8065   unsigned TrgNumElts = TrgVT.getVectorNumElements();
8066   EVT EltVT = TrgVT.getVectorElementType();
8067   unsigned WideNumElts = 128 / EltVT.getSizeInBits();
8068   EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
8069 
8070   // First list the elements we want to keep.
8071   unsigned SizeMult = SrcSize / TrgVT.getSizeInBits();
8072   SmallVector<int, 16> ShuffV;
8073   if (Subtarget.isLittleEndian())
8074     for (unsigned i = 0; i < TrgNumElts; ++i)
8075       ShuffV.push_back(i * SizeMult);
8076   else
8077     for (unsigned i = 1; i <= TrgNumElts; ++i)
8078       ShuffV.push_back(i * SizeMult - 1);
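  // For illustration, truncating v4i32 -> v4i16: SizeMult == 2, so
  // little-endian keeps elements <0,2,4,6> of the widened v8i16 source and
  // big-endian keeps <1,3,5,7>.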
8079 
  // Populate the remaining elements with undefs; any index into the second
  // (undef) shuffle operand works.
  for (unsigned i = TrgNumElts; i < WideNumElts; ++i)
    ShuffV.push_back(WideNumElts + 1);
8084 
8085   SDValue Conv = DAG.getNode(ISD::BITCAST, DL, WideVT, WideSrc);
8086   return DAG.getVectorShuffle(WideVT, DL, Conv, DAG.getUNDEF(WideVT), ShuffV);
8087 }
8088 
8089 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when
8090 /// possible.
8091 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
8092   // Not FP? Not a fsel.
8093   if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
8094       !Op.getOperand(2).getValueType().isFloatingPoint())
8095     return Op;
8096 
8097   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
8098 
8099   EVT ResVT = Op.getValueType();
8100   EVT CmpVT = Op.getOperand(0).getValueType();
8101   SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
8102   SDValue TV  = Op.getOperand(2), FV  = Op.getOperand(3);
8103   SDLoc dl(Op);
8104   SDNodeFlags Flags = Op.getNode()->getFlags();
8105 
8106   // We have xsmaxcdp/xsmincdp which are OK to emit even in the
8107   // presence of infinities.
8108   if (Subtarget.hasP9Vector() && LHS == TV && RHS == FV) {
8109     switch (CC) {
8110     default:
8111       break;
8112     case ISD::SETOGT:
8113     case ISD::SETGT:
8114       return DAG.getNode(PPCISD::XSMAXCDP, dl, Op.getValueType(), LHS, RHS);
8115     case ISD::SETOLT:
8116     case ISD::SETLT:
8117       return DAG.getNode(PPCISD::XSMINCDP, dl, Op.getValueType(), LHS, RHS);
8118     }
8119   }
8120 
8121   // We might be able to do better than this under some circumstances, but in
8122   // general, fsel-based lowering of select is a finite-math-only optimization.
8123   // For more information, see section F.3 of the 2.06 ISA specification.
8125   if ((!DAG.getTarget().Options.NoInfsFPMath && !Flags.hasNoInfs()) ||
8126       (!DAG.getTarget().Options.NoNaNsFPMath && !Flags.hasNoNaNs()))
8127     return Op;
8128 
8129   // If the RHS of the comparison is a 0.0, we don't need to do the
8130   // subtraction at all.
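  // PPCISD::FSEL(X, TV, FV) selects TV when X >= 0.0 and FV otherwise, so
  // each case below is rewritten as a sign test on LHS or on LHS - RHS
  // (with operands swapped or negated as needed).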
8131   SDValue Sel1;
8132   if (isFloatingPointZero(RHS))
8133     switch (CC) {
8134     default: break;       // SETUO etc aren't handled by fsel.
8135     case ISD::SETNE:
8136       std::swap(TV, FV);
8137       LLVM_FALLTHROUGH;
8138     case ISD::SETEQ:
8139       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
8140         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
8141       Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
8142       if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
8143         Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
8144       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
8145                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
8146     case ISD::SETULT:
8147     case ISD::SETLT:
8148       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
8149       LLVM_FALLTHROUGH;
8150     case ISD::SETOGE:
8151     case ISD::SETGE:
8152       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
8153         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
8154       return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
8155     case ISD::SETUGT:
8156     case ISD::SETGT:
8157       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
8158       LLVM_FALLTHROUGH;
8159     case ISD::SETOLE:
8160     case ISD::SETLE:
8161       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
8162         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
8163       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
8164                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
8165     }
8166 
8167   SDValue Cmp;
8168   switch (CC) {
8169   default: break;       // SETUO etc aren't handled by fsel.
8170   case ISD::SETNE:
8171     std::swap(TV, FV);
8172     LLVM_FALLTHROUGH;
8173   case ISD::SETEQ:
8174     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
8175     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
8176       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8177     Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
8178     if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
8179       Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
8180     return DAG.getNode(PPCISD::FSEL, dl, ResVT,
8181                        DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
8182   case ISD::SETULT:
8183   case ISD::SETLT:
8184     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
8185     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
8186       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8187     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
8188   case ISD::SETOGE:
8189   case ISD::SETGE:
8190     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
8191     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
8192       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8193     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
8194   case ISD::SETUGT:
8195   case ISD::SETGT:
8196     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
8197     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
8198       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8199     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
8200   case ISD::SETOLE:
8201   case ISD::SETLE:
8202     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
8203     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
8204       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8205     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
8206   }
8207   return Op;
8208 }
8209 
8210 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
8211                                                SelectionDAG &DAG,
8212                                                const SDLoc &dl) const {
8213   assert(Op.getOperand(0).getValueType().isFloatingPoint());
8214   SDValue Src = Op.getOperand(0);
8215   if (Src.getValueType() == MVT::f32)
8216     Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
8217 
8218   SDValue Tmp;
8219   switch (Op.getSimpleValueType().SimpleTy) {
8220   default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
8221   case MVT::i32:
8222     Tmp = DAG.getNode(
8223         Op.getOpcode() == ISD::FP_TO_SINT
8224             ? PPCISD::FCTIWZ
8225             : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ),
8226         dl, MVT::f64, Src);
8227     break;
8228   case MVT::i64:
8229     assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) &&
8230            "i64 FP_TO_UINT is supported only with FPCVT");
8231     Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
8232                                                         PPCISD::FCTIDUZ,
8233                       dl, MVT::f64, Src);
8234     break;
8235   }
8236 
8237   // Convert the FP value to an int value through memory.
8238   bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() &&
8239     (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT());
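  // With STFIWX we can store the 32-bit integer result directly; otherwise
  // we spill the full f64 and load the appropriate 4-byte word below.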
8240   SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64);
8241   int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
8242   MachinePointerInfo MPI =
8243       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
8244 
8245   // Emit a store to the stack slot.
8246   SDValue Chain;
8247   Align Alignment(DAG.getEVTAlign(Tmp.getValueType()));
8248   if (i32Stack) {
8249     MachineFunction &MF = DAG.getMachineFunction();
8250     Alignment = Align(4);
8251     MachineMemOperand *MMO =
8252         MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Alignment);
8253     SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr };
8254     Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
8255               DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
8256   } else
8257     Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI, Alignment);
8258 
8259   // Result is a load from the stack slot.  If loading 4 bytes, make sure to
8260   // add in a bias on big endian.
8261   if (Op.getValueType() == MVT::i32 && !i32Stack) {
8262     FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
8263                         DAG.getConstant(4, dl, FIPtr.getValueType()));
8264     MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4);
8265   }
8266 
8267   RLI.Chain = Chain;
8268   RLI.Ptr = FIPtr;
8269   RLI.MPI = MPI;
8270   RLI.Alignment = Alignment;
8271 }
8272 
8273 /// Custom lowers floating point to integer conversions to use
8274 /// the direct move instructions available in ISA 2.07 to avoid the
8275 /// need for load/store combinations.
8276 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op,
8277                                                     SelectionDAG &DAG,
8278                                                     const SDLoc &dl) const {
8279   assert(Op.getOperand(0).getValueType().isFloatingPoint());
8280   SDValue Src = Op.getOperand(0);
8281 
8282   if (Src.getValueType() == MVT::f32)
8283     Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
8284 
8285   SDValue Tmp;
8286   switch (Op.getSimpleValueType().SimpleTy) {
8287   default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
8288   case MVT::i32:
8289     Tmp = DAG.getNode(
8290         Op.getOpcode() == ISD::FP_TO_SINT
8291             ? PPCISD::FCTIWZ
8292             : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ),
8293         dl, MVT::f64, Src);
8294     Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp);
8295     break;
8296   case MVT::i64:
8297     assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) &&
8298            "i64 FP_TO_UINT is supported only with FPCVT");
8299     Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
8300                                                         PPCISD::FCTIDUZ,
8301                       dl, MVT::f64, Src);
8302     Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp);
8303     break;
8304   }
8305   return Tmp;
8306 }
8307 
8308 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
8309                                           const SDLoc &dl) const {
8310 
8311   // FP to INT conversions are legal for f128.
8312   if (Op->getOperand(0).getValueType() == MVT::f128)
8313     return Op;
8314 
8315   // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
8316   // PPC (the libcall is not available).
8317   if (Op.getOperand(0).getValueType() == MVT::ppcf128) {
8318     if (Op.getValueType() == MVT::i32) {
8319       if (Op.getOpcode() == ISD::FP_TO_SINT) {
8320         SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
8321                                  MVT::f64, Op.getOperand(0),
8322                                  DAG.getIntPtrConstant(0, dl));
8323         SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
8324                                  MVT::f64, Op.getOperand(0),
8325                                  DAG.getIntPtrConstant(1, dl));
8326 
8327         // Add the two halves of the long double in round-to-zero mode.
8328         SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi);
8329 
8330         // Now use a smaller FP_TO_SINT.
8331         return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res);
8332       }
8333       if (Op.getOpcode() == ISD::FP_TO_UINT) {
8334         const uint64_t TwoE31[] = {0x41e0000000000000LL, 0};
8335         APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31));
8336         SDValue Tmp = DAG.getConstantFP(APF, dl, MVT::ppcf128);
8337         //  X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X
8338         // FIXME: generated code sucks.
8339         // TODO: Are there fast-math-flags to propagate to this FSUB?
8340         SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128,
8341                                    Op.getOperand(0), Tmp);
8342         True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True);
8343         True = DAG.getNode(ISD::ADD, dl, MVT::i32, True,
8344                            DAG.getConstant(0x80000000, dl, MVT::i32));
8345         SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32,
8346                                     Op.getOperand(0));
8347         return DAG.getSelectCC(dl, Op.getOperand(0), Tmp, True, False,
8348                                ISD::SETGE);
8349       }
8350     }
8351 
8352     return SDValue();
8353   }
8354 
8355   if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
8356     return LowerFP_TO_INTDirectMove(Op, DAG, dl);
8357 
8358   ReuseLoadInfo RLI;
8359   LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
8360 
8361   return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI,
8362                      RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
8363 }
8364 
8365 // We're trying to insert a regular store, S, and then a load, L. If the
8366 // incoming value, O, is a load, we might just be able to have our load use the
8367 // address used by O. However, we don't know if anything else will store to
8368 // that address before we can load from it. To prevent this situation, we need
8369 // to insert our load, L, into the chain as a peer of O. To do this, we give L
8370 // the same chain operand as O, we create a token factor from the chain results
8371 // of O and L, and we replace all uses of O's chain result with that token
8372 // factor (see spliceIntoChain below for this last part).
8373 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
8374                                             ReuseLoadInfo &RLI,
8375                                             SelectionDAG &DAG,
8376                                             ISD::LoadExtType ET) const {
8377   SDLoc dl(Op);
8378   bool ValidFPToUint = Op.getOpcode() == ISD::FP_TO_UINT &&
8379                        (Subtarget.hasFPCVT() || Op.getValueType() == MVT::i32);
8380   if (ET == ISD::NON_EXTLOAD &&
8381       (ValidFPToUint || Op.getOpcode() == ISD::FP_TO_SINT) &&
8382       isOperationLegalOrCustom(Op.getOpcode(),
8383                                Op.getOperand(0).getValueType())) {
8384 
8385     LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
8386     return true;
8387   }
8388 
8389   LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
8390   if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
8391       LD->isNonTemporal())
8392     return false;
8393   if (LD->getMemoryVT() != MemVT)
8394     return false;
8395 
8396   RLI.Ptr = LD->getBasePtr();
8397   if (LD->isIndexed() && !LD->getOffset().isUndef()) {
8398     assert(LD->getAddressingMode() == ISD::PRE_INC &&
8399            "Non-pre-inc AM on PPC?");
8400     RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
8401                           LD->getOffset());
8402   }
8403 
8404   RLI.Chain = LD->getChain();
8405   RLI.MPI = LD->getPointerInfo();
8406   RLI.IsDereferenceable = LD->isDereferenceable();
8407   RLI.IsInvariant = LD->isInvariant();
8408   RLI.Alignment = LD->getAlign();
8409   RLI.AAInfo = LD->getAAInfo();
8410   RLI.Ranges = LD->getRanges();
8411 
8412   RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
8413   return true;
8414 }
8415 
8416 // Given the head of the old chain, ResChain, insert a token factor containing
8417 // it and NewResChain, and make users of ResChain now be users of that token
8418 // factor.
8419 // TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead.
8420 void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
8421                                         SDValue NewResChain,
8422                                         SelectionDAG &DAG) const {
8423   if (!ResChain)
8424     return;
8425 
8426   SDLoc dl(NewResChain);
8427 
8428   SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
8429                            NewResChain, DAG.getUNDEF(MVT::Other));
8430   assert(TF.getNode() != NewResChain.getNode() &&
8431          "A new TF really is required here");
8432 
8433   DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
8434   DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
8435 }
8436 
/// Analyze the profitability of a direct move: prefer a float load over an
/// int load plus a direct move when the loaded integer value has no integer
/// uses.
8440 bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
8441   SDNode *Origin = Op.getOperand(0).getNode();
8442   if (Origin->getOpcode() != ISD::LOAD)
8443     return true;
8444 
  // On subtargets without LXSIBZX/LXSIHZX (e.g. Power8), prefer the direct
  // move if the loaded memory size is 1 or 2 bytes.
8447   MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand();
8448   if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2)
8449     return true;
8450 
8451   for (SDNode::use_iterator UI = Origin->use_begin(),
8452                             UE = Origin->use_end();
8453        UI != UE; ++UI) {
8454 
8455     // Only look at the users of the loaded value.
8456     if (UI.getUse().get().getResNo() != 0)
8457       continue;
8458 
8459     if (UI->getOpcode() != ISD::SINT_TO_FP &&
8460         UI->getOpcode() != ISD::UINT_TO_FP)
8461       return true;
8462   }
8463 
8464   return false;
8465 }
8466 
8467 /// Custom lowers integer to floating point conversions to use
8468 /// the direct move instructions available in ISA 2.07 to avoid the
8469 /// need for load/store combinations.
8470 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
8471                                                     SelectionDAG &DAG,
8472                                                     const SDLoc &dl) const {
8473   assert((Op.getValueType() == MVT::f32 ||
8474           Op.getValueType() == MVT::f64) &&
8475          "Invalid floating point type as target of conversion");
8476   assert(Subtarget.hasFPCVT() &&
8477          "Int to FP conversions with direct moves require FPCVT");
8478   SDValue FP;
8479   SDValue Src = Op.getOperand(0);
8480   bool SinglePrec = Op.getValueType() == MVT::f32;
8481   bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
8482   bool Signed = Op.getOpcode() == ISD::SINT_TO_FP;
8483   unsigned ConvOp = Signed ? (SinglePrec ? PPCISD::FCFIDS : PPCISD::FCFID) :
8484                              (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU);
8485 
8486   if (WordInt) {
8487     FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ,
8488                      dl, MVT::f64, Src);
8489     FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
8490   }
8491   else {
8492     FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src);
8493     FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
8494   }
8495 
8496   return FP;
8497 }
8498 
8499 static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl) {
8500 
8501   EVT VecVT = Vec.getValueType();
8502   assert(VecVT.isVector() && "Expected a vector type.");
8503   assert(VecVT.getSizeInBits() < 128 && "Vector is already full width.");
8504 
8505   EVT EltVT = VecVT.getVectorElementType();
8506   unsigned WideNumElts = 128 / EltVT.getSizeInBits();
8507   EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
8508 
8509   unsigned NumConcat = WideNumElts / VecVT.getVectorNumElements();
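  // For illustration, widening a v2i16 (32 bits): WideNumElts == 8 and
  // NumConcat == 4, so the source is concatenated with three undef v2i16
  // vectors into a v8i16.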
8510   SmallVector<SDValue, 16> Ops(NumConcat);
8511   Ops[0] = Vec;
8512   SDValue UndefVec = DAG.getUNDEF(VecVT);
8513   for (unsigned i = 1; i < NumConcat; ++i)
8514     Ops[i] = UndefVec;
8515 
8516   return DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Ops);
8517 }
8518 
8519 SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
8520                                                 const SDLoc &dl) const {
8521 
8522   unsigned Opc = Op.getOpcode();
8523   assert((Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP) &&
8524          "Unexpected conversion type");
8525   assert((Op.getValueType() == MVT::v2f64 || Op.getValueType() == MVT::v4f32) &&
8526          "Supports conversions to v2f64/v4f32 only.");
8527 
8528   bool SignedConv = Opc == ISD::SINT_TO_FP;
8529   bool FourEltRes = Op.getValueType() == MVT::v4f32;
8530 
8531   SDValue Wide = widenVec(DAG, Op.getOperand(0), dl);
8532   EVT WideVT = Wide.getValueType();
8533   unsigned WideNumElts = WideVT.getVectorNumElements();
8534   MVT IntermediateVT = FourEltRes ? MVT::v4i32 : MVT::v2i64;
8535 
8536   SmallVector<int, 16> ShuffV;
8537   for (unsigned i = 0; i < WideNumElts; ++i)
8538     ShuffV.push_back(i + WideNumElts);
8539 
8540   int Stride = FourEltRes ? WideNumElts / 4 : WideNumElts / 2;
8541   int SaveElts = FourEltRes ? 4 : 2;
8542   if (Subtarget.isLittleEndian())
8543     for (int i = 0; i < SaveElts; i++)
8544       ShuffV[i * Stride] = i;
8545   else
8546     for (int i = 1; i <= SaveElts; i++)
8547       ShuffV[i * Stride - 1] = i - 1;
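  // For illustration, a v2i32 -> v2f64 conversion on little-endian: Wide is
  // v4i32, ShuffV starts as <4,5,6,7> and becomes <0,5,1,7>, interleaving
  // the source elements with zeros (unsigned) or undefs (signed) so each
  // i32 lands in the low half of an i64 lane.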
8548 
8549   SDValue ShuffleSrc2 =
8550       SignedConv ? DAG.getUNDEF(WideVT) : DAG.getConstant(0, dl, WideVT);
8551   SDValue Arrange = DAG.getVectorShuffle(WideVT, dl, Wide, ShuffleSrc2, ShuffV);
8552 
8553   SDValue Extend;
8554   if (SignedConv) {
8555     Arrange = DAG.getBitcast(IntermediateVT, Arrange);
8556     EVT ExtVT = Op.getOperand(0).getValueType();
8557     if (Subtarget.hasP9Altivec())
8558       ExtVT = EVT::getVectorVT(*DAG.getContext(), WideVT.getVectorElementType(),
8559                                IntermediateVT.getVectorNumElements());
8560 
8561     Extend = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, IntermediateVT, Arrange,
8562                          DAG.getValueType(ExtVT));
8563   } else
8564     Extend = DAG.getNode(ISD::BITCAST, dl, IntermediateVT, Arrange);
8565 
8566   return DAG.getNode(Opc, dl, Op.getValueType(), Extend);
8567 }
8568 
8569 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
8570                                           SelectionDAG &DAG) const {
8571   SDLoc dl(Op);
8572 
8573   EVT InVT = Op.getOperand(0).getValueType();
8574   EVT OutVT = Op.getValueType();
8575   if (OutVT.isVector() && OutVT.isFloatingPoint() &&
8576       isOperationCustom(Op.getOpcode(), InVT))
8577     return LowerINT_TO_FPVector(Op, DAG, dl);
8578 
8579   // Conversions to f128 are legal.
8580   if (Op.getValueType() == MVT::f128)
8581     return Op;
8582 
8583   if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) {
8584     if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64)
8585       return SDValue();
8586 
8587     SDValue Value = Op.getOperand(0);
8588     // The values are now known to be -1 (false) or 1 (true). To convert this
8589     // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
8590     // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
8591     Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
8592 
8593     SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
8594 
8595     Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
8596 
8597     if (Op.getValueType() != MVT::v4f64)
8598       Value = DAG.getNode(ISD::FP_ROUND, dl,
8599                           Op.getValueType(), Value,
8600                           DAG.getIntPtrConstant(1, dl));
8601     return Value;
8602   }
8603 
8604   // Don't handle ppc_fp128 here; let it be lowered to a libcall.
8605   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
8606     return SDValue();
8607 
8608   if (Op.getOperand(0).getValueType() == MVT::i1)
8609     return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0),
8610                        DAG.getConstantFP(1.0, dl, Op.getValueType()),
8611                        DAG.getConstantFP(0.0, dl, Op.getValueType()));
8612 
  // If we have direct moves, we can do all the conversion and skip the
  // store/load; however, without FPCVT we can't do most conversions.
8615   if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
8616       Subtarget.isPPC64() && Subtarget.hasFPCVT())
8617     return LowerINT_TO_FPDirectMove(Op, DAG, dl);
8618 
8619   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
8620          "UINT_TO_FP is supported only with FPCVT");
8621 
8622   // If we have FCFIDS, then use it when converting to single-precision.
8623   // Otherwise, convert to double-precision and then round.
8624   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
8625                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
8626                                                             : PPCISD::FCFIDS)
8627                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
8628                                                             : PPCISD::FCFID);
8629   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
8630                   ? MVT::f32
8631                   : MVT::f64;
8632 
8633   if (Op.getOperand(0).getValueType() == MVT::i64) {
8634     SDValue SINT = Op.getOperand(0);
8635     // When converting to single-precision, we actually need to convert
8636     // to double-precision first and then round to single-precision.
8637     // To avoid double-rounding effects during that operation, we have
8638     // to prepare the input operand.  Bits that might be truncated when
8639     // converting to double-precision are replaced by a bit that won't
8640     // be lost at this stage, but is below the single-precision rounding
8641     // position.
8642     //
8643     // However, if -enable-unsafe-fp-math is in effect, accept double
8644     // rounding to avoid the extra overhead.
8645     if (Op.getValueType() == MVT::f32 &&
8646         !Subtarget.hasFPCVT() &&
8647         !DAG.getTarget().Options.UnsafeFPMath) {
8648 
8649       // Twiddle input to make sure the low 11 bits are zero.  (If this
8650       // is the case, we are guaranteed the value will fit into the 53 bit
8651       // mantissa of an IEEE double-precision value without rounding.)
8652       // If any of those low 11 bits were not zero originally, make sure
8653       // bit 12 (value 2048) is set instead, so that the final rounding
8654       // to single-precision gets the correct result.
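      // For example, SINT = 0x0040_0000_0000_0401 needs 55 significant bits,
      // so converting it straight to f64 would round.  The sequence below
      // turns it into 0x0040_0000_0000_0800: the inexact low bits collapse
      // into the single bit 0x800, which is exactly representable in the
      // f64 mantissa and still steers the final f64->f32 rounding correctly.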
8655       SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64,
8656                                   SINT, DAG.getConstant(2047, dl, MVT::i64));
8657       Round = DAG.getNode(ISD::ADD, dl, MVT::i64,
8658                           Round, DAG.getConstant(2047, dl, MVT::i64));
8659       Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT);
8660       Round = DAG.getNode(ISD::AND, dl, MVT::i64,
8661                           Round, DAG.getConstant(-2048, dl, MVT::i64));
8662 
8663       // However, we cannot use that value unconditionally: if the magnitude
8664       // of the input value is small, the bit-twiddling we did above might
8665       // end up visibly changing the output.  Fortunately, in that case, we
8666       // don't need to twiddle bits since the original input will convert
8667       // exactly to double-precision floating-point already.  Therefore,
8668       // construct a conditional to use the original value if the top 11
8669       // bits are all sign-bit copies, and use the rounded value computed
8670       // above otherwise.
8671       SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64,
8672                                  SINT, DAG.getConstant(53, dl, MVT::i32));
8673       Cond = DAG.getNode(ISD::ADD, dl, MVT::i64,
8674                          Cond, DAG.getConstant(1, dl, MVT::i64));
8675       Cond = DAG.getSetCC(
8676           dl,
8677           getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
8678           Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT);
8679 
8680       SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT);
8681     }
8682 
8683     ReuseLoadInfo RLI;
8684     SDValue Bits;
8685 
8686     MachineFunction &MF = DAG.getMachineFunction();
8687     if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
8688       Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI,
8689                          RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
8690       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8691     } else if (Subtarget.hasLFIWAX() &&
8692                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) {
8693       MachineMemOperand *MMO =
8694         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8695                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8696       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8697       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl,
8698                                      DAG.getVTList(MVT::f64, MVT::Other),
8699                                      Ops, MVT::i32, MMO);
8700       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8701     } else if (Subtarget.hasFPCVT() &&
8702                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) {
8703       MachineMemOperand *MMO =
8704         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8705                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8706       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8707       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl,
8708                                      DAG.getVTList(MVT::f64, MVT::Other),
8709                                      Ops, MVT::i32, MMO);
8710       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8711     } else if (((Subtarget.hasLFIWAX() &&
8712                  SINT.getOpcode() == ISD::SIGN_EXTEND) ||
8713                 (Subtarget.hasFPCVT() &&
8714                  SINT.getOpcode() == ISD::ZERO_EXTEND)) &&
8715                SINT.getOperand(0).getValueType() == MVT::i32) {
8716       MachineFrameInfo &MFI = MF.getFrameInfo();
8717       EVT PtrVT = getPointerTy(DAG.getDataLayout());
8718 
8719       int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
8720       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8721 
8722       SDValue Store =
8723           DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx,
8724                        MachinePointerInfo::getFixedStack(
8725                            DAG.getMachineFunction(), FrameIdx));
8726 
8727       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8728              "Expected an i32 store");
8729 
8730       RLI.Ptr = FIdx;
8731       RLI.Chain = Store;
8732       RLI.MPI =
8733           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8734       RLI.Alignment = Align(4);
8735 
8736       MachineMemOperand *MMO =
8737         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8738                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8739       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8740       Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ?
8741                                      PPCISD::LFIWZX : PPCISD::LFIWAX,
8742                                      dl, DAG.getVTList(MVT::f64, MVT::Other),
8743                                      Ops, MVT::i32, MMO);
8744     } else
8745       Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);
8746 
8747     SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits);
8748 
8749     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
8750       FP = DAG.getNode(ISD::FP_ROUND, dl,
8751                        MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
8752     return FP;
8753   }
8754 
8755   assert(Op.getOperand(0).getValueType() == MVT::i32 &&
8756          "Unhandled INT_TO_FP type in custom expander!");
8757   // Since we only generate this in 64-bit mode, we can take advantage of
8758   // 64-bit registers.  In particular, sign extend the input value into the
  // 64-bit register with extsw, store the WHOLE 64-bit value into the stack,
  // then lfd it and fcfid it.
8761   MachineFunction &MF = DAG.getMachineFunction();
8762   MachineFrameInfo &MFI = MF.getFrameInfo();
8763   EVT PtrVT = getPointerTy(MF.getDataLayout());
8764 
8765   SDValue Ld;
8766   if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
8767     ReuseLoadInfo RLI;
8768     bool ReusingLoad;
8769     if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI,
8770                                             DAG))) {
8771       int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
8772       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8773 
8774       SDValue Store =
8775           DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
8776                        MachinePointerInfo::getFixedStack(
8777                            DAG.getMachineFunction(), FrameIdx));
8778 
8779       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8780              "Expected an i32 store");
8781 
8782       RLI.Ptr = FIdx;
8783       RLI.Chain = Store;
8784       RLI.MPI =
8785           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8786       RLI.Alignment = Align(4);
8787     }
8788 
8789     MachineMemOperand *MMO =
8790       MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8791                               RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8792     SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8793     Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ?
8794                                    PPCISD::LFIWZX : PPCISD::LFIWAX,
8795                                  dl, DAG.getVTList(MVT::f64, MVT::Other),
8796                                  Ops, MVT::i32, MMO);
8797     if (ReusingLoad)
8798       spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
8799   } else {
8800     assert(Subtarget.isPPC64() &&
8801            "i32->FP without LFIWAX supported only on PPC64");
8802 
8803     int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
8804     SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8805 
8806     SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64,
8807                                 Op.getOperand(0));
8808 
8809     // STD the extended value into the stack slot.
8810     SDValue Store = DAG.getStore(
8811         DAG.getEntryNode(), dl, Ext64, FIdx,
8812         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8813 
8814     // Load the value as a double.
8815     Ld = DAG.getLoad(
8816         MVT::f64, dl, Store, FIdx,
8817         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8818   }
8819 
8820   // FCFID it and return it.
8821   SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
8822   if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
8823     FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
8824                      DAG.getIntPtrConstant(0, dl));
8825   return FP;
8826 }
8827 
8828 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
8829                                             SelectionDAG &DAG) const {
8830   SDLoc dl(Op);
8831   /*
   The rounding mode is in bits 30:31 of FPSCR, and has the following
8833    settings:
8834      00 Round to nearest
8835      01 Round to 0
8836      10 Round to +inf
8837      11 Round to -inf
8838 
8839   FLT_ROUNDS, on the other hand, expects the following:
8840     -1 Undefined
8841      0 Round to 0
8842      1 Round to nearest
8843      2 Round to +inf
8844      3 Round to -inf
8845 
8846   To perform the conversion, we do:
8847     ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
8848   */
8849 
8850   MachineFunction &MF = DAG.getMachineFunction();
8851   EVT VT = Op.getValueType();
8852   EVT PtrVT = getPointerTy(MF.getDataLayout());
8853 
8854   // Save FP Control Word to register
8855   SDValue Chain = Op.getOperand(0);
8856   SDValue MFFS = DAG.getNode(PPCISD::MFFS, dl, {MVT::f64, MVT::Other}, Chain);
8857   Chain = MFFS.getValue(1);
8858 
8859   // Save FP register to stack slot
8860   int SSFI = MF.getFrameInfo().CreateStackObject(8, Align(8), false);
8861   SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
8862   Chain = DAG.getStore(Chain, dl, MFFS, StackSlot, MachinePointerInfo());
8863 
8864   // Load FP Control Word from low 32 bits of stack slot.
8865   SDValue Four = DAG.getConstant(4, dl, PtrVT);
8866   SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
8867   SDValue CWD = DAG.getLoad(MVT::i32, dl, Chain, Addr, MachinePointerInfo());
8868   Chain = CWD.getValue(1);
8869 
8870   // Transform as necessary
8871   SDValue CWD1 =
8872     DAG.getNode(ISD::AND, dl, MVT::i32,
8873                 CWD, DAG.getConstant(3, dl, MVT::i32));
8874   SDValue CWD2 =
8875     DAG.getNode(ISD::SRL, dl, MVT::i32,
8876                 DAG.getNode(ISD::AND, dl, MVT::i32,
8877                             DAG.getNode(ISD::XOR, dl, MVT::i32,
8878                                         CWD, DAG.getConstant(3, dl, MVT::i32)),
8879                             DAG.getConstant(3, dl, MVT::i32)),
8880                 DAG.getConstant(1, dl, MVT::i32));
8881 
8882   SDValue RetVal =
8883     DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
8884 
8885   RetVal =
8886       DAG.getNode((VT.getSizeInBits() < 16 ? ISD::TRUNCATE : ISD::ZERO_EXTEND),
8887                   dl, VT, RetVal);
8888 
8889   return DAG.getMergeValues({RetVal, Chain}, dl);
8890 }
8891 
8892 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
8893   EVT VT = Op.getValueType();
8894   unsigned BitWidth = VT.getSizeInBits();
8895   SDLoc dl(Op);
8896   assert(Op.getNumOperands() == 3 &&
8897          VT == Op.getOperand(1).getValueType() &&
8898          "Unexpected SHL!");
8899 
8900   // Expand into a bunch of logical ops.  Note that these ops
8901   // depend on the PPC behavior for oversized shift amounts.
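  // For Amt < BitWidth this computes:
  //   OutLo = Lo << Amt
  //   OutHi = (Hi << Amt) | (Lo >> (BitWidth - Amt))
  //                       | (Lo << (Amt - BitWidth))
  // The PPC shift nodes produce zero for amounts in [BitWidth, 2*BitWidth),
  // so at most one of the last two terms is nonzero and the ORs merge the
  // Amt < BitWidth and Amt >= BitWidth cases without a branch.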
8902   SDValue Lo = Op.getOperand(0);
8903   SDValue Hi = Op.getOperand(1);
8904   SDValue Amt = Op.getOperand(2);
8905   EVT AmtVT = Amt.getValueType();
8906 
8907   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8908                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8909   SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
8910   SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
8911   SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3);
8912   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8913                              DAG.getConstant(-BitWidth, dl, AmtVT));
8914   SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
8915   SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
8916   SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
8917   SDValue OutOps[] = { OutLo, OutHi };
8918   return DAG.getMergeValues(OutOps, dl);
8919 }
8920 
8921 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
8922   EVT VT = Op.getValueType();
8923   SDLoc dl(Op);
8924   unsigned BitWidth = VT.getSizeInBits();
8925   assert(Op.getNumOperands() == 3 &&
8926          VT == Op.getOperand(1).getValueType() &&
8927          "Unexpected SRL!");
8928 
8929   // Expand into a bunch of logical ops.  Note that these ops
8930   // depend on the PPC behavior for oversized shift amounts.
8931   SDValue Lo = Op.getOperand(0);
8932   SDValue Hi = Op.getOperand(1);
8933   SDValue Amt = Op.getOperand(2);
8934   EVT AmtVT = Amt.getValueType();
8935 
8936   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8937                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8938   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
8939   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
8940   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
8941   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8942                              DAG.getConstant(-BitWidth, dl, AmtVT));
8943   SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
8944   SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
8945   SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
8946   SDValue OutOps[] = { OutLo, OutHi };
8947   return DAG.getMergeValues(OutOps, dl);
8948 }
8949 
8950 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
8951   SDLoc dl(Op);
8952   EVT VT = Op.getValueType();
8953   unsigned BitWidth = VT.getSizeInBits();
8954   assert(Op.getNumOperands() == 3 &&
8955          VT == Op.getOperand(1).getValueType() &&
8956          "Unexpected SRA!");
8957 
8958   // Expand into a bunch of logical ops, followed by a select_cc.
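  // Unlike LowerSRL_PARTS, OutLo needs a select_cc here: PPC's sra fills
  // oversized shift amounts with copies of the sign bit rather than zeros,
  // so for Amt < BitWidth the Hi >>a (Amt - BitWidth) term would not vanish
  // and a plain OR of the partial terms would corrupt the result.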
8959   SDValue Lo = Op.getOperand(0);
8960   SDValue Hi = Op.getOperand(1);
8961   SDValue Amt = Op.getOperand(2);
8962   EVT AmtVT = Amt.getValueType();
8963 
8964   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8965                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8966   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
8967   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
8968   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
8969   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8970                              DAG.getConstant(-BitWidth, dl, AmtVT));
8971   SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
8972   SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
8973   SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT),
8974                                   Tmp4, Tmp6, ISD::SETLE);
8975   SDValue OutOps[] = { OutLo, OutHi };
8976   return DAG.getMergeValues(OutOps, dl);
8977 }
8978 
8979 //===----------------------------------------------------------------------===//
8980 // Vector related lowering.
8981 //
8982 
8983 /// getCanonicalConstSplat - Build a canonical splat immediate of Val with an
8984 /// element size of SplatSize. Cast the result to VT.
8985 static SDValue getCanonicalConstSplat(uint64_t Val, unsigned SplatSize, EVT VT,
8986                                       SelectionDAG &DAG, const SDLoc &dl) {
8987   static const MVT VTys[] = { // canonical VT to use for each size.
8988     MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
8989   };
8990 
8991   EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
8992 
8993   // For a splat with all ones, turn it to vspltisb 0xFF to canonicalize.
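  // For example, a v8i16 splat of 0xFFFF becomes a v16i8 splat of 0xFF,
  // which a single vspltisb -1 materializes.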
  if (Val == ((1ULL << (SplatSize * 8)) - 1)) {
8995     SplatSize = 1;
8996     Val = 0xFF;
8997   }
8998 
8999   EVT CanonicalVT = VTys[SplatSize-1];
9000 
9001   // Build a canonical splat for this value.
9002   return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT));
9003 }
9004 
9005 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the
9006 /// specified intrinsic ID.
9007 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG,
9008                                 const SDLoc &dl, EVT DestVT = MVT::Other) {
9009   if (DestVT == MVT::Other) DestVT = Op.getValueType();
9010   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
9011                      DAG.getConstant(IID, dl, MVT::i32), Op);
9012 }
9013 
9014 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the
9015 /// specified intrinsic ID.
9016 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
9017                                 SelectionDAG &DAG, const SDLoc &dl,
9018                                 EVT DestVT = MVT::Other) {
9019   if (DestVT == MVT::Other) DestVT = LHS.getValueType();
9020   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
9021                      DAG.getConstant(IID, dl, MVT::i32), LHS, RHS);
9022 }
9023 
9024 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
9025 /// specified intrinsic ID.
9026 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
9027                                 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl,
9028                                 EVT DestVT = MVT::Other) {
9029   if (DestVT == MVT::Other) DestVT = Op0.getValueType();
9030   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
9031                      DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
9032 }
9033 
9034 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
9035 /// amount.  The result has the specified value type.
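/// For example, Amt = 4 yields the byte-shuffle mask <4, 5, ..., 19>:
/// bytes 4-15 of LHS followed by bytes 0-3 of RHS, matching vsldoi's
/// left shift by 4 bytes through the concatenated LHS:RHS pair.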
9036 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT,
9037                            SelectionDAG &DAG, const SDLoc &dl) {
9038   // Force LHS/RHS to be the right type.
9039   LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
9040   RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);
9041 
9042   int Ops[16];
9043   for (unsigned i = 0; i != 16; ++i)
9044     Ops[i] = i + Amt;
9045   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
9046   return DAG.getNode(ISD::BITCAST, dl, VT, T);
9047 }
9048 
9049 /// Do we have an efficient pattern in a .td file for this node?
9050 ///
9051 /// \param V - pointer to the BuildVectorSDNode being matched
9052 /// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves?
9053 ///
9054 /// There are some patterns where it is beneficial to keep a BUILD_VECTOR
9055 /// node as a BUILD_VECTOR node rather than expanding it. The patterns where
9056 /// the opposite is true (expansion is beneficial) are:
9057 /// - The node builds a vector out of integers that are not 32 or 64-bits
9058 /// - The node builds a vector out of constants
9059 /// - The node is a "load-and-splat"
9060 /// In all other cases, we will choose to keep the BUILD_VECTOR.
9061 static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V,
9062                                             bool HasDirectMove,
9063                                             bool HasP8Vector) {
9064   EVT VecVT = V->getValueType(0);
9065   bool RightType = VecVT == MVT::v2f64 ||
9066     (HasP8Vector && VecVT == MVT::v4f32) ||
9067     (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32));
9068   if (!RightType)
9069     return false;
9070 
9071   bool IsSplat = true;
9072   bool IsLoad = false;
9073   SDValue Op0 = V->getOperand(0);
9074 
9075   // This function is called in a block that confirms the node is not a constant
9076   // splat. So a constant BUILD_VECTOR here means the vector is built out of
9077   // different constants.
9078   if (V->isConstant())
9079     return false;
9080   for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
9081     if (V->getOperand(i).isUndef())
9082       return false;
9083     // We want to expand nodes that represent load-and-splat even if the
9084     // loaded value is a floating point truncation or conversion to int.
9085     if (V->getOperand(i).getOpcode() == ISD::LOAD ||
9086         (V->getOperand(i).getOpcode() == ISD::FP_ROUND &&
9087          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
9088         (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT &&
9089          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
9090         (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT &&
9091          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD))
9092       IsLoad = true;
9093     // If the operands are different or the input is not a load and has more
9094     // uses than just this BV node, then it isn't a splat.
9095     if (V->getOperand(i) != Op0 ||
9096         (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode())))
9097       IsSplat = false;
9098   }
9099   return !(IsSplat && IsLoad);
9100 }
9101 
9102 // Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128.
9103 SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
9104 
9105   SDLoc dl(Op);
9106   SDValue Op0 = Op->getOperand(0);
9107 
9108   if ((Op.getValueType() != MVT::f128) ||
9109       (Op0.getOpcode() != ISD::BUILD_PAIR) ||
9110       (Op0.getOperand(0).getValueType() != MVT::i64) ||
9111       (Op0.getOperand(1).getValueType() != MVT::i64))
9112     return SDValue();
9113 
9114   return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0),
9115                      Op0.getOperand(1));
9116 }
9117 
9118 static const SDValue *getNormalLoadInput(const SDValue &Op) {
9119   const SDValue *InputLoad = &Op;
9120   if (InputLoad->getOpcode() == ISD::BITCAST)
9121     InputLoad = &InputLoad->getOperand(0);
9122   if (InputLoad->getOpcode() == ISD::SCALAR_TO_VECTOR ||
9123       InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED)
9124     InputLoad = &InputLoad->getOperand(0);
9125   if (InputLoad->getOpcode() != ISD::LOAD)
9126     return nullptr;
9127   LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9128   return ISD::isNormalLoad(LD) ? InputLoad : nullptr;
9129 }
9130 
9131 // Convert the argument APFloat to a single precision APFloat if there is no
9132 // loss in information during the conversion to single precision APFloat and the
9133 // resulting number is not a denormal number. Return true if successful.
9134 bool llvm::convertToNonDenormSingle(APFloat &ArgAPFloat) {
9135   APFloat APFloatToConvert = ArgAPFloat;
9136   bool LosesInfo = true;
9137   APFloatToConvert.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
9138                            &LosesInfo);
9139   bool Success = (!LosesInfo && !APFloatToConvert.isDenormal());
9140   if (Success)
9141     ArgAPFloat = APFloatToConvert;
9142   return Success;
9143 }
9144 
9145 // Bitcast the argument APInt to a double and convert it to a single precision
9146 // APFloat, bitcast the APFloat to an APInt and assign it to the original
9147 // argument if there is no loss in information during the conversion from
9148 // double to single precision APFloat and the resulting number is not a denormal
9149 // number. Return true if successful.
9150 bool llvm::convertToNonDenormSingle(APInt &ArgAPInt) {
9151   double DpValue = ArgAPInt.bitsToDouble();
9152   APFloat APFloatDp(DpValue);
9153   bool Success = convertToNonDenormSingle(APFloatDp);
9154   if (Success)
9155     ArgAPInt = APFloatDp.bitcastToAPInt();
9156   return Success;
9157 }
9158 
9159 // If this is a case we can't handle, return null and let the default
9160 // expansion code take care of it.  If we CAN select this case, and if it
9161 // selects to a single instruction, return Op.  Otherwise, if we can codegen
9162 // this case more efficiently than a constant pool load, lower it to the
9163 // sequence of ops that should be used.
9164 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
9165                                              SelectionDAG &DAG) const {
9166   SDLoc dl(Op);
9167   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
9168   assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
9169 
9170   if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) {
9171     // We first build an i32 vector, load it into a QPX register,
9172     // then convert it to a floating-point vector and compare it
9173     // to a zero vector to get the boolean result.
9174     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
9175     int FrameIdx = MFI.CreateStackObject(16, Align(16), false);
9176     MachinePointerInfo PtrInfo =
9177         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
9178     EVT PtrVT = getPointerTy(DAG.getDataLayout());
9179     SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
9180 
9181     assert(BVN->getNumOperands() == 4 &&
9182       "BUILD_VECTOR for v4i1 does not have 4 operands");
9183 
9184     bool IsConst = true;
9185     for (unsigned i = 0; i < 4; ++i) {
9186       if (BVN->getOperand(i).isUndef()) continue;
9187       if (!isa<ConstantSDNode>(BVN->getOperand(i))) {
9188         IsConst = false;
9189         break;
9190       }
9191     }
9192 
9193     if (IsConst) {
9194       Constant *One =
9195         ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0);
9196       Constant *NegOne =
9197         ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0);
9198 
9199       Constant *CV[4];
9200       for (unsigned i = 0; i < 4; ++i) {
9201         if (BVN->getOperand(i).isUndef())
9202           CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext()));
9203         else if (isNullConstant(BVN->getOperand(i)))
9204           CV[i] = NegOne;
9205         else
9206           CV[i] = One;
9207       }
9208 
9209       Constant *CP = ConstantVector::get(CV);
9210       SDValue CPIdx =
9211           DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()), Align(16));
9212 
9213       SDValue Ops[] = {DAG.getEntryNode(), CPIdx};
9214       SDVTList VTs = DAG.getVTList({MVT::v4i1, /*chain*/ MVT::Other});
9215       return DAG.getMemIntrinsicNode(
9216           PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32,
9217           MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
9218     }
9219 
9220     SmallVector<SDValue, 4> Stores;
9221     for (unsigned i = 0; i < 4; ++i) {
9222       if (BVN->getOperand(i).isUndef()) continue;
9223 
9224       unsigned Offset = 4*i;
9225       SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
9226       Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
9227 
9228       unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize();
9229       if (StoreSize > 4) {
9230         Stores.push_back(
9231             DAG.getTruncStore(DAG.getEntryNode(), dl, BVN->getOperand(i), Idx,
9232                               PtrInfo.getWithOffset(Offset), MVT::i32));
9233       } else {
9234         SDValue StoreValue = BVN->getOperand(i);
9235         if (StoreSize < 4)
9236           StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue);
9237 
9238         Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, StoreValue, Idx,
9239                                       PtrInfo.getWithOffset(Offset)));
9240       }
9241     }
9242 
9243     SDValue StoreChain;
9244     if (!Stores.empty())
9245       StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
9246     else
9247       StoreChain = DAG.getEntryNode();
9248 
9249     // Now load from v4i32 into the QPX register; this will extend it to
9250     // v4i64 but not yet convert it to a floating point. Nevertheless, this
9251     // is typed as v4f64 because the QPX register integer states are not
9252     // explicitly represented.
9253 
9254     SDValue Ops[] = {StoreChain,
9255                      DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32),
9256                      FIdx};
9257     SDVTList VTs = DAG.getVTList({MVT::v4f64, /*chain*/ MVT::Other});
9258 
9259     SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN,
9260       dl, VTs, Ops, MVT::v4i32, PtrInfo);
9261     LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
9262       DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32),
9263       LoadedVect);
9264 
9265     SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::v4f64);
9266 
9267     return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ);
9268   }
9269 
9270   // All other QPX vectors are handled by generic code.
9271   if (Subtarget.hasQPX())
9272     return SDValue();
9273 
9274   // Check if this is a splat of a constant value.
9275   APInt APSplatBits, APSplatUndef;
9276   unsigned SplatBitSize;
9277   bool HasAnyUndefs;
9278   bool BVNIsConstantSplat =
9279       BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
9280                            HasAnyUndefs, 0, !Subtarget.isLittleEndian());
9281 
9282   // If it is a splat of a double, check if we can shrink it to a 32 bit
9283   // non-denormal float which when converted back to double gives us the same
9284   // double. This is to exploit the XXSPLTIDP instruction.
9285   if (BVNIsConstantSplat && Subtarget.hasPrefixInstrs() &&
9286       (SplatBitSize == 64) && (Op->getValueType(0) == MVT::v2f64) &&
9287       convertToNonDenormSingle(APSplatBits)) {
9288     SDValue SplatNode = DAG.getNode(
9289         PPCISD::XXSPLTI_SP_TO_DP, dl, MVT::v2f64,
9290         DAG.getTargetConstant(APSplatBits.getZExtValue(), dl, MVT::i32));
9291     return DAG.getBitcast(Op.getValueType(), SplatNode);
9292   }
9293 
9294   if (!BVNIsConstantSplat || SplatBitSize > 32) {
9295 
9296     const SDValue *InputLoad = getNormalLoadInput(Op.getOperand(0));
9297     // Handle load-and-splat patterns as we have instructions that will do this
9298     // in one go.
9299     if (InputLoad && DAG.isSplatValue(Op, true)) {
9300       LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9301 
9302       // We have handling for 4 and 8 byte elements.
9303       unsigned ElementSize = LD->getMemoryVT().getScalarSizeInBits();
9304 
      // Checking for a single use of this load, we have to check for vector
      // width (128 bits) / ElementSize uses (since each operand of the
      // BUILD_VECTOR is a separate use of the value).
9308       if (InputLoad->getNode()->hasNUsesOfValue(128 / ElementSize, 0) &&
9309           ((Subtarget.hasVSX() && ElementSize == 64) ||
9310            (Subtarget.hasP9Vector() && ElementSize == 32))) {
9311         SDValue Ops[] = {
9312           LD->getChain(),    // Chain
9313           LD->getBasePtr(),  // Ptr
9314           DAG.getValueType(Op.getValueType()) // VT
9315         };
9316         return
9317           DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl,
9318                                   DAG.getVTList(Op.getValueType(), MVT::Other),
9319                                   Ops, LD->getMemoryVT(), LD->getMemOperand());
9320       }
9321     }
9322 
9323     // BUILD_VECTOR nodes that are not constant splats of up to 32-bits can be
9324     // lowered to VSX instructions under certain conditions.
9325     // Without VSX, there is no pattern more efficient than expanding the node.
9326     if (Subtarget.hasVSX() &&
9327         haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
9328                                         Subtarget.hasP8Vector()))
9329       return Op;
9330     return SDValue();
9331   }
9332 
9333   uint64_t SplatBits = APSplatBits.getZExtValue();
9334   uint64_t SplatUndef = APSplatUndef.getZExtValue();
9335   unsigned SplatSize = SplatBitSize / 8;
9336 
9337   // First, handle single instruction cases.
9338 
9339   // All zeros?
9340   if (SplatBits == 0) {
9341     // Canonicalize all zero vectors to be v4i32.
9342     if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
9343       SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
9344       Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
9345     }
9346     return Op;
9347   }
9348 
9349   // We have XXSPLTIW for constant splats four bytes wide.
  // Since the vector length is a multiple of 4, 2-byte splats can be replaced
9351   // with 4-byte splats. We replicate the SplatBits in case of 2-byte splat to
9352   // make a 4-byte splat element. For example: 2-byte splat of 0xABAB can be
9353   // turned into a 4-byte splat of 0xABABABAB.
9354   if (Subtarget.hasPrefixInstrs() && SplatSize == 2)
9355     return getCanonicalConstSplat((SplatBits |= SplatBits << 16), SplatSize * 2,
9356                                   Op.getValueType(), DAG, dl);
9357 
9358   if (Subtarget.hasPrefixInstrs() && SplatSize == 4)
9359     return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
9360                                   dl);
9361 
9362   // We have XXSPLTIB for constant splats one byte wide.
9363   if (Subtarget.hasP9Vector() && SplatSize == 1)
9364     return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
9365                                   dl);
9366 
9367   // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
9368   int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >>
9369                     (32-SplatBitSize));
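  // For example, a one-byte splat of 0xF0 gives
  // SextVal = int32_t(0xF0 << 24) >> 24 = -16, reachable with a single
  // vspltisb -16.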
9370   if (SextVal >= -16 && SextVal <= 15)
9371     return getCanonicalConstSplat(SextVal, SplatSize, Op.getValueType(), DAG,
9372                                   dl);
9373 
9374   // Two instruction sequences.
9375 
9376   // If this value is in the range [-32,30] and is even, use:
9377   //     VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
9378   // If this value is in the range [17,31] and is odd, use:
9379   //     VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
9380   // If this value is in the range [-31,-17] and is odd, use:
9381   //     VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
9382   // Note the last two are three-instruction sequences.
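  // For example, 30 becomes vspltis[bhw] 15 added to itself, while 27
  // becomes vspltis[bhw] 11 minus vspltis[bhw] -16 (11 - (-16) = 27).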
9383   if (SextVal >= -32 && SextVal <= 31) {
9384     // To avoid having these optimizations undone by constant folding,
9385     // we convert to a pseudo that will be expanded later into one of
9386     // the above forms.
9387     SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
9388     EVT VT = (SplatSize == 1 ? MVT::v16i8 :
9389               (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
9390     SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
9391     SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
9392     if (VT == Op.getValueType())
9393       return RetVal;
9394     else
9395       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
9396   }
9397 
9398   // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
9399   // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
9400   // for fneg/fabs.
9401   if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
    // Make a vector of -1s with vspltisw -1:
9403     SDValue OnesV = getCanonicalConstSplat(-1, 4, MVT::v4i32, DAG, dl);
9404 
9405     // Make the VSLW intrinsic, computing 0x8000_0000.
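    // (vslw only uses the low 5 bits of each shift element, so shifting the
    // word 0xFFFFFFFF left by 0xFFFFFFFF shifts it by 31.)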
9406     SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
9407                                    OnesV, DAG, dl);
9408 
9409     // xor by OnesV to invert it.
9410     Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
9411     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9412   }
9413 
  // Check to see if this is one of the many vsplti* + binop-on-self cases.
9415   static const signed char SplatCsts[] = {
9416     -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
9417     -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
9418   };
9419 
9420   for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
    // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
    // cases which are ambiguous (e.g. formation of 0x8000_0000).
9423     int i = SplatCsts[idx];
9424 
9425     // Figure out what shift amount will be used by altivec if shifted by i in
9426     // this splat size.
9427     unsigned TypeShiftAmt = i & (SplatBitSize-1);
9428 
9429     // vsplti + shl self.
9430     if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
9431       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9432       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9433         Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
9434         Intrinsic::ppc_altivec_vslw
9435       };
9436       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9437       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9438     }
9439 
9440     // vsplti + srl self.
9441     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
9442       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9443       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9444         Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
9445         Intrinsic::ppc_altivec_vsrw
9446       };
9447       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9448       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9449     }
9450 
    // vsplti + sra self.  Note the arithmetic shift here, unlike the
    // logical shift in the srl case above.
    if (SextVal == (int)(i >> TypeShiftAmt)) {
9453       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9454       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9455         Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
9456         Intrinsic::ppc_altivec_vsraw
9457       };
9458       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9459       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9460     }
9461 
9462     // vsplti + rol self.
9463     if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
9464                          ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
9465       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9466       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9467         Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
9468         Intrinsic::ppc_altivec_vrlw
9469       };
9470       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9471       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9472     }
9473 
9474     // t = vsplti c, result = vsldoi t, t, 1
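    // For example, a v8i16 splat of 0xFEFF is vspltish -2 (0xFFFE per
    // element) with its sixteen bytes rotated by one position.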
9475     if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
9476       SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9477       unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
9478       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9479     }
9480     // t = vsplti c, result = vsldoi t, t, 2
9481     if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
9482       SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9483       unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
9484       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9485     }
9486     // t = vsplti c, result = vsldoi t, t, 3
9487     if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
9488       SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9489       unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
9490       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9491     }
9492   }
9493 
9494   return SDValue();
9495 }
9496 
9497 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
9498 /// the specified operations to build the shuffle.
9499 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
9500                                       SDValue RHS, SelectionDAG &DAG,
9501                                       const SDLoc &dl) {
9502   unsigned OpNum = (PFEntry >> 26) & 0x0F;
9503   unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
9504   unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
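  // The operation tag lives in bits 29:26; each 13-bit operand ID packs four
  // base-9 digits (0-7 select an element of the LHS:RHS pair, 8 is undef),
  // so the LHS identity <0,1,2,3> encodes as ((0*9+1)*9+2)*9+3 == 102.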
9505 
9506   enum {
9507     OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
9508     OP_VMRGHW,
9509     OP_VMRGLW,
9510     OP_VSPLTISW0,
9511     OP_VSPLTISW1,
9512     OP_VSPLTISW2,
9513     OP_VSPLTISW3,
9514     OP_VSLDOI4,
9515     OP_VSLDOI8,
9516     OP_VSLDOI12
9517   };
9518 
9519   if (OpNum == OP_COPY) {
9520     if (LHSID == (1*9+2)*9+3) return LHS;
9521     assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
9522     return RHS;
9523   }
9524 
9525   SDValue OpLHS, OpRHS;
9526   OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
9527   OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
9528 
9529   int ShufIdxs[16];
9530   switch (OpNum) {
9531   default: llvm_unreachable("Unknown i32 permute!");
9532   case OP_VMRGHW:
9533     ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
9534     ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
9535     ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
9536     ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
9537     break;
9538   case OP_VMRGLW:
9539     ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
9540     ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
9541     ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
9542     ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
9543     break;
9544   case OP_VSPLTISW0:
9545     for (unsigned i = 0; i != 16; ++i)
9546       ShufIdxs[i] = (i&3)+0;
9547     break;
9548   case OP_VSPLTISW1:
9549     for (unsigned i = 0; i != 16; ++i)
9550       ShufIdxs[i] = (i&3)+4;
9551     break;
9552   case OP_VSPLTISW2:
9553     for (unsigned i = 0; i != 16; ++i)
9554       ShufIdxs[i] = (i&3)+8;
9555     break;
9556   case OP_VSPLTISW3:
9557     for (unsigned i = 0; i != 16; ++i)
9558       ShufIdxs[i] = (i&3)+12;
9559     break;
9560   case OP_VSLDOI4:
9561     return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
9562   case OP_VSLDOI8:
9563     return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
9564   case OP_VSLDOI12:
9565     return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
9566   }
9567   EVT VT = OpLHS.getValueType();
9568   OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
9569   OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
9570   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
9571   return DAG.getNode(ISD::BITCAST, dl, VT, T);
9572 }
9573 
9574 /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
9575 /// by the VINSERTB instruction introduced in ISA 3.0, else just return default
9576 /// SDValue.
9577 SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
9578                                            SelectionDAG &DAG) const {
9579   const unsigned BytesInVector = 16;
9580   bool IsLE = Subtarget.isLittleEndian();
9581   SDLoc dl(N);
9582   SDValue V1 = N->getOperand(0);
9583   SDValue V2 = N->getOperand(1);
9584   unsigned ShiftElts = 0, InsertAtByte = 0;
9585   bool Swap = false;
9586 
9587   // Shifts required to get the byte we want at element 7.
9588   unsigned LittleEndianShifts[] = {8, 7,  6,  5,  4,  3,  2,  1,
9589                                    0, 15, 14, 13, 12, 11, 10, 9};
9590   unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
9591                                 1, 2,  3,  4,  5,  6,  7,  8};
9592 
9593   ArrayRef<int> Mask = N->getMask();
9594   int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
9595 
9596   // For each mask element, find out if we're just inserting something
9597   // from V2 into V1 or vice versa.
9598   // Possible permutations inserting an element from V2 into V1:
9599   //   X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9600   //   0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9601   //   ...
9602   //   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
9603   // Inserting from V1 into V2 will be similar, except mask range will be
9604   // [16,31].
9605 
9606   bool FoundCandidate = false;
9607   // If both vector operands for the shuffle are the same vector, the mask
9608   // will contain only elements from the first one and the second one will be
9609   // undef.
9610   unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
  // Go through the mask of bytes to find an element that's being moved
  // from one vector to the other.
9613   for (unsigned i = 0; i < BytesInVector; ++i) {
9614     unsigned CurrentElement = Mask[i];
    // If 2nd operand is undefined, we should only look for the VINSERTB
    // source element (7 for big-endian, 8 for little-endian) in the Mask.
9617     if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
9618       continue;
9619 
9620     bool OtherElementsInOrder = true;
9621     // Examine the other elements in the Mask to see if they're in original
9622     // order.
9623     for (unsigned j = 0; j < BytesInVector; ++j) {
9624       if (j == i)
9625         continue;
      // If CurrentElement is from V1 [0,15], then we expect the rest of the
      // Mask to be from V2 [16,31] and vice versa.  Unless the 2nd operand is
      // undefined, in which case we assume we're always picking from the 1st
      // operand.
9629       int MaskOffset =
9630           (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
9631       if (Mask[j] != OriginalOrder[j] + MaskOffset) {
9632         OtherElementsInOrder = false;
9633         break;
9634       }
9635     }
9636     // If other elements are in original order, we record the number of shifts
9637     // we need to get the element we want into element 7. Also record which byte
9638     // in the vector we should insert into.
9639     if (OtherElementsInOrder) {
9640       // If 2nd operand is undefined, we assume no shifts and no swapping.
9641       if (V2.isUndef()) {
9642         ShiftElts = 0;
9643         Swap = false;
9644       } else {
        // Only need the last 4 bits for shifts because operands will be
        // swapped if CurrentElement is >= 2^4.
9646         ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
9647                          : BigEndianShifts[CurrentElement & 0xF];
9648         Swap = CurrentElement < BytesInVector;
9649       }
9650       InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
9651       FoundCandidate = true;
9652       break;
9653     }
9654   }
9655 
9656   if (!FoundCandidate)
9657     return SDValue();
9658 
9659   // Candidate found, construct the proper SDAG sequence with VINSERTB,
9660   // optionally with VECSHL if shift is required.
9661   if (Swap)
9662     std::swap(V1, V2);
9663   if (V2.isUndef())
9664     V2 = V1;
9665   if (ShiftElts) {
9666     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9667                               DAG.getConstant(ShiftElts, dl, MVT::i32));
9668     return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl,
9669                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
9670   }
9671   return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2,
9672                      DAG.getConstant(InsertAtByte, dl, MVT::i32));
9673 }
9674 
9675 /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled
9676 /// by the VINSERTH instruction introduced in ISA 3.0, else just return default
9677 /// SDValue.
9678 SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
9679                                            SelectionDAG &DAG) const {
9680   const unsigned NumHalfWords = 8;
9681   const unsigned BytesInVector = NumHalfWords * 2;
9682   // Check that the shuffle is on half-words.
9683   if (!isNByteElemShuffleMask(N, 2, 1))
9684     return SDValue();
9685 
9686   bool IsLE = Subtarget.isLittleEndian();
9687   SDLoc dl(N);
9688   SDValue V1 = N->getOperand(0);
9689   SDValue V2 = N->getOperand(1);
9690   unsigned ShiftElts = 0, InsertAtByte = 0;
9691   bool Swap = false;
9692 
9693   // Shifts required to get the half-word we want at element 3.
9694   unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
9695   unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};
9696 
9697   uint32_t Mask = 0;
9698   uint32_t OriginalOrderLow = 0x1234567;
9699   uint32_t OriginalOrderHigh = 0x89ABCDEF;
9700   // Now we look at mask elements 0,2,4,6,8,10,12,14.  Pack the mask into a
9701   // 32-bit space, only need 4-bit nibbles per element.
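  // For example, the identity shuffle (byte mask <0, 1, ..., 15>) has
  // half-word elements 0..7 and packs to 0x01234567 == OriginalOrderLow.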
9702   for (unsigned i = 0; i < NumHalfWords; ++i) {
9703     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9704     Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift);
9705   }
9706 
9707   // For each mask element, find out if we're just inserting something
9708   // from V2 into V1 or vice versa.  Possible permutations inserting an element
9709   // from V2 into V1:
9710   //   X, 1, 2, 3, 4, 5, 6, 7
9711   //   0, X, 2, 3, 4, 5, 6, 7
9712   //   0, 1, X, 3, 4, 5, 6, 7
9713   //   0, 1, 2, X, 4, 5, 6, 7
9714   //   0, 1, 2, 3, X, 5, 6, 7
9715   //   0, 1, 2, 3, 4, X, 6, 7
9716   //   0, 1, 2, 3, 4, 5, X, 7
9717   //   0, 1, 2, 3, 4, 5, 6, X
9718   // Inserting from V1 into V2 will be similar, except mask range will be [8,15].
9719 
9720   bool FoundCandidate = false;
9721   // Go through the mask of half-words to find an element that's being moved
9722   // from one vector to the other.
9723   for (unsigned i = 0; i < NumHalfWords; ++i) {
9724     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9725     uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF;
9726     uint32_t MaskOtherElts = ~(0xF << MaskShift);
9727     uint32_t TargetOrder = 0x0;
9728 
9729     // If both vector operands for the shuffle are the same vector, the mask
9730     // will contain only elements from the first one and the second one will be
9731     // undef.
9732     if (V2.isUndef()) {
9733       ShiftElts = 0;
9734       unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
9735       TargetOrder = OriginalOrderLow;
9736       Swap = false;
      // Skip if this is not the correct element or the mask of the other
      // elements doesn't match our expected order.
9739       if (MaskOneElt == VINSERTHSrcElem &&
9740           (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9741         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9742         FoundCandidate = true;
9743         break;
9744       }
9745     } else { // If both operands are defined.
9746       // Target order is [8,15] if the current mask is between [0,7].
9747       TargetOrder =
9748           (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
      // Skip if the mask of the other elements doesn't match our expected
      // order.
9750       if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9751         // We only need the last 3 bits for the number of shifts.
9752         ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
9753                          : BigEndianShifts[MaskOneElt & 0x7];
9754         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9755         Swap = MaskOneElt < NumHalfWords;
9756         FoundCandidate = true;
9757         break;
9758       }
9759     }
9760   }
9761 
9762   if (!FoundCandidate)
9763     return SDValue();
9764 
9765   // Candidate found, construct the proper SDAG sequence with VINSERTH,
9766   // optionally with VECSHL if shift is required.
9767   if (Swap)
9768     std::swap(V1, V2);
9769   if (V2.isUndef())
9770     V2 = V1;
9771   SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
9772   if (ShiftElts) {
9773     // Double ShiftElts because we're left shifting on v16i8 type.
9774     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9775                               DAG.getConstant(2 * ShiftElts, dl, MVT::i32));
9776     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl);
9777     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9778                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
9779     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9780   }
9781   SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
9782   SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9783                             DAG.getConstant(InsertAtByte, dl, MVT::i32));
9784   return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9785 }
9786 
9787 /// lowerToXXSPLTI32DX - Return the SDValue if this VECTOR_SHUFFLE can be
9788 /// handled by the XXSPLTI32DX instruction introduced in ISA 3.1, otherwise
9789 /// return the default SDValue.
9790 SDValue PPCTargetLowering::lowerToXXSPLTI32DX(ShuffleVectorSDNode *SVN,
9791                                               SelectionDAG &DAG) const {
9792   // The LHS and RHS may be bitcasts to v16i8 as we canonicalize shuffles
9793   // to v16i8. Peek through the bitcasts to get the actual operands.
9794   SDValue LHS = peekThroughBitcasts(SVN->getOperand(0));
9795   SDValue RHS = peekThroughBitcasts(SVN->getOperand(1));
9796 
9797   auto ShuffleMask = SVN->getMask();
9798   SDValue VecShuffle(SVN, 0);
9799   SDLoc DL(SVN);
9800 
9801   // Check that we have a four byte shuffle.
9802   if (!isNByteElemShuffleMask(SVN, 4, 1))
9803     return SDValue();
9804 
9805   // Canonicalize the RHS being a BUILD_VECTOR when lowering to xxsplti32dx.
9806   if (RHS->getOpcode() != ISD::BUILD_VECTOR) {
9807     std::swap(LHS, RHS);
9808     VecShuffle = DAG.getCommutedVectorShuffle(*SVN);
9809     ShuffleMask = cast<ShuffleVectorSDNode>(VecShuffle)->getMask();
9810   }
9811 
9812   // Ensure that the RHS is a vector of constants.
9813   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
9814   if (!BVN)
9815     return SDValue();
9816 
9817   // Check if RHS is a splat of 4-bytes (or smaller).
9818   APInt APSplatValue, APSplatUndef;
9819   unsigned SplatBitSize;
9820   bool HasAnyUndefs;
9821   if (!BVN->isConstantSplat(APSplatValue, APSplatUndef, SplatBitSize,
9822                             HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
9823       SplatBitSize > 32)
9824     return SDValue();
9825 
9826   // Check that the shuffle mask matches the semantics of XXSPLTI32DX.
9827   // The instruction splats a constant C into two words of the source vector
9828   // producing { C, Unchanged, C, Unchanged } or { Unchanged, C, Unchanged, C }.
  // Thus we check that the shuffle mask is the equivalent of
9830   // <0, [4-7], 2, [4-7]> or <[4-7], 1, [4-7], 3> respectively.
9831   // Note: the check above of isNByteElemShuffleMask() ensures that the bytes
9832   // within each word are consecutive, so we only need to check the first byte.
9833   SDValue Index;
9834   bool IsLE = Subtarget.isLittleEndian();
9835   if ((ShuffleMask[0] == 0 && ShuffleMask[8] == 8) &&
9836       (ShuffleMask[4] % 4 == 0 && ShuffleMask[12] % 4 == 0 &&
9837        ShuffleMask[4] > 15 && ShuffleMask[12] > 15))
9838     Index = DAG.getTargetConstant(IsLE ? 0 : 1, DL, MVT::i32);
9839   else if ((ShuffleMask[4] == 4 && ShuffleMask[12] == 12) &&
9840            (ShuffleMask[0] % 4 == 0 && ShuffleMask[8] % 4 == 0 &&
9841             ShuffleMask[0] > 15 && ShuffleMask[8] > 15))
9842     Index = DAG.getTargetConstant(IsLE ? 1 : 0, DL, MVT::i32);
9843   else
9844     return SDValue();
9845 
  // If the splat is narrower than 32 bits, we need to get the 32-bit value
9847   // for XXSPLTI32DX.
9848   unsigned SplatVal = APSplatValue.getZExtValue();
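  // e.g. an 8-bit splat value of 0xAB widens to 0xABAB, then to 0xABABABAB.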
9849   for (; SplatBitSize < 32; SplatBitSize <<= 1)
9850     SplatVal |= (SplatVal << SplatBitSize);
9851 
9852   SDValue SplatNode = DAG.getNode(
9853       PPCISD::XXSPLTI32DX, DL, MVT::v2i64, DAG.getBitcast(MVT::v2i64, LHS),
9854       Index, DAG.getTargetConstant(SplatVal, DL, MVT::i32));
9855   return DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, SplatNode);
9856 }
9857 
9858 /// LowerROTL - Custom lowering for ROTL(v1i128) to vector_shuffle(v16i8).
9859 /// We lower ROTL(v1i128) to vector_shuffle(v16i8) only if shift amount is
/// a multiple of 8. Otherwise convert it to a scalar rotation (i128),
/// i.e. (or (shl x, C1), (srl x, 128-C1)).
9862 SDValue PPCTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const {
9863   assert(Op.getOpcode() == ISD::ROTL && "Should only be called for ISD::ROTL");
9864   assert(Op.getValueType() == MVT::v1i128 &&
9865          "Only set v1i128 as custom, other type shouldn't reach here!");
9866   SDLoc dl(Op);
9867   SDValue N0 = peekThroughBitcasts(Op.getOperand(0));
9868   SDValue N1 = peekThroughBitcasts(Op.getOperand(1));
9869   unsigned SHLAmt = N1.getConstantOperandVal(0);
9870   if (SHLAmt % 8 == 0) {
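    // Build the identity byte mask <0, 1, ..., 15> and rotate it left by
    // SHLAmt / 8 bytes; e.g. SHLAmt == 8 yields the mask <1, 2, ..., 15, 0>.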
9871     SmallVector<int, 16> Mask(16, 0);
9872     std::iota(Mask.begin(), Mask.end(), 0);
9873     std::rotate(Mask.begin(), Mask.begin() + SHLAmt / 8, Mask.end());
9874     if (SDValue Shuffle =
9875             DAG.getVectorShuffle(MVT::v16i8, dl, DAG.getBitcast(MVT::v16i8, N0),
9876                                  DAG.getUNDEF(MVT::v16i8), Mask))
9877       return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, Shuffle);
9878   }
9879   SDValue ArgVal = DAG.getBitcast(MVT::i128, N0);
9880   SDValue SHLOp = DAG.getNode(ISD::SHL, dl, MVT::i128, ArgVal,
9881                               DAG.getConstant(SHLAmt, dl, MVT::i32));
9882   SDValue SRLOp = DAG.getNode(ISD::SRL, dl, MVT::i128, ArgVal,
9883                               DAG.getConstant(128 - SHLAmt, dl, MVT::i32));
9884   SDValue OROp = DAG.getNode(ISD::OR, dl, MVT::i128, SHLOp, SRLOp);
9885   return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, OROp);
9886 }
9887 
9888 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
9889 /// is a shuffle we can handle in a single instruction, return it.  Otherwise,
9890 /// return the code it can be lowered into.  Worst case, it can always be
9891 /// lowered into a vperm.
9892 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
9893                                                SelectionDAG &DAG) const {
9894   SDLoc dl(Op);
9895   SDValue V1 = Op.getOperand(0);
9896   SDValue V2 = Op.getOperand(1);
9897   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9898 
9899   // Any nodes that were combined in the target-independent combiner prior
9900   // to vector legalization will not be sent to the target combine. Try to
9901   // combine it here.
9902   if (SDValue NewShuffle = combineVectorShuffle(SVOp, DAG)) {
9903     if (!isa<ShuffleVectorSDNode>(NewShuffle))
9904       return NewShuffle;
9905     Op = NewShuffle;
9906     SVOp = cast<ShuffleVectorSDNode>(Op);
9907     V1 = Op.getOperand(0);
9908     V2 = Op.getOperand(1);
9909   }
9910   EVT VT = Op.getValueType();
9911   bool isLittleEndian = Subtarget.isLittleEndian();
9912 
9913   unsigned ShiftElts, InsertAtByte;
9914   bool Swap = false;
9915 
9916   // If this is a load-and-splat, we can do that with a single instruction
  // in some cases. However, if the load has multiple uses, we don't want to
9918   // combine it because that will just produce multiple loads.
9919   const SDValue *InputLoad = getNormalLoadInput(V1);
9920   if (InputLoad && Subtarget.hasVSX() && V2.isUndef() &&
9921       (PPC::isSplatShuffleMask(SVOp, 4) || PPC::isSplatShuffleMask(SVOp, 8)) &&
9922       InputLoad->hasOneUse()) {
9923     bool IsFourByte = PPC::isSplatShuffleMask(SVOp, 4);
9924     int SplatIdx =
9925       PPC::getSplatIdxForPPCMnemonics(SVOp, IsFourByte ? 4 : 8, DAG);
9926 
9927     LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9928     // For 4-byte load-and-splat, we need Power9.
9929     if ((IsFourByte && Subtarget.hasP9Vector()) || !IsFourByte) {
9930       uint64_t Offset = 0;
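      // The splat index follows PPC (big-endian) mnemonic numbering, so on
      // little-endian targets the element's byte offset counts from the
      // opposite end of the vector.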
9931       if (IsFourByte)
9932         Offset = isLittleEndian ? (3 - SplatIdx) * 4 : SplatIdx * 4;
9933       else
9934         Offset = isLittleEndian ? (1 - SplatIdx) * 8 : SplatIdx * 8;
9935 
9936       // If we are loading a partial vector, it does not make sense to adjust
9937       // the base pointer. This happens with (splat (s_to_v_permuted (ld))).
9938       if (LD->getMemoryVT().getSizeInBits() == (IsFourByte ? 32 : 64))
9939         Offset = 0;
9940       SDValue BasePtr = LD->getBasePtr();
9941       if (Offset != 0)
9942         BasePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
9943                               BasePtr, DAG.getIntPtrConstant(Offset, dl));
9944       SDValue Ops[] = {
9945         LD->getChain(),    // Chain
9946         BasePtr,           // BasePtr
9947         DAG.getValueType(Op.getValueType()) // VT
9948       };
9949       SDVTList VTL =
9950         DAG.getVTList(IsFourByte ? MVT::v4i32 : MVT::v2i64, MVT::Other);
9951       SDValue LdSplt =
9952         DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl, VTL,
9953                                 Ops, LD->getMemoryVT(), LD->getMemOperand());
9954       if (LdSplt.getValueType() != SVOp->getValueType(0))
9955         LdSplt = DAG.getBitcast(SVOp->getValueType(0), LdSplt);
9956       return LdSplt;
9957     }
9958   }
9959   if (Subtarget.hasP9Vector() &&
9960       PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
9961                            isLittleEndian)) {
9962     if (Swap)
9963       std::swap(V1, V2);
9964     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9965     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2);
9966     if (ShiftElts) {
9967       SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2,
9968                                 DAG.getConstant(ShiftElts, dl, MVT::i32));
9969       SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl,
9970                                 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9971       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9972     }
9973     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2,
9974                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
9975     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9976   }
9977 
9978   if (Subtarget.hasPrefixInstrs()) {
9979     SDValue SplatInsertNode;
9980     if ((SplatInsertNode = lowerToXXSPLTI32DX(SVOp, DAG)))
9981       return SplatInsertNode;
9982   }
9983 
9984   if (Subtarget.hasP9Altivec()) {
9985     SDValue NewISDNode;
9986     if ((NewISDNode = lowerToVINSERTH(SVOp, DAG)))
9987       return NewISDNode;
9988 
9989     if ((NewISDNode = lowerToVINSERTB(SVOp, DAG)))
9990       return NewISDNode;
9991   }
9992 
9993   if (Subtarget.hasVSX() &&
9994       PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
9995     if (Swap)
9996       std::swap(V1, V2);
9997     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9998     SDValue Conv2 =
9999         DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2);
10000 
10001     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2,
10002                               DAG.getConstant(ShiftElts, dl, MVT::i32));
10003     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl);
10004   }
10005 
  if (Subtarget.hasVSX() &&
      PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
10008     if (Swap)
10009       std::swap(V1, V2);
10010     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
10011     SDValue Conv2 =
10012         DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2);
10013 
    SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2,
                                 DAG.getConstant(ShiftElts, dl, MVT::i32));
10016     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI);
10017   }
10018 
10019   if (Subtarget.hasP9Vector()) {
    if (PPC::isXXBRHShuffleMask(SVOp)) {
10021       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
10022       SDValue ReveHWord = DAG.getNode(ISD::BSWAP, dl, MVT::v8i16, Conv);
10023       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord);
10024     } else if (PPC::isXXBRWShuffleMask(SVOp)) {
10025       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
10026       SDValue ReveWord = DAG.getNode(ISD::BSWAP, dl, MVT::v4i32, Conv);
10027       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord);
10028     } else if (PPC::isXXBRDShuffleMask(SVOp)) {
10029       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
10030       SDValue ReveDWord = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Conv);
10031       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord);
10032     } else if (PPC::isXXBRQShuffleMask(SVOp)) {
10033       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1);
10034       SDValue ReveQWord = DAG.getNode(ISD::BSWAP, dl, MVT::v1i128, Conv);
10035       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord);
10036     }
10037   }
10038 
10039   if (Subtarget.hasVSX()) {
10040     if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
10041       int SplatIdx = PPC::getSplatIdxForPPCMnemonics(SVOp, 4, DAG);
10042 
10043       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
10044       SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
10045                                   DAG.getConstant(SplatIdx, dl, MVT::i32));
10046       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
10047     }
10048 
10049     // Left shifts of 8 bytes are actually swaps. Convert accordingly.
10050     if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
10051       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
10052       SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
10053       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
10054     }
10055   }
10056 
10057   if (Subtarget.hasQPX()) {
10058     if (VT.getVectorNumElements() != 4)
10059       return SDValue();
10060 
10061     if (V2.isUndef()) V2 = V1;
10062 
10063     int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp);
10064     if (AlignIdx != -1) {
10065       return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2,
10066                          DAG.getConstant(AlignIdx, dl, MVT::i32));
10067     } else if (SVOp->isSplat()) {
10068       int SplatIdx = SVOp->getSplatIndex();
10069       if (SplatIdx >= 4) {
10070         std::swap(V1, V2);
10071         SplatIdx -= 4;
10072       }
10073 
10074       return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1,
10075                          DAG.getConstant(SplatIdx, dl, MVT::i32));
10076     }
10077 
10078     // Lower this into a qvgpci/qvfperm pair.
10079 
10080     // Compute the qvgpci literal
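    // Each lane selector is 3 bits wide; pack the four selectors MSB-first
    // into a 12-bit immediate (lane 0 in bits 11-9, lane 3 in bits 2-0).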
10081     unsigned idx = 0;
10082     for (unsigned i = 0; i < 4; ++i) {
10083       int m = SVOp->getMaskElt(i);
10084       unsigned mm = m >= 0 ? (unsigned) m : i;
10085       idx |= mm << (3-i)*3;
10086     }
10087 
10088     SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64,
10089                              DAG.getConstant(idx, dl, MVT::i32));
10090     return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3);
10091   }
10092 
10093   // Cases that are handled by instructions that take permute immediates
10094   // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
10095   // selected by the instruction selector.
10096   if (V2.isUndef()) {
10097     if (PPC::isSplatShuffleMask(SVOp, 1) ||
10098         PPC::isSplatShuffleMask(SVOp, 2) ||
10099         PPC::isSplatShuffleMask(SVOp, 4) ||
10100         PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
10101         PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
10102         PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
10103         PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
10104         PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
10105         PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
10106         PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
10107         PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
10108         PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) ||
10109         (Subtarget.hasP8Altivec() && (
10110          PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) ||
10111          PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) ||
10112          PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) {
10113       return Op;
10114     }
10115   }
10116 
10117   // Altivec has a variety of "shuffle immediates" that take two vector inputs
10118   // and produce a fixed permutation.  If any of these match, do not lower to
10119   // VPERM.
  unsigned ShuffleKind = isLittleEndian ? 2 : 0;
10121   if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
10122       PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
10123       PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
10124       PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
10125       PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
10126       PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
10127       PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
10128       PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
10129       PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
10130       (Subtarget.hasP8Altivec() && (
10131        PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) ||
10132        PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) ||
10133        PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG))))
10134     return Op;
10135 
10136   // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
10137   // perfect shuffle table to emit an optimal matching sequence.
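  // Each entry of PFIndexes below selects one of the eight 4-byte source
  // words (0-3 from V1, 4-7 from V2), or 8 for undef; the four entries form
  // the base-9 digits of the perfect-shuffle table index.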
10138   ArrayRef<int> PermMask = SVOp->getMask();
10139 
10140   unsigned PFIndexes[4];
10141   bool isFourElementShuffle = true;
10142   for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
10143     unsigned EltNo = 8;   // Start out undef.
10144     for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
10145       if (PermMask[i*4+j] < 0)
10146         continue;   // Undef, ignore it.
10147 
10148       unsigned ByteSource = PermMask[i*4+j];
10149       if ((ByteSource & 3) != j) {
10150         isFourElementShuffle = false;
10151         break;
10152       }
10153 
10154       if (EltNo == 8) {
10155         EltNo = ByteSource/4;
10156       } else if (EltNo != ByteSource/4) {
10157         isFourElementShuffle = false;
10158         break;
10159       }
10160     }
10161     PFIndexes[i] = EltNo;
10162   }
10163 
10164   // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
10165   // perfect shuffle vector to determine if it is cost effective to do this as
10166   // discrete instructions, or whether we should use a vperm.
10167   // For now, we skip this for little endian until such time as we have a
10168   // little-endian perfect shuffle table.
10169   if (isFourElementShuffle && !isLittleEndian) {
10170     // Compute the index in the perfect shuffle table.
10171     unsigned PFTableIndex =
10172       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
10173 
10174     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
10175     unsigned Cost  = (PFEntry >> 30);
10176 
10177     // Determining when to avoid vperm is tricky.  Many things affect the cost
10178     // of vperm, particularly how many times the perm mask needs to be computed.
10179     // For example, if the perm mask can be hoisted out of a loop or is already
10180     // used (perhaps because there are multiple permutes with the same shuffle
10181     // mask?) the vperm has a cost of 1.  OTOH, hoisting the permute mask out of
10182     // the loop requires an extra register.
10183     //
10184     // As a compromise, we only emit discrete instructions if the shuffle can be
10185     // generated in 3 or fewer operations.  When we have loop information
10186     // available, if this block is within a loop, we should avoid using vperm
10187     // for 3-operation perms and use a constant pool load instead.
10188     if (Cost < 3)
10189       return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
10190   }
10191 
10192   // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
10193   // vector that will get spilled to the constant pool.
10194   if (V2.isUndef()) V2 = V1;
10195 
  // The VECTOR_SHUFFLE mask is almost exactly what we want for vperm, except
  // that it is in input element units, not in bytes.  Convert now.
10198 
10199   // For little endian, the order of the input vectors is reversed, and
10200   // the permutation mask is complemented with respect to 31.  This is
10201   // necessary to produce proper semantics with the big-endian-biased vperm
10202   // instruction.
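  // e.g. a source byte index of 0 becomes permute-control byte 31 - 0 == 31,
  // and the operands are passed to VPERM as (V2, V1) below.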
10203   EVT EltVT = V1.getValueType().getVectorElementType();
10204   unsigned BytesPerElement = EltVT.getSizeInBits()/8;
10205 
10206   SmallVector<SDValue, 16> ResultMask;
10207   for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
10208     unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
10209 
10210     for (unsigned j = 0; j != BytesPerElement; ++j)
10211       if (isLittleEndian)
10212         ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
10213                                              dl, MVT::i32));
10214       else
10215         ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
10216                                              MVT::i32));
10217   }
10218 
10219   ShufflesHandledWithVPERM++;
10220   SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
10221   LLVM_DEBUG(dbgs() << "Emitting a VPERM for the following shuffle:\n");
10222   LLVM_DEBUG(SVOp->dump());
10223   LLVM_DEBUG(dbgs() << "With the following permute control vector:\n");
10224   LLVM_DEBUG(VPermMask.dump());
10225 
10226   if (isLittleEndian)
10227     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
10228                        V2, V1, VPermMask);
10229   else
10230     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
10231                        V1, V2, VPermMask);
10232 }
10233 
10234 /// getVectorCompareInfo - Given an intrinsic, return false if it is not a
10235 /// vector comparison.  If it is, return true and fill in Opc/isDot with
10236 /// information about the intrinsic.
10237 static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
10238                                  bool &isDot, const PPCSubtarget &Subtarget) {
10239   unsigned IntrinsicID =
10240       cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
10241   CompareOpc = -1;
10242   isDot = false;
10243   switch (IntrinsicID) {
10244   default:
10245     return false;
10246   // Comparison predicates.
10247   case Intrinsic::ppc_altivec_vcmpbfp_p:
10248     CompareOpc = 966;
10249     isDot = true;
10250     break;
10251   case Intrinsic::ppc_altivec_vcmpeqfp_p:
10252     CompareOpc = 198;
10253     isDot = true;
10254     break;
10255   case Intrinsic::ppc_altivec_vcmpequb_p:
10256     CompareOpc = 6;
10257     isDot = true;
10258     break;
10259   case Intrinsic::ppc_altivec_vcmpequh_p:
10260     CompareOpc = 70;
10261     isDot = true;
10262     break;
10263   case Intrinsic::ppc_altivec_vcmpequw_p:
10264     CompareOpc = 134;
10265     isDot = true;
10266     break;
10267   case Intrinsic::ppc_altivec_vcmpequd_p:
10268     if (Subtarget.hasP8Altivec()) {
10269       CompareOpc = 199;
10270       isDot = true;
10271     } else
10272       return false;
10273     break;
10274   case Intrinsic::ppc_altivec_vcmpneb_p:
10275   case Intrinsic::ppc_altivec_vcmpneh_p:
10276   case Intrinsic::ppc_altivec_vcmpnew_p:
10277   case Intrinsic::ppc_altivec_vcmpnezb_p:
10278   case Intrinsic::ppc_altivec_vcmpnezh_p:
10279   case Intrinsic::ppc_altivec_vcmpnezw_p:
10280     if (Subtarget.hasP9Altivec()) {
10281       switch (IntrinsicID) {
10282       default:
10283         llvm_unreachable("Unknown comparison intrinsic.");
10284       case Intrinsic::ppc_altivec_vcmpneb_p:
10285         CompareOpc = 7;
10286         break;
10287       case Intrinsic::ppc_altivec_vcmpneh_p:
10288         CompareOpc = 71;
10289         break;
10290       case Intrinsic::ppc_altivec_vcmpnew_p:
10291         CompareOpc = 135;
10292         break;
10293       case Intrinsic::ppc_altivec_vcmpnezb_p:
10294         CompareOpc = 263;
10295         break;
10296       case Intrinsic::ppc_altivec_vcmpnezh_p:
10297         CompareOpc = 327;
10298         break;
10299       case Intrinsic::ppc_altivec_vcmpnezw_p:
10300         CompareOpc = 391;
10301         break;
10302       }
10303       isDot = true;
10304     } else
10305       return false;
10306     break;
10307   case Intrinsic::ppc_altivec_vcmpgefp_p:
10308     CompareOpc = 454;
10309     isDot = true;
10310     break;
10311   case Intrinsic::ppc_altivec_vcmpgtfp_p:
10312     CompareOpc = 710;
10313     isDot = true;
10314     break;
10315   case Intrinsic::ppc_altivec_vcmpgtsb_p:
10316     CompareOpc = 774;
10317     isDot = true;
10318     break;
10319   case Intrinsic::ppc_altivec_vcmpgtsh_p:
10320     CompareOpc = 838;
10321     isDot = true;
10322     break;
10323   case Intrinsic::ppc_altivec_vcmpgtsw_p:
10324     CompareOpc = 902;
10325     isDot = true;
10326     break;
10327   case Intrinsic::ppc_altivec_vcmpgtsd_p:
10328     if (Subtarget.hasP8Altivec()) {
10329       CompareOpc = 967;
10330       isDot = true;
10331     } else
10332       return false;
10333     break;
10334   case Intrinsic::ppc_altivec_vcmpgtub_p:
10335     CompareOpc = 518;
10336     isDot = true;
10337     break;
10338   case Intrinsic::ppc_altivec_vcmpgtuh_p:
10339     CompareOpc = 582;
10340     isDot = true;
10341     break;
10342   case Intrinsic::ppc_altivec_vcmpgtuw_p:
10343     CompareOpc = 646;
10344     isDot = true;
10345     break;
10346   case Intrinsic::ppc_altivec_vcmpgtud_p:
10347     if (Subtarget.hasP8Altivec()) {
10348       CompareOpc = 711;
10349       isDot = true;
10350     } else
10351       return false;
10352     break;
10353 
  // VSX predicate comparisons use the same infrastructure.
10355   case Intrinsic::ppc_vsx_xvcmpeqdp_p:
10356   case Intrinsic::ppc_vsx_xvcmpgedp_p:
10357   case Intrinsic::ppc_vsx_xvcmpgtdp_p:
10358   case Intrinsic::ppc_vsx_xvcmpeqsp_p:
10359   case Intrinsic::ppc_vsx_xvcmpgesp_p:
10360   case Intrinsic::ppc_vsx_xvcmpgtsp_p:
10361     if (Subtarget.hasVSX()) {
10362       switch (IntrinsicID) {
10363       case Intrinsic::ppc_vsx_xvcmpeqdp_p:
10364         CompareOpc = 99;
10365         break;
10366       case Intrinsic::ppc_vsx_xvcmpgedp_p:
10367         CompareOpc = 115;
10368         break;
10369       case Intrinsic::ppc_vsx_xvcmpgtdp_p:
10370         CompareOpc = 107;
10371         break;
10372       case Intrinsic::ppc_vsx_xvcmpeqsp_p:
10373         CompareOpc = 67;
10374         break;
10375       case Intrinsic::ppc_vsx_xvcmpgesp_p:
10376         CompareOpc = 83;
10377         break;
10378       case Intrinsic::ppc_vsx_xvcmpgtsp_p:
10379         CompareOpc = 75;
10380         break;
10381       }
10382       isDot = true;
10383     } else
10384       return false;
10385     break;
10386 
  // Normal comparisons.
10388   case Intrinsic::ppc_altivec_vcmpbfp:
10389     CompareOpc = 966;
10390     break;
10391   case Intrinsic::ppc_altivec_vcmpeqfp:
10392     CompareOpc = 198;
10393     break;
10394   case Intrinsic::ppc_altivec_vcmpequb:
10395     CompareOpc = 6;
10396     break;
10397   case Intrinsic::ppc_altivec_vcmpequh:
10398     CompareOpc = 70;
10399     break;
10400   case Intrinsic::ppc_altivec_vcmpequw:
10401     CompareOpc = 134;
10402     break;
10403   case Intrinsic::ppc_altivec_vcmpequd:
10404     if (Subtarget.hasP8Altivec())
10405       CompareOpc = 199;
10406     else
10407       return false;
10408     break;
10409   case Intrinsic::ppc_altivec_vcmpneb:
10410   case Intrinsic::ppc_altivec_vcmpneh:
10411   case Intrinsic::ppc_altivec_vcmpnew:
10412   case Intrinsic::ppc_altivec_vcmpnezb:
10413   case Intrinsic::ppc_altivec_vcmpnezh:
10414   case Intrinsic::ppc_altivec_vcmpnezw:
10415     if (Subtarget.hasP9Altivec())
10416       switch (IntrinsicID) {
10417       default:
10418         llvm_unreachable("Unknown comparison intrinsic.");
10419       case Intrinsic::ppc_altivec_vcmpneb:
10420         CompareOpc = 7;
10421         break;
10422       case Intrinsic::ppc_altivec_vcmpneh:
10423         CompareOpc = 71;
10424         break;
10425       case Intrinsic::ppc_altivec_vcmpnew:
10426         CompareOpc = 135;
10427         break;
10428       case Intrinsic::ppc_altivec_vcmpnezb:
10429         CompareOpc = 263;
10430         break;
10431       case Intrinsic::ppc_altivec_vcmpnezh:
10432         CompareOpc = 327;
10433         break;
10434       case Intrinsic::ppc_altivec_vcmpnezw:
10435         CompareOpc = 391;
10436         break;
10437       }
10438     else
10439       return false;
10440     break;
10441   case Intrinsic::ppc_altivec_vcmpgefp:
10442     CompareOpc = 454;
10443     break;
10444   case Intrinsic::ppc_altivec_vcmpgtfp:
10445     CompareOpc = 710;
10446     break;
10447   case Intrinsic::ppc_altivec_vcmpgtsb:
10448     CompareOpc = 774;
10449     break;
10450   case Intrinsic::ppc_altivec_vcmpgtsh:
10451     CompareOpc = 838;
10452     break;
10453   case Intrinsic::ppc_altivec_vcmpgtsw:
10454     CompareOpc = 902;
10455     break;
10456   case Intrinsic::ppc_altivec_vcmpgtsd:
10457     if (Subtarget.hasP8Altivec())
10458       CompareOpc = 967;
10459     else
10460       return false;
10461     break;
10462   case Intrinsic::ppc_altivec_vcmpgtub:
10463     CompareOpc = 518;
10464     break;
10465   case Intrinsic::ppc_altivec_vcmpgtuh:
10466     CompareOpc = 582;
10467     break;
10468   case Intrinsic::ppc_altivec_vcmpgtuw:
10469     CompareOpc = 646;
10470     break;
10471   case Intrinsic::ppc_altivec_vcmpgtud:
10472     if (Subtarget.hasP8Altivec())
10473       CompareOpc = 711;
10474     else
10475       return false;
10476     break;
10477   }
10478   return true;
10479 }
10480 
10481 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
10482 /// lower, do it, otherwise return null.
10483 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
10484                                                    SelectionDAG &DAG) const {
10485   unsigned IntrinsicID =
10486     cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
10487 
10488   SDLoc dl(Op);
10489 
10490   if (IntrinsicID == Intrinsic::thread_pointer) {
10491     // Reads the thread pointer register, used for __builtin_thread_pointer.
10492     if (Subtarget.isPPC64())
10493       return DAG.getRegister(PPC::X13, MVT::i64);
10494     return DAG.getRegister(PPC::R2, MVT::i32);
10495   }
10496 
10497   // If this is a lowered altivec predicate compare, CompareOpc is set to the
10498   // opcode number of the comparison.
10499   int CompareOpc;
10500   bool isDot;
10501   if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget))
10502     return SDValue();    // Don't custom lower most intrinsics.
10503 
10504   // If this is a non-dot comparison, make the VCMP node and we are done.
10505   if (!isDot) {
10506     SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
10507                               Op.getOperand(1), Op.getOperand(2),
10508                               DAG.getConstant(CompareOpc, dl, MVT::i32));
10509     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
10510   }
10511 
10512   // Create the PPCISD altivec 'dot' comparison node.
10513   SDValue Ops[] = {
10514     Op.getOperand(2),  // LHS
10515     Op.getOperand(3),  // RHS
10516     DAG.getConstant(CompareOpc, dl, MVT::i32)
10517   };
10518   EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
10519   SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);
10520 
10521   // Now that we have the comparison, emit a copy from the CR to a GPR.
10522   // This is flagged to the above dot comparison.
10523   SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
10524                                 DAG.getRegister(PPC::CR6, MVT::i32),
10525                                 CompNode.getValue(1));
10526 
10527   // Unpack the result based on how the target uses it.
10528   unsigned BitNo;   // Bit # of CR6.
10529   bool InvertBit;   // Invert result?
10530   switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
10531   default:  // Can't happen, don't crash on invalid number though.
10532   case 0:   // Return the value of the EQ bit of CR6.
10533     BitNo = 0; InvertBit = false;
10534     break;
10535   case 1:   // Return the inverted value of the EQ bit of CR6.
10536     BitNo = 0; InvertBit = true;
10537     break;
10538   case 2:   // Return the value of the LT bit of CR6.
10539     BitNo = 2; InvertBit = false;
10540     break;
10541   case 3:   // Return the inverted value of the LT bit of CR6.
10542     BitNo = 2; InvertBit = true;
10543     break;
10544   }
10545 
10546   // Shift the bit into the low position.
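  // (BitNo == 0 (EQ) yields a shift amount of 5; BitNo == 2 (LT) yields 7.)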
10547   Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
10548                       DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
10549   // Isolate the bit.
10550   Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
10551                       DAG.getConstant(1, dl, MVT::i32));
10552 
10553   // If we are supposed to, toggle the bit.
10554   if (InvertBit)
10555     Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
10556                         DAG.getConstant(1, dl, MVT::i32));
10557   return Flags;
10558 }
10559 
10560 SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
10561                                                SelectionDAG &DAG) const {
10562   // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to
10563   // the beginning of the argument list.
10564   int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
10565   SDLoc DL(Op);
10566   switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) {
10567   case Intrinsic::ppc_cfence: {
10568     assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
10569     assert(Subtarget.isPPC64() && "Only 64-bit is supported for now.");
10570     return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other,
10571                                       DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
10572                                                   Op.getOperand(ArgStart + 1)),
10573                                       Op.getOperand(0)),
10574                    0);
10575   }
10576   default:
10577     break;
10578   }
10579   return SDValue();
10580 }
10581 
10582 // Lower scalar BSWAP64 to xxbrd.
10583 SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const {
10584   SDLoc dl(Op);
10585   // MTVSRDD
10586   Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0),
10587                    Op.getOperand(0));
10588   // XXBRD
10589   Op = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Op);
10590   // MFVSRD
10591   int VectorIndex = 0;
10592   if (Subtarget.isLittleEndian())
10593     VectorIndex = 1;
10594   Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op,
10595                    DAG.getTargetConstant(VectorIndex, dl, MVT::i32));
10596   return Op;
10597 }
10598 
10599 // ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be
10600 // compared to a value that is atomically loaded (atomic loads zero-extend).
10601 SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op,
10602                                                 SelectionDAG &DAG) const {
10603   assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP &&
10604          "Expecting an atomic compare-and-swap here.");
10605   SDLoc dl(Op);
10606   auto *AtomicNode = cast<AtomicSDNode>(Op.getNode());
10607   EVT MemVT = AtomicNode->getMemoryVT();
10608   if (MemVT.getSizeInBits() >= 32)
10609     return Op;
10610 
10611   SDValue CmpOp = Op.getOperand(2);
10612   // If this is already correctly zero-extended, leave it alone.
10613   auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits());
10614   if (DAG.MaskedValueIsZero(CmpOp, HighBits))
10615     return Op;
10616 
10617   // Clear the high bits of the compare operand.
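  // (MaskVal is 0xFF for i8 and 0xFFFF for i16.)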
10618   unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1;
10619   SDValue NewCmpOp =
10620     DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp,
10621                 DAG.getConstant(MaskVal, dl, MVT::i32));
10622 
10623   // Replace the existing compare operand with the properly zero-extended one.
10624   SmallVector<SDValue, 4> Ops;
10625   for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
10626     Ops.push_back(AtomicNode->getOperand(i));
10627   Ops[2] = NewCmpOp;
10628   MachineMemOperand *MMO = AtomicNode->getMemOperand();
10629   SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other);
10630   auto NodeTy =
10631     (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16;
10632   return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO);
10633 }
10634 
10635 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
10636                                                  SelectionDAG &DAG) const {
10637   SDLoc dl(Op);
10638   // Create a stack slot that is 16-byte aligned.
10639   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
10640   int FrameIdx = MFI.CreateStackObject(16, Align(16), false);
10641   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10642   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
10643 
10644   // Store the input value into Value#0 of the stack slot.
10645   SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
10646                                MachinePointerInfo());
10647   // Load it out.
10648   return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
10649 }
10650 
10651 SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
10652                                                   SelectionDAG &DAG) const {
10653   assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
10654          "Should only be called for ISD::INSERT_VECTOR_ELT");
10655 
10656   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));
10657   // We have legal lowering for constant indices but not for variable ones.
10658   if (!C)
10659     return SDValue();
10660 
10661   EVT VT = Op.getValueType();
10662   SDLoc dl(Op);
10663   SDValue V1 = Op.getOperand(0);
10664   SDValue V2 = Op.getOperand(1);
10665   // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types.
10666   if (VT == MVT::v8i16 || VT == MVT::v16i8) {
10667     SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2);
10668     unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8;
10669     unsigned InsertAtElement = C->getZExtValue();
10670     unsigned InsertAtByte = InsertAtElement * BytesInEachElement;
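    // VECINSERT byte offsets follow big-endian element numbering, so mirror
    // the offset within the 16-byte vector for little endian.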
10671     if (Subtarget.isLittleEndian()) {
10672       InsertAtByte = (16 - BytesInEachElement) - InsertAtByte;
10673     }
10674     return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz,
10675                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
10676   }
10677   return Op;
10678 }
10679 
10680 SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
10681                                                    SelectionDAG &DAG) const {
10682   SDLoc dl(Op);
10683   SDNode *N = Op.getNode();
10684 
10685   assert(N->getOperand(0).getValueType() == MVT::v4i1 &&
10686          "Unknown extract_vector_elt type");
10687 
10688   SDValue Value = N->getOperand(0);
10689 
10690   // The first part of this is like the store lowering except that we don't
10691   // need to track the chain.
10692 
10693   // The values are now known to be -1 (false) or 1 (true). To convert this
10694   // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
10695   // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
10696   Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
10697 
10698   // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
10699   // understand how to form the extending load.
10700   SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
10701 
10702   Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
10703 
10704   // Now convert to an integer and store.
10705   Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
10706     DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
10707     Value);
10708 
10709   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
10710   int FrameIdx = MFI.CreateStackObject(16, Align(16), false);
10711   MachinePointerInfo PtrInfo =
10712       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
10713   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10714   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
10715 
10716   SDValue StoreChain = DAG.getEntryNode();
10717   SDValue Ops[] = {StoreChain,
10718                    DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
10719                    Value, FIdx};
10720   SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);
10721 
10722   StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
10723     dl, VTs, Ops, MVT::v4i32, PtrInfo);
10724 
10725   // Extract the value requested.
10726   unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
10727   SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
10728   Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
10729 
10730   SDValue IntVal =
10731       DAG.getLoad(MVT::i32, dl, StoreChain, Idx, PtrInfo.getWithOffset(Offset));
10732 
10733   if (!Subtarget.useCRBits())
10734     return IntVal;
10735 
10736   return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal);
10737 }
10738 
10739 /// Lowering for QPX v4i1 loads
10740 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
10741                                            SelectionDAG &DAG) const {
10742   SDLoc dl(Op);
10743   LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
10744   SDValue LoadChain = LN->getChain();
10745   SDValue BasePtr = LN->getBasePtr();
10746 
10747   if (Op.getValueType() == MVT::v4f64 ||
10748       Op.getValueType() == MVT::v4f32) {
10749     EVT MemVT = LN->getMemoryVT();
10750     unsigned Alignment = LN->getAlignment();
10751 
10752     // If this load is properly aligned, then it is legal.
10753     if (Alignment >= MemVT.getStoreSize())
10754       return Op;
10755 
10756     EVT ScalarVT = Op.getValueType().getScalarType(),
10757         ScalarMemVT = MemVT.getScalarType();
10758     unsigned Stride = ScalarMemVT.getStoreSize();
10759 
10760     SDValue Vals[4], LoadChains[4];
10761     for (unsigned Idx = 0; Idx < 4; ++Idx) {
10762       SDValue Load;
10763       if (ScalarVT != ScalarMemVT)
10764         Load = DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain,
10765                               BasePtr,
10766                               LN->getPointerInfo().getWithOffset(Idx * Stride),
10767                               ScalarMemVT, MinAlign(Alignment, Idx * Stride),
10768                               LN->getMemOperand()->getFlags(), LN->getAAInfo());
10769       else
10770         Load = DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr,
10771                            LN->getPointerInfo().getWithOffset(Idx * Stride),
10772                            MinAlign(Alignment, Idx * Stride),
10773                            LN->getMemOperand()->getFlags(), LN->getAAInfo());
10774 
10775       if (Idx == 0 && LN->isIndexed()) {
10776         assert(LN->getAddressingMode() == ISD::PRE_INC &&
10777                "Unknown addressing mode on vector load");
10778         Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(),
10779                                   LN->getAddressingMode());
10780       }
10781 
10782       Vals[Idx] = Load;
10783       LoadChains[Idx] = Load.getValue(1);
10784 
10785       BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
10786                             DAG.getConstant(Stride, dl,
10787                                             BasePtr.getValueType()));
10788     }
10789 
    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
10791     SDValue Value = DAG.getBuildVector(Op.getValueType(), dl, Vals);
10792 
10793     if (LN->isIndexed()) {
10794       SDValue RetOps[] = { Value, Vals[0].getValue(1), TF };
10795       return DAG.getMergeValues(RetOps, dl);
10796     }
10797 
10798     SDValue RetOps[] = { Value, TF };
10799     return DAG.getMergeValues(RetOps, dl);
10800   }
10801 
10802   assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower");
10803   assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported");
10804 
10805   // To lower v4i1 from a byte array, we load the byte elements of the
10806   // vector and then reuse the BUILD_VECTOR logic.
10807 
10808   SDValue VectElmts[4], VectElmtChains[4];
10809   for (unsigned i = 0; i < 4; ++i) {
10810     SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
10811     Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);
10812 
10813     VectElmts[i] = DAG.getExtLoad(
10814         ISD::EXTLOAD, dl, MVT::i32, LoadChain, Idx,
10815         LN->getPointerInfo().getWithOffset(i), MVT::i8,
10816         /* Alignment = */ 1, LN->getMemOperand()->getFlags(), LN->getAAInfo());
10817     VectElmtChains[i] = VectElmts[i].getValue(1);
10818   }
10819 
10820   LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains);
10821   SDValue Value = DAG.getBuildVector(MVT::v4i1, dl, VectElmts);
10822 
10823   SDValue RVals[] = { Value, LoadChain };
10824   return DAG.getMergeValues(RVals, dl);
10825 }
10826 
10827 /// Lowering for QPX v4i1 stores
10828 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
10829                                             SelectionDAG &DAG) const {
10830   SDLoc dl(Op);
10831   StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
10832   SDValue StoreChain = SN->getChain();
10833   SDValue BasePtr = SN->getBasePtr();
10834   SDValue Value = SN->getValue();
10835 
10836   if (Value.getValueType() == MVT::v4f64 ||
10837       Value.getValueType() == MVT::v4f32) {
10838     EVT MemVT = SN->getMemoryVT();
10839     unsigned Alignment = SN->getAlignment();
10840 
10841     // If this store is properly aligned, then it is legal.
10842     if (Alignment >= MemVT.getStoreSize())
10843       return Op;
10844 
10845     EVT ScalarVT = Value.getValueType().getScalarType(),
10846         ScalarMemVT = MemVT.getScalarType();
10847     unsigned Stride = ScalarMemVT.getStoreSize();
10848 
10849     SDValue Stores[4];
10850     for (unsigned Idx = 0; Idx < 4; ++Idx) {
10851       SDValue Ex = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value,
10852                                DAG.getVectorIdxConstant(Idx, dl));
10853       SDValue Store;
10854       if (ScalarVT != ScalarMemVT)
10855         Store =
10856             DAG.getTruncStore(StoreChain, dl, Ex, BasePtr,
10857                               SN->getPointerInfo().getWithOffset(Idx * Stride),
10858                               ScalarMemVT, MinAlign(Alignment, Idx * Stride),
10859                               SN->getMemOperand()->getFlags(), SN->getAAInfo());
10860       else
10861         Store = DAG.getStore(StoreChain, dl, Ex, BasePtr,
10862                              SN->getPointerInfo().getWithOffset(Idx * Stride),
10863                              MinAlign(Alignment, Idx * Stride),
10864                              SN->getMemOperand()->getFlags(), SN->getAAInfo());
10865 
10866       if (Idx == 0 && SN->isIndexed()) {
10867         assert(SN->getAddressingMode() == ISD::PRE_INC &&
10868                "Unknown addressing mode on vector store");
10869         Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(),
10870                                     SN->getAddressingMode());
10871       }
10872 
10873       BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
10874                             DAG.getConstant(Stride, dl,
10875                                             BasePtr.getValueType()));
10876       Stores[Idx] = Store;
10877     }
10878 
    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
10880 
10881     if (SN->isIndexed()) {
10882       SDValue RetOps[] = { TF, Stores[0].getValue(1) };
10883       return DAG.getMergeValues(RetOps, dl);
10884     }
10885 
10886     return TF;
10887   }
10888 
10889   assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported");
10890   assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower");
10891 
10892   // The values are now known to be -1 (false) or 1 (true). To convert this
10893   // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
10894   // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
10895   Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
10896 
10897   // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
10898   // understand how to form the extending load.
10899   SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
10900 
10901   Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
10902 
10903   // Now convert to an integer and store.
10904   Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
10905     DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
10906     Value);
10907 
10908   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
10909   int FrameIdx = MFI.CreateStackObject(16, Align(16), false);
10910   MachinePointerInfo PtrInfo =
10911       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
10912   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10913   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
10914 
10915   SDValue Ops[] = {StoreChain,
10916                    DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
10917                    Value, FIdx};
10918   SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);
10919 
10920   StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
10921     dl, VTs, Ops, MVT::v4i32, PtrInfo);
10922 
10923   // Move data into the byte array.
10924   SDValue Loads[4], LoadChains[4];
10925   for (unsigned i = 0; i < 4; ++i) {
10926     unsigned Offset = 4*i;
10927     SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
10928     Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
10929 
10930     Loads[i] = DAG.getLoad(MVT::i32, dl, StoreChain, Idx,
10931                            PtrInfo.getWithOffset(Offset));
10932     LoadChains[i] = Loads[i].getValue(1);
10933   }
10934 
10935   StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
10936 
10937   SDValue Stores[4];
10938   for (unsigned i = 0; i < 4; ++i) {
10939     SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
10940     Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);
10941 
10942     Stores[i] = DAG.getTruncStore(
10943         StoreChain, dl, Loads[i], Idx, SN->getPointerInfo().getWithOffset(i),
10944         MVT::i8, /* Alignment = */ 1, SN->getMemOperand()->getFlags(),
10945         SN->getAAInfo());
10946   }
10947 
10948   StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
10949 
10950   return StoreChain;
10951 }
10952 
10953 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
10954   SDLoc dl(Op);
10955   if (Op.getValueType() == MVT::v4i32) {
10956     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
10957 
10958     SDValue Zero = getCanonicalConstSplat(0, 1, MVT::v4i32, DAG, dl);
    // Splat -16 as the shift amount; vrlw/vslw use only the low 5 bits of
    // each element, so -16 is equivalent to +16.
    SDValue Neg16 = getCanonicalConstSplat(-16, 4, MVT::v4i32, DAG, dl);
10961     SDValue RHSSwap =   // = vrlw RHS, 16
10962       BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
10963 
10964     // Shrinkify inputs to v8i16.
10965     LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
10966     RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
10967     RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);
10968 
10969     // Low parts multiplied together, generating 32-bit results (we ignore the
10970     // top parts).
10971     SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
10972                                         LHS, RHS, DAG, dl, MVT::v4i32);
10973 
10974     SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
10975                                       LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
10976     // Shift the high parts up 16 bits.
10977     HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
10978                               Neg16, DAG, dl);
10979     return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
10980   } else if (Op.getValueType() == MVT::v16i8) {
10981     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
10982     bool isLittleEndian = Subtarget.isLittleEndian();
10983 
10984     // Multiply the even 8-bit parts, producing 16-bit sums.
10985     SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
10986                                            LHS, RHS, DAG, dl, MVT::v8i16);
10987     EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);
10988 
10989     // Multiply the odd 8-bit parts, producing 16-bit sums.
10990     SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
10991                                           LHS, RHS, DAG, dl, MVT::v8i16);
10992     OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);
10993 
10994     // Merge the results together.  Because vmuleub and vmuloub are
10995     // instructions with a big-endian bias, we must reverse the
10996     // element numbering and reverse the meaning of "odd" and "even"
10997     // when generating little endian code.
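    // For example, on big endian the mask is <1, 17, 3, 19, ..., 15, 31>,
    // selecting the low byte of each 16-bit partial product.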
10998     int Ops[16];
10999     for (unsigned i = 0; i != 8; ++i) {
11000       if (isLittleEndian) {
11001         Ops[i*2  ] = 2*i;
11002         Ops[i*2+1] = 2*i+16;
11003       } else {
11004         Ops[i*2  ] = 2*i+1;
11005         Ops[i*2+1] = 2*i+1+16;
11006       }
11007     }
11008     if (isLittleEndian)
11009       return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
11010     else
11011       return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
11012   } else {
11013     llvm_unreachable("Unknown mul to lower!");
11014   }
11015 }
11016 
11017 SDValue PPCTargetLowering::LowerABS(SDValue Op, SelectionDAG &DAG) const {
11018 
11019   assert(Op.getOpcode() == ISD::ABS && "Should only be called for ISD::ABS");
11020 
11021   EVT VT = Op.getValueType();
11022   assert(VT.isVector() &&
11023          "Only set vector abs as custom, scalar abs shouldn't reach here!");
11024   assert((VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
11025           VT == MVT::v16i8) &&
11026          "Unexpected vector element type!");
11027   assert((VT != MVT::v2i64 || Subtarget.hasP8Altivec()) &&
11028          "Current subtarget doesn't support smax v2i64!");
11029 
11030   // For vector abs, it can be lowered to:
11031   // abs x
11032   // ==>
11033   // y = -x
11034   // smax(x, y)
11035 
11036   SDLoc dl(Op);
11037   SDValue X = Op.getOperand(0);
11038   SDValue Zero = DAG.getConstant(0, dl, VT);
11039   SDValue Y = DAG.getNode(ISD::SUB, dl, VT, Zero, X);
11040 
  // The SMAX patch (https://reviews.llvm.org/D47332) hasn't landed yet, so
  // use the intrinsics for now.
  // TODO: Use ISD::SMAX directly once the SMAX patch has landed.
11044   Intrinsic::ID BifID = Intrinsic::ppc_altivec_vmaxsw;
11045   if (VT == MVT::v2i64)
11046     BifID = Intrinsic::ppc_altivec_vmaxsd;
11047   else if (VT == MVT::v8i16)
11048     BifID = Intrinsic::ppc_altivec_vmaxsh;
11049   else if (VT == MVT::v16i8)
11050     BifID = Intrinsic::ppc_altivec_vmaxsb;
11051 
11052   return BuildIntrinsicOp(BifID, X, Y, DAG, dl, VT);
11053 }
11054 
// Custom lowering for fpext v2f32 to v2f64
11056 SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
11057 
11058   assert(Op.getOpcode() == ISD::FP_EXTEND &&
11059          "Should only be called for ISD::FP_EXTEND");
11060 
11061   // FIXME: handle extends from half precision float vectors on P9.
11062   // We only want to custom lower an extend from v2f32 to v2f64.
11063   if (Op.getValueType() != MVT::v2f64 ||
11064       Op.getOperand(0).getValueType() != MVT::v2f32)
11065     return SDValue();
11066 
11067   SDLoc dl(Op);
11068   SDValue Op0 = Op.getOperand(0);
11069 
11070   switch (Op0.getOpcode()) {
11071   default:
11072     return SDValue();
11073   case ISD::EXTRACT_SUBVECTOR: {
11074     assert(Op0.getNumOperands() == 2 &&
11075            isa<ConstantSDNode>(Op0->getOperand(1)) &&
11076            "Node should have 2 operands with second one being a constant!");
11077 
11078     if (Op0.getOperand(0).getValueType() != MVT::v4f32)
11079       return SDValue();
11080 
11081     // Custom lower is only done for high or low doubleword.
11082     int Idx = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
11083     if (Idx % 2 != 0)
11084       return SDValue();
11085 
11086     // Since input is v4f32, at this point Idx is either 0 or 2.
11087     // Shift to get the doubleword position we want.
11088     int DWord = Idx >> 1;
11089 
11090     // High and low word positions are different on little endian.
11091     if (Subtarget.isLittleEndian())
11092       DWord ^= 0x1;
11093 
11094     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64,
11095                        Op0.getOperand(0), DAG.getConstant(DWord, dl, MVT::i32));
11096   }
11097   case ISD::FADD:
11098   case ISD::FMUL:
11099   case ISD::FSUB: {
11100     SDValue NewLoad[2];
11101     for (unsigned i = 0, ie = Op0.getNumOperands(); i != ie; ++i) {
      // Ensure both inputs are loads.
11103       SDValue LdOp = Op0.getOperand(i);
11104       if (LdOp.getOpcode() != ISD::LOAD)
11105         return SDValue();
11106       // Generate new load node.
11107       LoadSDNode *LD = cast<LoadSDNode>(LdOp);
11108       SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
11109       NewLoad[i] = DAG.getMemIntrinsicNode(
11110           PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
11111           LD->getMemoryVT(), LD->getMemOperand());
11112     }
11113     SDValue NewOp =
11114         DAG.getNode(Op0.getOpcode(), SDLoc(Op0), MVT::v4f32, NewLoad[0],
11115                     NewLoad[1], Op0.getNode()->getFlags());
11116     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewOp,
11117                        DAG.getConstant(0, dl, MVT::i32));
11118   }
11119   case ISD::LOAD: {
11120     LoadSDNode *LD = cast<LoadSDNode>(Op0);
11121     SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
11122     SDValue NewLd = DAG.getMemIntrinsicNode(
11123         PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
11124         LD->getMemoryVT(), LD->getMemOperand());
11125     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewLd,
11126                        DAG.getConstant(0, dl, MVT::i32));
11127   }
11128   }
11129   llvm_unreachable("ERROR:Should return for all cases within swtich.");
11130 }
11131 
11132 /// LowerOperation - Provide custom lowering hooks for some operations.
11133 ///
11134 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
11135   switch (Op.getOpcode()) {
11136   default: llvm_unreachable("Wasn't expecting to be able to lower this!");
11137   case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
11138   case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
11139   case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
11140   case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
11141   case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
11142   case ISD::SETCC:              return LowerSETCC(Op, DAG);
11143   case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
11144   case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
11145 
11146   // Variable argument lowering.
11147   case ISD::VASTART:            return LowerVASTART(Op, DAG);
11148   case ISD::VAARG:              return LowerVAARG(Op, DAG);
11149   case ISD::VACOPY:             return LowerVACOPY(Op, DAG);
11150 
11151   case ISD::STACKRESTORE:       return LowerSTACKRESTORE(Op, DAG);
11152   case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
11153   case ISD::GET_DYNAMIC_AREA_OFFSET:
11154     return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
11155 
11156   // Exception handling lowering.
11157   case ISD::EH_DWARF_CFA:       return LowerEH_DWARF_CFA(Op, DAG);
11158   case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
11159   case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);
11160 
11161   case ISD::LOAD:               return LowerLOAD(Op, DAG);
11162   case ISD::STORE:              return LowerSTORE(Op, DAG);
11163   case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
11164   case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
11165   case ISD::FP_TO_UINT:
11166   case ISD::FP_TO_SINT:         return LowerFP_TO_INT(Op, DAG, SDLoc(Op));
11167   case ISD::UINT_TO_FP:
11168   case ISD::SINT_TO_FP:         return LowerINT_TO_FP(Op, DAG);
11169   case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);
11170 
11171   // Lower 64-bit shifts.
11172   case ISD::SHL_PARTS:          return LowerSHL_PARTS(Op, DAG);
11173   case ISD::SRL_PARTS:          return LowerSRL_PARTS(Op, DAG);
11174   case ISD::SRA_PARTS:          return LowerSRA_PARTS(Op, DAG);
11175 
11176   // Vector-related lowering.
11177   case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
11178   case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
11179   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
11180   case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
11181   case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
11182   case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
11183   case ISD::MUL:                return LowerMUL(Op, DAG);
11184   case ISD::ABS:                return LowerABS(Op, DAG);
11185   case ISD::FP_EXTEND:          return LowerFP_EXTEND(Op, DAG);
11186   case ISD::ROTL:               return LowerROTL(Op, DAG);
11187 
11188   // For counter-based loop handling.
11189   case ISD::INTRINSIC_W_CHAIN:  return SDValue();
11190 
11191   case ISD::BITCAST:            return LowerBITCAST(Op, DAG);
11192 
11193   // Frame & Return address.
11194   case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
11195   case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
11196 
11197   case ISD::INTRINSIC_VOID:
11198     return LowerINTRINSIC_VOID(Op, DAG);
11199   case ISD::BSWAP:
11200     return LowerBSWAP(Op, DAG);
11201   case ISD::ATOMIC_CMP_SWAP:
11202     return LowerATOMIC_CMP_SWAP(Op, DAG);
11203   }
11204 }
11205 
11206 void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
11207                                            SmallVectorImpl<SDValue>&Results,
11208                                            SelectionDAG &DAG) const {
11209   SDLoc dl(N);
11210   switch (N->getOpcode()) {
11211   default:
11212     llvm_unreachable("Do not know how to custom type legalize this operation!");
11213   case ISD::READCYCLECOUNTER: {
11214     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
11215     SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0));
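    // READ_TIME_BASE produces the time base as two i32 halves plus a chain;
    // BUILD_PAIR reassembles them into the i64 result the caller expects.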
11216 
11217     Results.push_back(
11218         DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, RTB, RTB.getValue(1)));
11219     Results.push_back(RTB.getValue(2));
11220     break;
11221   }
11222   case ISD::INTRINSIC_W_CHAIN: {
11223     if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
11224         Intrinsic::loop_decrement)
11225       break;
11226 
11227     assert(N->getValueType(0) == MVT::i1 &&
11228            "Unexpected result type for CTR decrement intrinsic");
11229     EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
11230                                  N->getValueType(0));
11231     SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
11232     SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
11233                                  N->getOperand(1));
11234 
11235     Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt));
11236     Results.push_back(NewInt.getValue(1));
11237     break;
11238   }
11239   case ISD::VAARG: {
11240     if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
11241       return;
11242 
11243     EVT VT = N->getValueType(0);
11244 
11245     if (VT == MVT::i64) {
11246       SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);
11247 
11248       Results.push_back(NewNode);
11249       Results.push_back(NewNode.getValue(1));
11250     }
11251     return;
11252   }
11253   case ISD::FP_TO_SINT:
11254   case ISD::FP_TO_UINT:
11255     // LowerFP_TO_INT() can only handle f32 and f64.
11256     if (N->getOperand(0).getValueType() == MVT::ppcf128)
11257       return;
11258     Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
11259     return;
11260   case ISD::TRUNCATE: {
11261     EVT TrgVT = N->getValueType(0);
11262     EVT OpVT = N->getOperand(0).getValueType();
11263     if (TrgVT.isVector() &&
11264         isOperationCustom(N->getOpcode(), TrgVT) &&
11265         OpVT.getSizeInBits() <= 128 &&
11266         isPowerOf2_32(OpVT.getVectorElementType().getSizeInBits()))
11267       Results.push_back(LowerTRUNCATEVector(SDValue(N, 0), DAG));
11268     return;
11269   }
11270   case ISD::BITCAST:
11271     // Don't handle bitcast here.
11272     return;
11273   case ISD::FP_EXTEND:
11274     SDValue Lowered = LowerFP_EXTEND(SDValue(N, 0), DAG);
11275     if (Lowered)
11276       Results.push_back(Lowered);
11277     return;
11278   }
11279 }
11280 
11281 //===----------------------------------------------------------------------===//
11282 //  Other Lowering Code
11283 //===----------------------------------------------------------------------===//
11284 
11285 static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
11286   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
11287   Function *Func = Intrinsic::getDeclaration(M, Id);
11288   return Builder.CreateCall(Func, {});
11289 }
11290 
// The mappings for emitLeadingFence/emitTrailingFence are taken from
11292 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
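// For example, under these mappings a seq_cst atomic read-modify-write is
// bracketed as: sync; <larx/stcx. loop>; lwsync.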
11293 Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
11294                                                  Instruction *Inst,
11295                                                  AtomicOrdering Ord) const {
11296   if (Ord == AtomicOrdering::SequentiallyConsistent)
11297     return callIntrinsic(Builder, Intrinsic::ppc_sync);
11298   if (isReleaseOrStronger(Ord))
11299     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
11300   return nullptr;
11301 }
11302 
11303 Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
11304                                                   Instruction *Inst,
11305                                                   AtomicOrdering Ord) const {
11306   if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
11307     // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
11308     // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
11309     // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
11310     if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
11311       return Builder.CreateCall(
11312           Intrinsic::getDeclaration(
11313               Builder.GetInsertBlock()->getParent()->getParent(),
11314               Intrinsic::ppc_cfence, {Inst->getType()}),
11315           {Inst});
    // FIXME: Could use isync for RMW operations.
11317     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
11318   }
11319   return nullptr;
11320 }
11321 
11322 MachineBasicBlock *
11323 PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
11324                                     unsigned AtomicSize,
11325                                     unsigned BinOpcode,
11326                                     unsigned CmpOpcode,
11327                                     unsigned CmpPred) const {
11328   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
11329   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11330 
11331   auto LoadMnemonic = PPC::LDARX;
11332   auto StoreMnemonic = PPC::STDCX;
11333   switch (AtomicSize) {
11334   default:
11335     llvm_unreachable("Unexpected size of atomic entity");
11336   case 1:
11337     LoadMnemonic = PPC::LBARX;
11338     StoreMnemonic = PPC::STBCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "Partword atomics are required for sizes < 4");
11340     break;
11341   case 2:
11342     LoadMnemonic = PPC::LHARX;
11343     StoreMnemonic = PPC::STHCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "Partword atomics are required for sizes < 4");
11345     break;
11346   case 4:
11347     LoadMnemonic = PPC::LWARX;
11348     StoreMnemonic = PPC::STWCX;
11349     break;
11350   case 8:
11351     LoadMnemonic = PPC::LDARX;
11352     StoreMnemonic = PPC::STDCX;
11353     break;
11354   }
11355 
11356   const BasicBlock *LLVM_BB = BB->getBasicBlock();
11357   MachineFunction *F = BB->getParent();
11358   MachineFunction::iterator It = ++BB->getIterator();
11359 
11360   Register dest = MI.getOperand(0).getReg();
11361   Register ptrA = MI.getOperand(1).getReg();
11362   Register ptrB = MI.getOperand(2).getReg();
11363   Register incr = MI.getOperand(3).getReg();
11364   DebugLoc dl = MI.getDebugLoc();
11365 
11366   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
11367   MachineBasicBlock *loop2MBB =
11368     CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
11369   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11370   F->insert(It, loopMBB);
11371   if (CmpOpcode)
11372     F->insert(It, loop2MBB);
11373   F->insert(It, exitMBB);
11374   exitMBB->splice(exitMBB->begin(), BB,
11375                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
11376   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11377 
11378   MachineRegisterInfo &RegInfo = F->getRegInfo();
  Register TmpReg = (!BinOpcode) ? incr
                                 : RegInfo.createVirtualRegister(
                                       AtomicSize == 8 ? &PPC::G8RCRegClass
                                                       : &PPC::GPRCRegClass);
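  // For ATOMIC_SWAP (BinOpcode == 0) the incoming value is stored directly,
  // so TmpReg simply aliases incr; otherwise TmpReg receives the result of
  // the binary op computed in the loop.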
11382 
11383   //  thisMBB:
11384   //   ...
11385   //   fallthrough --> loopMBB
11386   BB->addSuccessor(loopMBB);
11387 
11388   //  loopMBB:
11389   //   l[wd]arx dest, ptr
11390   //   add r0, dest, incr
11391   //   st[wd]cx. r0, ptr
11392   //   bne- loopMBB
11393   //   fallthrough --> exitMBB
11394 
11395   // For max/min...
11396   //  loopMBB:
11397   //   l[wd]arx dest, ptr
11398   //   cmpl?[wd] incr, dest
11399   //   bgt exitMBB
11400   //  loop2MBB:
11401   //   st[wd]cx. dest, ptr
11402   //   bne- loopMBB
11403   //   fallthrough --> exitMBB
11404 
11405   BB = loopMBB;
11406   BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
11407     .addReg(ptrA).addReg(ptrB);
11408   if (BinOpcode)
11409     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
11410   if (CmpOpcode) {
11411     // Signed comparisons of byte or halfword values must be sign-extended.
11412     if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
11413       Register ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
11414       BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
11415               ExtReg).addReg(dest);
11416       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
11417         .addReg(incr).addReg(ExtReg);
11418     } else
11419       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
11420         .addReg(incr).addReg(dest);
11421 
11422     BuildMI(BB, dl, TII->get(PPC::BCC))
11423       .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
11424     BB->addSuccessor(loop2MBB);
11425     BB->addSuccessor(exitMBB);
11426     BB = loop2MBB;
11427   }
11428   BuildMI(BB, dl, TII->get(StoreMnemonic))
11429     .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
11430   BuildMI(BB, dl, TII->get(PPC::BCC))
11431     .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
11432   BB->addSuccessor(loopMBB);
11433   BB->addSuccessor(exitMBB);
11434 
11435   //  exitMBB:
11436   //   ...
11437   BB = exitMBB;
11438   return BB;
11439 }
11440 
11441 MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary(
11442     MachineInstr &MI, MachineBasicBlock *BB,
    bool is8bit, // true for 8-bit, false for 16-bit
11444     unsigned BinOpcode, unsigned CmpOpcode, unsigned CmpPred) const {
  // If we support partword atomic mnemonics, just use them.
11446   if (Subtarget.hasPartwordAtomics())
11447     return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, CmpOpcode,
11448                             CmpPred);
11449 
11450   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
11451   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  // In 64-bit mode we have to use 64-bit registers for addresses, even though
  // lwarx/stwcx. only operate on 32-bit values.  With the 32-bit atomics we
  // can use address registers without caring whether they're 32- or 64-bit,
  // but here we're doing actual arithmetic on the addresses.
11456   bool is64bit = Subtarget.isPPC64();
11457   bool isLittleEndian = Subtarget.isLittleEndian();
11458   unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
11459 
11460   const BasicBlock *LLVM_BB = BB->getBasicBlock();
11461   MachineFunction *F = BB->getParent();
11462   MachineFunction::iterator It = ++BB->getIterator();
11463 
11464   Register dest = MI.getOperand(0).getReg();
11465   Register ptrA = MI.getOperand(1).getReg();
11466   Register ptrB = MI.getOperand(2).getReg();
11467   Register incr = MI.getOperand(3).getReg();
11468   DebugLoc dl = MI.getDebugLoc();
11469 
11470   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
11471   MachineBasicBlock *loop2MBB =
11472       CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
11473   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11474   F->insert(It, loopMBB);
11475   if (CmpOpcode)
11476     F->insert(It, loop2MBB);
11477   F->insert(It, exitMBB);
11478   exitMBB->splice(exitMBB->begin(), BB,
11479                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
11480   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11481 
11482   MachineRegisterInfo &RegInfo = F->getRegInfo();
11483   const TargetRegisterClass *RC =
11484       is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11485   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
11486 
11487   Register PtrReg = RegInfo.createVirtualRegister(RC);
11488   Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
11489   Register ShiftReg =
11490       isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
11491   Register Incr2Reg = RegInfo.createVirtualRegister(GPRC);
11492   Register MaskReg = RegInfo.createVirtualRegister(GPRC);
11493   Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
11494   Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
11495   Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
11496   Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
11497   Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
11498   Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
11499   Register Ptr1Reg;
11500   Register TmpReg =
11501       (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC);
11502 
11503   //  thisMBB:
11504   //   ...
11505   //   fallthrough --> loopMBB
11506   BB->addSuccessor(loopMBB);
11507 
11508   // The 4-byte load must be aligned, while a char or short may be
11509   // anywhere in the word.  Hence all this nasty bookkeeping code.
11510   //   add ptr1, ptrA, ptrB [copy if ptrA==0]
11511   //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
11512   //   xori shift, shift1, 24 [16]
11513   //   rlwinm ptr, ptr1, 0, 0, 29
11514   //   slw incr2, incr, shift
11515   //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
11516   //   slw mask, mask2, shift
11517   //  loopMBB:
11518   //   lwarx tmpDest, ptr
11519   //   add tmp, tmpDest, incr2
11520   //   andc tmp2, tmpDest, mask
11521   //   and tmp3, tmp, mask
11522   //   or tmp4, tmp3, tmp2
11523   //   stwcx. tmp4, ptr
11524   //   bne- loopMBB
11525   //   fallthrough --> exitMBB
11526   //   srw dest, tmpDest, shift
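  // For example, a byte at ptr1 = 0x1003 gives shift1 = (ptr1 & 3) << 3 = 24;
  // on big endian shift = 24 ^ 24 = 0 (byte 3 is the least significant byte
  // of its word), while on little endian shift = 24 (byte 3 is the most
  // significant byte).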
11527   if (ptrA != ZeroReg) {
11528     Ptr1Reg = RegInfo.createVirtualRegister(RC);
11529     BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
11530         .addReg(ptrA)
11531         .addReg(ptrB);
11532   } else {
11533     Ptr1Reg = ptrB;
11534   }
  // We need to use the 32-bit subregister to avoid a register class mismatch
  // in 64-bit mode.
11537   BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
11538       .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
11539       .addImm(3)
11540       .addImm(27)
11541       .addImm(is8bit ? 28 : 27);
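  // Shift1Reg now holds (ptr1 & 3) << 3 for bytes, or (ptr1 & 2) << 3 for
  // halfwords: the little-endian bit offset of the value within its word.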
11542   if (!isLittleEndian)
11543     BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
11544         .addReg(Shift1Reg)
11545         .addImm(is8bit ? 24 : 16);
11546   if (is64bit)
11547     BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
11548         .addReg(Ptr1Reg)
11549         .addImm(0)
11550         .addImm(61);
11551   else
11552     BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
11553         .addReg(Ptr1Reg)
11554         .addImm(0)
11555         .addImm(0)
11556         .addImm(29);
11557   BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg).addReg(incr).addReg(ShiftReg);
11558   if (is8bit)
11559     BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
11560   else {
11561     BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
11562     BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
11563         .addReg(Mask3Reg)
11564         .addImm(65535);
11565   }
11566   BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
11567       .addReg(Mask2Reg)
11568       .addReg(ShiftReg);
11569 
11570   BB = loopMBB;
11571   BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
11572       .addReg(ZeroReg)
11573       .addReg(PtrReg);
11574   if (BinOpcode)
11575     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
11576         .addReg(Incr2Reg)
11577         .addReg(TmpDestReg);
11578   BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
11579       .addReg(TmpDestReg)
11580       .addReg(MaskReg);
11581   BuildMI(BB, dl, TII->get(PPC::AND), Tmp3Reg).addReg(TmpReg).addReg(MaskReg);
11582   if (CmpOpcode) {
11583     // For unsigned comparisons, we can directly compare the shifted values.
    // For signed comparisons, we shift and sign-extend.
11585     Register SReg = RegInfo.createVirtualRegister(GPRC);
11586     BuildMI(BB, dl, TII->get(PPC::AND), SReg)
11587         .addReg(TmpDestReg)
11588         .addReg(MaskReg);
11589     unsigned ValueReg = SReg;
11590     unsigned CmpReg = Incr2Reg;
11591     if (CmpOpcode == PPC::CMPW) {
11592       ValueReg = RegInfo.createVirtualRegister(GPRC);
11593       BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
11594           .addReg(SReg)
11595           .addReg(ShiftReg);
11596       Register ValueSReg = RegInfo.createVirtualRegister(GPRC);
11597       BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
11598           .addReg(ValueReg);
11599       ValueReg = ValueSReg;
11600       CmpReg = incr;
11601     }
11602     BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
11603         .addReg(CmpReg)
11604         .addReg(ValueReg);
11605     BuildMI(BB, dl, TII->get(PPC::BCC))
11606         .addImm(CmpPred)
11607         .addReg(PPC::CR0)
11608         .addMBB(exitMBB);
11609     BB->addSuccessor(loop2MBB);
11610     BB->addSuccessor(exitMBB);
11611     BB = loop2MBB;
11612   }
11613   BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg).addReg(Tmp3Reg).addReg(Tmp2Reg);
11614   BuildMI(BB, dl, TII->get(PPC::STWCX))
11615       .addReg(Tmp4Reg)
11616       .addReg(ZeroReg)
11617       .addReg(PtrReg);
11618   BuildMI(BB, dl, TII->get(PPC::BCC))
11619       .addImm(PPC::PRED_NE)
11620       .addReg(PPC::CR0)
11621       .addMBB(loopMBB);
11622   BB->addSuccessor(loopMBB);
11623   BB->addSuccessor(exitMBB);
11624 
11625   //  exitMBB:
11626   //   ...
11627   BB = exitMBB;
11628   BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
11629       .addReg(TmpDestReg)
11630       .addReg(ShiftReg);
11631   return BB;
11632 }
11633 
11634 llvm::MachineBasicBlock *
11635 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
11636                                     MachineBasicBlock *MBB) const {
11637   DebugLoc DL = MI.getDebugLoc();
11638   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11639   const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
11640 
11641   MachineFunction *MF = MBB->getParent();
11642   MachineRegisterInfo &MRI = MF->getRegInfo();
11643 
11644   const BasicBlock *BB = MBB->getBasicBlock();
11645   MachineFunction::iterator I = ++MBB->getIterator();
11646 
11647   Register DstReg = MI.getOperand(0).getReg();
11648   const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
11649   assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
11650   Register mainDstReg = MRI.createVirtualRegister(RC);
11651   Register restoreDstReg = MRI.createVirtualRegister(RC);
11652 
11653   MVT PVT = getPointerTy(MF->getDataLayout());
11654   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
11655          "Invalid Pointer Size!");
11656   // For v = setjmp(buf), we generate
11657   //
11658   // thisMBB:
11659   //  SjLjSetup mainMBB
11660   //  bl mainMBB
11661   //  v_restore = 1
11662   //  b sinkMBB
11663   //
11664   // mainMBB:
11665   //  buf[LabelOffset] = LR
11666   //  v_main = 0
11667   //
11668   // sinkMBB:
11669   //  v = phi(main, restore)
11670   //
11671 
11672   MachineBasicBlock *thisMBB = MBB;
11673   MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
11674   MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
11675   MF->insert(I, mainMBB);
11676   MF->insert(I, sinkMBB);
11677 
11678   MachineInstrBuilder MIB;
11679 
11680   // Transfer the remainder of BB and its successor edges to sinkMBB.
11681   sinkMBB->splice(sinkMBB->begin(), MBB,
11682                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
11683   sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
11684 
11685   // Note that the structure of the jmp_buf used here is not compatible
11686   // with that used by libc, and is not designed to be. Specifically, it
11687   // stores only those 'reserved' registers that LLVM does not otherwise
11688   // understand how to spill. Also, by convention, by the time this
11689   // intrinsic is called, Clang has already stored the frame address in the
11690   // first slot of the buffer and stack address in the third. Following the
11691   // X86 target code, we'll store the jump address in the second slot. We also
11692   // need to save the TOC pointer (R2) to handle jumps between shared
11693   // libraries, and that will be stored in the fourth slot. The thread
11694   // identifier (R13) is not affected.
11695 
11696   // thisMBB:
11697   const int64_t LabelOffset = 1 * PVT.getStoreSize();
11698   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
11699   const int64_t BPOffset    = 4 * PVT.getStoreSize();
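  // Buffer layout in pointer-sized slots: slot 0 holds the frame address and
  // slot 2 the stack pointer (both stored by Clang before this point), slot 1
  // the jump IP, slot 3 the TOC pointer (R2), and slot 4 the base pointer.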
11700 
  // Prepare the IP in a register.
11702   const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
11703   Register LabelReg = MRI.createVirtualRegister(PtrRC);
11704   Register BufReg = MI.getOperand(1).getReg();
11705 
11706   if (Subtarget.is64BitELFABI()) {
11707     setUsesTOCBasePtr(*MBB->getParent());
11708     MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
11709               .addReg(PPC::X2)
11710               .addImm(TOCOffset)
11711               .addReg(BufReg)
11712               .cloneMemRefs(MI);
11713   }
11714 
  // Naked functions never have a base pointer, and so we use r1. For all
  // other functions, this decision must be deferred until PEI.
11717   unsigned BaseReg;
11718   if (MF->getFunction().hasFnAttribute(Attribute::Naked))
11719     BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
11720   else
11721     BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;
11722 
11723   MIB = BuildMI(*thisMBB, MI, DL,
11724                 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
11725             .addReg(BaseReg)
11726             .addImm(BPOffset)
11727             .addReg(BufReg)
11728             .cloneMemRefs(MI);
11729 
11730   // Setup
11731   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
11732   MIB.addRegMask(TRI->getNoPreservedMask());
11733 
11734   BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);
11735 
11736   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
11737           .addMBB(mainMBB);
11738   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);
11739 
11740   thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
11741   thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());
11742 
11743   // mainMBB:
11744   //  mainDstReg = 0
11745   MIB =
11746       BuildMI(mainMBB, DL,
11747               TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
11748 
11749   // Store IP
11750   if (Subtarget.isPPC64()) {
11751     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
11752             .addReg(LabelReg)
11753             .addImm(LabelOffset)
11754             .addReg(BufReg);
11755   } else {
11756     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
11757             .addReg(LabelReg)
11758             .addImm(LabelOffset)
11759             .addReg(BufReg);
11760   }
11761   MIB.cloneMemRefs(MI);
11762 
11763   BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
11764   mainMBB->addSuccessor(sinkMBB);
11765 
11766   // sinkMBB:
11767   BuildMI(*sinkMBB, sinkMBB->begin(), DL,
11768           TII->get(PPC::PHI), DstReg)
11769     .addReg(mainDstReg).addMBB(mainMBB)
11770     .addReg(restoreDstReg).addMBB(thisMBB);
11771 
11772   MI.eraseFromParent();
11773   return sinkMBB;
11774 }
11775 
11776 MachineBasicBlock *
11777 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
11778                                      MachineBasicBlock *MBB) const {
11779   DebugLoc DL = MI.getDebugLoc();
11780   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11781 
11782   MachineFunction *MF = MBB->getParent();
11783   MachineRegisterInfo &MRI = MF->getRegInfo();
11784 
11785   MVT PVT = getPointerTy(MF->getDataLayout());
11786   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
11787          "Invalid Pointer Size!");
11788 
11789   const TargetRegisterClass *RC =
11790     (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11791   Register Tmp = MRI.createVirtualRegister(RC);
11792   // Since FP is only updated here but NOT referenced, it's treated as GPR.
11793   unsigned FP  = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
11794   unsigned SP  = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
11795   unsigned BP =
11796       (PVT == MVT::i64)
11797           ? PPC::X30
11798           : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
11799                                                               : PPC::R30);
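  // On 32-bit SVR4 with PIC, R30 is the PIC base register, so the base
  // pointer lives in R29 instead.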
11800 
11801   MachineInstrBuilder MIB;
11802 
11803   const int64_t LabelOffset = 1 * PVT.getStoreSize();
11804   const int64_t SPOffset    = 2 * PVT.getStoreSize();
11805   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
11806   const int64_t BPOffset    = 4 * PVT.getStoreSize();
11807 
11808   Register BufReg = MI.getOperand(0).getReg();
11809 
  // Reload FP (the jumped-to function may not have had a frame pointer, and
  // if so, its r31 will be restored as necessary).
11813   if (PVT == MVT::i64) {
11814     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
11815             .addImm(0)
11816             .addReg(BufReg);
11817   } else {
11818     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
11819             .addImm(0)
11820             .addReg(BufReg);
11821   }
11822   MIB.cloneMemRefs(MI);
11823 
11824   // Reload IP
11825   if (PVT == MVT::i64) {
11826     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
11827             .addImm(LabelOffset)
11828             .addReg(BufReg);
11829   } else {
11830     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
11831             .addImm(LabelOffset)
11832             .addReg(BufReg);
11833   }
11834   MIB.cloneMemRefs(MI);
11835 
11836   // Reload SP
11837   if (PVT == MVT::i64) {
11838     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
11839             .addImm(SPOffset)
11840             .addReg(BufReg);
11841   } else {
11842     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
11843             .addImm(SPOffset)
11844             .addReg(BufReg);
11845   }
11846   MIB.cloneMemRefs(MI);
11847 
11848   // Reload BP
11849   if (PVT == MVT::i64) {
11850     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
11851             .addImm(BPOffset)
11852             .addReg(BufReg);
11853   } else {
11854     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
11855             .addImm(BPOffset)
11856             .addReg(BufReg);
11857   }
11858   MIB.cloneMemRefs(MI);
11859 
11860   // Reload TOC
11861   if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
11862     setUsesTOCBasePtr(*MBB->getParent());
11863     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
11864               .addImm(TOCOffset)
11865               .addReg(BufReg)
11866               .cloneMemRefs(MI);
11867   }
11868 
11869   // Jump
11870   BuildMI(*MBB, MI, DL,
11871           TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
11872   BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));
11873 
11874   MI.eraseFromParent();
11875   return MBB;
11876 }
11877 
11878 bool PPCTargetLowering::hasInlineStackProbe(MachineFunction &MF) const {
11879   // If the function specifically requests inline stack probes, emit them.
11880   if (MF.getFunction().hasFnAttribute("probe-stack"))
11881     return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
11882            "inline-asm";
11883   return false;
11884 }
11885 
11886 unsigned PPCTargetLowering::getStackProbeSize(MachineFunction &MF) const {
11887   const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
11888   unsigned StackAlign = TFI->getStackAlignment();
11889   assert(StackAlign >= 1 && isPowerOf2_32(StackAlign) &&
11890          "Unexpected stack alignment");
11891   // The default stack probe size is 4096 if the function has no
11892   // stack-probe-size attribute.
11893   unsigned StackProbeSize = 4096;
11894   const Function &Fn = MF.getFunction();
11895   if (Fn.hasFnAttribute("stack-probe-size"))
11896     Fn.getFnAttribute("stack-probe-size")
11897         .getValueAsString()
11898         .getAsInteger(0, StackProbeSize);
11899   // Round down to the stack alignment.
11900   StackProbeSize &= ~(StackAlign - 1);
11901   return StackProbeSize ? StackProbeSize : StackAlign;
11902 }
11903 
// Lower dynamic stack allocation with probing. `emitProbedAlloca` is split
// into three phases. In the first phase, it uses the pseudo instruction
// PREPARE_PROBED_ALLOCA to get the future values of the actual FramePointer
// and FinalStackPtr. In the second phase, it generates a loop that probes
// blocks. Finally, it uses the pseudo instruction DYNAREAOFFSET to get the
// future value of MaxCallFrameSize so that it can compute the correct data
// area pointer.
11910 MachineBasicBlock *
11911 PPCTargetLowering::emitProbedAlloca(MachineInstr &MI,
11912                                     MachineBasicBlock *MBB) const {
11913   const bool isPPC64 = Subtarget.isPPC64();
11914   MachineFunction *MF = MBB->getParent();
11915   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11916   DebugLoc DL = MI.getDebugLoc();
11917   const unsigned ProbeSize = getStackProbeSize(*MF);
11918   const BasicBlock *ProbedBB = MBB->getBasicBlock();
11919   MachineRegisterInfo &MRI = MF->getRegInfo();
  // The CFG for stack probing looks like:
11921   //         +-----+
11922   //         | MBB |
11923   //         +--+--+
11924   //            |
11925   //       +----v----+
11926   //  +--->+ TestMBB +---+
11927   //  |    +----+----+   |
11928   //  |         |        |
11929   //  |   +-----v----+   |
11930   //  +---+ BlockMBB |   |
11931   //      +----------+   |
11932   //                     |
11933   //       +---------+   |
11934   //       | TailMBB +<--+
11935   //       +---------+
  // In MBB, calculate the previous frame pointer and the final stack pointer.
  // In TestMBB, test whether SP equals the final stack pointer; if so, jump
  // to TailMBB. In BlockMBB, update and probe SP with a single
  // store-with-update instruction, then jump back to TestMBB. TailMBB is
  // spliced via \p MI.
11940   MachineBasicBlock *TestMBB = MF->CreateMachineBasicBlock(ProbedBB);
11941   MachineBasicBlock *TailMBB = MF->CreateMachineBasicBlock(ProbedBB);
11942   MachineBasicBlock *BlockMBB = MF->CreateMachineBasicBlock(ProbedBB);
11943 
11944   MachineFunction::iterator MBBIter = ++MBB->getIterator();
11945   MF->insert(MBBIter, TestMBB);
11946   MF->insert(MBBIter, BlockMBB);
11947   MF->insert(MBBIter, TailMBB);
11948 
11949   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
11950   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
11951 
11952   Register DstReg = MI.getOperand(0).getReg();
11953   Register NegSizeReg = MI.getOperand(1).getReg();
11954   Register SPReg = isPPC64 ? PPC::X1 : PPC::R1;
11955   Register FinalStackPtr = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11956   Register FramePointer = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11957   Register ActualNegSizeReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11958 
  // Since the value of NegSizeReg might be realigned during prologue/epilogue
  // insertion, insert a PREPARE_PROBED_ALLOCA pseudo instruction to get the
  // actual FramePointer and NegSize.
11962   unsigned ProbeOpc;
11963   if (!MRI.hasOneNonDBGUse(NegSizeReg))
11964     ProbeOpc =
11965         isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_64 : PPC::PREPARE_PROBED_ALLOCA_32;
11966   else
    // By using PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG, ActualNegSizeReg and
    // NegSizeReg will be allocated to the same physical register, avoiding a
    // redundant copy when the only use of NegSizeReg is the current MI,
    // which PREPARE_PROBED_ALLOCA will replace.
11971     ProbeOpc = isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_64
11972                        : PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_32;
11973   BuildMI(*MBB, {MI}, DL, TII->get(ProbeOpc), FramePointer)
11974       .addDef(ActualNegSizeReg)
11975       .addReg(NegSizeReg)
11976       .add(MI.getOperand(2))
11977       .add(MI.getOperand(3));
11978 
  // Calculate the final stack pointer, which equals SP + ActualNegSize.
11980   BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4),
11981           FinalStackPtr)
11982       .addReg(SPReg)
11983       .addReg(ActualNegSizeReg);
11984 
11985   // Materialize a scratch register for update.
11986   int64_t NegProbeSize = -(int64_t)ProbeSize;
11987   assert(isInt<32>(NegProbeSize) && "Unhandled probe size!");
11988   Register ScratchReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11989   if (!isInt<16>(NegProbeSize)) {
11990     Register TempReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11991     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LIS8 : PPC::LIS), TempReg)
11992         .addImm(NegProbeSize >> 16);
11993     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ORI8 : PPC::ORI),
11994             ScratchReg)
11995         .addReg(TempReg)
11996         .addImm(NegProbeSize & 0xFFFF);
11997   } else
11998     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LI8 : PPC::LI), ScratchReg)
11999         .addImm(NegProbeSize);
12000 
12001   {
    // Probe the leading residual part.
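    // NegMod = ActualNegSize - (ActualNegSize / NegProbeSize) * NegProbeSize,
    // i.e. the (non-positive) remainder of the allocation modulo ProbeSize;
    // the store-with-update below probes it and bumps SP by that amount.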
12003     Register Div = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
12004     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::DIVD : PPC::DIVW), Div)
12005         .addReg(ActualNegSizeReg)
12006         .addReg(ScratchReg);
12007     Register Mul = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
12008     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::MULLD : PPC::MULLW), Mul)
12009         .addReg(Div)
12010         .addReg(ScratchReg);
12011     Register NegMod = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
12012     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::SUBF8 : PPC::SUBF), NegMod)
12013         .addReg(Mul)
12014         .addReg(ActualNegSizeReg);
12015     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
12016         .addReg(FramePointer)
12017         .addReg(SPReg)
12018         .addReg(NegMod);
12019   }
12020 
12021   {
    // The remaining part should be a multiple of ProbeSize.
12023     Register CmpResult = MRI.createVirtualRegister(&PPC::CRRCRegClass);
12024     BuildMI(TestMBB, DL, TII->get(isPPC64 ? PPC::CMPD : PPC::CMPW), CmpResult)
12025         .addReg(SPReg)
12026         .addReg(FinalStackPtr);
12027     BuildMI(TestMBB, DL, TII->get(PPC::BCC))
12028         .addImm(PPC::PRED_EQ)
12029         .addReg(CmpResult)
12030         .addMBB(TailMBB);
12031     TestMBB->addSuccessor(BlockMBB);
12032     TestMBB->addSuccessor(TailMBB);
12033   }
12034 
12035   {
12036     // Touch the block.
12037     // |P...|P...|P...
12038     BuildMI(BlockMBB, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
12039         .addReg(FramePointer)
12040         .addReg(SPReg)
12041         .addReg(ScratchReg);
12042     BuildMI(BlockMBB, DL, TII->get(PPC::B)).addMBB(TestMBB);
12043     BlockMBB->addSuccessor(TestMBB);
12044   }
12045 
  // The calculation of MaxCallFrameSize is deferred to prologue/epilogue
  // insertion; use the DYNAREAOFFSET pseudo instruction to get its future
  // result.
12048   Register MaxCallFrameSizeReg =
12049       MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
12050   BuildMI(TailMBB, DL,
12051           TII->get(isPPC64 ? PPC::DYNAREAOFFSET8 : PPC::DYNAREAOFFSET),
12052           MaxCallFrameSizeReg)
12053       .add(MI.getOperand(2))
12054       .add(MI.getOperand(3));
12055   BuildMI(TailMBB, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4), DstReg)
12056       .addReg(SPReg)
12057       .addReg(MaxCallFrameSizeReg);
12058 
12059   // Splice instructions after MI to TailMBB.
12060   TailMBB->splice(TailMBB->end(), MBB,
12061                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
12062   TailMBB->transferSuccessorsAndUpdatePHIs(MBB);
12063   MBB->addSuccessor(TestMBB);
12064 
12065   // Delete the pseudo instruction.
12066   MI.eraseFromParent();
12067 
12068   ++NumDynamicAllocaProbed;
12069   return TailMBB;
12070 }
12071 
12072 MachineBasicBlock *
12073 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
12074                                                MachineBasicBlock *BB) const {
12075   if (MI.getOpcode() == TargetOpcode::STACKMAP ||
12076       MI.getOpcode() == TargetOpcode::PATCHPOINT) {
12077     if (Subtarget.is64BitELFABI() &&
12078         MI.getOpcode() == TargetOpcode::PATCHPOINT &&
12079         !Subtarget.isUsingPCRelativeCalls()) {
12080       // Call lowering should have added an r2 operand to indicate a dependence
      // on the TOC base pointer value. It can't, however, because there is no
12082       // way to mark the dependence as implicit there, and so the stackmap code
12083       // will confuse it with a regular operand. Instead, add the dependence
12084       // here.
12085       MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
12086     }
12087 
12088     return emitPatchPoint(MI, BB);
12089   }
12090 
12091   if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
12092       MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
12093     return emitEHSjLjSetJmp(MI, BB);
12094   } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
12095              MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
12096     return emitEHSjLjLongJmp(MI, BB);
12097   }
12098 
12099   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
12100 
12101   // To "insert" these instructions we actually have to insert their
12102   // control-flow patterns.
12103   const BasicBlock *LLVM_BB = BB->getBasicBlock();
12104   MachineFunction::iterator It = ++BB->getIterator();
12105 
12106   MachineFunction *F = BB->getParent();
12107 
12108   if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
12109       MI.getOpcode() == PPC::SELECT_CC_I8 || MI.getOpcode() == PPC::SELECT_I4 ||
12110       MI.getOpcode() == PPC::SELECT_I8) {
12111     SmallVector<MachineOperand, 2> Cond;
12112     if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
12113         MI.getOpcode() == PPC::SELECT_CC_I8)
12114       Cond.push_back(MI.getOperand(4));
12115     else
12116       Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
12117     Cond.push_back(MI.getOperand(1));
12118 
12119     DebugLoc dl = MI.getDebugLoc();
12120     TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
12121                       MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
12122   } else if (MI.getOpcode() == PPC::SELECT_CC_F4 ||
12123              MI.getOpcode() == PPC::SELECT_CC_F8 ||
12124              MI.getOpcode() == PPC::SELECT_CC_F16 ||
12125              MI.getOpcode() == PPC::SELECT_CC_QFRC ||
12126              MI.getOpcode() == PPC::SELECT_CC_QSRC ||
12127              MI.getOpcode() == PPC::SELECT_CC_QBRC ||
12128              MI.getOpcode() == PPC::SELECT_CC_VRRC ||
12129              MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
12130              MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
12131              MI.getOpcode() == PPC::SELECT_CC_VSRC ||
12132              MI.getOpcode() == PPC::SELECT_CC_SPE4 ||
12133              MI.getOpcode() == PPC::SELECT_CC_SPE ||
12134              MI.getOpcode() == PPC::SELECT_F4 ||
12135              MI.getOpcode() == PPC::SELECT_F8 ||
12136              MI.getOpcode() == PPC::SELECT_F16 ||
12137              MI.getOpcode() == PPC::SELECT_QFRC ||
12138              MI.getOpcode() == PPC::SELECT_QSRC ||
12139              MI.getOpcode() == PPC::SELECT_QBRC ||
12140              MI.getOpcode() == PPC::SELECT_SPE ||
12141              MI.getOpcode() == PPC::SELECT_SPE4 ||
12142              MI.getOpcode() == PPC::SELECT_VRRC ||
12143              MI.getOpcode() == PPC::SELECT_VSFRC ||
12144              MI.getOpcode() == PPC::SELECT_VSSRC ||
12145              MI.getOpcode() == PPC::SELECT_VSRC) {
12146     // The incoming instruction knows the destination vreg to set, the
12147     // condition code register to branch on, the true/false values to
12148     // select between, and a branch opcode to use.
12149 
12150     //  thisMBB:
12151     //  ...
12152     //   TrueVal = ...
12153     //   cmpTY ccX, r1, r2
12154     //   bCC copy1MBB
12155     //   fallthrough --> copy0MBB
12156     MachineBasicBlock *thisMBB = BB;
12157     MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
12158     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
12159     DebugLoc dl = MI.getDebugLoc();
12160     F->insert(It, copy0MBB);
12161     F->insert(It, sinkMBB);
12162 
12163     // Transfer the remainder of BB and its successor edges to sinkMBB.
12164     sinkMBB->splice(sinkMBB->begin(), BB,
12165                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
12166     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
12167 
12168     // Next, add the true and fallthrough blocks as its successors.
12169     BB->addSuccessor(copy0MBB);
12170     BB->addSuccessor(sinkMBB);
12171 
12172     if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 ||
12173         MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 ||
12174         MI.getOpcode() == PPC::SELECT_F16 ||
12175         MI.getOpcode() == PPC::SELECT_SPE4 ||
12176         MI.getOpcode() == PPC::SELECT_SPE ||
12177         MI.getOpcode() == PPC::SELECT_QFRC ||
12178         MI.getOpcode() == PPC::SELECT_QSRC ||
12179         MI.getOpcode() == PPC::SELECT_QBRC ||
12180         MI.getOpcode() == PPC::SELECT_VRRC ||
12181         MI.getOpcode() == PPC::SELECT_VSFRC ||
12182         MI.getOpcode() == PPC::SELECT_VSSRC ||
12183         MI.getOpcode() == PPC::SELECT_VSRC) {
12184       BuildMI(BB, dl, TII->get(PPC::BC))
12185           .addReg(MI.getOperand(1).getReg())
12186           .addMBB(sinkMBB);
12187     } else {
12188       unsigned SelectPred = MI.getOperand(4).getImm();
12189       BuildMI(BB, dl, TII->get(PPC::BCC))
12190           .addImm(SelectPred)
12191           .addReg(MI.getOperand(1).getReg())
12192           .addMBB(sinkMBB);
12193     }
12194 
12195     //  copy0MBB:
12196     //   %FalseValue = ...
12197     //   # fallthrough to sinkMBB
12198     BB = copy0MBB;
12199 
12200     // Update machine-CFG edges
12201     BB->addSuccessor(sinkMBB);
12202 
12203     //  sinkMBB:
12204     //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
12205     //  ...
12206     BB = sinkMBB;
12207     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg())
12208         .addReg(MI.getOperand(3).getReg())
12209         .addMBB(copy0MBB)
12210         .addReg(MI.getOperand(2).getReg())
12211         .addMBB(thisMBB);
12212   } else if (MI.getOpcode() == PPC::ReadTB) {
12213     // To read the 64-bit time-base register on a 32-bit target, we read the
12214     // two halves. Should the counter have wrapped while it was being read, we
12215     // need to try again.
12216     // ...
12217     // readLoop:
12218     // mfspr Rx,TBU # load from TBU
12219     // mfspr Ry,TB  # load from TB
12220     // mfspr Rz,TBU # load from TBU
12221     // cmpw crX,Rx,Rz # check if 'old'='new'
12222     // bne readLoop   # branch if they're not equal
12223     // ...
12224 
12225     MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
12226     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
12227     DebugLoc dl = MI.getDebugLoc();
12228     F->insert(It, readMBB);
12229     F->insert(It, sinkMBB);
12230 
12231     // Transfer the remainder of BB and its successor edges to sinkMBB.
12232     sinkMBB->splice(sinkMBB->begin(), BB,
12233                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
12234     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
12235 
12236     BB->addSuccessor(readMBB);
12237     BB = readMBB;
12238 
12239     MachineRegisterInfo &RegInfo = F->getRegInfo();
12240     Register ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
12241     Register LoReg = MI.getOperand(0).getReg();
12242     Register HiReg = MI.getOperand(1).getReg();
12243 
12244     BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
12245     BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
12246     BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269);
12247 
12248     Register CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
12249 
12250     BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
12251         .addReg(HiReg)
12252         .addReg(ReadAgainReg);
12253     BuildMI(BB, dl, TII->get(PPC::BCC))
12254         .addImm(PPC::PRED_NE)
12255         .addReg(CmpReg)
12256         .addMBB(readMBB);
12257 
12258     BB->addSuccessor(readMBB);
12259     BB->addSuccessor(sinkMBB);
12260   } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
12261     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
12262   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
12263     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
12264   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
12265     BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4);
12266   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
12267     BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8);
12268 
12269   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
12270     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
12271   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
12272     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
12273   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
12274     BB = EmitAtomicBinary(MI, BB, 4, PPC::AND);
12275   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
12276     BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8);
12277 
12278   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
12279     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
12280   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
12281     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
12282   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
12283     BB = EmitAtomicBinary(MI, BB, 4, PPC::OR);
12284   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
12285     BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8);
12286 
12287   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
12288     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
12289   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
12290     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
12291   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
12292     BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR);
12293   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
12294     BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8);
12295 
12296   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
12297     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
12298   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
12299     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
12300   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
12301     BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND);
12302   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
12303     BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8);
12304 
12305   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
12306     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
12307   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
12308     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
12309   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
12310     BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
12311   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
12312     BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);
12313 
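  // The min/max/umin/umax flavors below pass BinOpcode == 0: the compare and
  // branch emitted by EmitAtomicBinary keep the old value when the predicate
  // holds (e.g. PRED_GE exits the MIN loop once incr >= dest).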
12314   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
12315     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE);
12316   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
12317     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE);
12318   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
12319     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE);
12320   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
12321     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE);
12322 
12323   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
12324     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE);
12325   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
12326     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE);
12327   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
12328     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE);
12329   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
12330     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE);
12331 
12332   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
12333     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE);
12334   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
12335     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE);
12336   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
12337     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE);
12338   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
12339     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE);
12340 
12341   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
12342     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE);
12343   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
12344     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE);
12345   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
12346     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE);
12347   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
12348     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE);
12349 
12350   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
12351     BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
12352   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
12353     BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
12354   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
12355     BB = EmitAtomicBinary(MI, BB, 4, 0);
12356   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
12357     BB = EmitAtomicBinary(MI, BB, 8, 0);
12358   else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
12359            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
12360            (Subtarget.hasPartwordAtomics() &&
12361             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
12362            (Subtarget.hasPartwordAtomics() &&
12363             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
12364     bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
12365 
12366     auto LoadMnemonic = PPC::LDARX;
12367     auto StoreMnemonic = PPC::STDCX;
12368     switch (MI.getOpcode()) {
12369     default:
12370       llvm_unreachable("Compare and swap of unknown size");
12371     case PPC::ATOMIC_CMP_SWAP_I8:
12372       LoadMnemonic = PPC::LBARX;
12373       StoreMnemonic = PPC::STBCX;
      assert(Subtarget.hasPartwordAtomics() &&
             "No support for partword atomics.");
12375       break;
12376     case PPC::ATOMIC_CMP_SWAP_I16:
12377       LoadMnemonic = PPC::LHARX;
12378       StoreMnemonic = PPC::STHCX;
      assert(Subtarget.hasPartwordAtomics() &&
             "No support for partword atomics.");
12380       break;
12381     case PPC::ATOMIC_CMP_SWAP_I32:
12382       LoadMnemonic = PPC::LWARX;
12383       StoreMnemonic = PPC::STWCX;
12384       break;
12385     case PPC::ATOMIC_CMP_SWAP_I64:
12386       LoadMnemonic = PPC::LDARX;
12387       StoreMnemonic = PPC::STDCX;
12388       break;
12389     }
12390     Register dest = MI.getOperand(0).getReg();
12391     Register ptrA = MI.getOperand(1).getReg();
12392     Register ptrB = MI.getOperand(2).getReg();
12393     Register oldval = MI.getOperand(3).getReg();
12394     Register newval = MI.getOperand(4).getReg();
12395     DebugLoc dl = MI.getDebugLoc();
12396 
12397     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
12398     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
12399     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
12400     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
12401     F->insert(It, loop1MBB);
12402     F->insert(It, loop2MBB);
12403     F->insert(It, midMBB);
12404     F->insert(It, exitMBB);
12405     exitMBB->splice(exitMBB->begin(), BB,
12406                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
12407     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
12408 
12409     //  thisMBB:
12410     //   ...
12411     //   fallthrough --> loopMBB
12412     BB->addSuccessor(loop1MBB);
12413 
12414     // loop1MBB:
12415     //   l[bhwd]arx dest, ptr
12416     //   cmp[wd] dest, oldval
12417     //   bne- midMBB
12418     // loop2MBB:
12419     //   st[bhwd]cx. newval, ptr
    //   bne- loop1MBB
    //   b exitMBB
    // midMBB:
    //   st[bhwd]cx. dest, ptr
    // exitMBB:
12425     BB = loop1MBB;
12426     BuildMI(BB, dl, TII->get(LoadMnemonic), dest).addReg(ptrA).addReg(ptrB);
12427     BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
12428         .addReg(oldval)
12429         .addReg(dest);
12430     BuildMI(BB, dl, TII->get(PPC::BCC))
12431         .addImm(PPC::PRED_NE)
12432         .addReg(PPC::CR0)
12433         .addMBB(midMBB);
12434     BB->addSuccessor(loop2MBB);
12435     BB->addSuccessor(midMBB);
12436 
12437     BB = loop2MBB;
12438     BuildMI(BB, dl, TII->get(StoreMnemonic))
12439         .addReg(newval)
12440         .addReg(ptrA)
12441         .addReg(ptrB);
12442     BuildMI(BB, dl, TII->get(PPC::BCC))
12443         .addImm(PPC::PRED_NE)
12444         .addReg(PPC::CR0)
12445         .addMBB(loop1MBB);
12446     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
12447     BB->addSuccessor(loop1MBB);
12448     BB->addSuccessor(exitMBB);
12449 
12450     BB = midMBB;
12451     BuildMI(BB, dl, TII->get(StoreMnemonic))
12452         .addReg(dest)
12453         .addReg(ptrA)
12454         .addReg(ptrB);
12455     BB->addSuccessor(exitMBB);
12456 
12457     //  exitMBB:
12458     //   ...
12459     BB = exitMBB;
12460   } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
12461              MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
12462     // We must use 64-bit registers for addresses when targeting 64-bit,
12463     // since we're actually doing arithmetic on them.  Other registers
12464     // can be 32-bit.
12465     bool is64bit = Subtarget.isPPC64();
12466     bool isLittleEndian = Subtarget.isLittleEndian();
12467     bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
12468 
12469     Register dest = MI.getOperand(0).getReg();
12470     Register ptrA = MI.getOperand(1).getReg();
12471     Register ptrB = MI.getOperand(2).getReg();
12472     Register oldval = MI.getOperand(3).getReg();
12473     Register newval = MI.getOperand(4).getReg();
12474     DebugLoc dl = MI.getDebugLoc();
12475 
12476     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
12477     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
12478     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
12479     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
12480     F->insert(It, loop1MBB);
12481     F->insert(It, loop2MBB);
12482     F->insert(It, midMBB);
12483     F->insert(It, exitMBB);
12484     exitMBB->splice(exitMBB->begin(), BB,
12485                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
12486     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
12487 
12488     MachineRegisterInfo &RegInfo = F->getRegInfo();
12489     const TargetRegisterClass *RC =
12490         is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
12491     const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
12492 
12493     Register PtrReg = RegInfo.createVirtualRegister(RC);
12494     Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
12495     Register ShiftReg =
12496         isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
12497     Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
12498     Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
12499     Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
12500     Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
12501     Register MaskReg = RegInfo.createVirtualRegister(GPRC);
12502     Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
12503     Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
12504     Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
12505     Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
12506     Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
12507     Register Ptr1Reg;
12508     Register TmpReg = RegInfo.createVirtualRegister(GPRC);
12509     Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
12510     //  thisMBB:
12511     //   ...
    //   fallthrough --> loop1MBB
12513     BB->addSuccessor(loop1MBB);
12514 
12515     // The 4-byte load must be aligned, while a char or short may be
12516     // anywhere in the word.  Hence all this nasty bookkeeping code.
12517     //   add ptr1, ptrA, ptrB [copy if ptrA==0]
12518     //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
12519     //   xori shift, shift1, 24 [16]
12520     //   rlwinm ptr, ptr1, 0, 0, 29
12521     //   slw newval2, newval, shift
    //   slw oldval2, oldval, shift
12523     //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
12524     //   slw mask, mask2, shift
12525     //   and newval3, newval2, mask
12526     //   and oldval3, oldval2, mask
12527     // loop1MBB:
12528     //   lwarx tmpDest, ptr
12529     //   and tmp, tmpDest, mask
12530     //   cmpw tmp, oldval3
12531     //   bne- midMBB
12532     // loop2MBB:
12533     //   andc tmp2, tmpDest, mask
12534     //   or tmp4, tmp2, newval3
12535     //   stwcx. tmp4, ptr
12536     //   bne- loop1MBB
    //   b exitMBB
    // midMBB:
    //   stwcx. tmpDest, ptr
    // exitMBB:
12541     //   srw dest, tmpDest, shift
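    // For example, with an 8-bit compare-and-swap the rlwinm below computes
    // shift1 = (ptr1 & 3) * 8; on big-endian the xori by 24 turns that into
    // the lane shift 24 - 8 * (ptr1 & 3), while on little-endian shift1 is
    // already the correct lane shift and the xori is skipped.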
12542     if (ptrA != ZeroReg) {
12543       Ptr1Reg = RegInfo.createVirtualRegister(RC);
12544       BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
12545           .addReg(ptrA)
12546           .addReg(ptrB);
12547     } else {
12548       Ptr1Reg = ptrB;
12549     }
12550 
    // We need to use a 32-bit subregister here to avoid a register class
    // mismatch in 64-bit mode.
12553     BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
12554         .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
12555         .addImm(3)
12556         .addImm(27)
12557         .addImm(is8bit ? 28 : 27);
12558     if (!isLittleEndian)
12559       BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
12560           .addReg(Shift1Reg)
12561           .addImm(is8bit ? 24 : 16);
12562     if (is64bit)
12563       BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
12564           .addReg(Ptr1Reg)
12565           .addImm(0)
12566           .addImm(61);
12567     else
12568       BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
12569           .addReg(Ptr1Reg)
12570           .addImm(0)
12571           .addImm(0)
12572           .addImm(29);
12573     BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
12574         .addReg(newval)
12575         .addReg(ShiftReg);
12576     BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
12577         .addReg(oldval)
12578         .addReg(ShiftReg);
12579     if (is8bit)
12580       BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
12581     else {
12582       BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
12583       BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
12584           .addReg(Mask3Reg)
12585           .addImm(65535);
12586     }
12587     BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
12588         .addReg(Mask2Reg)
12589         .addReg(ShiftReg);
12590     BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
12591         .addReg(NewVal2Reg)
12592         .addReg(MaskReg);
12593     BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
12594         .addReg(OldVal2Reg)
12595         .addReg(MaskReg);
12596 
12597     BB = loop1MBB;
12598     BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
12599         .addReg(ZeroReg)
12600         .addReg(PtrReg);
12601     BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
12602         .addReg(TmpDestReg)
12603         .addReg(MaskReg);
12604     BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
12605         .addReg(TmpReg)
12606         .addReg(OldVal3Reg);
12607     BuildMI(BB, dl, TII->get(PPC::BCC))
12608         .addImm(PPC::PRED_NE)
12609         .addReg(PPC::CR0)
12610         .addMBB(midMBB);
12611     BB->addSuccessor(loop2MBB);
12612     BB->addSuccessor(midMBB);
12613 
12614     BB = loop2MBB;
12615     BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
12616         .addReg(TmpDestReg)
12617         .addReg(MaskReg);
12618     BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
12619         .addReg(Tmp2Reg)
12620         .addReg(NewVal3Reg);
12621     BuildMI(BB, dl, TII->get(PPC::STWCX))
12622         .addReg(Tmp4Reg)
12623         .addReg(ZeroReg)
12624         .addReg(PtrReg);
12625     BuildMI(BB, dl, TII->get(PPC::BCC))
12626         .addImm(PPC::PRED_NE)
12627         .addReg(PPC::CR0)
12628         .addMBB(loop1MBB);
12629     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
12630     BB->addSuccessor(loop1MBB);
12631     BB->addSuccessor(exitMBB);
12632 
12633     BB = midMBB;
12634     BuildMI(BB, dl, TII->get(PPC::STWCX))
12635         .addReg(TmpDestReg)
12636         .addReg(ZeroReg)
12637         .addReg(PtrReg);
12638     BB->addSuccessor(exitMBB);
12639 
12640     //  exitMBB:
12641     //   ...
12642     BB = exitMBB;
12643     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
12644         .addReg(TmpReg)
12645         .addReg(ShiftReg);
12646   } else if (MI.getOpcode() == PPC::FADDrtz) {
12647     // This pseudo performs an FADD with rounding mode temporarily forced
12648     // to round-to-zero.  We emit this via custom inserter since the FPSCR
12649     // is not modeled at the SelectionDAG level.
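    // Schematically, the emitted sequence is:
    //   mffs   MFFSReg          ; save FPSCR
    //   mtfsb1 31               ; FPSCR[63] := 1
    //   mtfsb0 30               ; FPSCR[62] := 0  (RN = 0b01, round-to-zero)
    //   fadd   Dest, Src1, Src2
    //   mtfsf  1, MFFSReg       ; restore the rounding-mode field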
12650     Register Dest = MI.getOperand(0).getReg();
12651     Register Src1 = MI.getOperand(1).getReg();
12652     Register Src2 = MI.getOperand(2).getReg();
12653     DebugLoc dl = MI.getDebugLoc();
12654 
12655     MachineRegisterInfo &RegInfo = F->getRegInfo();
12656     Register MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
12657 
12658     // Save FPSCR value.
12659     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);
12660 
12661     // Set rounding mode to round-to-zero.
12662     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31);
12663     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30);
12664 
12665     // Perform addition.
12666     BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2);
12667 
12668     // Restore FPSCR value.
12669     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
12670   } else if (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
12671              MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT ||
12672              MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
12673              MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) {
12674     unsigned Opcode = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
12675                        MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8)
12676                           ? PPC::ANDI8_rec
12677                           : PPC::ANDI_rec;
12678     bool IsEQ = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
12679                  MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8);
12680 
12681     MachineRegisterInfo &RegInfo = F->getRegInfo();
12682     Register Dest = RegInfo.createVirtualRegister(
12683         Opcode == PPC::ANDI_rec ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);
12684 
12685     DebugLoc Dl = MI.getDebugLoc();
12686     BuildMI(*BB, MI, Dl, TII->get(Opcode), Dest)
12687         .addReg(MI.getOperand(1).getReg())
12688         .addImm(1);
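    // The ANDI_rec above sets CR0 from (Op & 1): the EQ bit is 1 when the low
    // bit was clear and the GT bit is 1 when it was set, so copying the chosen
    // CR0 bit yields the tested condition.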
12689     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12690             MI.getOperand(0).getReg())
12691         .addReg(IsEQ ? PPC::CR0EQ : PPC::CR0GT);
12692   } else if (MI.getOpcode() == PPC::TCHECK_RET) {
12693     DebugLoc Dl = MI.getDebugLoc();
12694     MachineRegisterInfo &RegInfo = F->getRegInfo();
12695     Register CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
12696     BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
12697     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12698             MI.getOperand(0).getReg())
12699         .addReg(CRReg);
12700   } else if (MI.getOpcode() == PPC::TBEGIN_RET) {
12701     DebugLoc Dl = MI.getDebugLoc();
12702     unsigned Imm = MI.getOperand(1).getImm();
12703     BuildMI(*BB, MI, Dl, TII->get(PPC::TBEGIN)).addImm(Imm);
12704     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12705             MI.getOperand(0).getReg())
12706         .addReg(PPC::CR0EQ);
12707   } else if (MI.getOpcode() == PPC::SETRNDi) {
12708     DebugLoc dl = MI.getDebugLoc();
12709     Register OldFPSCRReg = MI.getOperand(0).getReg();
12710 
12711     // Save FPSCR value.
12712     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
12713 
    // The floating-point rounding mode is in bits 62:63 of FPSCR, and has
    // the following settings:
12716     //   00 Round to nearest
12717     //   01 Round to 0
12718     //   10 Round to +inf
12719     //   11 Round to -inf
12720 
    // When the operand is an immediate, use its two least significant bits to
    // set bits 62:63 of FPSCR.
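    // For example, SETRNDi 2 (round to +inf) sets FPSCR[62:63] = 0b10 by
    // emitting mtfsb0 31 (bit 63 := 0) and mtfsb1 30 (bit 62 := 1).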
12723     unsigned Mode = MI.getOperand(1).getImm();
12724     BuildMI(*BB, MI, dl, TII->get((Mode & 1) ? PPC::MTFSB1 : PPC::MTFSB0))
12725       .addImm(31);
12726 
12727     BuildMI(*BB, MI, dl, TII->get((Mode & 2) ? PPC::MTFSB1 : PPC::MTFSB0))
12728       .addImm(30);
12729   } else if (MI.getOpcode() == PPC::SETRND) {
12730     DebugLoc dl = MI.getDebugLoc();
12731 
12732     // Copy register from F8RCRegClass::SrcReg to G8RCRegClass::DestReg
12733     // or copy register from G8RCRegClass::SrcReg to F8RCRegClass::DestReg.
    // If the target doesn't have DirectMove, we have to go through the stack
    // to do the conversion, because the target doesn't have instructions like
    // mtvsrd or mfvsrd to do it directly.
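    // For example, an F8RC -> G8RC copy without DirectMove becomes a store
    // and a reload through a temporary stack slot:
    //   stfd SrcReg, slot
    //   ld   DestReg, slot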
12737     auto copyRegFromG8RCOrF8RC = [&] (unsigned DestReg, unsigned SrcReg) {
12738       if (Subtarget.hasDirectMove()) {
12739         BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), DestReg)
12740           .addReg(SrcReg);
12741       } else {
12742         // Use stack to do the register copy.
12743         unsigned StoreOp = PPC::STD, LoadOp = PPC::LFD;
12744         MachineRegisterInfo &RegInfo = F->getRegInfo();
12745         const TargetRegisterClass *RC = RegInfo.getRegClass(SrcReg);
12746         if (RC == &PPC::F8RCRegClass) {
          // Copy register from F8RCRegClass to G8RCRegClass.
12748           assert((RegInfo.getRegClass(DestReg) == &PPC::G8RCRegClass) &&
12749                  "Unsupported RegClass.");
12750 
12751           StoreOp = PPC::STFD;
12752           LoadOp = PPC::LD;
12753         } else {
          // Copy register from G8RCRegClass to F8RCRegClass.
12755           assert((RegInfo.getRegClass(SrcReg) == &PPC::G8RCRegClass) &&
12756                  (RegInfo.getRegClass(DestReg) == &PPC::F8RCRegClass) &&
12757                  "Unsupported RegClass.");
12758         }
12759 
12760         MachineFrameInfo &MFI = F->getFrameInfo();
12761         int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
12762 
12763         MachineMemOperand *MMOStore = F->getMachineMemOperand(
12764             MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
12765             MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx),
12766             MFI.getObjectAlign(FrameIdx));
12767 
12768         // Store the SrcReg into the stack.
12769         BuildMI(*BB, MI, dl, TII->get(StoreOp))
12770           .addReg(SrcReg)
12771           .addImm(0)
12772           .addFrameIndex(FrameIdx)
12773           .addMemOperand(MMOStore);
12774 
12775         MachineMemOperand *MMOLoad = F->getMachineMemOperand(
12776             MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
12777             MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
12778             MFI.getObjectAlign(FrameIdx));
12779 
        // Load DestReg from the stack slot where SrcReg was stored, which
        // completes the register class conversion from RegClass::SrcReg to
        // RegClass::DestReg.
12783         BuildMI(*BB, MI, dl, TII->get(LoadOp), DestReg)
12784           .addImm(0)
12785           .addFrameIndex(FrameIdx)
12786           .addMemOperand(MMOLoad);
12787       }
12788     };
12789 
12790     Register OldFPSCRReg = MI.getOperand(0).getReg();
12791 
12792     // Save FPSCR value.
12793     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
12794 
    // When the operand is a gprc register, use its two least significant bits
    // together with the mtfsf instruction to set bits 62:63 of FPSCR.
12797     //
12798     // copy OldFPSCRTmpReg, OldFPSCRReg
12799     // (INSERT_SUBREG ExtSrcReg, (IMPLICIT_DEF ImDefReg), SrcOp, 1)
12800     // rldimi NewFPSCRTmpReg, ExtSrcReg, OldFPSCRReg, 0, 62
12801     // copy NewFPSCRReg, NewFPSCRTmpReg
12802     // mtfsf 255, NewFPSCRReg
12803     MachineOperand SrcOp = MI.getOperand(1);
12804     MachineRegisterInfo &RegInfo = F->getRegInfo();
12805     Register OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12806 
12807     copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg);
12808 
12809     Register ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12810     Register ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12811 
    // The first operand of INSERT_SUBREG should be a register that has
    // subregisters. Since we only care about its register class, an
    // IMPLICIT_DEF register suffices.
12815     BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), ImDefReg);
12816     BuildMI(*BB, MI, dl, TII->get(PPC::INSERT_SUBREG), ExtSrcReg)
12817       .addReg(ImDefReg)
12818       .add(SrcOp)
12819       .addImm(1);
12820 
12821     Register NewFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12822     BuildMI(*BB, MI, dl, TII->get(PPC::RLDIMI), NewFPSCRTmpReg)
12823       .addReg(OldFPSCRTmpReg)
12824       .addReg(ExtSrcReg)
12825       .addImm(0)
12826       .addImm(62);
12827 
12828     Register NewFPSCRReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
12829     copyRegFromG8RCOrF8RC(NewFPSCRReg, NewFPSCRTmpReg);
12830 
    // The mask 255 means that bits 32:63 of NewFPSCRReg are placed into bits
    // 32:63 of FPSCR.
12833     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF))
12834       .addImm(255)
12835       .addReg(NewFPSCRReg)
12836       .addImm(0)
12837       .addImm(0);
12838   } else if (MI.getOpcode() == PPC::PROBED_ALLOCA_32 ||
12839              MI.getOpcode() == PPC::PROBED_ALLOCA_64) {
12840     return emitProbedAlloca(MI, BB);
12841   } else {
12842     llvm_unreachable("Unexpected instr type to insert");
12843   }
12844 
12845   MI.eraseFromParent(); // The pseudo instruction is gone now.
12846   return BB;
12847 }
12848 
12849 //===----------------------------------------------------------------------===//
12850 // Target Optimization Hooks
12851 //===----------------------------------------------------------------------===//
12852 
12853 static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {
12854   // For the estimates, convergence is quadratic, so we essentially double the
12855   // number of digits correct after every iteration. For both FRE and FRSQRTE,
12856   // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(),
  // this is 2^-14. IEEE float has 23 mantissa bits and double has 52.
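  // For example, with hasRecipPrec() one iteration takes 2^-14 to ~2^-28
  // (covering f32) and a second to ~2^-56 (covering f64); without it, three
  // iterations are needed for f32 (2^-5 -> 2^-40) and a fourth for f64.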
12858   int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
12859   if (VT.getScalarType() == MVT::f64)
12860     RefinementSteps++;
12861   return RefinementSteps;
12862 }
12863 
12864 SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
12865                                            int Enabled, int &RefinementSteps,
12866                                            bool &UseOneConstNR,
12867                                            bool Reciprocal) const {
12868   EVT VT = Operand.getValueType();
12869   if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
12870       (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
12871       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
12872       (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
12873       (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
12874       (VT == MVT::v4f64 && Subtarget.hasQPX())) {
12875     if (RefinementSteps == ReciprocalEstimate::Unspecified)
12876       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
12877 
12878     // The Newton-Raphson computation with a single constant does not provide
12879     // enough accuracy on some CPUs.
12880     UseOneConstNR = !Subtarget.needsTwoConstNR();
12881     return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
12882   }
12883   return SDValue();
12884 }
12885 
12886 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
12887                                             int Enabled,
12888                                             int &RefinementSteps) const {
12889   EVT VT = Operand.getValueType();
12890   if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
12891       (VT == MVT::f64 && Subtarget.hasFRE()) ||
12892       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
12893       (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
12894       (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
12895       (VT == MVT::v4f64 && Subtarget.hasQPX())) {
12896     if (RefinementSteps == ReciprocalEstimate::Unspecified)
12897       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
12898     return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
12899   }
12900   return SDValue();
12901 }
12902 
12903 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
12904   // Note: This functionality is used only when unsafe-fp-math is enabled, and
12905   // on cores with reciprocal estimates (which are used when unsafe-fp-math is
12906   // enabled for division), this functionality is redundant with the default
12907   // combiner logic (once the division -> reciprocal/multiply transformation
12908   // has taken place). As a result, this matters more for older cores than for
12909   // newer ones.
12910 
  // Combine multiple FDIVs with the same divisor into multiple FMULs by the
  // reciprocal if there are two or more FDIVs (for embedded cores with only
  // one FP pipeline) or three or more FDIVs (for generic OOO cores).
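  // For example, with the default threshold of three, x/c + y/c is left as
  // two divides, while x/c + y/c + z/c is rewritten by the combiner as
  // r = 1.0/c; x*r + y*r + z*r.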
12914   switch (Subtarget.getCPUDirective()) {
12915   default:
12916     return 3;
12917   case PPC::DIR_440:
12918   case PPC::DIR_A2:
12919   case PPC::DIR_E500:
12920   case PPC::DIR_E500mc:
12921   case PPC::DIR_E5500:
12922     return 2;
12923   }
12924 }
12925 
12926 // isConsecutiveLSLoc needs to work even if all adds have not yet been
12927 // collapsed, and so we need to look through chains of them.
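// For example, (add (add %x, 8), 16) resolves to Base = %x with Offset = 24
// after the recursion below.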
12928 static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
12929                                      int64_t& Offset, SelectionDAG &DAG) {
12930   if (DAG.isBaseWithConstantOffset(Loc)) {
12931     Base = Loc.getOperand(0);
12932     Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
12933 
12934     // The base might itself be a base plus an offset, and if so, accumulate
12935     // that as well.
12936     getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG);
12937   }
12938 }
12939 
12940 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
12941                             unsigned Bytes, int Dist,
12942                             SelectionDAG &DAG) {
12943   if (VT.getSizeInBits() / 8 != Bytes)
12944     return false;
12945 
12946   SDValue BaseLoc = Base->getBasePtr();
12947   if (Loc.getOpcode() == ISD::FrameIndex) {
12948     if (BaseLoc.getOpcode() != ISD::FrameIndex)
12949       return false;
12950     const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
12951     int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
12952     int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
12953     int FS  = MFI.getObjectSize(FI);
12954     int BFS = MFI.getObjectSize(BFI);
12955     if (FS != BFS || FS != (int)Bytes) return false;
12956     return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
12957   }
12958 
12959   SDValue Base1 = Loc, Base2 = BaseLoc;
12960   int64_t Offset1 = 0, Offset2 = 0;
12961   getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);
12962   getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);
12963   if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
12964     return true;
12965 
12966   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12967   const GlobalValue *GV1 = nullptr;
12968   const GlobalValue *GV2 = nullptr;
12969   Offset1 = 0;
12970   Offset2 = 0;
12971   bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
12972   bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
12973   if (isGA1 && isGA2 && GV1 == GV2)
12974     return Offset1 == (Offset2 + Dist*Bytes);
12975   return false;
12976 }
12977 
12978 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
12979 // not enforce equality of the chain operands.
12980 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
12981                             unsigned Bytes, int Dist,
12982                             SelectionDAG &DAG) {
12983   if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
12984     EVT VT = LS->getMemoryVT();
12985     SDValue Loc = LS->getBasePtr();
12986     return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
12987   }
12988 
12989   if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
12990     EVT VT;
12991     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12992     default: return false;
12993     case Intrinsic::ppc_qpx_qvlfd:
12994     case Intrinsic::ppc_qpx_qvlfda:
12995       VT = MVT::v4f64;
12996       break;
12997     case Intrinsic::ppc_qpx_qvlfs:
12998     case Intrinsic::ppc_qpx_qvlfsa:
12999       VT = MVT::v4f32;
13000       break;
13001     case Intrinsic::ppc_qpx_qvlfcd:
13002     case Intrinsic::ppc_qpx_qvlfcda:
13003       VT = MVT::v2f64;
13004       break;
13005     case Intrinsic::ppc_qpx_qvlfcs:
13006     case Intrinsic::ppc_qpx_qvlfcsa:
13007       VT = MVT::v2f32;
13008       break;
13009     case Intrinsic::ppc_qpx_qvlfiwa:
13010     case Intrinsic::ppc_qpx_qvlfiwz:
13011     case Intrinsic::ppc_altivec_lvx:
13012     case Intrinsic::ppc_altivec_lvxl:
13013     case Intrinsic::ppc_vsx_lxvw4x:
13014     case Intrinsic::ppc_vsx_lxvw4x_be:
13015       VT = MVT::v4i32;
13016       break;
13017     case Intrinsic::ppc_vsx_lxvd2x:
13018     case Intrinsic::ppc_vsx_lxvd2x_be:
13019       VT = MVT::v2f64;
13020       break;
13021     case Intrinsic::ppc_altivec_lvebx:
13022       VT = MVT::i8;
13023       break;
13024     case Intrinsic::ppc_altivec_lvehx:
13025       VT = MVT::i16;
13026       break;
13027     case Intrinsic::ppc_altivec_lvewx:
13028       VT = MVT::i32;
13029       break;
13030     }
13031 
13032     return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
13033   }
13034 
13035   if (N->getOpcode() == ISD::INTRINSIC_VOID) {
13036     EVT VT;
13037     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
13038     default: return false;
13039     case Intrinsic::ppc_qpx_qvstfd:
13040     case Intrinsic::ppc_qpx_qvstfda:
13041       VT = MVT::v4f64;
13042       break;
13043     case Intrinsic::ppc_qpx_qvstfs:
13044     case Intrinsic::ppc_qpx_qvstfsa:
13045       VT = MVT::v4f32;
13046       break;
13047     case Intrinsic::ppc_qpx_qvstfcd:
13048     case Intrinsic::ppc_qpx_qvstfcda:
13049       VT = MVT::v2f64;
13050       break;
13051     case Intrinsic::ppc_qpx_qvstfcs:
13052     case Intrinsic::ppc_qpx_qvstfcsa:
13053       VT = MVT::v2f32;
13054       break;
13055     case Intrinsic::ppc_qpx_qvstfiw:
13056     case Intrinsic::ppc_qpx_qvstfiwa:
13057     case Intrinsic::ppc_altivec_stvx:
13058     case Intrinsic::ppc_altivec_stvxl:
13059     case Intrinsic::ppc_vsx_stxvw4x:
13060       VT = MVT::v4i32;
13061       break;
13062     case Intrinsic::ppc_vsx_stxvd2x:
13063       VT = MVT::v2f64;
13064       break;
13065     case Intrinsic::ppc_vsx_stxvw4x_be:
13066       VT = MVT::v4i32;
13067       break;
13068     case Intrinsic::ppc_vsx_stxvd2x_be:
13069       VT = MVT::v2f64;
13070       break;
13071     case Intrinsic::ppc_altivec_stvebx:
13072       VT = MVT::i8;
13073       break;
13074     case Intrinsic::ppc_altivec_stvehx:
13075       VT = MVT::i16;
13076       break;
13077     case Intrinsic::ppc_altivec_stvewx:
13078       VT = MVT::i32;
13079       break;
13080     }
13081 
13082     return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
13083   }
13084 
13085   return false;
13086 }
13087 
// Return true if there is a nearby consecutive load to the one provided
// (regardless of alignment). We search up and down the chain, looking through
// token factors and other loads (but nothing else). As a result, a true result
13091 // indicates that it is safe to create a new consecutive load adjacent to the
13092 // load provided.
13093 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
13094   SDValue Chain = LD->getChain();
13095   EVT VT = LD->getMemoryVT();
13096 
13097   SmallSet<SDNode *, 16> LoadRoots;
13098   SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
13099   SmallSet<SDNode *, 16> Visited;
13100 
13101   // First, search up the chain, branching to follow all token-factor operands.
  // If we find a consecutive load, then we're done; otherwise, record all
  // nodes just above the top-level loads and token factors.
13104   while (!Queue.empty()) {
13105     SDNode *ChainNext = Queue.pop_back_val();
13106     if (!Visited.insert(ChainNext).second)
13107       continue;
13108 
13109     if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
13110       if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
13111         return true;
13112 
13113       if (!Visited.count(ChainLD->getChain().getNode()))
13114         Queue.push_back(ChainLD->getChain().getNode());
13115     } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
13116       for (const SDUse &O : ChainNext->ops())
13117         if (!Visited.count(O.getNode()))
13118           Queue.push_back(O.getNode());
13119     } else
13120       LoadRoots.insert(ChainNext);
13121   }
13122 
13123   // Second, search down the chain, starting from the top-level nodes recorded
13124   // in the first phase. These top-level nodes are the nodes just above all
  // loads and token factors. Starting with their uses, recursively look
  // through all loads (just the chain uses) and token factors to find a
13126   // all loads (just the chain uses) and token factors to find a consecutive
13127   // load.
13128   Visited.clear();
13129   Queue.clear();
13130 
13131   for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
13132        IE = LoadRoots.end(); I != IE; ++I) {
13133     Queue.push_back(*I);
13134 
13135     while (!Queue.empty()) {
13136       SDNode *LoadRoot = Queue.pop_back_val();
13137       if (!Visited.insert(LoadRoot).second)
13138         continue;
13139 
13140       if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
13141         if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
13142           return true;
13143 
13144       for (SDNode::use_iterator UI = LoadRoot->use_begin(),
13145            UE = LoadRoot->use_end(); UI != UE; ++UI)
13146         if (((isa<MemSDNode>(*UI) &&
13147             cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
13148             UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
13149           Queue.push_back(*UI);
13150     }
13151   }
13152 
13153   return false;
13154 }
13155 
/// This function is called when we have proved that a SETCC node can be
/// replaced by subtraction (and other supporting instructions) so that the
/// result of the comparison is kept in a GPR instead of a CR. This function is
/// purely for codegen purposes and has some flags to guide the codegen
/// process.
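/// Schematically, for an i32 SETULT on a 64-bit target (Size == 64), the
/// generated sequence is:
///   t2 = (sub i64 (zext a), (zext b))   ; negative iff a <u b
///   t3 = (srl i64 t2, 63)               ; move the sign bit to bit 0
///   r  = (trunc i1 t3)
/// The other unsigned predicates are obtained by swapping the operands and/or
/// complementing the result.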
13160 static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement,
13161                                      bool Swap, SDLoc &DL, SelectionDAG &DAG) {
13162   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
13163 
13164   // Zero extend the operands to the largest legal integer. Originally, they
13165   // must be of a strictly smaller size.
13166   auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0),
13167                          DAG.getConstant(Size, DL, MVT::i32));
13168   auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1),
13169                          DAG.getConstant(Size, DL, MVT::i32));
13170 
  // Swap the operands if needed, depending on the condition code.
13172   if (Swap)
13173     std::swap(Op0, Op1);
13174 
13175   // Subtract extended integers.
13176   auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1);
13177 
  // Move the sign bit to the least significant position and zero out the rest.
  // The least significant bit then carries the result of the original
  // comparison.
13180   auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode,
13181                              DAG.getConstant(Size - 1, DL, MVT::i32));
13182   auto Final = Shifted;
13183 
  // Complement the result if needed, based on the condition code.
13185   if (Complement)
13186     Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted,
13187                         DAG.getConstant(1, DL, MVT::i64));
13188 
13189   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final);
13190 }
13191 
13192 SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
13193                                                   DAGCombinerInfo &DCI) const {
13194   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
13195 
13196   SelectionDAG &DAG = DCI.DAG;
13197   SDLoc DL(N);
13198 
  // The size of the integers being compared plays a critical role in the
  // following analysis, so we prefer to do this when all types are legal.
13201   if (!DCI.isAfterLegalizeDAG())
13202     return SDValue();
13203 
  // If all users of the SETCC extend its value to a legal integer type, then
  // we replace the SETCC with a subtraction.
13206   for (SDNode::use_iterator UI = N->use_begin(),
13207        UE = N->use_end(); UI != UE; ++UI) {
13208     if (UI->getOpcode() != ISD::ZERO_EXTEND)
13209       return SDValue();
13210   }
13211 
13212   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
13213   auto OpSize = N->getOperand(0).getValueSizeInBits();
13214 
13215   unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();
13216 
13217   if (OpSize < Size) {
13218     switch (CC) {
13219     default: break;
13220     case ISD::SETULT:
13221       return generateEquivalentSub(N, Size, false, false, DL, DAG);
13222     case ISD::SETULE:
13223       return generateEquivalentSub(N, Size, true, true, DL, DAG);
13224     case ISD::SETUGT:
13225       return generateEquivalentSub(N, Size, false, true, DL, DAG);
13226     case ISD::SETUGE:
13227       return generateEquivalentSub(N, Size, true, false, DL, DAG);
13228     }
13229   }
13230 
13231   return SDValue();
13232 }
13233 
13234 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
13235                                                   DAGCombinerInfo &DCI) const {
13236   SelectionDAG &DAG = DCI.DAG;
13237   SDLoc dl(N);
13238 
13239   assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
13240   // If we're tracking CR bits, we need to be careful that we don't have:
13241   //   trunc(binary-ops(zext(x), zext(y)))
13242   // or
13243   //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
13244   // such that we're unnecessarily moving things into GPRs when it would be
13245   // better to keep them in CR bits.
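  // For example, an i1 (trunc (or (zext i1 %a), (zext i1 %b))) is better
  // computed as (or i1 %a, %b) directly on CR bits, which is the rewrite
  // performed below.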
13246 
13247   // Note that trunc here can be an actual i1 trunc, or can be the effective
13248   // truncation that comes from a setcc or select_cc.
13249   if (N->getOpcode() == ISD::TRUNCATE &&
13250       N->getValueType(0) != MVT::i1)
13251     return SDValue();
13252 
13253   if (N->getOperand(0).getValueType() != MVT::i32 &&
13254       N->getOperand(0).getValueType() != MVT::i64)
13255     return SDValue();
13256 
13257   if (N->getOpcode() == ISD::SETCC ||
13258       N->getOpcode() == ISD::SELECT_CC) {
    // If we're looking at a comparison, then we need to make sure that the
    // high bits (all except for the first) don't affect the result.
13261     ISD::CondCode CC =
13262       cast<CondCodeSDNode>(N->getOperand(
13263         N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
13264     unsigned OpBits = N->getOperand(0).getValueSizeInBits();
13265 
13266     if (ISD::isSignedIntSetCC(CC)) {
13267       if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
13268           DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
13269         return SDValue();
13270     } else if (ISD::isUnsignedIntSetCC(CC)) {
13271       if (!DAG.MaskedValueIsZero(N->getOperand(0),
13272                                  APInt::getHighBitsSet(OpBits, OpBits-1)) ||
13273           !DAG.MaskedValueIsZero(N->getOperand(1),
13274                                  APInt::getHighBitsSet(OpBits, OpBits-1)))
13275         return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
13276                                              : SDValue());
13277     } else {
13278       // This is neither a signed nor an unsigned comparison, just make sure
13279       // that the high bits are equal.
13280       KnownBits Op1Known = DAG.computeKnownBits(N->getOperand(0));
13281       KnownBits Op2Known = DAG.computeKnownBits(N->getOperand(1));
13282 
13283       // We don't really care about what is known about the first bit (if
13284       // anything), so clear it in all masks prior to comparing them.
13285       Op1Known.Zero.clearBit(0); Op1Known.One.clearBit(0);
13286       Op2Known.Zero.clearBit(0); Op2Known.One.clearBit(0);
13287 
13288       if (Op1Known.Zero != Op2Known.Zero || Op1Known.One != Op2Known.One)
13289         return SDValue();
13290     }
13291   }
13292 
  // We now know that the higher-order bits are irrelevant; we just need to
  // make sure that all of the intermediate operations are bit operations and
  // that all inputs are extensions.
13296   if (N->getOperand(0).getOpcode() != ISD::AND &&
13297       N->getOperand(0).getOpcode() != ISD::OR  &&
13298       N->getOperand(0).getOpcode() != ISD::XOR &&
13299       N->getOperand(0).getOpcode() != ISD::SELECT &&
13300       N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
13301       N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
13302       N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
13303       N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
13304       N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
13305     return SDValue();
13306 
13307   if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
13308       N->getOperand(1).getOpcode() != ISD::AND &&
13309       N->getOperand(1).getOpcode() != ISD::OR  &&
13310       N->getOperand(1).getOpcode() != ISD::XOR &&
13311       N->getOperand(1).getOpcode() != ISD::SELECT &&
13312       N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
13313       N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
13314       N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
13315       N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
13316       N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
13317     return SDValue();
13318 
13319   SmallVector<SDValue, 4> Inputs;
13320   SmallVector<SDValue, 8> BinOps, PromOps;
13321   SmallPtrSet<SDNode *, 16> Visited;
13322 
13323   for (unsigned i = 0; i < 2; ++i) {
13324     if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
13325           N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
13326           N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
13327           N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
13328         isa<ConstantSDNode>(N->getOperand(i)))
13329       Inputs.push_back(N->getOperand(i));
13330     else
13331       BinOps.push_back(N->getOperand(i));
13332 
13333     if (N->getOpcode() == ISD::TRUNCATE)
13334       break;
13335   }
13336 
13337   // Visit all inputs, collect all binary operations (and, or, xor and
13338   // select) that are all fed by extensions.
13339   while (!BinOps.empty()) {
13340     SDValue BinOp = BinOps.back();
13341     BinOps.pop_back();
13342 
13343     if (!Visited.insert(BinOp.getNode()).second)
13344       continue;
13345 
13346     PromOps.push_back(BinOp);
13347 
13348     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
13349       // The condition of the select is not promoted.
13350       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
13351         continue;
13352       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
13353         continue;
13354 
13355       if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
13356             BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
13357             BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
13358            BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
13359           isa<ConstantSDNode>(BinOp.getOperand(i))) {
13360         Inputs.push_back(BinOp.getOperand(i));
13361       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
13362                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
13363                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
13364                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
13365                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
13366                  BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
13367                  BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
13368                  BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
13369                  BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
13370         BinOps.push_back(BinOp.getOperand(i));
13371       } else {
13372         // We have an input that is not an extension or another binary
13373         // operation; we'll abort this transformation.
13374         return SDValue();
13375       }
13376     }
13377   }
13378 
13379   // Make sure that this is a self-contained cluster of operations (which
13380   // is not quite the same thing as saying that everything has only one
13381   // use).
13382   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13383     if (isa<ConstantSDNode>(Inputs[i]))
13384       continue;
13385 
13386     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
13387                               UE = Inputs[i].getNode()->use_end();
13388          UI != UE; ++UI) {
13389       SDNode *User = *UI;
13390       if (User != N && !Visited.count(User))
13391         return SDValue();
13392 
      // Make sure that we're not going to promote the non-output-value
      // operand(s) of SELECT or SELECT_CC.
13395       // FIXME: Although we could sometimes handle this, and it does occur in
13396       // practice that one of the condition inputs to the select is also one of
13397       // the outputs, we currently can't deal with this.
13398       if (User->getOpcode() == ISD::SELECT) {
13399         if (User->getOperand(0) == Inputs[i])
13400           return SDValue();
13401       } else if (User->getOpcode() == ISD::SELECT_CC) {
13402         if (User->getOperand(0) == Inputs[i] ||
13403             User->getOperand(1) == Inputs[i])
13404           return SDValue();
13405       }
13406     }
13407   }
13408 
13409   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
13410     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
13411                               UE = PromOps[i].getNode()->use_end();
13412          UI != UE; ++UI) {
13413       SDNode *User = *UI;
13414       if (User != N && !Visited.count(User))
13415         return SDValue();
13416 
      // Make sure that we're not going to promote the non-output-value
      // operand(s) of SELECT or SELECT_CC.
13419       // FIXME: Although we could sometimes handle this, and it does occur in
13420       // practice that one of the condition inputs to the select is also one of
13421       // the outputs, we currently can't deal with this.
13422       if (User->getOpcode() == ISD::SELECT) {
13423         if (User->getOperand(0) == PromOps[i])
13424           return SDValue();
13425       } else if (User->getOpcode() == ISD::SELECT_CC) {
13426         if (User->getOperand(0) == PromOps[i] ||
13427             User->getOperand(1) == PromOps[i])
13428           return SDValue();
13429       }
13430     }
13431   }
13432 
13433   // Replace all inputs with the extension operand.
13434   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13435     // Constants may have users outside the cluster of to-be-promoted nodes,
13436     // and so we need to replace those as we do the promotions.
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;
    DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
13441   }
13442 
13443   std::list<HandleSDNode> PromOpHandles;
13444   for (auto &PromOp : PromOps)
13445     PromOpHandles.emplace_back(PromOp);
13446 
13447   // Replace all operations (these are all the same, but have a different
13448   // (i1) return type). DAG.getNode will validate that the types of
13449   // a binary operator match, so go through the list in reverse so that
13450   // we've likely promoted both operands first. Any intermediate truncations or
13451   // extensions disappear.
13452   while (!PromOpHandles.empty()) {
13453     SDValue PromOp = PromOpHandles.back().getValue();
13454     PromOpHandles.pop_back();
13455 
13456     if (PromOp.getOpcode() == ISD::TRUNCATE ||
13457         PromOp.getOpcode() == ISD::SIGN_EXTEND ||
13458         PromOp.getOpcode() == ISD::ZERO_EXTEND ||
13459         PromOp.getOpcode() == ISD::ANY_EXTEND) {
13460       if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
13461           PromOp.getOperand(0).getValueType() != MVT::i1) {
13462         // The operand is not yet ready (see comment below).
13463         PromOpHandles.emplace_front(PromOp);
13464         continue;
13465       }
13466 
13467       SDValue RepValue = PromOp.getOperand(0);
13468       if (isa<ConstantSDNode>(RepValue))
13469         RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);
13470 
13471       DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
13472       continue;
13473     }
13474 
13475     unsigned C;
13476     switch (PromOp.getOpcode()) {
13477     default:             C = 0; break;
13478     case ISD::SELECT:    C = 1; break;
13479     case ISD::SELECT_CC: C = 2; break;
13480     }
13481 
13482     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
13483          PromOp.getOperand(C).getValueType() != MVT::i1) ||
13484         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
13485          PromOp.getOperand(C+1).getValueType() != MVT::i1)) {
13486       // The to-be-promoted operands of this node have not yet been
13487       // promoted (this should be rare because we're going through the
13488       // list backward, but if one of the operands has several users in
13489       // this cluster of to-be-promoted nodes, it is possible).
13490       PromOpHandles.emplace_front(PromOp);
13491       continue;
13492     }
13493 
13494     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
13495                                 PromOp.getNode()->op_end());
13496 
13497     // If there are any constant inputs, make sure they're replaced now.
13498     for (unsigned i = 0; i < 2; ++i)
13499       if (isa<ConstantSDNode>(Ops[C+i]))
13500         Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]);
13501 
13502     DAG.ReplaceAllUsesOfValueWith(PromOp,
13503       DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
13504   }
13505 
13506   // Now we're left with the initial truncation itself.
13507   if (N->getOpcode() == ISD::TRUNCATE)
13508     return N->getOperand(0);
13509 
13510   // Otherwise, this is a comparison. The operands to be compared have just
13511   // changed type (to i1), but everything else is the same.
13512   return SDValue(N, 0);
13513 }
13514 
13515 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
13516                                                   DAGCombinerInfo &DCI) const {
13517   SelectionDAG &DAG = DCI.DAG;
13518   SDLoc dl(N);
13519 
13520   // If we're tracking CR bits, we need to be careful that we don't have:
13521   //   zext(binary-ops(trunc(x), trunc(y)))
13522   // or
13523   //   zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
13524   // such that we're unnecessarily moving things into CR bits that can more
13525   // efficiently stay in GPRs. Note that if we're not certain that the high
13526   // bits are set as required by the final extension, we still may need to do
13527   // some masking to get the proper behavior.
13528 
13529   // This same functionality is important on PPC64 when dealing with
13530   // 32-to-64-bit extensions; these occur often when 32-bit values are used as
13531   // the return values of functions. Because it is so similar, it is handled
13532   // here as well.
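  // For example, (zext i64 (and i1 (trunc i64 %x), (trunc i64 %y))) is better
  // computed as (and i64 %x, %y), plus whatever masking the outer extension
  // still requires.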
13533 
13534   if (N->getValueType(0) != MVT::i32 &&
13535       N->getValueType(0) != MVT::i64)
13536     return SDValue();
13537 
13538   if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
13539         (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
13540     return SDValue();
13541 
13542   if (N->getOperand(0).getOpcode() != ISD::AND &&
13543       N->getOperand(0).getOpcode() != ISD::OR  &&
13544       N->getOperand(0).getOpcode() != ISD::XOR &&
13545       N->getOperand(0).getOpcode() != ISD::SELECT &&
13546       N->getOperand(0).getOpcode() != ISD::SELECT_CC)
13547     return SDValue();
13548 
13549   SmallVector<SDValue, 4> Inputs;
13550   SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
13551   SmallPtrSet<SDNode *, 16> Visited;
13552 
13553   // Visit all inputs, collect all binary operations (and, or, xor and
13554   // select) that are all fed by truncations.
13555   while (!BinOps.empty()) {
13556     SDValue BinOp = BinOps.back();
13557     BinOps.pop_back();
13558 
13559     if (!Visited.insert(BinOp.getNode()).second)
13560       continue;
13561 
13562     PromOps.push_back(BinOp);
13563 
13564     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
13565       // The condition of the select is not promoted.
13566       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
13567         continue;
13568       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
13569         continue;
13570 
13571       if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
13572           isa<ConstantSDNode>(BinOp.getOperand(i))) {
13573         Inputs.push_back(BinOp.getOperand(i));
13574       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
13575                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
13576                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
13577                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
13578                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
13579         BinOps.push_back(BinOp.getOperand(i));
13580       } else {
13581         // We have an input that is not a truncation or another binary
13582         // operation; we'll abort this transformation.
13583         return SDValue();
13584       }
13585     }
13586   }
13587 
  // SelectTruncOp records the operands of a select that must be truncated
  // when the select is promoted, because the operand is actually part of the
  // to-be-promoted set.
13590   DenseMap<SDNode *, EVT> SelectTruncOp[2];
13591 
13592   // Make sure that this is a self-contained cluster of operations (which
13593   // is not quite the same thing as saying that everything has only one
13594   // use).
13595   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13596     if (isa<ConstantSDNode>(Inputs[i]))
13597       continue;
13598 
13599     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
13600                               UE = Inputs[i].getNode()->use_end();
13601          UI != UE; ++UI) {
13602       SDNode *User = *UI;
13603       if (User != N && !Visited.count(User))
13604         return SDValue();
13605 
      // If we're going to promote the non-output-value operand(s) of SELECT or
      // SELECT_CC, record them for truncation.
13608       if (User->getOpcode() == ISD::SELECT) {
13609         if (User->getOperand(0) == Inputs[i])
13610           SelectTruncOp[0].insert(std::make_pair(User,
13611                                     User->getOperand(0).getValueType()));
13612       } else if (User->getOpcode() == ISD::SELECT_CC) {
13613         if (User->getOperand(0) == Inputs[i])
13614           SelectTruncOp[0].insert(std::make_pair(User,
13615                                     User->getOperand(0).getValueType()));
13616         if (User->getOperand(1) == Inputs[i])
13617           SelectTruncOp[1].insert(std::make_pair(User,
13618                                     User->getOperand(1).getValueType()));
13619       }
13620     }
13621   }
13622 
13623   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
13624     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
13625                               UE = PromOps[i].getNode()->use_end();
13626          UI != UE; ++UI) {
13627       SDNode *User = *UI;
13628       if (User != N && !Visited.count(User))
13629         return SDValue();
13630 
      // If we're going to promote the non-output-value operand(s) of SELECT or
      // SELECT_CC, record them for truncation.
13633       if (User->getOpcode() == ISD::SELECT) {
13634         if (User->getOperand(0) == PromOps[i])
13635           SelectTruncOp[0].insert(std::make_pair(User,
13636                                     User->getOperand(0).getValueType()));
13637       } else if (User->getOpcode() == ISD::SELECT_CC) {
13638         if (User->getOperand(0) == PromOps[i])
13639           SelectTruncOp[0].insert(std::make_pair(User,
13640                                     User->getOperand(0).getValueType()));
13641         if (User->getOperand(1) == PromOps[i])
13642           SelectTruncOp[1].insert(std::make_pair(User,
13643                                     User->getOperand(1).getValueType()));
13644       }
13645     }
13646   }
13647 
13648   unsigned PromBits = N->getOperand(0).getValueSizeInBits();
13649   bool ReallyNeedsExt = false;
13650   if (N->getOpcode() != ISD::ANY_EXTEND) {
13651     // If all of the inputs are not already sign/zero extended, then
13652     // we'll still need to do that at the end.
13653     for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13654       if (isa<ConstantSDNode>(Inputs[i]))
13655         continue;
13656 
13657       unsigned OpBits =
13658         Inputs[i].getOperand(0).getValueSizeInBits();
13659       assert(PromBits < OpBits && "Truncation not to a smaller bit count?");
13660 
13661       if ((N->getOpcode() == ISD::ZERO_EXTEND &&
13662            !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
13663                                   APInt::getHighBitsSet(OpBits,
13664                                                         OpBits-PromBits))) ||
13665           (N->getOpcode() == ISD::SIGN_EXTEND &&
13666            DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
13667              (OpBits-(PromBits-1)))) {
13668         ReallyNeedsExt = true;
13669         break;
13670       }
13671     }
13672   }
13673 
13674   // Replace all inputs, either with the truncation operand, or a
13675   // truncation or extension to the final output type.
13676   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13677     // Constant inputs need to be replaced with the to-be-promoted nodes that
13678     // use them because they might have users outside of the cluster of
13679     // promoted nodes.
13680     if (isa<ConstantSDNode>(Inputs[i]))
13681       continue;
13682 
13683     SDValue InSrc = Inputs[i].getOperand(0);
13684     if (Inputs[i].getValueType() == N->getValueType(0))
13685       DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
13686     else if (N->getOpcode() == ISD::SIGN_EXTEND)
13687       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13688         DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
13689     else if (N->getOpcode() == ISD::ZERO_EXTEND)
13690       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13691         DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
13692     else
13693       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13694         DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
13695   }
13696 
13697   std::list<HandleSDNode> PromOpHandles;
13698   for (auto &PromOp : PromOps)
13699     PromOpHandles.emplace_back(PromOp);
13700 
13701   // Replace all operations (these are all the same, but have a different
13702   // (promoted) return type). DAG.getNode will validate that the types of
13703   // a binary operator match, so go through the list in reverse so that
13704   // we've likely promoted both operands first.
13705   while (!PromOpHandles.empty()) {
13706     SDValue PromOp = PromOpHandles.back().getValue();
13707     PromOpHandles.pop_back();
13708 
13709     unsigned C;
13710     switch (PromOp.getOpcode()) {
13711     default:             C = 0; break;
13712     case ISD::SELECT:    C = 1; break;
13713     case ISD::SELECT_CC: C = 2; break;
13714     }
13715 
13716     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
13717          PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
13718         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
13719          PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) {
13720       // The to-be-promoted operands of this node have not yet been
13721       // promoted (this should be rare because we're going through the
13722       // list backward, but if one of the operands has several users in
13723       // this cluster of to-be-promoted nodes, it is possible).
13724       PromOpHandles.emplace_front(PromOp);
13725       continue;
13726     }
13727 
13728     // For SELECT and SELECT_CC nodes, we do a similar check for any
13729     // to-be-promoted comparison inputs.
13730     if (PromOp.getOpcode() == ISD::SELECT ||
13731         PromOp.getOpcode() == ISD::SELECT_CC) {
13732       if ((SelectTruncOp[0].count(PromOp.getNode()) &&
13733            PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
13734           (SelectTruncOp[1].count(PromOp.getNode()) &&
13735            PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
13736         PromOpHandles.emplace_front(PromOp);
13737         continue;
13738       }
13739     }
13740 
13741     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
13742                                 PromOp.getNode()->op_end());
13743 
13744     // If this node has constant inputs, then they'll need to be promoted here.
13745     for (unsigned i = 0; i < 2; ++i) {
13746       if (!isa<ConstantSDNode>(Ops[C+i]))
13747         continue;
13748       if (Ops[C+i].getValueType() == N->getValueType(0))
13749         continue;
13750 
13751       if (N->getOpcode() == ISD::SIGN_EXTEND)
13752         Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13753       else if (N->getOpcode() == ISD::ZERO_EXTEND)
13754         Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13755       else
13756         Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13757     }
13758 
13759     // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
13760     // truncate them again to the original value type.
13761     if (PromOp.getOpcode() == ISD::SELECT ||
13762         PromOp.getOpcode() == ISD::SELECT_CC) {
13763       auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
13764       if (SI0 != SelectTruncOp[0].end())
13765         Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
13766       auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
13767       if (SI1 != SelectTruncOp[1].end())
13768         Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
13769     }
13770 
13771     DAG.ReplaceAllUsesOfValueWith(PromOp,
13772       DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
13773   }
13774 
13775   // Now we're left with the initial extension itself.
13776   if (!ReallyNeedsExt)
13777     return N->getOperand(0);
13778 
  // To zero extend, just mask off everything except for the low PromBits
  // bits (just the first bit, in the i1 case).
13781   if (N->getOpcode() == ISD::ZERO_EXTEND)
13782     return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
13783                        DAG.getConstant(APInt::getLowBitsSet(
13784                                          N->getValueSizeInBits(0), PromBits),
13785                                        dl, N->getValueType(0)));
13786 
13787   assert(N->getOpcode() == ISD::SIGN_EXTEND &&
13788          "Invalid extension type");
13789   EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
13790   SDValue ShiftCst =
13791       DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
13792   return DAG.getNode(
13793       ISD::SRA, dl, N->getValueType(0),
13794       DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
13795       ShiftCst);
13796 }
13797 
13798 SDValue PPCTargetLowering::combineSetCC(SDNode *N,
13799                                         DAGCombinerInfo &DCI) const {
13800   assert(N->getOpcode() == ISD::SETCC &&
13801          "Should be called with a SETCC node");
13802 
13803   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
13804   if (CC == ISD::SETNE || CC == ISD::SETEQ) {
13805     SDValue LHS = N->getOperand(0);
13806     SDValue RHS = N->getOperand(1);
13807 
13808     // If there is a '0 - y' pattern, canonicalize the pattern to the RHS.
13809     if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
13810         LHS.hasOneUse())
13811       std::swap(LHS, RHS);
13812 
13813     // x == 0-y --> x+y == 0
13814     // x != 0-y --> x+y != 0
13815     if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
13816         RHS.hasOneUse()) {
13817       SDLoc DL(N);
13818       SelectionDAG &DAG = DCI.DAG;
13819       EVT VT = N->getValueType(0);
13820       EVT OpVT = LHS.getValueType();
13821       SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
13822       return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
13823     }
13824   }
13825 
13826   return DAGCombineTruncBoolExt(N, DCI);
13827 }
13828 
13829 // Is this an extending load from an f32 to an f64?
13830 static bool isFPExtLoad(SDValue Op) {
13831   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()))
13832     return LD->getExtensionType() == ISD::EXTLOAD &&
13833       Op.getValueType() == MVT::f64;
13834   return false;
13835 }
13836 
/// Reduces the number of fp-to-int conversions when building a vector.
13838 ///
13839 /// If this vector is built out of floating to integer conversions,
13840 /// transform it to a vector built out of floating point values followed by a
13841 /// single floating to integer conversion of the vector.
13842 /// Namely  (build_vector (fptosi $A), (fptosi $B), ...)
13843 /// becomes (fptosi (build_vector ($A, $B, ...)))
13844 SDValue PPCTargetLowering::
13845 combineElementTruncationToVectorTruncation(SDNode *N,
13846                                            DAGCombinerInfo &DCI) const {
13847   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13848          "Should be called with a BUILD_VECTOR node");
13849 
13850   SelectionDAG &DAG = DCI.DAG;
13851   SDLoc dl(N);
13852 
13853   SDValue FirstInput = N->getOperand(0);
13854   assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
13855          "The input operand must be an fp-to-int conversion.");
13856 
  // This combine happens after legalization so the fp_to_[su]i nodes are
  // already converted to PPCISD nodes.
13859   unsigned FirstConversion = FirstInput.getOperand(0).getOpcode();
13860   if (FirstConversion == PPCISD::FCTIDZ ||
13861       FirstConversion == PPCISD::FCTIDUZ ||
13862       FirstConversion == PPCISD::FCTIWZ ||
13863       FirstConversion == PPCISD::FCTIWUZ) {
13864     bool IsSplat = true;
13865     bool Is32Bit = FirstConversion == PPCISD::FCTIWZ ||
13866       FirstConversion == PPCISD::FCTIWUZ;
13867     EVT SrcVT = FirstInput.getOperand(0).getValueType();
13868     SmallVector<SDValue, 4> Ops;
13869     EVT TargetVT = N->getValueType(0);
13870     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
13871       SDValue NextOp = N->getOperand(i);
13872       if (NextOp.getOpcode() != PPCISD::MFVSR)
13873         return SDValue();
13874       unsigned NextConversion = NextOp.getOperand(0).getOpcode();
13875       if (NextConversion != FirstConversion)
13876         return SDValue();
13877       // If we are converting to 32-bit integers, we need to add an FP_ROUND.
13878       // This is not valid if the input was originally double precision. It is
13879       // also not profitable to do unless this is an extending load in which
13880       // case doing this combine will allow us to combine consecutive loads.
13881       if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0)))
13882         return SDValue();
13883       if (N->getOperand(i) != FirstInput)
13884         IsSplat = false;
13885     }
13886 
13887     // If this is a splat, we leave it as-is since there will be only a single
13888     // fp-to-int conversion followed by a splat of the integer. This is better
13889     // for 32-bit and smaller ints and neutral for 64-bit ints.
13890     if (IsSplat)
13891       return SDValue();
13892 
13893     // Now that we know we have the right type of node, get its operands
13894     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
13895       SDValue In = N->getOperand(i).getOperand(0);
13896       if (Is32Bit) {
13897         // For 32-bit values, we need to add an FP_ROUND node (if we made it
13898         // here, we know that all inputs are extending loads so this is safe).
13899         if (In.isUndef())
13900           Ops.push_back(DAG.getUNDEF(SrcVT));
13901         else {
13902           SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl,
13903                                       MVT::f32, In.getOperand(0),
13904                                       DAG.getIntPtrConstant(1, dl));
13905           Ops.push_back(Trunc);
13906         }
13907       } else
13908         Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
13909     }
13910 
13911     unsigned Opcode;
13912     if (FirstConversion == PPCISD::FCTIDZ ||
13913         FirstConversion == PPCISD::FCTIWZ)
13914       Opcode = ISD::FP_TO_SINT;
13915     else
13916       Opcode = ISD::FP_TO_UINT;
13917 
13918     EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;
13919     SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
13920     return DAG.getNode(Opcode, dl, TargetVT, BV);
13921   }
13922   return SDValue();
13923 }
13924 
13925 /// Reduce the number of loads when building a vector.
13926 ///
13927 /// Building a vector out of multiple loads can be converted to a load
13928 /// of the vector type if the loads are consecutive. If the loads are
13929 /// consecutive but in descending order, a shuffle is added at the end
13930 /// to reorder the vector.
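/// For example, with 4-byte elements,
///   (build_vector (load p), (load p+4), (load p+8), (load p+12))
/// becomes a single vector load from p; the reverse-consecutive case instead
/// loads from the lowest address and reorders the lanes with a
/// vector_shuffle.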
13931 static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) {
13932   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13933          "Should be called with a BUILD_VECTOR node");
13934 
13935   SDLoc dl(N);
13936 
  // Return early for non-byte-sized types, as they can't be consecutive.
13938   if (!N->getValueType(0).getVectorElementType().isByteSized())
13939     return SDValue();
13940 
13941   bool InputsAreConsecutiveLoads = true;
13942   bool InputsAreReverseConsecutive = true;
13943   unsigned ElemSize = N->getValueType(0).getScalarType().getStoreSize();
13944   SDValue FirstInput = N->getOperand(0);
13945   bool IsRoundOfExtLoad = false;
13946 
13947   if (FirstInput.getOpcode() == ISD::FP_ROUND &&
13948       FirstInput.getOperand(0).getOpcode() == ISD::LOAD) {
    LoadSDNode *LD = cast<LoadSDNode>(FirstInput.getOperand(0));
13950     IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD;
13951   }
13952   // Not a build vector of (possibly fp_rounded) loads.
13953   if ((!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) ||
13954       N->getNumOperands() == 1)
13955     return SDValue();
13956 
13957   for (int i = 1, e = N->getNumOperands(); i < e; ++i) {
13958     // If any inputs are fp_round(extload), they all must be.
13959     if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND)
13960       return SDValue();
13961 
13962     SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) :
13963       N->getOperand(i);
13964     if (NextInput.getOpcode() != ISD::LOAD)
13965       return SDValue();
13966 
13967     SDValue PreviousInput =
13968       IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1);
13969     LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput);
13970     LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput);
13971 
13972     // If any inputs are fp_round(extload), they all must be.
13973     if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD)
13974       return SDValue();
13975 
13976     if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG))
13977       InputsAreConsecutiveLoads = false;
13978     if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG))
13979       InputsAreReverseConsecutive = false;
13980 
13981     // Exit early if the loads are neither consecutive nor reverse consecutive.
13982     if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
13983       return SDValue();
13984   }
13985 
13986   assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
13987          "The loads cannot be both consecutive and reverse consecutive.");
13988 
13989   SDValue FirstLoadOp =
13990     IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput;
13991   SDValue LastLoadOp =
13992     IsRoundOfExtLoad ? N->getOperand(N->getNumOperands()-1).getOperand(0) :
13993                        N->getOperand(N->getNumOperands()-1);
13994 
13995   LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
13996   LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
13997   if (InputsAreConsecutiveLoads) {
13998     assert(LD1 && "Input needs to be a LoadSDNode.");
13999     return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
14000                        LD1->getBasePtr(), LD1->getPointerInfo(),
14001                        LD1->getAlignment());
14002   }
14003   if (InputsAreReverseConsecutive) {
14004     assert(LDL && "Input needs to be a LoadSDNode.");
14005     SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
14006                                LDL->getBasePtr(), LDL->getPointerInfo(),
14007                                LDL->getAlignment());
14008     SmallVector<int, 16> Ops;
14009     for (int i = N->getNumOperands() - 1; i >= 0; i--)
14010       Ops.push_back(i);
14011 
14012     return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
14013                                 DAG.getUNDEF(N->getValueType(0)), Ops);
14014   }
14015   return SDValue();
14016 }
14017 
14018 // This function adds the required vector_shuffle needed to get
14019 // the elements of the vector extract in the correct position
14020 // as specified by the CorrectElems encoding.
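// For example, if a little endian build_vector extracted bytes 1, 5, 9 and
// 13 but the byte-to-word sign-extend instruction reads bytes 0, 4, 8 and
// 12, the shuffle moves each extracted byte into the lane the instruction
// expects.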
14021 static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
14022                                       SDValue Input, uint64_t Elems,
14023                                       uint64_t CorrectElems) {
14024   SDLoc dl(N);
14025 
14026   unsigned NumElems = Input.getValueType().getVectorNumElements();
14027   SmallVector<int, 16> ShuffleMask(NumElems, -1);
14028 
14029   // Knowing the element indices being extracted from the original
14030   // vector and the order in which they're being inserted, just put
14031   // them at element indices required for the instruction.
14032   for (unsigned i = 0; i < N->getNumOperands(); i++) {
14033     if (DAG.getDataLayout().isLittleEndian())
14034       ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
14035     else
14036       ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
14037     CorrectElems = CorrectElems >> 8;
14038     Elems = Elems >> 8;
14039   }
14040 
14041   SDValue Shuffle =
14042       DAG.getVectorShuffle(Input.getValueType(), dl, Input,
14043                            DAG.getUNDEF(Input.getValueType()), ShuffleMask);
14044 
14045   EVT VT = N->getValueType(0);
14046   SDValue Conv = DAG.getBitcast(VT, Shuffle);
14047 
14048   EVT ExtVT = EVT::getVectorVT(*DAG.getContext(),
14049                                Input.getValueType().getVectorElementType(),
14050                                VT.getVectorNumElements());
14051   return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Conv,
14052                      DAG.getValueType(ExtVT));
14053 }
14054 
// Look for build vector patterns where input operands come from sign
// extended vector_extract elements of specific indices. If the correct
// indices aren't used, add a vector shuffle to fix up the indices and create
// a SIGN_EXTEND_INREG node which selects the vector sign-extend instructions
// during instruction selection.
14060 static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
14061   // This array encodes the indices that the vector sign extend instructions
14062   // extract from when extending from one type to another for both BE and LE.
  // The right nibble of each byte corresponds to the LE indices,
  // and the left nibble of each byte corresponds to the BE indices.
14065   // For example: 0x3074B8FC  byte->word
14066   // For LE: the allowed indices are: 0x0,0x4,0x8,0xC
14067   // For BE: the allowed indices are: 0x3,0x7,0xB,0xF
14068   // For example: 0x000070F8  byte->double word
14069   // For LE: the allowed indices are: 0x0,0x8
14070   // For BE: the allowed indices are: 0x7,0xF
14071   uint64_t TargetElems[] = {
14072       0x3074B8FC, // b->w
14073       0x000070F8, // b->d
14074       0x10325476, // h->w
14075       0x00003074, // h->d
14076       0x00001032, // w->d
14077   };
14078 
14079   uint64_t Elems = 0;
14080   int Index;
14081   SDValue Input;
14082 
14083   auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
14084     if (!Op)
14085       return false;
14086     if (Op.getOpcode() != ISD::SIGN_EXTEND &&
14087         Op.getOpcode() != ISD::SIGN_EXTEND_INREG)
14088       return false;
14089 
14090     // A SIGN_EXTEND_INREG might be fed by an ANY_EXTEND to produce a value
14091     // of the right width.
14092     SDValue Extract = Op.getOperand(0);
14093     if (Extract.getOpcode() == ISD::ANY_EXTEND)
14094       Extract = Extract.getOperand(0);
14095     if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
14096       return false;
14097 
14098     ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
14099     if (!ExtOp)
14100       return false;
14101 
14102     Index = ExtOp->getZExtValue();
14103     if (Input && Input != Extract.getOperand(0))
14104       return false;
14105 
14106     if (!Input)
14107       Input = Extract.getOperand(0);
14108 
14109     Elems = Elems << 8;
14110     Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
14111     Elems |= Index;
14112 
14113     return true;
14114   };
14115 
  // If the build vector operands aren't sign-extended vector extracts
  // of the same input vector, then return.
14118   for (unsigned i = 0; i < N->getNumOperands(); i++) {
14119     if (!isSExtOfVecExtract(N->getOperand(i))) {
14120       return SDValue();
14121     }
14122   }
14123 
  // If the vector extract indices are not correct, add the appropriate
14125   // vector_shuffle.
14126   int TgtElemArrayIdx;
14127   int InputSize = Input.getValueType().getScalarSizeInBits();
14128   int OutputSize = N->getValueType(0).getScalarSizeInBits();
14129   if (InputSize + OutputSize == 40)
14130     TgtElemArrayIdx = 0;
14131   else if (InputSize + OutputSize == 72)
14132     TgtElemArrayIdx = 1;
14133   else if (InputSize + OutputSize == 48)
14134     TgtElemArrayIdx = 2;
14135   else if (InputSize + OutputSize == 80)
14136     TgtElemArrayIdx = 3;
14137   else if (InputSize + OutputSize == 96)
14138     TgtElemArrayIdx = 4;
14139   else
14140     return SDValue();
14141 
14142   uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
14143   CorrectElems = DAG.getDataLayout().isLittleEndian()
14144                      ? CorrectElems & 0x0F0F0F0F0F0F0F0F
14145                      : CorrectElems & 0xF0F0F0F0F0F0F0F0;
14146   if (Elems != CorrectElems) {
14147     return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
14148   }
14149 
14150   // Regular lowering will catch cases where a shuffle is not needed.
14151   return SDValue();
14152 }
14153 
14154 SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
14155                                                  DAGCombinerInfo &DCI) const {
14156   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
14157          "Should be called with a BUILD_VECTOR node");
14158 
14159   SelectionDAG &DAG = DCI.DAG;
14160   SDLoc dl(N);
14161 
14162   if (!Subtarget.hasVSX())
14163     return SDValue();
14164 
14165   // The target independent DAG combiner will leave a build_vector of
14166   // float-to-int conversions intact. We can generate MUCH better code for
14167   // a float-to-int conversion of a vector of floats.
14168   SDValue FirstInput = N->getOperand(0);
14169   if (FirstInput.getOpcode() == PPCISD::MFVSR) {
14170     SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
14171     if (Reduced)
14172       return Reduced;
14173   }
14174 
14175   // If we're building a vector out of consecutive loads, just load that
14176   // vector type.
14177   SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG);
14178   if (Reduced)
14179     return Reduced;
14180 
14181   // If we're building a vector out of extended elements from another vector
14182   // we have P9 vector integer extend instructions. The code assumes legal
14183   // input types (i.e. it can't handle things like v4i16) so do not run before
14184   // legalization.
14185   if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) {
14186     Reduced = combineBVOfVecSExt(N, DAG);
14187     if (Reduced)
14188       return Reduced;
14189   }
14190 
14191 
14192   if (N->getValueType(0) != MVT::v2f64)
14193     return SDValue();
14194 
14195   // Looking for:
14196   // (build_vector ([su]int_to_fp (extractelt 0)), [su]int_to_fp (extractelt 1))
14197   if (FirstInput.getOpcode() != ISD::SINT_TO_FP &&
14198       FirstInput.getOpcode() != ISD::UINT_TO_FP)
14199     return SDValue();
14200   if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
14201       N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
14202     return SDValue();
14203   if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
14204     return SDValue();
14205 
14206   SDValue Ext1 = FirstInput.getOperand(0);
14207   SDValue Ext2 = N->getOperand(1).getOperand(0);
  if (Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
14210     return SDValue();
14211 
14212   ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
14213   ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
14214   if (!Ext1Op || !Ext2Op)
14215     return SDValue();
14216   if (Ext1.getOperand(0).getValueType() != MVT::v4i32 ||
14217       Ext1.getOperand(0) != Ext2.getOperand(0))
14218     return SDValue();
14219 
14220   int FirstElem = Ext1Op->getZExtValue();
14221   int SecondElem = Ext2Op->getZExtValue();
14222   int SubvecIdx;
14223   if (FirstElem == 0 && SecondElem == 1)
14224     SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
14225   else if (FirstElem == 2 && SecondElem == 3)
14226     SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
14227   else
14228     return SDValue();
14229 
14230   SDValue SrcVec = Ext1.getOperand(0);
14231   auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
14232     PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
14233   return DAG.getNode(NodeType, dl, MVT::v2f64,
14234                      SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
14235 }
14236 
14237 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
14238                                               DAGCombinerInfo &DCI) const {
14239   assert((N->getOpcode() == ISD::SINT_TO_FP ||
14240           N->getOpcode() == ISD::UINT_TO_FP) &&
14241          "Need an int -> FP conversion node here");
14242 
14243   if (useSoftFloat() || !Subtarget.has64BitSupport())
14244     return SDValue();
14245 
14246   SelectionDAG &DAG = DCI.DAG;
14247   SDLoc dl(N);
14248   SDValue Op(N, 0);
14249 
14250   // Don't handle ppc_fp128 here or conversions that are out-of-range capable
14251   // from the hardware.
14252   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
14253     return SDValue();
14254   if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
14255       Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
14256     return SDValue();
14257 
14258   SDValue FirstOperand(Op.getOperand(0));
14259   bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
14260     (FirstOperand.getValueType() == MVT::i8 ||
14261      FirstOperand.getValueType() == MVT::i16);
14262   if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
14263     bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
14264     bool DstDouble = Op.getValueType() == MVT::f64;
14265     unsigned ConvOp = Signed ?
14266       (DstDouble ? PPCISD::FCFID  : PPCISD::FCFIDS) :
14267       (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
14268     SDValue WidthConst =
14269       DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
14270                             dl, false);
14271     LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
14272     SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
14273     SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
14274                                          DAG.getVTList(MVT::f64, MVT::Other),
14275                                          Ops, MVT::i8, LDN->getMemOperand());
14276 
14277     // For signed conversion, we need to sign-extend the value in the VSR
14278     if (Signed) {
14279       SDValue ExtOps[] = { Ld, WidthConst };
14280       SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
14281       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
14282     } else
14283       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
14284   }
14285 
14286 
14287   // For i32 intermediate values, unfortunately, the conversion functions
14288   // leave the upper 32 bits of the value are undefined. Within the set of
14289   // scalar instructions, we have no method for zero- or sign-extending the
14290   // value. Thus, we cannot handle i32 intermediate values here.
14291   if (Op.getOperand(0).getValueType() == MVT::i32)
14292     return SDValue();
14293 
14294   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
14295          "UINT_TO_FP is supported only with FPCVT");
14296 
14297   // If we have FCFIDS, then use it when converting to single-precision.
14298   // Otherwise, convert to double-precision and then round.
14299   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
14300                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
14301                                                             : PPCISD::FCFIDS)
14302                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
14303                                                             : PPCISD::FCFID);
14304   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
14305                   ? MVT::f32
14306                   : MVT::f64;
14307 
  // If we're converting from a float to an int and back to a float again,
  // then we don't need the store/load pair at all.
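  // For example, (f64 (sint_to_fp (fp_to_sint f64:x))) becomes
  // (FCFID (FCTIDZ x)), keeping the value in floating-point registers
  // throughout.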
14310   if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
14311        Subtarget.hasFPCVT()) ||
14312       (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
14313     SDValue Src = Op.getOperand(0).getOperand(0);
14314     if (Src.getValueType() == MVT::f32) {
14315       Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
14316       DCI.AddToWorklist(Src.getNode());
14317     } else if (Src.getValueType() != MVT::f64) {
14318       // Make sure that we don't pick up a ppc_fp128 source value.
14319       return SDValue();
14320     }
14321 
14322     unsigned FCTOp =
14323       Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
14324                                                         PPCISD::FCTIDUZ;
14325 
14326     SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
14327     SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);
14328 
14329     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
14330       FP = DAG.getNode(ISD::FP_ROUND, dl,
14331                        MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
14332       DCI.AddToWorklist(FP.getNode());
14333     }
14334 
14335     return FP;
14336   }
14337 
14338   return SDValue();
14339 }
14340 
14341 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
14342 // builtins) into loads with swaps.
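// For example, (v4i32 (load x)) becomes
//   (v4i32 (bitcast (XXSWAPD (LXVD2X x))))
// on subtargets that need the swaps.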
14343 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
14344                                               DAGCombinerInfo &DCI) const {
14345   SelectionDAG &DAG = DCI.DAG;
14346   SDLoc dl(N);
14347   SDValue Chain;
14348   SDValue Base;
14349   MachineMemOperand *MMO;
14350 
14351   switch (N->getOpcode()) {
14352   default:
14353     llvm_unreachable("Unexpected opcode for little endian VSX load");
14354   case ISD::LOAD: {
14355     LoadSDNode *LD = cast<LoadSDNode>(N);
14356     Chain = LD->getChain();
14357     Base = LD->getBasePtr();
14358     MMO = LD->getMemOperand();
14359     // If the MMO suggests this isn't a load of a full vector, leave
14360     // things alone.  For a built-in, we have to make the change for
14361     // correctness, so if there is a size problem that will be a bug.
14362     if (MMO->getSize() < 16)
14363       return SDValue();
14364     break;
14365   }
14366   case ISD::INTRINSIC_W_CHAIN: {
14367     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
14368     Chain = Intrin->getChain();
14369     // Similarly to the store case below, Intrin->getBasePtr() doesn't get
14370     // us what we want. Get operand 2 instead.
14371     Base = Intrin->getOperand(2);
14372     MMO = Intrin->getMemOperand();
14373     break;
14374   }
14375   }
14376 
14377   MVT VecTy = N->getValueType(0).getSimpleVT();
14378 
  // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is
  // aligned and the type is a vector with elements up to 4 bytes.
14381   if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
14382       VecTy.getScalarSizeInBits() <= 32) {
14383     return SDValue();
14384   }
14385 
14386   SDValue LoadOps[] = { Chain, Base };
14387   SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
14388                                          DAG.getVTList(MVT::v2f64, MVT::Other),
14389                                          LoadOps, MVT::v2f64, MMO);
14390 
14391   DCI.AddToWorklist(Load.getNode());
14392   Chain = Load.getValue(1);
14393   SDValue Swap = DAG.getNode(
14394       PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load);
14395   DCI.AddToWorklist(Swap.getNode());
14396 
14397   // Add a bitcast if the resulting load type doesn't match v2f64.
14398   if (VecTy != MVT::v2f64) {
14399     SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap);
14400     DCI.AddToWorklist(N.getNode());
14401     // Package {bitcast value, swap's chain} to match Load's shape.
14402     return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other),
14403                        N, Swap.getValue(1));
14404   }
14405 
14406   return Swap;
14407 }
14408 
14409 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
14410 // builtins) into stores with swaps.
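// For example, (store v4i32:x, p) becomes
//   (STXVD2X (XXSWAPD (bitcast x)), p)
// on subtargets that need the swaps.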
14411 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
14412                                                DAGCombinerInfo &DCI) const {
14413   SelectionDAG &DAG = DCI.DAG;
14414   SDLoc dl(N);
14415   SDValue Chain;
14416   SDValue Base;
14417   unsigned SrcOpnd;
14418   MachineMemOperand *MMO;
14419 
14420   switch (N->getOpcode()) {
14421   default:
14422     llvm_unreachable("Unexpected opcode for little endian VSX store");
14423   case ISD::STORE: {
14424     StoreSDNode *ST = cast<StoreSDNode>(N);
14425     Chain = ST->getChain();
14426     Base = ST->getBasePtr();
14427     MMO = ST->getMemOperand();
14428     SrcOpnd = 1;
14429     // If the MMO suggests this isn't a store of a full vector, leave
14430     // things alone.  For a built-in, we have to make the change for
14431     // correctness, so if there is a size problem that will be a bug.
14432     if (MMO->getSize() < 16)
14433       return SDValue();
14434     break;
14435   }
14436   case ISD::INTRINSIC_VOID: {
14437     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
14438     Chain = Intrin->getChain();
14439     // Intrin->getBasePtr() oddly does not get what we want.
14440     Base = Intrin->getOperand(3);
14441     MMO = Intrin->getMemOperand();
14442     SrcOpnd = 2;
14443     break;
14444   }
14445   }
14446 
14447   SDValue Src = N->getOperand(SrcOpnd);
14448   MVT VecTy = Src.getValueType().getSimpleVT();
14449 
  // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the store is
  // aligned and the type is a vector with elements up to 4 bytes.
14452   if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
14453       VecTy.getScalarSizeInBits() <= 32) {
14454     return SDValue();
14455   }
14456 
14457   // All stores are done as v2f64 and possible bit cast.
14458   if (VecTy != MVT::v2f64) {
14459     Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
14460     DCI.AddToWorklist(Src.getNode());
14461   }
14462 
14463   SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
14464                              DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
14465   DCI.AddToWorklist(Swap.getNode());
14466   Chain = Swap.getValue(1);
14467   SDValue StoreOps[] = { Chain, Swap, Base };
14468   SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
14469                                           DAG.getVTList(MVT::Other),
14470                                           StoreOps, VecTy, MMO);
14471   DCI.AddToWorklist(Store.getNode());
14472   return Store;
14473 }
14474 
14475 // Handle DAG combine for STORE (FP_TO_INT F).
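// When the subtarget has P8 vector support, fold the conversion into the
// store, e.g. (store (fp_to_sint f64:x), p) becomes
// (ST_VSR_SCAL_INT (FP_TO_SINT_IN_VSR x), p), so the value never leaves the
// vector-scalar registers.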
14476 SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
14480   SDLoc dl(N);
14481   unsigned Opcode = N->getOperand(1).getOpcode();
14482 
14483   assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT)
14484          && "Not a FP_TO_INT Instruction!");
14485 
14486   SDValue Val = N->getOperand(1).getOperand(0);
14487   EVT Op1VT = N->getOperand(1).getValueType();
14488   EVT ResVT = Val.getValueType();
14489 
14490   // Floating point types smaller than 32 bits are not legal on Power.
14491   if (ResVT.getScalarSizeInBits() < 32)
14492     return SDValue();
14493 
14494   // Only perform combine for conversion to i64/i32 or power9 i16/i8.
14495   bool ValidTypeForStoreFltAsInt =
14496         (Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
14497          (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));
14498 
14499   if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Vector() ||
14500       cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
14501     return SDValue();
14502 
14503   // Extend f32 values to f64
14504   if (ResVT.getScalarSizeInBits() == 32) {
14505     Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
14506     DCI.AddToWorklist(Val.getNode());
14507   }
14508 
14509   // Set signed or unsigned conversion opcode.
14510   unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT) ?
14511                           PPCISD::FP_TO_SINT_IN_VSR :
14512                           PPCISD::FP_TO_UINT_IN_VSR;
14513 
14514   Val = DAG.getNode(ConvOpcode,
14515                     dl, ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val);
14516   DCI.AddToWorklist(Val.getNode());
14517 
14518   // Set number of bytes being converted.
14519   unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8;
14520   SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2),
14521                     DAG.getIntPtrConstant(ByteSize, dl, false),
14522                     DAG.getValueType(Op1VT) };
14523 
14524   Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl,
14525           DAG.getVTList(MVT::Other), Ops,
14526           cast<StoreSDNode>(N)->getMemoryVT(),
14527           cast<StoreSDNode>(N)->getMemOperand());
14528 
14529   DCI.AddToWorklist(Val.getNode());
14530   return Val;
14531 }
14532 
14533 static bool isAlternatingShuffMask(const ArrayRef<int> &Mask, int NumElts) {
14534   // Check that the source of the element keeps flipping
  // (i.e. Mask[i] < NumElts -> Mask[i+1] >= NumElts).
14536   bool PrevElemFromFirstVec = Mask[0] < NumElts;
14537   for (int i = 1, e = Mask.size(); i < e; i++) {
14538     if (PrevElemFromFirstVec && Mask[i] < NumElts)
14539       return false;
14540     if (!PrevElemFromFirstVec && Mask[i] >= NumElts)
14541       return false;
14542     PrevElemFromFirstVec = !PrevElemFromFirstVec;
14543   }
14544   return true;
14545 }
14546 
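// Check whether Op is a BUILD_VECTOR splat: every defined operand must match
// the first non-undef operand.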
14547 static bool isSplatBV(SDValue Op) {
14548   if (Op.getOpcode() != ISD::BUILD_VECTOR)
14549     return false;
14550   SDValue FirstOp;
14551 
14552   // Find first non-undef input.
14553   for (int i = 0, e = Op.getNumOperands(); i < e; i++) {
14554     FirstOp = Op.getOperand(i);
14555     if (!FirstOp.isUndef())
14556       break;
14557   }
14558 
14559   // All inputs are undef or the same as the first non-undef input.
14560   for (int i = 1, e = Op.getNumOperands(); i < e; i++)
14561     if (Op.getOperand(i) != FirstOp && !Op.getOperand(i).isUndef())
14562       return false;
14563   return true;
14564 }
14565 
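// Return the SCALAR_TO_VECTOR node if Op is one, looking through at most one
// bitcast; otherwise return an empty SDValue.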
14566 static SDValue isScalarToVec(SDValue Op) {
14567   if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
14568     return Op;
14569   if (Op.getOpcode() != ISD::BITCAST)
14570     return SDValue();
14571   Op = Op.getOperand(0);
14572   if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
14573     return Op;
14574   return SDValue();
14575 }
14576 
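// Adjust the shuffle mask for inputs that were changed to
// SCALAR_TO_VECTOR_PERMUTED: entries below LHSMaxIdx (LHS) or within
// [RHSMinIdx, RHSMaxIdx) (RHS) referred to element zero of the unpermuted
// vector, so shift them by HalfVec to point at the permuted position.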
14577 static void fixupShuffleMaskForPermutedSToV(SmallVectorImpl<int> &ShuffV,
14578                                             int LHSMaxIdx, int RHSMinIdx,
14579                                             int RHSMaxIdx, int HalfVec) {
14580   for (int i = 0, e = ShuffV.size(); i < e; i++) {
14581     int Idx = ShuffV[i];
14582     if ((Idx >= 0 && Idx < LHSMaxIdx) || (Idx >= RHSMinIdx && Idx < RHSMaxIdx))
14583       ShuffV[i] += HalfVec;
14584   }
14586 }
14587 
14588 // Replace a SCALAR_TO_VECTOR with a SCALAR_TO_VECTOR_PERMUTED except if
14589 // the original is:
14590 // (<n x Ty> (scalar_to_vector (Ty (extract_elt <n x Ty> %a, C))))
14591 // In such a case, just change the shuffle mask to extract the element
14592 // from the permuted index.
14593 static SDValue getSToVPermuted(SDValue OrigSToV, SelectionDAG &DAG) {
14594   SDLoc dl(OrigSToV);
14595   EVT VT = OrigSToV.getValueType();
14596   assert(OrigSToV.getOpcode() == ISD::SCALAR_TO_VECTOR &&
14597          "Expecting a SCALAR_TO_VECTOR here");
14598   SDValue Input = OrigSToV.getOperand(0);
14599 
14600   if (Input.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
14601     ConstantSDNode *Idx = dyn_cast<ConstantSDNode>(Input.getOperand(1));
14602     SDValue OrigVector = Input.getOperand(0);
14603 
14604     // Can't handle non-const element indices or different vector types
14605     // for the input to the extract and the output of the scalar_to_vector.
14606     if (Idx && VT == OrigVector.getValueType()) {
14607       SmallVector<int, 16> NewMask(VT.getVectorNumElements(), -1);
14608       NewMask[VT.getVectorNumElements() / 2] = Idx->getZExtValue();
14609       return DAG.getVectorShuffle(VT, dl, OrigVector, OrigVector, NewMask);
14610     }
14611   }
14612   return DAG.getNode(PPCISD::SCALAR_TO_VECTOR_PERMUTED, dl, VT,
14613                      OrigSToV.getOperand(0));
14614 }
14615 
14616 // On little endian subtargets, combine shuffles such as:
14617 // vector_shuffle<16,1,17,3,18,5,19,7,20,9,21,11,22,13,23,15>, <zero>, %b
14618 // into:
14619 // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7>, <zero>, %b
14620 // because the latter can be matched to a single instruction merge.
14621 // Furthermore, SCALAR_TO_VECTOR on little endian always involves a permute
14622 // to put the value into element zero. Adjust the shuffle mask so that the
14623 // vector can remain in permuted form (to prevent a swap prior to a shuffle).
14624 SDValue PPCTargetLowering::combineVectorShuffle(ShuffleVectorSDNode *SVN,
14625                                                 SelectionDAG &DAG) const {
14626   SDValue LHS = SVN->getOperand(0);
14627   SDValue RHS = SVN->getOperand(1);
14628   auto Mask = SVN->getMask();
14629   int NumElts = LHS.getValueType().getVectorNumElements();
14630   SDValue Res(SVN, 0);
14631   SDLoc dl(SVN);
14632 
14633   // None of these combines are useful on big endian systems since the ISA
14634   // already has a big endian bias.
14635   if (!Subtarget.isLittleEndian() || !Subtarget.hasVSX())
14636     return Res;
14637 
14638   // If this is not a shuffle of a shuffle and the first element comes from
14639   // the second vector, canonicalize to the commuted form. This will make it
14640   // more likely to match one of the single instruction patterns.
14641   if (Mask[0] >= NumElts && LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
14642       RHS.getOpcode() != ISD::VECTOR_SHUFFLE) {
14643     std::swap(LHS, RHS);
14644     Res = DAG.getCommutedVectorShuffle(*SVN);
14645     Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
14646   }
14647 
14648   // Adjust the shuffle mask if either input vector comes from a
14649   // SCALAR_TO_VECTOR and keep the respective input vector in permuted
14650   // form (to prevent the need for a swap).
14651   SmallVector<int, 16> ShuffV(Mask.begin(), Mask.end());
14652   SDValue SToVLHS = isScalarToVec(LHS);
14653   SDValue SToVRHS = isScalarToVec(RHS);
14654   if (SToVLHS || SToVRHS) {
14655     int NumEltsIn = SToVLHS ? SToVLHS.getValueType().getVectorNumElements()
14656                             : SToVRHS.getValueType().getVectorNumElements();
14657     int NumEltsOut = ShuffV.size();
14658 
14659     // Initially assume that neither input is permuted. These will be adjusted
14660     // accordingly if either input is.
14661     int LHSMaxIdx = -1;
14662     int RHSMinIdx = -1;
14663     int RHSMaxIdx = -1;
14664     int HalfVec = LHS.getValueType().getVectorNumElements() / 2;
14665 
14666     // Get the permuted scalar to vector nodes for the source(s) that come from
14667     // ISD::SCALAR_TO_VECTOR.
14668     if (SToVLHS) {
14669       // Set up the values for the shuffle vector fixup.
14670       LHSMaxIdx = NumEltsOut / NumEltsIn;
14671       SToVLHS = getSToVPermuted(SToVLHS, DAG);
14672       if (SToVLHS.getValueType() != LHS.getValueType())
14673         SToVLHS = DAG.getBitcast(LHS.getValueType(), SToVLHS);
14674       LHS = SToVLHS;
14675     }
14676     if (SToVRHS) {
14677       RHSMinIdx = NumEltsOut;
14678       RHSMaxIdx = NumEltsOut / NumEltsIn + RHSMinIdx;
14679       SToVRHS = getSToVPermuted(SToVRHS, DAG);
14680       if (SToVRHS.getValueType() != RHS.getValueType())
14681         SToVRHS = DAG.getBitcast(RHS.getValueType(), SToVRHS);
14682       RHS = SToVRHS;
14683     }
14684 
14685     // Fix up the shuffle mask to reflect where the desired element actually is.
14686     // The minimum and maximum indices that correspond to element zero for both
14687     // the LHS and RHS are computed and will control which shuffle mask entries
14688     // are to be changed. For example, if the RHS is permuted, any shuffle mask
14689     // entries in the range [RHSMinIdx,RHSMaxIdx) will be incremented by
14690     // HalfVec to refer to the corresponding element in the permuted vector.
14691     fixupShuffleMaskForPermutedSToV(ShuffV, LHSMaxIdx, RHSMinIdx, RHSMaxIdx,
14692                                     HalfVec);
14693     Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
14694 
14695     // We may have simplified away the shuffle. We won't be able to do anything
14696     // further with it here.
14697     if (!isa<ShuffleVectorSDNode>(Res))
14698       return Res;
14699     Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
14700   }
14701 
14702   // The common case after we commuted the shuffle is that the RHS is a splat
14703   // and we have elements coming in from the splat at indices that are not
14704   // conducive to using a merge.
14705   // Example:
14706   // vector_shuffle<0,17,1,19,2,21,3,23,4,25,5,27,6,29,7,31> t1, <zero>
14707   if (!isSplatBV(RHS))
14708     return Res;
14709 
14710   // We are looking for a mask such that all even elements are from
14711   // one vector and all odd elements from the other.
14712   if (!isAlternatingShuffMask(Mask, NumElts))
14713     return Res;
14714 
14715   // Adjust the mask so we are pulling in the same index from the splat
14716   // as the index from the interesting vector in consecutive elements.
14717   // Example (even elements from first vector):
14718   // vector_shuffle<0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23> t1, <zero>
14719   if (Mask[0] < NumElts)
14720     for (int i = 1, e = Mask.size(); i < e; i += 2)
14721       ShuffV[i] = (ShuffV[i - 1] + NumElts);
14722   // Example (odd elements from first vector):
14723   // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7> t1, <zero>
14724   else
14725     for (int i = 0, e = Mask.size(); i < e; i += 2)
14726       ShuffV[i] = (ShuffV[i + 1] + NumElts);
14727 
14728   // If the RHS has undefs, we need to remove them since we may have created
14729   // a shuffle that adds those instead of the splat value.
14730   SDValue SplatVal = cast<BuildVectorSDNode>(RHS.getNode())->getSplatValue();
14731   RHS = DAG.getSplatBuildVector(RHS.getValueType(), dl, SplatVal);
14732 
14733   Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
14734   return Res;
14735 }
14736 
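// If a shuffle merely reverses the elements of a normal vector load, or
// feeds a normal vector store in reversed order, replace the pair with a
// single byte-reversed memory operation (PPCISD::LOAD_VEC_BE or
// PPCISD::STORE_VEC_BE) on little endian Power9 subtargets.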
14737 SDValue PPCTargetLowering::combineVReverseMemOP(ShuffleVectorSDNode *SVN,
14738                                                 LSBaseSDNode *LSBase,
14739                                                 DAGCombinerInfo &DCI) const {
14740   assert((ISD::isNormalLoad(LSBase) || ISD::isNormalStore(LSBase)) &&
14741         "Not a reverse memop pattern!");
14742 
14743   auto IsElementReverse = [](const ShuffleVectorSDNode *SVN) -> bool {
14744     auto Mask = SVN->getMask();
14745     int i = 0;
14746     auto I = Mask.rbegin();
14747     auto E = Mask.rend();
14748 
14749     for (; I != E; ++I) {
14750       if (*I != i)
14751         return false;
14752       i++;
14753     }
14754     return true;
14755   };
14756 
14757   SelectionDAG &DAG = DCI.DAG;
14758   EVT VT = SVN->getValueType(0);
14759 
14760   if (!isTypeLegal(VT) || !Subtarget.isLittleEndian() || !Subtarget.hasVSX())
14761     return SDValue();
14762 
  // Before Power9, the PPCVSXSwapRemoval pass adjusts the element order (see
  // the comment in PPCVSXSwapRemoval.cpp). This combine conflicts with that
  // optimization, so we don't perform it in that case.
14766   if (!Subtarget.hasP9Vector())
14767     return SDValue();
14768 
  if (!IsElementReverse(SVN))
14770     return SDValue();
14771 
14772   if (LSBase->getOpcode() == ISD::LOAD) {
14773     SDLoc dl(SVN);
14774     SDValue LoadOps[] = {LSBase->getChain(), LSBase->getBasePtr()};
14775     return DAG.getMemIntrinsicNode(
14776         PPCISD::LOAD_VEC_BE, dl, DAG.getVTList(VT, MVT::Other), LoadOps,
14777         LSBase->getMemoryVT(), LSBase->getMemOperand());
14778   }
14779 
14780   if (LSBase->getOpcode() == ISD::STORE) {
14781     SDLoc dl(LSBase);
14782     SDValue StoreOps[] = {LSBase->getChain(), SVN->getOperand(0),
14783                           LSBase->getBasePtr()};
14784     return DAG.getMemIntrinsicNode(
14785         PPCISD::STORE_VEC_BE, dl, DAG.getVTList(MVT::Other), StoreOps,
14786         LSBase->getMemoryVT(), LSBase->getMemOperand());
14787   }
14788 
14789   llvm_unreachable("Expected a load or store node here");
14790 }
14791 
14792 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
14793                                              DAGCombinerInfo &DCI) const {
14794   SelectionDAG &DAG = DCI.DAG;
14795   SDLoc dl(N);
14796   switch (N->getOpcode()) {
14797   default: break;
14798   case ISD::ADD:
14799     return combineADD(N, DCI);
14800   case ISD::SHL:
14801     return combineSHL(N, DCI);
14802   case ISD::SRA:
14803     return combineSRA(N, DCI);
14804   case ISD::SRL:
14805     return combineSRL(N, DCI);
14806   case ISD::MUL:
14807     return combineMUL(N, DCI);
14808   case ISD::FMA:
14809   case PPCISD::FNMSUB:
14810     return combineFMALike(N, DCI);
14811   case PPCISD::SHL:
14812     if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
14813         return N->getOperand(0);
14814     break;
14815   case PPCISD::SRL:
14816     if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
14817         return N->getOperand(0);
14818     break;
14819   case PPCISD::SRA:
14820     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
14821       if (C->isNullValue() ||   //  0 >>s V -> 0.
14822           C->isAllOnesValue())    // -1 >>s V -> -1.
14823         return N->getOperand(0);
14824     }
14825     break;
14826   case ISD::SIGN_EXTEND:
14827   case ISD::ZERO_EXTEND:
14828   case ISD::ANY_EXTEND:
14829     return DAGCombineExtBoolTrunc(N, DCI);
14830   case ISD::TRUNCATE:
14831     return combineTRUNCATE(N, DCI);
14832   case ISD::SETCC:
14833     if (SDValue CSCC = combineSetCC(N, DCI))
14834       return CSCC;
14835     LLVM_FALLTHROUGH;
14836   case ISD::SELECT_CC:
14837     return DAGCombineTruncBoolExt(N, DCI);
14838   case ISD::SINT_TO_FP:
14839   case ISD::UINT_TO_FP:
14840     return combineFPToIntToFP(N, DCI);
14841   case ISD::VECTOR_SHUFFLE:
14842     if (ISD::isNormalLoad(N->getOperand(0).getNode())) {
14843       LSBaseSDNode* LSBase = cast<LSBaseSDNode>(N->getOperand(0));
14844       return combineVReverseMemOP(cast<ShuffleVectorSDNode>(N), LSBase, DCI);
14845     }
14846     return combineVectorShuffle(cast<ShuffleVectorSDNode>(N), DCI.DAG);
  case ISD::STORE: {
    EVT Op1VT = N->getOperand(1).getValueType();
14850     unsigned Opcode = N->getOperand(1).getOpcode();
14851 
14852     if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) {
      SDValue Val = combineStoreFPToInt(N, DCI);
14854       if (Val)
14855         return Val;
14856     }
14857 
14858     if (Opcode == ISD::VECTOR_SHUFFLE && ISD::isNormalStore(N)) {
14859       ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N->getOperand(1));
      SDValue Val = combineVReverseMemOP(SVN, cast<LSBaseSDNode>(N), DCI);
14861       if (Val)
14862         return Val;
14863     }
14864 
14865     // Turn STORE (BSWAP) -> sthbrx/stwbrx.
14866     if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&
14867         N->getOperand(1).getNode()->hasOneUse() &&
14868         (Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
14869          (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {
14870 
      // STBRX can only handle simple types and it makes no sense to store
      // fewer than two bytes in byte-reversed order.
14873       EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
14874       if (mVT.isExtended() || mVT.getSizeInBits() < 16)
14875         break;
14876 
14877       SDValue BSwapOp = N->getOperand(1).getOperand(0);
14878       // Do an any-extend to 32-bits if this is a half-word input.
14879       if (BSwapOp.getValueType() == MVT::i16)
14880         BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
14881 
      // If the type of the BSWAP operand is wider than the stored memory
      // width, it needs to be shifted right before the STBRX.
14884       if (Op1VT.bitsGT(mVT)) {
14885         int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
14886         BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
14887                               DAG.getConstant(Shift, dl, MVT::i32));
14888         // Need to truncate if this is a bswap of i64 stored as i32/i16.
14889         if (Op1VT == MVT::i64)
14890           BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
14891       }
14892 
14893       SDValue Ops[] = {
14894         N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
14895       };
14896       return
14897         DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
14898                                 Ops, cast<StoreSDNode>(N)->getMemoryVT(),
14899                                 cast<StoreSDNode>(N)->getMemOperand());
14900     }
14901 
    // STORE Constant:i32<0>  ->  STORE<trunc to i32> Constant:i64<0>
    // This increases the chance of CSE'ing the constant construction.
14904     if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
14905         isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
      // Sign-extend to 64 bits to handle negative values.
14907       EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
14908       uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
14909                                     MemVT.getSizeInBits());
14910       SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);
14911 
14912       // DAG.getTruncStore() can't be used here because it doesn't accept
14913       // the general (base + offset) addressing mode.
14914       // So we use UpdateNodeOperands and setTruncatingStore instead.
14915       DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
14916                              N->getOperand(3));
14917       cast<StoreSDNode>(N)->setTruncatingStore(true);
14918       return SDValue(N, 0);
14919     }
14920 
    // For little endian, VSX stores require generating xxswapd/stxvd2x.
14922     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
14923     if (Op1VT.isSimple()) {
14924       MVT StoreVT = Op1VT.getSimpleVT();
14925       if (Subtarget.needsSwapsForVSXMemOps() &&
14926           (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
14927            StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
14928         return expandVSXStoreForLE(N, DCI);
14929     }
14930     break;
14931   }
14932   case ISD::LOAD: {
14933     LoadSDNode *LD = cast<LoadSDNode>(N);
14934     EVT VT = LD->getValueType(0);
14935 
14936     // For little endian, VSX loads require generating lxvd2x/xxswapd.
14937     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
14938     if (VT.isSimple()) {
14939       MVT LoadVT = VT.getSimpleVT();
14940       if (Subtarget.needsSwapsForVSXMemOps() &&
14941           (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
14942            LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
14943         return expandVSXLoadForLE(N, DCI);
14944     }
14945 
14946     // We sometimes end up with a 64-bit integer load, from which we extract
14947     // two single-precision floating-point numbers. This happens with
14948     // std::complex<float>, and other similar structures, because of the way we
    // canonicalize structure copies. However, if we lack direct moves, then
    // the final bitcasts from the extracted integer values to the
    // floating-point numbers turn into store/load pairs. Even with direct
    // moves, just loading the two floating-point numbers is likely better.
14953     auto ReplaceTwoFloatLoad = [&]() {
14954       if (VT != MVT::i64)
14955         return false;
14956 
14957       if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
14958           LD->isVolatile())
14959         return false;
14960 
14961       //  We're looking for a sequence like this:
14962       //  t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64
14963       //      t16: i64 = srl t13, Constant:i32<32>
14964       //    t17: i32 = truncate t16
14965       //  t18: f32 = bitcast t17
14966       //    t19: i32 = truncate t13
14967       //  t20: f32 = bitcast t19
14968 
14969       if (!LD->hasNUsesOfValue(2, 0))
14970         return false;
14971 
14972       auto UI = LD->use_begin();
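      // Result number 0 is the loaded value; step over any uses of the chain
      // result (result number 1) to reach the two users of the value.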
14973       while (UI.getUse().getResNo() != 0) ++UI;
14974       SDNode *Trunc = *UI++;
14975       while (UI.getUse().getResNo() != 0) ++UI;
14976       SDNode *RightShift = *UI;
14977       if (Trunc->getOpcode() != ISD::TRUNCATE)
14978         std::swap(Trunc, RightShift);
14979 
14980       if (Trunc->getOpcode() != ISD::TRUNCATE ||
14981           Trunc->getValueType(0) != MVT::i32 ||
14982           !Trunc->hasOneUse())
14983         return false;
14984       if (RightShift->getOpcode() != ISD::SRL ||
14985           !isa<ConstantSDNode>(RightShift->getOperand(1)) ||
14986           RightShift->getConstantOperandVal(1) != 32 ||
14987           !RightShift->hasOneUse())
14988         return false;
14989 
14990       SDNode *Trunc2 = *RightShift->use_begin();
14991       if (Trunc2->getOpcode() != ISD::TRUNCATE ||
14992           Trunc2->getValueType(0) != MVT::i32 ||
14993           !Trunc2->hasOneUse())
14994         return false;
14995 
14996       SDNode *Bitcast = *Trunc->use_begin();
14997       SDNode *Bitcast2 = *Trunc2->use_begin();
14998 
14999       if (Bitcast->getOpcode() != ISD::BITCAST ||
15000           Bitcast->getValueType(0) != MVT::f32)
15001         return false;
15002       if (Bitcast2->getOpcode() != ISD::BITCAST ||
15003           Bitcast2->getValueType(0) != MVT::f32)
15004         return false;
15005 
15006       if (Subtarget.isLittleEndian())
15007         std::swap(Bitcast, Bitcast2);
15008 
15009       // Bitcast has the second float (in memory-layout order) and Bitcast2
15010       // has the first one.
15011 
15012       SDValue BasePtr = LD->getBasePtr();
15013       if (LD->isIndexed()) {
15014         assert(LD->getAddressingMode() == ISD::PRE_INC &&
15015                "Non-pre-inc AM on PPC?");
15016         BasePtr =
15017           DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
15018                       LD->getOffset());
15019       }
15020 
15021       auto MMOFlags =
15022           LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
15023       SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
15024                                       LD->getPointerInfo(), LD->getAlignment(),
15025                                       MMOFlags, LD->getAAInfo());
15026       SDValue AddPtr =
15027         DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
15028                     BasePtr, DAG.getIntPtrConstant(4, dl));
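      // The second float lives four bytes past the first in memory-layout
      // order; chain its load after the first via that load's chain result.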
15029       SDValue FloatLoad2 = DAG.getLoad(
15030           MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
15031           LD->getPointerInfo().getWithOffset(4),
15032           MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo());
15033 
15034       if (LD->isIndexed()) {
15035         // Note that DAGCombine should re-form any pre-increment load(s) from
15036         // what is produced here if that makes sense.
15037         DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr);
15038       }
15039 
15040       DCI.CombineTo(Bitcast2, FloatLoad);
15041       DCI.CombineTo(Bitcast, FloatLoad2);
15042 
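      // Forward the original load's chain result (value 1, or value 2 for a
      // pre-inc load, where value 1 is the updated pointer) to the chain of
      // the second new load.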
15043       DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 2 : 1),
15044                                     SDValue(FloatLoad2.getNode(), 1));
15045       return true;
15046     };
15047 
15048     if (ReplaceTwoFloatLoad())
15049       return SDValue(N, 0);
15050 
15051     EVT MemVT = LD->getMemoryVT();
15052     Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
15053     Align ABIAlignment = DAG.getDataLayout().getABITypeAlign(Ty);
15054     Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext());
15055     Align ScalarABIAlignment = DAG.getDataLayout().getABITypeAlign(STy);
15056     if (LD->isUnindexed() && VT.isVector() &&
15057         ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
15058           // P8 and later hardware should just use LOAD.
15059           !Subtarget.hasP8Vector() &&
15060           (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
15061            VT == MVT::v4f32)) ||
15062          (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) &&
15063           LD->getAlign() >= ScalarABIAlignment)) &&
15064         LD->getAlign() < ABIAlignment) {
15065       // This is a type-legal unaligned Altivec or QPX load.
15066       SDValue Chain = LD->getChain();
15067       SDValue Ptr = LD->getBasePtr();
15068       bool isLittleEndian = Subtarget.isLittleEndian();
15069 
15070       // This implements the loading of unaligned vectors as described in
15071       // the venerable Apple Velocity Engine overview. Specifically:
15072       // https://developer.apple.com/hardwaredrivers/ve/alignment.html
15073       // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
15074       //
15075       // The general idea is to expand a sequence of one or more unaligned
15076       // loads into an alignment-based permutation-control instruction (lvsl
15077       // or lvsr), a series of regular vector loads (which always truncate
15078       // their input address to an aligned address), and a series of
15079       // permutations.  The results of these permutations are the requested
15080       // loaded values.  The trick is that the last "extra" load is not taken
15081       // from the address you might suspect (sizeof(vector) bytes after the
15082       // last requested load), but rather sizeof(vector) - 1 bytes after the
15083       // last requested vector. The point of this is to avoid a page fault if
      // the base address happens to be aligned. This works because, when the
      // base address is aligned, adding less than a full vector length simply
      // causes the last vector in the sequence to be (re)loaded. Otherwise,
      // the next vector is fetched from the address you would expect.
15089 
15090       // We might be able to reuse the permutation generation from
15091       // a different base address offset from this one by an aligned amount.
15092       // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
15093       // optimization later.
15094       Intrinsic::ID Intr, IntrLD, IntrPerm;
15095       MVT PermCntlTy, PermTy, LDTy;
15096       if (Subtarget.hasAltivec()) {
15097         Intr = isLittleEndian ?  Intrinsic::ppc_altivec_lvsr :
15098                                  Intrinsic::ppc_altivec_lvsl;
15099         IntrLD = Intrinsic::ppc_altivec_lvx;
15100         IntrPerm = Intrinsic::ppc_altivec_vperm;
15101         PermCntlTy = MVT::v16i8;
15102         PermTy = MVT::v4i32;
15103         LDTy = MVT::v4i32;
15104       } else {
15105         Intr =   MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld :
15106                                        Intrinsic::ppc_qpx_qvlpcls;
15107         IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd :
15108                                        Intrinsic::ppc_qpx_qvlfs;
15109         IntrPerm = Intrinsic::ppc_qpx_qvfperm;
15110         PermCntlTy = MVT::v4f64;
15111         PermTy = MVT::v4f64;
15112         LDTy = MemVT.getSimpleVT();
15113       }
15114 
15115       SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);
15116 
15117       // Create the new MMO for the new base load. It is like the original MMO,
15118       // but represents an area in memory almost twice the vector size centered
15119       // on the original address. If the address is unaligned, we might start
15120       // reading up to (sizeof(vector)-1) bytes below the address of the
15121       // original unaligned load.
15122       MachineFunction &MF = DAG.getMachineFunction();
15123       MachineMemOperand *BaseMMO =
15124         MF.getMachineMemOperand(LD->getMemOperand(),
15125                                 -(long)MemVT.getStoreSize()+1,
15126                                 2*MemVT.getStoreSize()-1);
15127 
15128       // Create the new base load.
15129       SDValue LDXIntID =
15130           DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout()));
15131       SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
15132       SDValue BaseLoad =
15133         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
15134                                 DAG.getVTList(PermTy, MVT::Other),
15135                                 BaseLoadOps, LDTy, BaseMMO);
15136 
15137       // Note that the value of IncOffset (which is provided to the next
15138       // load's pointer info offset value, and thus used to calculate the
15139       // alignment), and the value of IncValue (which is actually used to
15140       // increment the pointer value) are different! This is because we
15141       // require the next load to appear to be aligned, even though it
15142       // is actually offset from the base pointer by a lesser amount.
15143       int IncOffset = VT.getSizeInBits() / 8;
15144       int IncValue = IncOffset;
15145 
15146       // Walk (both up and down) the chain looking for another load at the real
15147       // (aligned) offset (the alignment of the other load does not matter in
15148       // this case). If found, then do not use the offset reduction trick, as
15149       // that will prevent the loads from being later combined (as they would
15150       // otherwise be duplicates).
15151       if (!findConsecutiveLoad(LD, DAG))
15152         --IncValue;
15153 
15154       SDValue Increment =
15155           DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout()));
15156       Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
15157 
15158       MachineMemOperand *ExtraMMO =
15159         MF.getMachineMemOperand(LD->getMemOperand(),
15160                                 1, 2*MemVT.getStoreSize()-1);
15161       SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
15162       SDValue ExtraLoad =
15163         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
15164                                 DAG.getVTList(PermTy, MVT::Other),
15165                                 ExtraLoadOps, LDTy, ExtraMMO);
15166 
15167       SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
15168         BaseLoad.getValue(1), ExtraLoad.getValue(1));
15169 
15170       // Because vperm has a big-endian bias, we must reverse the order
15171       // of the input vectors and complement the permute control vector
15172       // when generating little endian code.  We have already handled the
15173       // latter by using lvsr instead of lvsl, so just reverse BaseLoad
15174       // and ExtraLoad here.
15175       SDValue Perm;
15176       if (isLittleEndian)
15177         Perm = BuildIntrinsicOp(IntrPerm,
15178                                 ExtraLoad, BaseLoad, PermCntl, DAG, dl);
15179       else
15180         Perm = BuildIntrinsicOp(IntrPerm,
15181                                 BaseLoad, ExtraLoad, PermCntl, DAG, dl);
15182 
      if (VT != PermTy)
        Perm = Subtarget.hasAltivec() ?
                 DAG.getNode(ISD::BITCAST, dl, VT, Perm) :
                 DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX
                             // The second argument is 1 because this rounding
                             // is always exact.
                             DAG.getTargetConstant(1, dl, MVT::i64));
15190 
15191       // The output of the permutation is our loaded result, the TokenFactor is
15192       // our new chain.
15193       DCI.CombineTo(N, Perm, TF);
15194       return SDValue(N, 0);
15195     }
15196     }
15197     break;
15198     case ISD::INTRINSIC_WO_CHAIN: {
    bool isLittleEndian = Subtarget.isLittleEndian();
    unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
                                         : Intrinsic::ppc_altivec_lvsl);
    if ((IID == Intr ||
         IID == Intrinsic::ppc_qpx_qvlpcld ||
         IID == Intrinsic::ppc_qpx_qvlpcls) &&
        N->getOperand(1)->getOpcode() == ISD::ADD) {
      SDValue Add = N->getOperand(1);

      int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ?
                 5 /* 32 byte alignment */ : 4 /* 16 byte alignment */;

      if (DAG.MaskedValueIsZero(Add->getOperand(1),
                                APInt::getAllOnesValue(Bits /* alignment */)
                                    .zext(Add.getScalarValueSizeInBits()))) {
        SDNode *BasePtr = Add->getOperand(0).getNode();
        for (SDNode::use_iterator UI = BasePtr->use_begin(),
                                  UE = BasePtr->use_end();
             UI != UE; ++UI) {
          if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
              cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) {
            // We've found another LVSL/LVSR, and this address differs from
            // its address by an aligned amount. The results will be the
            // same, so use the one we've just found instead.

            return SDValue(*UI, 0);
          }
        }
      }

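      // Likewise, if this address differs from another lvsl/lvsr use of the
      // same base pointer by a constant multiple of the alignment, the low
      // bits of the two addresses match and the permute control vectors are
      // identical, so we can reuse that node as well.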
      if (isa<ConstantSDNode>(Add->getOperand(1))) {
        SDNode *BasePtr = Add->getOperand(0).getNode();
        for (SDNode::use_iterator UI = BasePtr->use_begin(),
             UE = BasePtr->use_end(); UI != UE; ++UI) {
          if (UI->getOpcode() == ISD::ADD &&
              isa<ConstantSDNode>(UI->getOperand(1)) &&
              (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
               cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
              (1ULL << Bits) == 0) {
            SDNode *OtherAdd = *UI;
            for (SDNode::use_iterator VI = OtherAdd->use_begin(),
                 VE = OtherAdd->use_end(); VI != VE; ++VI) {
              if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
                  cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) {
                return SDValue(*VI, 0);
              }
            }
          }
        }
      }
    }

    // Combine vmaxsw/h/b(a, a's negation) into abs(a) to expose the
    // vabsduw/h/b opportunity downstream.
    if (!DCI.isAfterLegalizeDAG() && Subtarget.hasP9Altivec() &&
        (IID == Intrinsic::ppc_altivec_vmaxsw ||
         IID == Intrinsic::ppc_altivec_vmaxsh ||
         IID == Intrinsic::ppc_altivec_vmaxsb)) {
      SDValue V1 = N->getOperand(1);
      SDValue V2 = N->getOperand(2);
      if ((V1.getSimpleValueType() == MVT::v4i32 ||
           V1.getSimpleValueType() == MVT::v8i16 ||
           V1.getSimpleValueType() == MVT::v16i8) &&
          V1.getSimpleValueType() == V2.getSimpleValueType()) {
        // (0-a, a)
        if (V1.getOpcode() == ISD::SUB &&
            ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
            V1.getOperand(1) == V2) {
          return DAG.getNode(ISD::ABS, dl, V2.getValueType(), V2);
        }
        // (a, 0-a)
        if (V2.getOpcode() == ISD::SUB &&
            ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
            V2.getOperand(1) == V1) {
          return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
        }
        // (x-y, y-x)
        if (V1.getOpcode() == ISD::SUB && V2.getOpcode() == ISD::SUB &&
            V1.getOperand(0) == V2.getOperand(1) &&
            V1.getOperand(1) == V2.getOperand(0)) {
          return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
        }
      }
    }
    break;
  }
15287   case ISD::INTRINSIC_W_CHAIN:
15288     // For little endian, VSX loads require generating lxvd2x/xxswapd.
15289     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
15290     if (Subtarget.needsSwapsForVSXMemOps()) {
15291       switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
15292       default:
15293         break;
15294       case Intrinsic::ppc_vsx_lxvw4x:
15295       case Intrinsic::ppc_vsx_lxvd2x:
15296         return expandVSXLoadForLE(N, DCI);
15297       }
15298     }
15299     break;
15300   case ISD::INTRINSIC_VOID:
15301     // For little endian, VSX stores require generating xxswapd/stxvd2x.
15302     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
15303     if (Subtarget.needsSwapsForVSXMemOps()) {
15304       switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
15305       default:
15306         break;
15307       case Intrinsic::ppc_vsx_stxvw4x:
15308       case Intrinsic::ppc_vsx_stxvd2x:
15309         return expandVSXStoreForLE(N, DCI);
15310       }
15311     }
15312     break;
15313   case ISD::BSWAP:
15314     // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
15315     if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
15316         N->getOperand(0).hasOneUse() &&
15317         (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
15318          (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
15319           N->getValueType(0) == MVT::i64))) {
15320       SDValue Load = N->getOperand(0);
15321       LoadSDNode *LD = cast<LoadSDNode>(Load);
15322       // Create the byte-swapping load.
15323       SDValue Ops[] = {
15324         LD->getChain(),    // Chain
15325         LD->getBasePtr(),  // Ptr
15326         DAG.getValueType(N->getValueType(0)) // VT
15327       };
15328       SDValue BSLoad =
15329         DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
15330                                 DAG.getVTList(N->getValueType(0) == MVT::i64 ?
15331                                               MVT::i64 : MVT::i32, MVT::Other),
15332                                 Ops, LD->getMemoryVT(), LD->getMemOperand());
15333 
15334       // If this is an i16 load, insert the truncate.
15335       SDValue ResVal = BSLoad;
15336       if (N->getValueType(0) == MVT::i16)
15337         ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
15338 
15339       // First, combine the bswap away.  This makes the value produced by the
15340       // load dead.
15341       DCI.CombineTo(N, ResVal);
15342 
      // Next, combine the load away; we give it a bogus result value but a real
15344       // chain result.  The result value is dead because the bswap is dead.
15345       DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
15346 
15347       // Return N so it doesn't get rechecked!
15348       return SDValue(N, 0);
15349     }
15350     break;
15351   case PPCISD::VCMP:
15352     // If a VCMPo node already exists with exactly the same operands as this
15353     // node, use its result instead of this node (VCMPo computes both a CR6 and
15354     // a normal output).
15355     //
15356     if (!N->getOperand(0).hasOneUse() &&
15357         !N->getOperand(1).hasOneUse() &&
15358         !N->getOperand(2).hasOneUse()) {
15359 
15360       // Scan all of the users of the LHS, looking for VCMPo's that match.
15361       SDNode *VCMPoNode = nullptr;
15362 
15363       SDNode *LHSN = N->getOperand(0).getNode();
15364       for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
15365            UI != E; ++UI)
15366         if (UI->getOpcode() == PPCISD::VCMPo &&
15367             UI->getOperand(1) == N->getOperand(1) &&
15368             UI->getOperand(2) == N->getOperand(2) &&
15369             UI->getOperand(0) == N->getOperand(0)) {
15370           VCMPoNode = *UI;
15371           break;
15372         }
15373 
      // If there is no VCMPo node, or if the flag value is unused, don't
      // transform this.
15376       if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
15377         break;
15378 
15379       // Look at the (necessarily single) use of the flag value.  If it has a
15380       // chain, this transformation is more complex.  Note that multiple things
15381       // could use the value result, which we should ignore.
15382       SDNode *FlagUser = nullptr;
15383       for (SDNode::use_iterator UI = VCMPoNode->use_begin();
15384            FlagUser == nullptr; ++UI) {
15385         assert(UI != VCMPoNode->use_end() && "Didn't find user!");
15386         SDNode *User = *UI;
15387         for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
15388           if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {
15389             FlagUser = User;
15390             break;
15391           }
15392         }
15393       }
15394 
15395       // If the user is a MFOCRF instruction, we know this is safe.
15396       // Otherwise we give up for right now.
15397       if (FlagUser->getOpcode() == PPCISD::MFOCRF)
15398         return SDValue(VCMPoNode, 0);
15399     }
15400     break;
15401   case ISD::BRCOND: {
15402     SDValue Cond = N->getOperand(1);
15403     SDValue Target = N->getOperand(2);
15404 
15405     if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
15406         cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
15407           Intrinsic::loop_decrement) {
15408 
15409       // We now need to make the intrinsic dead (it cannot be instruction
15410       // selected).
15411       DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0));
15412       assert(Cond.getNode()->hasOneUse() &&
15413              "Counter decrement has more than one use");
15414 
15415       return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other,
15416                          N->getOperand(0), Target);
15417     }
15418   }
15419   break;
15420   case ISD::BR_CC: {
15421     // If this is a branch on an altivec predicate comparison, lower this so
15422     // that we don't have to do a MFOCRF: instead, branch directly on CR6.  This
15423     // lowering is done pre-legalize, because the legalizer lowers the predicate
15424     // compare down to code that is difficult to reassemble.
15425     ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
15426     SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);
15427 
    // Sometimes the promoted value of the intrinsic is ANDed by some non-zero
    // value. If so, look through the AND to get to the intrinsic.
15430     if (LHS.getOpcode() == ISD::AND &&
15431         LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
15432         cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() ==
15433           Intrinsic::loop_decrement &&
15434         isa<ConstantSDNode>(LHS.getOperand(1)) &&
15435         !isNullConstant(LHS.getOperand(1)))
15436       LHS = LHS.getOperand(0);
15437 
15438     if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
15439         cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
15440           Intrinsic::loop_decrement &&
15441         isa<ConstantSDNode>(RHS)) {
15442       assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
15443              "Counter decrement comparison is not EQ or NE");
15444 
15445       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
15446       bool isBDNZ = (CC == ISD::SETEQ && Val) ||
15447                     (CC == ISD::SETNE && !Val);
15448 
15449       // We now need to make the intrinsic dead (it cannot be instruction
15450       // selected).
15451       DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
15452       assert(LHS.getNode()->hasOneUse() &&
15453              "Counter decrement has more than one use");
15454 
15455       return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
15456                          N->getOperand(0), N->getOperand(4));
15457     }
15458 
15459     int CompareOpc;
15460     bool isDot;
15461 
15462     if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
15463         isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
15464         getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
15465       assert(isDot && "Can't compare against a vector result!");
15466 
15467       // If this is a comparison against something other than 0/1, then we know
15468       // that the condition is never/always true.
15469       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
15470       if (Val != 0 && Val != 1) {
15471         if (CC == ISD::SETEQ)      // Cond never true, remove branch.
15472           return N->getOperand(0);
15473         // Always !=, turn it into an unconditional branch.
15474         return DAG.getNode(ISD::BR, dl, MVT::Other,
15475                            N->getOperand(0), N->getOperand(4));
15476       }
15477 
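      // Branch when the predicate bit matches the comparison: for example,
      // both "pred == 1" and "pred != 0" branch when the predicate is true.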
15478       bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
15479 
15480       // Create the PPCISD altivec 'dot' comparison node.
15481       SDValue Ops[] = {
15482         LHS.getOperand(2),  // LHS of compare
15483         LHS.getOperand(3),  // RHS of compare
15484         DAG.getConstant(CompareOpc, dl, MVT::i32)
15485       };
15486       EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
15487       SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);
15488 
15489       // Unpack the result based on how the target uses it.
15490       PPC::Predicate CompOpc;
15491       switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
15492       default:  // Can't happen, don't crash on invalid number though.
15493       case 0:   // Branch on the value of the EQ bit of CR6.
15494         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
15495         break;
15496       case 1:   // Branch on the inverted value of the EQ bit of CR6.
15497         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
15498         break;
15499       case 2:   // Branch on the value of the LT bit of CR6.
15500         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
15501         break;
15502       case 3:   // Branch on the inverted value of the LT bit of CR6.
15503         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
15504         break;
15505       }
15506 
15507       return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
15508                          DAG.getConstant(CompOpc, dl, MVT::i32),
15509                          DAG.getRegister(PPC::CR6, MVT::i32),
15510                          N->getOperand(4), CompNode.getValue(1));
15511     }
15512     break;
15513   }
15514   case ISD::BUILD_VECTOR:
15515     return DAGCombineBuildVector(N, DCI);
15516   case ISD::ABS:
15517     return combineABS(N, DCI);
15518   case ISD::VSELECT:
15519     return combineVSelect(N, DCI);
15520   }
15521 
15522   return SDValue();
15523 }
15524 
15525 SDValue
15526 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
15527                                  SelectionDAG &DAG,
15528                                  SmallVectorImpl<SDNode *> &Created) const {
15529   // fold (sdiv X, pow2)
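  // A sketch of the expected lowering for a positive power-of-2 divisor,
  // e.g. X/8 with X in r3 on a 32-bit target:
  //   srawi r3, r3, 3   ; sets CA if X is negative and 1-bits were shifted out
  //   addze r3, r3      ; add CA back in, rounding the quotient toward zero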
15530   EVT VT = N->getValueType(0);
15531   if (VT == MVT::i64 && !Subtarget.isPPC64())
15532     return SDValue();
15533   if ((VT != MVT::i32 && VT != MVT::i64) ||
15534       !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
15535     return SDValue();
15536 
15537   SDLoc DL(N);
15538   SDValue N0 = N->getOperand(0);
15539 
15540   bool IsNegPow2 = (-Divisor).isPowerOf2();
15541   unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
15542   SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);
15543 
15544   SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
15545   Created.push_back(Op.getNode());
15546 
15547   if (IsNegPow2) {
15548     Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
15549     Created.push_back(Op.getNode());
15550   }
15551 
15552   return Op;
15553 }
15554 
15555 //===----------------------------------------------------------------------===//
15556 // Inline Assembly Support
15557 //===----------------------------------------------------------------------===//
15558 
15559 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
15560                                                       KnownBits &Known,
15561                                                       const APInt &DemandedElts,
15562                                                       const SelectionDAG &DAG,
15563                                                       unsigned Depth) const {
15564   Known.resetAll();
15565   switch (Op.getOpcode()) {
15566   default: break;
15567   case PPCISD::LBRX: {
15568     // lhbrx is known to have the top bits cleared out.
15569     if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
15570       Known.Zero = 0xFFFF0000;
15571     break;
15572   }
15573   case ISD::INTRINSIC_WO_CHAIN: {
15574     switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
15575     default: break;
15576     case Intrinsic::ppc_altivec_vcmpbfp_p:
15577     case Intrinsic::ppc_altivec_vcmpeqfp_p:
15578     case Intrinsic::ppc_altivec_vcmpequb_p:
15579     case Intrinsic::ppc_altivec_vcmpequh_p:
15580     case Intrinsic::ppc_altivec_vcmpequw_p:
15581     case Intrinsic::ppc_altivec_vcmpequd_p:
15582     case Intrinsic::ppc_altivec_vcmpgefp_p:
15583     case Intrinsic::ppc_altivec_vcmpgtfp_p:
15584     case Intrinsic::ppc_altivec_vcmpgtsb_p:
15585     case Intrinsic::ppc_altivec_vcmpgtsh_p:
15586     case Intrinsic::ppc_altivec_vcmpgtsw_p:
15587     case Intrinsic::ppc_altivec_vcmpgtsd_p:
15588     case Intrinsic::ppc_altivec_vcmpgtub_p:
15589     case Intrinsic::ppc_altivec_vcmpgtuh_p:
15590     case Intrinsic::ppc_altivec_vcmpgtuw_p:
15591     case Intrinsic::ppc_altivec_vcmpgtud_p:
15592       Known.Zero = ~1U;  // All bits but the low one are known to be zero.
15593       break;
15594     }
15595   }
15596   }
15597 }
15598 
15599 Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
15600   switch (Subtarget.getCPUDirective()) {
15601   default: break;
15602   case PPC::DIR_970:
15603   case PPC::DIR_PWR4:
15604   case PPC::DIR_PWR5:
15605   case PPC::DIR_PWR5X:
15606   case PPC::DIR_PWR6:
15607   case PPC::DIR_PWR6X:
15608   case PPC::DIR_PWR7:
15609   case PPC::DIR_PWR8:
15610   case PPC::DIR_PWR9:
15611   case PPC::DIR_PWR10:
15612   case PPC::DIR_PWR_FUTURE: {
15613     if (!ML)
15614       break;
15615 
15616     if (!DisableInnermostLoopAlign32) {
      // If the nested loop is an innermost loop, prefer a 32-byte alignment,
15618       // so that we can decrease cache misses and branch-prediction misses.
15619       // Actual alignment of the loop will depend on the hotness check and other
15620       // logic in alignBlocks.
15621       if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
15622         return Align(32);
15623     }
15624 
15625     const PPCInstrInfo *TII = Subtarget.getInstrInfo();
15626 
15627     // For small loops (between 5 and 8 instructions), align to a 32-byte
15628     // boundary so that the entire loop fits in one instruction-cache line.
15629     uint64_t LoopSize = 0;
15630     for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
15631       for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
15632         LoopSize += TII->getInstSizeInBytes(*J);
15633         if (LoopSize > 32)
15634           break;
15635       }
15636 
15637     if (LoopSize > 16 && LoopSize <= 32)
15638       return Align(32);
15639 
15640     break;
15641   }
15642   }
15643 
15644   return TargetLowering::getPrefLoopAlignment(ML);
15645 }
15646 
15647 /// getConstraintType - Given a constraint, return the type of
15648 /// constraint it is for this target.
15649 PPCTargetLowering::ConstraintType
15650 PPCTargetLowering::getConstraintType(StringRef Constraint) const {
15651   if (Constraint.size() == 1) {
15652     switch (Constraint[0]) {
15653     default: break;
15654     case 'b':
15655     case 'r':
15656     case 'f':
15657     case 'd':
15658     case 'v':
15659     case 'y':
15660       return C_RegisterClass;
15661     case 'Z':
15662       // FIXME: While Z does indicate a memory constraint, it specifically
15663       // indicates an r+r address (used in conjunction with the 'y' modifier
15664       // in the replacement string). Currently, we're forcing the base
15665       // register to be r0 in the asm printer (which is interpreted as zero)
15666       // and forming the complete address in the second register. This is
15667       // suboptimal.
15668       return C_Memory;
15669     }
15670   } else if (Constraint == "wc") { // individual CR bits.
15671     return C_RegisterClass;
15672   } else if (Constraint == "wa" || Constraint == "wd" ||
15673              Constraint == "wf" || Constraint == "ws" ||
15674              Constraint == "wi" || Constraint == "ww") {
15675     return C_RegisterClass; // VSX registers.
15676   }
15677   return TargetLowering::getConstraintType(Constraint);
15678 }
15679 
15680 /// Examine constraint type and operand type and determine a weight value.
15681 /// This object must already have been set up with the operand type
15682 /// and the current alternative constraint selected.
15683 TargetLowering::ConstraintWeight
15684 PPCTargetLowering::getSingleConstraintMatchWeight(
15685     AsmOperandInfo &info, const char *constraint) const {
15686   ConstraintWeight weight = CW_Invalid;
15687   Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
15690   if (!CallOperandVal)
15691     return CW_Default;
15692   Type *type = CallOperandVal->getType();
15693 
15694   // Look at the constraint type.
15695   if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
15696     return CW_Register; // an individual CR bit.
15697   else if ((StringRef(constraint) == "wa" ||
15698             StringRef(constraint) == "wd" ||
15699             StringRef(constraint) == "wf") &&
15700            type->isVectorTy())
15701     return CW_Register;
15702   else if (StringRef(constraint) == "wi" && type->isIntegerTy(64))
    return CW_Register; // just holds 64-bit integer data.
15704   else if (StringRef(constraint) == "ws" && type->isDoubleTy())
15705     return CW_Register;
15706   else if (StringRef(constraint) == "ww" && type->isFloatTy())
15707     return CW_Register;
15708 
15709   switch (*constraint) {
15710   default:
15711     weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
15712     break;
15713   case 'b':
15714     if (type->isIntegerTy())
15715       weight = CW_Register;
15716     break;
15717   case 'f':
15718     if (type->isFloatTy())
15719       weight = CW_Register;
15720     break;
15721   case 'd':
15722     if (type->isDoubleTy())
15723       weight = CW_Register;
15724     break;
15725   case 'v':
15726     if (type->isVectorTy())
15727       weight = CW_Register;
15728     break;
15729   case 'y':
15730     weight = CW_Register;
15731     break;
15732   case 'Z':
15733     weight = CW_Memory;
15734     break;
15735   }
15736   return weight;
15737 }
15738 
15739 std::pair<unsigned, const TargetRegisterClass *>
15740 PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
15741                                                 StringRef Constraint,
15742                                                 MVT VT) const {
15743   if (Constraint.size() == 1) {
15744     // GCC RS6000 Constraint Letters
15745     switch (Constraint[0]) {
15746     case 'b':   // R1-R31
15747       if (VT == MVT::i64 && Subtarget.isPPC64())
15748         return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
15749       return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
15750     case 'r':   // R0-R31
15751       if (VT == MVT::i64 && Subtarget.isPPC64())
15752         return std::make_pair(0U, &PPC::G8RCRegClass);
15753       return std::make_pair(0U, &PPC::GPRCRegClass);
15754     // 'd' and 'f' constraints are both defined to be "the floating point
15755     // registers", where one is for 32-bit and the other for 64-bit. We don't
15756     // really care overly much here so just give them all the same reg classes.
15757     case 'd':
15758     case 'f':
15759       if (Subtarget.hasSPE()) {
15760         if (VT == MVT::f32 || VT == MVT::i32)
15761           return std::make_pair(0U, &PPC::GPRCRegClass);
15762         if (VT == MVT::f64 || VT == MVT::i64)
15763           return std::make_pair(0U, &PPC::SPERCRegClass);
15764       } else {
15765         if (VT == MVT::f32 || VT == MVT::i32)
15766           return std::make_pair(0U, &PPC::F4RCRegClass);
15767         if (VT == MVT::f64 || VT == MVT::i64)
15768           return std::make_pair(0U, &PPC::F8RCRegClass);
15769         if (VT == MVT::v4f64 && Subtarget.hasQPX())
15770           return std::make_pair(0U, &PPC::QFRCRegClass);
15771         if (VT == MVT::v4f32 && Subtarget.hasQPX())
15772           return std::make_pair(0U, &PPC::QSRCRegClass);
15773       }
15774       break;
15775     case 'v':
15776       if (VT == MVT::v4f64 && Subtarget.hasQPX())
15777         return std::make_pair(0U, &PPC::QFRCRegClass);
15778       if (VT == MVT::v4f32 && Subtarget.hasQPX())
15779         return std::make_pair(0U, &PPC::QSRCRegClass);
15780       if (Subtarget.hasAltivec())
15781         return std::make_pair(0U, &PPC::VRRCRegClass);
15782       break;
15783     case 'y':   // crrc
15784       return std::make_pair(0U, &PPC::CRRCRegClass);
15785     }
15786   } else if (Constraint == "wc" && Subtarget.useCRBits()) {
15787     // An individual CR bit.
15788     return std::make_pair(0U, &PPC::CRBITRCRegClass);
  } else if ((Constraint == "wa" || Constraint == "wd" ||
              Constraint == "wf" || Constraint == "wi") &&
15791              Subtarget.hasVSX()) {
15792     return std::make_pair(0U, &PPC::VSRCRegClass);
15793   } else if ((Constraint == "ws" || Constraint == "ww") && Subtarget.hasVSX()) {
15794     if (VT == MVT::f32 && Subtarget.hasP8Vector())
15795       return std::make_pair(0U, &PPC::VSSRCRegClass);
15796     else
15797       return std::make_pair(0U, &PPC::VSFRCRegClass);
15798   }
15799 
15800   // If we name a VSX register, we can't defer to the base class because it
15801   // will not recognize the correct register (their names will be VSL{0-31}
15802   // and V{0-31} so they won't match). So we match them here.
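  // For example, the constraint "{vs35}" yields VSNum == 35 and maps to V3,
  // since vs32-vs63 overlap the Altivec V0-V31 registers.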
15803   if (Constraint.size() > 3 && Constraint[1] == 'v' && Constraint[2] == 's') {
15804     int VSNum = atoi(Constraint.data() + 3);
15805     assert(VSNum >= 0 && VSNum <= 63 &&
15806            "Attempted to access a vsr out of range");
15807     if (VSNum < 32)
15808       return std::make_pair(PPC::VSL0 + VSNum, &PPC::VSRCRegClass);
15809     return std::make_pair(PPC::V0 + VSNum - 32, &PPC::VSRCRegClass);
15810   }
15811   std::pair<unsigned, const TargetRegisterClass *> R =
15812       TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
15813 
15814   // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
15815   // (which we call X[0-9]+). If a 64-bit value has been requested, and a
15816   // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent
15817   // register.
15818   // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
15819   // the AsmName field from *RegisterInfo.td, then this would not be necessary.
15820   if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
15821       PPC::GPRCRegClass.contains(R.first))
15822     return std::make_pair(TRI->getMatchingSuperReg(R.first,
15823                             PPC::sub_32, &PPC::G8RCRegClass),
15824                           &PPC::G8RCRegClass);
15825 
15826   // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
15827   if (!R.second && StringRef("{cc}").equals_lower(Constraint)) {
15828     R.first = PPC::CR0;
15829     R.second = &PPC::CRRCRegClass;
15830   }
15831 
15832   return R;
15833 }
15834 
15835 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
15836 /// vector.  If it is invalid, don't add anything to Ops.
15837 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
15838                                                      std::string &Constraint,
15839                                                      std::vector<SDValue>&Ops,
15840                                                      SelectionDAG &DAG) const {
15841   SDValue Result;
15842 
15843   // Only support length 1 constraints.
15844   if (Constraint.length() > 1) return;
15845 
15846   char Letter = Constraint[0];
15847   switch (Letter) {
15848   default: break;
15849   case 'I':
15850   case 'J':
15851   case 'K':
15852   case 'L':
15853   case 'M':
15854   case 'N':
15855   case 'O':
15856   case 'P': {
15857     ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
15858     if (!CST) return; // Must be an immediate to match.
15859     SDLoc dl(Op);
15860     int64_t Value = CST->getSExtValue();
15861     EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
15862                          // numbers are printed as such.
15863     switch (Letter) {
15864     default: llvm_unreachable("Unknown constraint letter!");
15865     case 'I':  // "I" is a signed 16-bit constant.
15866       if (isInt<16>(Value))
15867         Result = DAG.getTargetConstant(Value, dl, TCVT);
15868       break;
15869     case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
15870       if (isShiftedUInt<16, 16>(Value))
15871         Result = DAG.getTargetConstant(Value, dl, TCVT);
15872       break;
15873     case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
15874       if (isShiftedInt<16, 16>(Value))
15875         Result = DAG.getTargetConstant(Value, dl, TCVT);
15876       break;
15877     case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
15878       if (isUInt<16>(Value))
15879         Result = DAG.getTargetConstant(Value, dl, TCVT);
15880       break;
15881     case 'M':  // "M" is a constant that is greater than 31.
15882       if (Value > 31)
15883         Result = DAG.getTargetConstant(Value, dl, TCVT);
15884       break;
15885     case 'N':  // "N" is a positive constant that is an exact power of two.
15886       if (Value > 0 && isPowerOf2_64(Value))
15887         Result = DAG.getTargetConstant(Value, dl, TCVT);
15888       break;
15889     case 'O':  // "O" is the constant zero.
15890       if (Value == 0)
15891         Result = DAG.getTargetConstant(Value, dl, TCVT);
15892       break;
15893     case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
15894       if (isInt<16>(-Value))
15895         Result = DAG.getTargetConstant(Value, dl, TCVT);
15896       break;
15897     }
15898     break;
15899   }
15900   }
15901 
15902   if (Result.getNode()) {
15903     Ops.push_back(Result);
15904     return;
15905   }
15906 
15907   // Handle standard constraint letters.
15908   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
15909 }
15910 
15911 // isLegalAddressingMode - Return true if the addressing mode represented
15912 // by AM is legal for this target, for a load/store of the specified type.
15913 bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
15914                                               const AddrMode &AM, Type *Ty,
15915                                               unsigned AS, Instruction *I) const {
15916   // PPC does not allow r+i addressing modes for vectors!
15917   if (Ty->isVectorTy() && AM.BaseOffs != 0)
15918     return false;
15919 
15920   // PPC allows a sign-extended 16-bit immediate field.
15921   if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
15922     return false;
15923 
15924   // No global is ever allowed as a base.
15925   if (AM.BaseGV)
15926     return false;
15927 
  // PPC only supports r+r addressing; no scaled indexing.
15929   switch (AM.Scale) {
15930   case 0:  // "r+i" or just "i", depending on HasBaseReg.
15931     break;
15932   case 1:
15933     if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
15934       return false;
15935     // Otherwise we have r+r or r+i.
15936     break;
15937   case 2:
15938     if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r  or  2*r+i is not allowed.
15939       return false;
15940     // Allow 2*r as r+r.
15941     break;
15942   default:
15943     // No other scales are supported.
15944     return false;
15945   }
15946 
15947   return true;
15948 }
15949 
15950 SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
15951                                            SelectionDAG &DAG) const {
15952   MachineFunction &MF = DAG.getMachineFunction();
15953   MachineFrameInfo &MFI = MF.getFrameInfo();
15954   MFI.setReturnAddressIsTaken(true);
15955 
15956   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
15957     return SDValue();
15958 
15959   SDLoc dl(Op);
15960   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
15961 
15962   // Make sure the function does not optimize away the store of the RA to
15963   // the stack.
15964   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
15965   FuncInfo->setLRStoreRequired();
15966   bool isPPC64 = Subtarget.isPPC64();
15967   auto PtrVT = getPointerTy(MF.getDataLayout());
15968 
15969   if (Depth > 0) {
15970     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
15971     SDValue Offset =
15972         DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
15973                         isPPC64 ? MVT::i64 : MVT::i32);
15974     return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
15975                        DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
15976                        MachinePointerInfo());
15977   }
15978 
15979   // Just load the return address off the stack.
15980   SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
15981   return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
15982                      MachinePointerInfo());
15983 }
15984 
15985 SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
15986                                           SelectionDAG &DAG) const {
15987   SDLoc dl(Op);
15988   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
15989 
15990   MachineFunction &MF = DAG.getMachineFunction();
15991   MachineFrameInfo &MFI = MF.getFrameInfo();
15992   MFI.setFrameAddressIsTaken(true);
15993 
15994   EVT PtrVT = getPointerTy(MF.getDataLayout());
15995   bool isPPC64 = PtrVT == MVT::i64;
15996 
15997   // Naked functions never have a frame pointer, and so we use r1. For all
15998   // other functions, this decision must be delayed until during PEI.
15999   unsigned FrameReg;
16000   if (MF.getFunction().hasFnAttribute(Attribute::Naked))
16001     FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
16002   else
16003     FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;
16004 
16005   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
16006                                          PtrVT);
16007   while (Depth--)
16008     FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
16009                             FrameAddr, MachinePointerInfo());
16010   return FrameAddr;
16011 }
16012 
16013 // FIXME? Maybe this could be a TableGen attribute on some registers and
16014 // this table could be generated automatically from RegInfo.
16015 Register PPCTargetLowering::getRegisterByName(const char* RegName, LLT VT,
16016                                               const MachineFunction &MF) const {
16017   bool isPPC64 = Subtarget.isPPC64();
16018 
16019   bool is64Bit = isPPC64 && VT == LLT::scalar(64);
16020   if (!is64Bit && VT != LLT::scalar(32))
16021     report_fatal_error("Invalid register global variable type");
16022 
16023   Register Reg = StringSwitch<Register>(RegName)
16024                      .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
16025                      .Case("r2", isPPC64 ? Register() : PPC::R2)
16026                      .Case("r13", (is64Bit ? PPC::X13 : PPC::R13))
16027                      .Default(Register());
16028 
16029   if (Reg)
16030     return Reg;
16031   report_fatal_error("Invalid register name global variable");
16032 }
16033 
16034 bool PPCTargetLowering::isAccessedAsGotIndirect(SDValue GA) const {
  // The 32-bit SVR4 ABI accesses everything as got-indirect.
16036   if (Subtarget.is32BitELFABI())
16037     return true;
16038 
16039   // AIX accesses everything indirectly through the TOC, which is similar to
16040   // the GOT.
16041   if (Subtarget.isAIXABI())
16042     return true;
16043 
16044   CodeModel::Model CModel = getTargetMachine().getCodeModel();
16045   // If it is small or large code model, module locals are accessed
16046   // indirectly by loading their address from .toc/.got.
16047   if (CModel == CodeModel::Small || CModel == CodeModel::Large)
16048     return true;
16049 
16050   // JumpTable and BlockAddress are accessed as got-indirect.
16051   if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA))
16052     return true;
16053 
16054   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA))
16055     return Subtarget.isGVIndirectSymbol(G->getGlobal());
16056 
16057   return false;
16058 }
16059 
16060 bool
16061 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
16062   // The PowerPC target isn't yet aware of offsets.
16063   return false;
16064 }
16065 
16066 bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
16067                                            const CallInst &I,
16068                                            MachineFunction &MF,
16069                                            unsigned Intrinsic) const {
16070   switch (Intrinsic) {
16071   case Intrinsic::ppc_qpx_qvlfd:
16072   case Intrinsic::ppc_qpx_qvlfs:
16073   case Intrinsic::ppc_qpx_qvlfcd:
16074   case Intrinsic::ppc_qpx_qvlfcs:
16075   case Intrinsic::ppc_qpx_qvlfiwa:
16076   case Intrinsic::ppc_qpx_qvlfiwz:
16077   case Intrinsic::ppc_altivec_lvx:
16078   case Intrinsic::ppc_altivec_lvxl:
16079   case Intrinsic::ppc_altivec_lvebx:
16080   case Intrinsic::ppc_altivec_lvehx:
16081   case Intrinsic::ppc_altivec_lvewx:
16082   case Intrinsic::ppc_vsx_lxvd2x:
16083   case Intrinsic::ppc_vsx_lxvw4x: {
16084     EVT VT;
16085     switch (Intrinsic) {
16086     case Intrinsic::ppc_altivec_lvebx:
16087       VT = MVT::i8;
16088       break;
16089     case Intrinsic::ppc_altivec_lvehx:
16090       VT = MVT::i16;
16091       break;
16092     case Intrinsic::ppc_altivec_lvewx:
16093       VT = MVT::i32;
16094       break;
16095     case Intrinsic::ppc_vsx_lxvd2x:
16096       VT = MVT::v2f64;
16097       break;
16098     case Intrinsic::ppc_qpx_qvlfd:
16099       VT = MVT::v4f64;
16100       break;
16101     case Intrinsic::ppc_qpx_qvlfs:
16102       VT = MVT::v4f32;
16103       break;
16104     case Intrinsic::ppc_qpx_qvlfcd:
16105       VT = MVT::v2f64;
16106       break;
16107     case Intrinsic::ppc_qpx_qvlfcs:
16108       VT = MVT::v2f32;
16109       break;
16110     default:
16111       VT = MVT::v4i32;
16112       break;
16113     }
16114 
16115     Info.opc = ISD::INTRINSIC_W_CHAIN;
16116     Info.memVT = VT;
16117     Info.ptrVal = I.getArgOperand(0);
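    // Some of these intrinsics (lvx and friends) ignore the low-order address
    // bits, so conservatively describe an access that may begin up to
    // (store-size - 1) bytes below ptrVal and span 2 * store-size - 1 bytes.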
16118     Info.offset = -VT.getStoreSize()+1;
16119     Info.size = 2*VT.getStoreSize()-1;
16120     Info.align = Align(1);
16121     Info.flags = MachineMemOperand::MOLoad;
16122     return true;
16123   }
16124   case Intrinsic::ppc_qpx_qvlfda:
16125   case Intrinsic::ppc_qpx_qvlfsa:
16126   case Intrinsic::ppc_qpx_qvlfcda:
16127   case Intrinsic::ppc_qpx_qvlfcsa:
16128   case Intrinsic::ppc_qpx_qvlfiwaa:
16129   case Intrinsic::ppc_qpx_qvlfiwza: {
16130     EVT VT;
16131     switch (Intrinsic) {
16132     case Intrinsic::ppc_qpx_qvlfda:
16133       VT = MVT::v4f64;
16134       break;
16135     case Intrinsic::ppc_qpx_qvlfsa:
16136       VT = MVT::v4f32;
16137       break;
16138     case Intrinsic::ppc_qpx_qvlfcda:
16139       VT = MVT::v2f64;
16140       break;
16141     case Intrinsic::ppc_qpx_qvlfcsa:
16142       VT = MVT::v2f32;
16143       break;
16144     default:
16145       VT = MVT::v4i32;
16146       break;
16147     }
16148 
16149     Info.opc = ISD::INTRINSIC_W_CHAIN;
16150     Info.memVT = VT;
16151     Info.ptrVal = I.getArgOperand(0);
16152     Info.offset = 0;
16153     Info.size = VT.getStoreSize();
16154     Info.align = Align(1);
16155     Info.flags = MachineMemOperand::MOLoad;
16156     return true;
16157   }
16158   case Intrinsic::ppc_qpx_qvstfd:
16159   case Intrinsic::ppc_qpx_qvstfs:
16160   case Intrinsic::ppc_qpx_qvstfcd:
16161   case Intrinsic::ppc_qpx_qvstfcs:
16162   case Intrinsic::ppc_qpx_qvstfiw:
16163   case Intrinsic::ppc_altivec_stvx:
16164   case Intrinsic::ppc_altivec_stvxl:
16165   case Intrinsic::ppc_altivec_stvebx:
16166   case Intrinsic::ppc_altivec_stvehx:
16167   case Intrinsic::ppc_altivec_stvewx:
16168   case Intrinsic::ppc_vsx_stxvd2x:
16169   case Intrinsic::ppc_vsx_stxvw4x: {
16170     EVT VT;
16171     switch (Intrinsic) {
16172     case Intrinsic::ppc_altivec_stvebx:
16173       VT = MVT::i8;
16174       break;
16175     case Intrinsic::ppc_altivec_stvehx:
16176       VT = MVT::i16;
16177       break;
16178     case Intrinsic::ppc_altivec_stvewx:
16179       VT = MVT::i32;
16180       break;
16181     case Intrinsic::ppc_vsx_stxvd2x:
16182       VT = MVT::v2f64;
16183       break;
16184     case Intrinsic::ppc_qpx_qvstfd:
16185       VT = MVT::v4f64;
16186       break;
16187     case Intrinsic::ppc_qpx_qvstfs:
16188       VT = MVT::v4f32;
16189       break;
16190     case Intrinsic::ppc_qpx_qvstfcd:
16191       VT = MVT::v2f64;
16192       break;
16193     case Intrinsic::ppc_qpx_qvstfcs:
16194       VT = MVT::v2f32;
16195       break;
16196     default:
16197       VT = MVT::v4i32;
16198       break;
16199     }
16200 
16201     Info.opc = ISD::INTRINSIC_VOID;
16202     Info.memVT = VT;
16203     Info.ptrVal = I.getArgOperand(1);
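    // As with the loads above, describe a conservative window that may begin
    // (store-size - 1) bytes below the pointer.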
16204     Info.offset = -VT.getStoreSize()+1;
16205     Info.size = 2*VT.getStoreSize()-1;
16206     Info.align = Align(1);
16207     Info.flags = MachineMemOperand::MOStore;
16208     return true;
16209   }
16210   case Intrinsic::ppc_qpx_qvstfda:
16211   case Intrinsic::ppc_qpx_qvstfsa:
16212   case Intrinsic::ppc_qpx_qvstfcda:
16213   case Intrinsic::ppc_qpx_qvstfcsa:
16214   case Intrinsic::ppc_qpx_qvstfiwa: {
16215     EVT VT;
16216     switch (Intrinsic) {
16217     case Intrinsic::ppc_qpx_qvstfda:
16218       VT = MVT::v4f64;
16219       break;
16220     case Intrinsic::ppc_qpx_qvstfsa:
16221       VT = MVT::v4f32;
16222       break;
16223     case Intrinsic::ppc_qpx_qvstfcda:
16224       VT = MVT::v2f64;
16225       break;
16226     case Intrinsic::ppc_qpx_qvstfcsa:
16227       VT = MVT::v2f32;
16228       break;
16229     default:
16230       VT = MVT::v4i32;
16231       break;
16232     }
16233 
16234     Info.opc = ISD::INTRINSIC_VOID;
16235     Info.memVT = VT;
16236     Info.ptrVal = I.getArgOperand(1);
16237     Info.offset = 0;
16238     Info.size = VT.getStoreSize();
16239     Info.align = Align(1);
16240     Info.flags = MachineMemOperand::MOStore;
16241     return true;
16242   }
16243   default:
16244     break;
16245   }
16246 
16247   return false;
16248 }
16249 
/// Returns the preferred type for memory operations, or EVT::Other if the
/// type should be determined using generic target-independent logic.
EVT PPCTargetLowering::getOptimalMemOpType(
    const MemOp &Op, const AttributeList &FuncAttributes) const {
  if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
    // When expanding a memset, require at least two QPX instructions to cover
    // the cost of loading the value to be stored from the constant pool.
    if (Subtarget.hasQPX() && Op.size() >= 32 &&
        (Op.isMemcpy() || Op.size() >= 64) && Op.isAligned(Align(32)) &&
        !FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) {
      return MVT::v4f64;
    }

    // We should use Altivec/VSX loads and stores when available. For unaligned
    // addresses, unaligned VSX loads are only fast starting with the P8.
    if (Subtarget.hasAltivec() && Op.size() >= 16 &&
        (Op.isAligned(Align(16)) ||
         ((Op.isMemset() && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
      return MVT::v4i32;
  }

  if (Subtarget.isPPC64()) {
    return MVT::i64;
  }

  return MVT::i32;
}

/// Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {
  assert(Ty->isIntegerTy());

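  // Any integer of at most 64 bits can be materialized with a short
  // instruction sequence (e.g. lis/ori, possibly combined with rldimi),
  // which is generally cheaper than a load from the constant pool.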
  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  return !(BitSize == 0 || BitSize > 64);
}

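// Truncating i64 to i32 is free on PPC64: the narrow value is just the low
// half of the 64-bit GPR, so no instruction is needed.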
bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Generally speaking, zexts are not free, but they are free when they can be
  // folded with other operations.
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  // FIXME: Add other cases...
  //  - 32-bit shifts with a zext to i64
  //  - zext after ctlz, bswap, etc.
  //  - zext after and by a constant mask

  return TargetLowering::isZExtFree(Val, VT2);
}

bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
  assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
         "invalid fpext types");
  // Extending to float128 is not free.
  if (DestVT == MVT::f128)
    return false;
  return true;
}

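// The D-form compare and add instructions carry a 16-bit immediate field;
// values representable as either a signed (e.g. cmpwi, addi) or unsigned
// (e.g. cmplwi) 16-bit integer are treated as legal here.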
bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                       unsigned,
                                                       unsigned,
                                                       MachineMemOperand::Flags,
                                                       bool *Fast) const {
  if (DisablePPCUnaligned)
    return false;

  // PowerPC supports unaligned memory access for simple non-vector types.
  // Although accessing unaligned addresses is not as efficient as accessing
  // aligned addresses, it is generally more efficient than manual expansion,
  // and generally only traps for software emulation when crossing page
  // boundaries.

  if (!VT.isSimple())
    return false;

  if (VT.isFloatingPoint() && !VT.isVector() &&
      !Subtarget.allowsUnalignedFPAccess())
    return false;

  if (VT.getSimpleVT().isVector()) {
    if (Subtarget.hasVSX()) {
      if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
          VT != MVT::v4f32 && VT != MVT::v4i32)
        return false;
    } else {
      return false;
    }
  }

  if (VT == MVT::ppcf128)
    return false;

  if (Fast)
    *Fast = true;

  return true;
}

bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                                   EVT VT) const {
  return isFMAFasterThanFMulAndFAdd(
      MF.getFunction(), VT.getTypeForEVT(MF.getFunction().getContext()));
}

bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const Function &F,
                                                   Type *Ty) const {
  switch (Ty->getScalarType()->getTypeID()) {
  case Type::FloatTyID:
  case Type::DoubleTyID:
    return true;
  case Type::FP128TyID:
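    // Quad-precision FMA (e.g. xsmaddqp) requires the ISA 3.0 (Power9)
    // vector instructions.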
    return Subtarget.hasP9Vector();
  default:
    return false;
  }
}

// FIXME: add more patterns which are not profitable to hoist.
bool PPCTargetLowering::isProfitableToHoist(Instruction *I) const {
  if (!I->hasOneUse())
    return true;

  Instruction *User = I->user_back();
  assert(User && "A single use instruction with no uses.");

  switch (I->getOpcode()) {
  case Instruction::FMul: {
    // Don't break FMA; PowerPC prefers FMA.
    if (User->getOpcode() != Instruction::FSub &&
        User->getOpcode() != Instruction::FAdd)
      return true;

    const TargetOptions &Options = getTargetMachine().Options;
    const Function *F = I->getFunction();
    const DataLayout &DL = F->getParent()->getDataLayout();
    Type *Ty = User->getOperand(0)->getType();

    return !(
        isFMAFasterThanFMulAndFAdd(*F, Ty) &&
        isOperationLegalOrCustom(ISD::FMA, getValueType(DL, Ty)) &&
        (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath));
  }
  case Instruction::Load: {
    // Don't break the "store (load float*)" pattern; it will be combined to
    // "store (load int32)" by a later InstCombine pass (see function
    // combineLoadToOperationType). On PowerPC, loading a floating-point value
    // takes more cycles than loading a 32-bit integer.
    LoadInst *LI = cast<LoadInst>(I);
    // For loads that combineLoadToOperationType leaves alone, such as ordered
    // loads, hoisting is still profitable.
    // A swifterror load can only be of pointer-to-pointer type, so the type
    // check below rejects it.
    if (!LI->isUnordered())
      return true;

    if (User->getOpcode() != Instruction::Store)
      return true;

    if (I->getType()->getTypeID() != Type::FloatTyID)
      return true;

    return false;
  }
  default:
    return true;
  }
  return true;
}

const MCPhysReg *
PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
  // LR is a callee-save register, but we must treat it as clobbered by any call
  // site. Hence we include LR in the scratch registers, which are in turn added
  // as implicit-defs for stackmaps and patchpoints. The same reasoning applies
  // to CTR, which is used by any indirect call.
  static const MCPhysReg ScratchRegs[] = {
    PPC::X12, PPC::LR8, PPC::CTR8, 0
  };

  return ScratchRegs;
}

Register PPCTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
}

Register PPCTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
}

bool
PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
                     EVT VT, unsigned DefinedValues) const {
  if (VT == MVT::v2i64)
    return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves

  if (Subtarget.hasVSX() || Subtarget.hasQPX())
    return true;

  return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
}

Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
  if (DisableILPPref || Subtarget.enableMachineScheduler())
    return TargetLowering::getSchedulingPreference(N);

  return Sched::ILP;
}

// Create a fast isel object.
FastISel *
PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
                                  const TargetLibraryInfo *LibInfo) const {
  return PPC::createFastISel(FuncInfo, LibInfo);
}

// 'Inverted' means the FMA opcode after negating one multiplicand.
// For example, (fma -a b c) = (fnmsub a b c)
static unsigned invertFMAOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Invalid FMA opcode for PowerPC!");
  case ISD::FMA:
    return PPCISD::FNMSUB;
  case PPCISD::FNMSUB:
    return ISD::FMA;
  }
}

SDValue PPCTargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
                                                bool LegalOps, bool OptForSize,
                                                NegatibleCost &Cost,
                                                unsigned Depth) const {
  if (Depth > SelectionDAG::MaxRecursionDepth)
    return SDValue();

  unsigned Opc = Op.getOpcode();
  EVT VT = Op.getValueType();
  SDNodeFlags Flags = Op.getNode()->getFlags();

  switch (Opc) {
  case PPCISD::FNMSUB:
    // TODO: QPX subtarget is deprecated. No transformation here.
    if (!Op.hasOneUse() || !isTypeLegal(VT) || Subtarget.hasQPX())
      break;

    const TargetOptions &Options = getTargetMachine().Options;
    SDValue N0 = Op.getOperand(0);
    SDValue N1 = Op.getOperand(1);
    SDValue N2 = Op.getOperand(2);
    SDLoc Loc(Op);

    NegatibleCost N2Cost = NegatibleCost::Expensive;
    SDValue NegN2 =
        getNegatedExpression(N2, DAG, LegalOps, OptForSize, N2Cost, Depth + 1);

    if (!NegN2)
      return SDValue();

    // (fneg (fnmsub a b c)) => (fnmsub (fneg a) b (fneg c))
    // (fneg (fnmsub a b c)) => (fnmsub a (fneg b) (fneg c))
    // These transformations may change the sign of zeroes. For example,
    // -(-ab-(-c))=-0 while -(-(ab-c))=+0 when a=b=c=1.
    if (Flags.hasNoSignedZeros() || Options.NoSignedZerosFPMath) {
      // Try to choose the cheaper one to negate.
      NegatibleCost N0Cost = NegatibleCost::Expensive;
      SDValue NegN0 = getNegatedExpression(N0, DAG, LegalOps, OptForSize,
                                           N0Cost, Depth + 1);

      NegatibleCost N1Cost = NegatibleCost::Expensive;
      SDValue NegN1 = getNegatedExpression(N1, DAG, LegalOps, OptForSize,
                                           N1Cost, Depth + 1);

      if (NegN0 && N0Cost <= N1Cost) {
        Cost = std::min(N0Cost, N2Cost);
        return DAG.getNode(Opc, Loc, VT, NegN0, N1, NegN2, Flags);
      } else if (NegN1) {
        Cost = std::min(N1Cost, N2Cost);
        return DAG.getNode(Opc, Loc, VT, N0, NegN1, NegN2, Flags);
      }
    }

    // (fneg (fnmsub a b c)) => (fma a b (fneg c))
    if (isOperationLegal(ISD::FMA, VT)) {
      Cost = N2Cost;
      return DAG.getNode(ISD::FMA, Loc, VT, N0, N1, NegN2, Flags);
    }

    break;
  }

  return TargetLowering::getNegatedExpression(Op, DAG, LegalOps, OptForSize,
                                              Cost, Depth);
}

// Override to enable LOAD_STACK_GUARD lowering on Linux.
bool PPCTargetLowering::useLoadStackGuardNode() const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::useLoadStackGuardNode();
  return true;
}

// Override to disable global variable loading on Linux.
void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::insertSSPDeclarations(M);
}

bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                     bool ForCodeSize) const {
  if (!VT.isSimple() || !Subtarget.hasVSX())
    return false;

  switch(VT.getSimpleVT().SimpleTy) {
  default:
    // For FP types that are currently not supported by the PPC backend, return
    // false. Examples: f16, f80.
    return false;
  case MVT::f32:
  case MVT::f64:
    if (Subtarget.hasPrefixInstrs()) {
      // With prefixed instructions, we can materialize anything that can be
      // represented with a 32-bit immediate, not just positive zero.
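      // (Such values can then be materialized with a splat-immediate
      // instruction such as xxspltidp rather than loaded from the constant
      // pool.)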
      APFloat APFloatOfImm = Imm;
      return convertToNonDenormSingle(APFloatOfImm);
    }
    LLVM_FALLTHROUGH;
  case MVT::ppcf128:
    return Imm.isPosZero();
  }
}

// For vector shift operation op, fold
// (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
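// For example, (srl v4i32:x, (and v4i32:y, 31)) becomes (PPCISD::SRL x, y):
// the hardware vector shift instructions already interpret the shift amount
// modulo the element width, so the masking is redundant.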
static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
                                  SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();
  unsigned OpSizeInBits = VT.getScalarSizeInBits();
  unsigned Opcode = N->getOpcode();
  unsigned TargetOpcode;

  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected shift operation");
  case ISD::SHL:
    TargetOpcode = PPCISD::SHL;
    break;
  case ISD::SRL:
    TargetOpcode = PPCISD::SRL;
    break;
  case ISD::SRA:
    TargetOpcode = PPCISD::SRA;
    break;
  }

  if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
      N1->getOpcode() == ISD::AND)
    if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
      if (Mask->getZExtValue() == OpSizeInBits - 1)
        return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));

  return SDValue();
}

SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

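  // ISA 3.0 has extswsli, which performs the word sign extension and the
  // shift in one instruction: (shl (sext i32:x to i64), c) => (extswsli x, c).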
  SDValue N0 = N->getOperand(0);
  ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!Subtarget.isISA3_0() ||
      N0.getOpcode() != ISD::SIGN_EXTEND ||
      N0.getOperand(0).getValueType() != MVT::i32 ||
      CN1 == nullptr || N->getValueType(0) != MVT::i64)
    return SDValue();

  // We can't save an operation here if the value is already extended, and
  // the existing shift is easier to combine.
  SDValue ExtsSrc = N0.getOperand(0);
  if (ExtsSrc.getOpcode() == ISD::TRUNCATE &&
      ExtsSrc.getOperand(0).getOpcode() == ISD::AssertSext)
    return SDValue();

  SDLoc DL(N0);
  SDValue ShiftBy = SDValue(CN1, 0);
  // We want the shift amount to be i32 on the extswsli, but the shift could
  // have an i64 shift amount.
  if (ShiftBy.getValueType() == MVT::i64)
    ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32);

  return DCI.DAG.getNode(PPCISD::EXTSWSLI, DL, MVT::i64, N0->getOperand(0),
                         ShiftBy);
}

SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}

SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}

// Transform (add X, (zext(setne Z, C))) -> (addze X, (addic (addi Z, -C), -1))
// Transform (add X, (zext(sete  Z, C))) -> (addze X, (subfic (addi Z, -C), 0))
// When C is zero, (addi Z, -C) simplifies to Z.
// Requirement: -C is in [-32768, 32767], and X and Z are of type MVT::i64.
static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG,
                                 const PPCSubtarget &Subtarget) {
  if (!Subtarget.isPPC64())
    return SDValue();

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  auto isZextOfCompareWithConstant = [](SDValue Op) {
    if (Op.getOpcode() != ISD::ZERO_EXTEND || !Op.hasOneUse() ||
        Op.getValueType() != MVT::i64)
      return false;

    SDValue Cmp = Op.getOperand(0);
    if (Cmp.getOpcode() != ISD::SETCC || !Cmp.hasOneUse() ||
        Cmp.getOperand(0).getValueType() != MVT::i64)
      return false;

    if (auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1))) {
      int64_t NegConstant = 0 - Constant->getSExtValue();
      // Due to the limitations of the addi instruction,
      // -C is required to be in [-32768, 32767].
      return isInt<16>(NegConstant);
    }

    return false;
  };

  bool LHSHasPattern = isZextOfCompareWithConstant(LHS);
  bool RHSHasPattern = isZextOfCompareWithConstant(RHS);

  // If there is a pattern, canonicalize a zext operand to the RHS.
  if (LHSHasPattern && !RHSHasPattern)
    std::swap(LHS, RHS);
  else if (!LHSHasPattern && !RHSHasPattern)
    return SDValue();

  SDLoc DL(N);
  SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Glue);
  SDValue Cmp = RHS.getOperand(0);
  SDValue Z = Cmp.getOperand(0);
  auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1));

  assert(Constant && "Constant should not be a null pointer.");
  int64_t NegConstant = 0 - Constant->getSExtValue();

  switch(cast<CondCodeSDNode>(Cmp.getOperand(2))->get()) {
  default: break;
  case ISD::SETNE: {
    //                                 when C == 0
    //                             --> addze X, (addic Z, -1).carry
    //                            /
    // add X, (zext(setne Z, C))--
    //                            \    when -32768 <= -C <= 32767 && C != 0
    //                             --> addze X, (addic (addi Z, -C), -1).carry
    SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
                              DAG.getConstant(NegConstant, DL, MVT::i64));
    SDValue AddOrZ = NegConstant != 0 ? Add : Z;
    SDValue Addc = DAG.getNode(ISD::ADDC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
                               AddOrZ, DAG.getConstant(-1ULL, DL, MVT::i64));
    return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
                       SDValue(Addc.getNode(), 1));
    }
  case ISD::SETEQ: {
    //                                 when C == 0
    //                             --> addze X, (subfic Z, 0).carry
    //                            /
    // add X, (zext(sete  Z, C))--
    //                            \    when -32768 <= -C <= 32767 && C != 0
    //                             --> addze X, (subfic (addi Z, -C), 0).carry
    SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
                              DAG.getConstant(NegConstant, DL, MVT::i64));
    SDValue AddOrZ = NegConstant != 0 ? Add : Z;
    SDValue Subc = DAG.getNode(ISD::SUBC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
                               DAG.getConstant(0, DL, MVT::i64), AddOrZ);
    return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
                       SDValue(Subc.getNode(), 1));
    }
  }

  return SDValue();
}

// Transform
// (add C1, (MAT_PCREL_ADDR GlobalAddr+C2)) to
// (MAT_PCREL_ADDR GlobalAddr+(C1+C2))
// In this case both C1 and C2 must be known constants.
// C1+C2 must fit into a 34-bit signed integer.
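// (The 34-bit limit matches the signed displacement field of the prefixed
// paddi instruction used to materialize PC-relative addresses.)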
static SDValue combineADDToMAT_PCREL_ADDR(SDNode *N, SelectionDAG &DAG,
                                          const PPCSubtarget &Subtarget) {
  if (!Subtarget.isUsingPCRelativeCalls())
    return SDValue();

  // Check both Operand 0 and Operand 1 of the ADD node for the PCRel node.
  // If we find that node, try to cast the Global Address and the Constant.
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
    std::swap(LHS, RHS);

  if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
    return SDValue();

  // Operand zero of PPCISD::MAT_PCREL_ADDR is the GA node.
  GlobalAddressSDNode *GSDN = dyn_cast<GlobalAddressSDNode>(LHS.getOperand(0));
  ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(RHS);

  // Check that both casts succeeded.
  if (!GSDN || !ConstNode)
    return SDValue();

  int64_t NewOffset = GSDN->getOffset() + ConstNode->getSExtValue();
  SDLoc DL(GSDN);

  // The signed int offset needs to fit in 34 bits.
  if (!isInt<34>(NewOffset))
    return SDValue();

  // The new global address is a copy of the old global address except
  // that it has the updated offset.
  SDValue GA =
      DAG.getTargetGlobalAddress(GSDN->getGlobal(), DL, GSDN->getValueType(0),
                                 NewOffset, GSDN->getTargetFlags());
  SDValue MatPCRel =
      DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, GSDN->getValueType(0), GA);
  return MatPCRel;
}

SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = combineADDToADDZE(N, DCI.DAG, Subtarget))
    return Value;

  if (auto Value = combineADDToMAT_PCREL_ADDR(N, DCI.DAG, Subtarget))
    return Value;

  return SDValue();
}

// Detect TRUNCATE operations on bitcasts of float128 values.
// What we are looking for here is the situation where we extract a subset
// of bits from a 128-bit float.
// This can be of two forms:
// 1) BITCAST of f128 feeding TRUNCATE
// 2) BITCAST of f128 feeding SRL (a shift) feeding TRUNCATE
// This is needed because we do not have a legal i128 type, so without this
// combine we would have to store the f128 and then reload part of it.
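// For example, (trunc (srl (bitcast f128:x to i128), 64) to i64) becomes an
// EXTRACT_VECTOR_ELT of (bitcast x to v2i64), avoiding the store/reload.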
SDValue PPCTargetLowering::combineTRUNCATE(SDNode *N,
                                           DAGCombinerInfo &DCI) const {
  // If we are using CRBits then try that first.
  if (Subtarget.useCRBits()) {
    // Check if CRBits did anything and return that if it did.
    if (SDValue CRTruncValue = DAGCombineTruncBoolExt(N, DCI))
      return CRTruncValue;
  }

  SDLoc dl(N);
  SDValue Op0 = N->getOperand(0);

  // fold (truncate (abs (sub (zext a), (zext b)))) -> (vabsd a, b)
  if (Subtarget.hasP9Altivec() && Op0.getOpcode() == ISD::ABS) {
    EVT VT = N->getValueType(0);
    if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
      return SDValue();
    SDValue Sub = Op0.getOperand(0);
    if (Sub.getOpcode() == ISD::SUB) {
      SDValue SubOp0 = Sub.getOperand(0);
      SDValue SubOp1 = Sub.getOperand(1);
      if ((SubOp0.getOpcode() == ISD::ZERO_EXTEND) &&
          (SubOp1.getOpcode() == ISD::ZERO_EXTEND)) {
        return DCI.DAG.getNode(PPCISD::VABSD, dl, VT, SubOp0.getOperand(0),
                               SubOp1.getOperand(0),
                               DCI.DAG.getTargetConstant(0, dl, MVT::i32));
      }
    }
  }

  // Looking for a truncate of i128 to i64.
  if (Op0.getValueType() != MVT::i128 || N->getValueType(0) != MVT::i64)
    return SDValue();

  int EltToExtract = DCI.DAG.getDataLayout().isBigEndian() ? 1 : 0;

  // SRL feeding TRUNCATE.
  if (Op0.getOpcode() == ISD::SRL) {
    ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
    // The right shift has to be by 64 bits.
    if (!ConstNode || ConstNode->getZExtValue() != 64)
      return SDValue();

    // Switch the element number to extract.
    EltToExtract = EltToExtract ? 0 : 1;
    // Update Op0 past the SRL.
    Op0 = Op0.getOperand(0);
  }

  // BITCAST feeding a TRUNCATE possibly via SRL.
  if (Op0.getOpcode() == ISD::BITCAST &&
      Op0.getValueType() == MVT::i128 &&
      Op0.getOperand(0).getValueType() == MVT::f128) {
    SDValue Bitcast = DCI.DAG.getBitcast(MVT::v2i64, Op0.getOperand(0));
    return DCI.DAG.getNode(
        ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Bitcast,
        DCI.DAG.getTargetConstant(EltToExtract, dl, MVT::i32));
  }
  return SDValue();
}

SDValue PPCTargetLowering::combineMUL(SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  ConstantSDNode *ConstOpOrElement = isConstOrConstSplat(N->getOperand(1));
  if (!ConstOpOrElement)
    return SDValue();

  // An imul is usually smaller than the alternative sequence for a legal type.
  if (DAG.getMachineFunction().getFunction().hasMinSize() &&
      isOperationLegal(ISD::MUL, N->getValueType(0)))
    return SDValue();

  auto IsProfitable = [this](bool IsNeg, bool IsAddOne, EVT VT) -> bool {
    switch (this->Subtarget.getCPUDirective()) {
    default:
      // TODO: enhance the condition for subtargets before pwr8
      return false;
    case PPC::DIR_PWR8:
      //  type        mul     add    shl
      // scalar        4       1      1
      // vector        7       2      2
      return true;
    case PPC::DIR_PWR9:
    case PPC::DIR_PWR10:
    case PPC::DIR_PWR_FUTURE:
      //  type        mul     add    shl
      // scalar        5       2      2
      // vector        7       2      2

      // The cycle costs of the relevant operations are shown in the table
      // above. Because mul costs 5 (scalar) / 7 (vector) cycles while
      // add/sub/shl each cost 2 for both scalar and vector types, the
      // two-instruction patterns (add/sub + shl, 4 cycles total) are always
      // profitable. The three-instruction pattern
      // (mul x, -(2^N + 1)) => -(add (shl x, N), x) costs 6 cycles
      // (sub + add + shl), so it is only profitable for vector types.
      return IsAddOne && IsNeg ? VT.isVector() : true;
    }
  };

  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  const APInt &MulAmt = ConstOpOrElement->getAPIntValue();
  bool IsNeg = MulAmt.isNegative();
  APInt MulAmtAbs = MulAmt.abs();

  if ((MulAmtAbs - 1).isPowerOf2()) {
    // (mul x, 2^N + 1) => (add (shl x, N), x)
    // (mul x, -(2^N + 1)) => -(add (shl x, N), x)

    if (!IsProfitable(IsNeg, true, VT))
      return SDValue();

    SDValue Op0 = N->getOperand(0);
    SDValue Op1 =
        DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                    DAG.getConstant((MulAmtAbs - 1).logBase2(), DL, VT));
    SDValue Res = DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);

    if (!IsNeg)
      return Res;

    return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
  } else if ((MulAmtAbs + 1).isPowerOf2()) {
    // (mul x, 2^N - 1) => (sub (shl x, N), x)
    // (mul x, -(2^N - 1)) => (sub x, (shl x, N))

    if (!IsProfitable(IsNeg, false, VT))
      return SDValue();

    SDValue Op0 = N->getOperand(0);
    SDValue Op1 =
        DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                    DAG.getConstant((MulAmtAbs + 1).logBase2(), DL, VT));

    if (!IsNeg)
      return DAG.getNode(ISD::SUB, DL, VT, Op1, Op0);
    else
      return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);

  } else {
    return SDValue();
  }
}

// Combine an fma-like op (like fnmsub) with fnegs into the appropriate op.
// Do this in the combiner since we need to check SD flags and other subtarget
// features.
SDValue PPCTargetLowering::combineFMALike(SDNode *N,
                                          DAGCombinerInfo &DCI) const {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue N2 = N->getOperand(2);
  SDNodeFlags Flags = N->getFlags();
  EVT VT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;
  const TargetOptions &Options = getTargetMachine().Options;
  unsigned Opc = N->getOpcode();
  bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
  bool LegalOps = !DCI.isBeforeLegalizeOps();
  SDLoc Loc(N);

  // TODO: QPX subtarget is deprecated. No transformation here.
  if (Subtarget.hasQPX() || !isOperationLegal(ISD::FMA, VT))
    return SDValue();

  // Allowing the transformation to FNMSUB may change the sign of zeroes when
  // ab-c=0, since (fnmsub a b c)=-0 while c-ab=+0.
  if (!Flags.hasNoSignedZeros() && !Options.NoSignedZerosFPMath)
    return SDValue();

  // (fma (fneg a) b c) => (fnmsub a b c)
  // (fnmsub (fneg a) b c) => (fma a b c)
  if (SDValue NegN0 = getCheaperNegatedExpression(N0, DAG, LegalOps, CodeSize))
    return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, NegN0, N1, N2, Flags);

  // (fma a (fneg b) c) => (fnmsub a b c)
  // (fnmsub a (fneg b) c) => (fma a b c)
  if (SDValue NegN1 = getCheaperNegatedExpression(N1, DAG, LegalOps, CodeSize))
    return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, N0, NegN1, N2, Flags);

  return SDValue();
}

bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  // Only duplicate to increase tail-calls for the 64-bit SysV ABIs.
  if (!Subtarget.is64BitELFABI())
    return false;

  // If not a tail call then no need to proceed.
  if (!CI->isTailCall())
    return false;

  // If sibling calls have been disabled and tail-calls aren't guaranteed,
  // there is no reason to duplicate.
  auto &TM = getTargetMachine();
  if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
    return false;

  // Can't tail call a function called indirectly, or if it has variadic args.
  const Function *Callee = CI->getCalledFunction();
  if (!Callee || Callee->isVarArg())
    return false;

  // Make sure the callee and caller calling conventions are eligible for tco.
  const Function *Caller = CI->getParent()->getParent();
  if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
                                           CI->getCallingConv()))
      return false;

  // If the function is local then we have a good chance at tail-calling it.
  return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
}

bool PPCTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
  if (!Subtarget.hasVSX())
    return false;
  if (Subtarget.hasP9Vector() && VT == MVT::f128)
    return true;
  return VT == MVT::f32 || VT == MVT::f64 ||
    VT == MVT::v4f32 || VT == MVT::v2f64;
}

bool PPCTargetLowering::
isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
  const Value *Mask = AndI.getOperand(1);
  // If the mask is suitable for andi. or andis. we should sink the and.
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
    // Can't handle constants wider than 64 bits.
    if (CI->getBitWidth() > 64)
      return false;
    int64_t ConstVal = CI->getZExtValue();
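    // andi. takes a 16-bit unsigned immediate (e.g. mask 0x0000FFFF); andis.
    // takes the same immediate applied to the high halfword (e.g. mask
    // 0xFFFF0000).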
    return isUInt<16>(ConstVal) ||
      (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
  }

  // For non-constant masks, we can always use the record-form and.
  return true;
}

// Transform (abs (sub (zext a), (zext b))) to (vabsd a b 0)
// Transform (abs (sub (zext a), (zext_invec b))) to (vabsd a b 0)
// Transform (abs (sub (zext_invec a), (zext_invec b))) to (vabsd a b 0)
// Transform (abs (sub (zext_invec a), (zext b))) to (vabsd a b 0)
// Transform (abs (sub a, b)) to (vabsd a b 1) if a and b are of type v4i32
SDValue PPCTargetLowering::combineABS(SDNode *N, DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::ABS) && "Need ABS node here");
  assert(Subtarget.hasP9Altivec() &&
         "Only combine this when P9 altivec supported!");
  EVT VT = N->getValueType(0);
  if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  if (N->getOperand(0).getOpcode() == ISD::SUB) {
    // This applies even for signed integer types: with zero-extended inputs
    // the subtraction result is known to fit as a signed value, so the
    // unsigned absolute difference gives the correct result.
    unsigned SubOpcd0 = N->getOperand(0)->getOperand(0).getOpcode();
    unsigned SubOpcd1 = N->getOperand(0)->getOperand(1).getOpcode();
    if ((SubOpcd0 == ISD::ZERO_EXTEND ||
         SubOpcd0 == ISD::ZERO_EXTEND_VECTOR_INREG) &&
        (SubOpcd1 == ISD::ZERO_EXTEND ||
         SubOpcd1 == ISD::ZERO_EXTEND_VECTOR_INREG)) {
      return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
                         N->getOperand(0)->getOperand(0),
                         N->getOperand(0)->getOperand(1),
                         DAG.getTargetConstant(0, dl, MVT::i32));
    }

    // For type v4i32, it can be optimized with xvnegsp + vabsduw.
    if (N->getOperand(0).getValueType() == MVT::v4i32 &&
        N->getOperand(0).hasOneUse()) {
      return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
                         N->getOperand(0)->getOperand(0),
                         N->getOperand(0)->getOperand(1),
                         DAG.getTargetConstant(1, dl, MVT::i32));
    }
  }

  return SDValue();
}

// For type v4i32/v8i16/v16i8, transform
// from (vselect (setcc a, b, setugt), (sub a, b), (sub b, a)) to (vabsd a, b)
// from (vselect (setcc a, b, setuge), (sub a, b), (sub b, a)) to (vabsd a, b)
// from (vselect (setcc a, b, setult), (sub b, a), (sub a, b)) to (vabsd a, b)
// from (vselect (setcc a, b, setule), (sub b, a), (sub a, b)) to (vabsd a, b)
SDValue PPCTargetLowering::combineVSelect(SDNode *N,
                                          DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::VSELECT) && "Need VSELECT node here");
  assert(Subtarget.hasP9Altivec() &&
         "Only combine this when P9 altivec supported!");

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Cond = N->getOperand(0);
  SDValue TrueOpnd = N->getOperand(1);
  SDValue FalseOpnd = N->getOperand(2);
  EVT VT = N->getOperand(1).getValueType();

  if (Cond.getOpcode() != ISD::SETCC || TrueOpnd.getOpcode() != ISD::SUB ||
      FalseOpnd.getOpcode() != ISD::SUB)
    return SDValue();

  // ABSD is only available for type v4i32/v8i16/v16i8.
  if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
    return SDValue();

  // Unless at least one of the operands dies here, the combine does not save
  // a dependent computation.
  if (!(Cond.hasOneUse() || TrueOpnd.hasOneUse() || FalseOpnd.hasOneUse()))
    return SDValue();

  ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

  // Can only handle unsigned comparisons here.
  switch (CC) {
  default:
    return SDValue();
  case ISD::SETUGT:
  case ISD::SETUGE:
    break;
  case ISD::SETULT:
  case ISD::SETULE:
    std::swap(TrueOpnd, FalseOpnd);
    break;
  }

  SDValue CmpOpnd1 = Cond.getOperand(0);
  SDValue CmpOpnd2 = Cond.getOperand(1);

  // SETCC CmpOpnd1 CmpOpnd2 cond
  // TrueOpnd = CmpOpnd1 - CmpOpnd2
  // FalseOpnd = CmpOpnd2 - CmpOpnd1
  if (TrueOpnd.getOperand(0) == CmpOpnd1 &&
      TrueOpnd.getOperand(1) == CmpOpnd2 &&
      FalseOpnd.getOperand(0) == CmpOpnd2 &&
      FalseOpnd.getOperand(1) == CmpOpnd1) {
    return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(1).getValueType(),
                       CmpOpnd1, CmpOpnd2,
                       DAG.getTargetConstant(0, dl, MVT::i32));
  }

  return SDValue();
}
