//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCCCState.h"
#include "PPCCallingConv.h"
#include "PPCFrameLowering.h"
#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCRegisterInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionXCOFF.h"
#include "llvm/MC/MCSymbolXCOFF.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "ppc-lowering"

static cl::opt<bool> DisablePPCPreinc(
    "disable-ppc-preinc",
    cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref(
    "disable-ppc-ilp-pref",
    cl::desc("disable setting the node scheduling preference to ILP on PPC"),
    cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned(
    "disable-ppc-unaligned",
    cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableSCO(
    "disable-ppc-sco", cl::desc("disable sibling call optimization on ppc"),
    cl::Hidden);

static cl::opt<bool> DisableInnermostLoopAlign32(
    "disable-ppc-innermost-loop-align32",
    cl::desc("don't always align innermost loop to 32 bytes on ppc"),
    cl::Hidden);

static cl::opt<bool> UseAbsoluteJumpTables(
    "ppc-use-absolute-jumptables", cl::desc("use absolute jump tables on ppc"),
    cl::Hidden);

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumSiblingCalls, "Number of sibling calls");
STATISTIC(ShufflesHandledWithVPERM, "Number of shuffles lowered to a VPERM");
STATISTIC(NumDynamicAllocaProbed, "Number of dynamic stack allocations probed");

static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int);

static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl);

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
                                     const PPCSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // Initialize the map that relates the PPC addressing modes to the computed
  // flags of a load/store instruction. The map is used to determine the
  // optimal addressing mode when selecting loads and stores.
  initializeAddrModeMap();
  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4));

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  if (!useSoftFloat()) {
    if (hasSPE()) {
      addRegisterClass(MVT::f32, &PPC::GPRCRegClass);
      // The EFPU2 APU only supports f32.
      if (!Subtarget.hasEFPU2())
        addRegisterClass(MVT::f64, &PPC::SPERCRegClass);
    } else {
      addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
      addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
    }
  }

  // Match BITREVERSE to a customized fast code sequence in the td file.
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);

  // Sub-word ATOMIC_CMP_SWAP needs to ensure that the input is zero-extended.
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
  }

  if (Subtarget.isISA3_0()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Legal);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Legal);
    setTruncStoreAction(MVT::f64, MVT::f16, Legal);
    setTruncStoreAction(MVT::f32, MVT::f16, Legal);
  } else {
    // No extending loads from f16 or HW conversions back and forth.
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
    setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  }
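  // Illustrative aside: on ISA 3.0 the f16 cases above are legal because VSX
  // provides half-precision converts (e.g. xscvhpdp / xscvdphp); the exact
  // pairing with the i16-sized memory access is decided during instruction
  // selection.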

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  if (!Subtarget.hasSPE()) {
    setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
  }
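  // E.g. (illustrative) a pre-incremented i32 load selects to the update-form
  //   lwzu r3, 4(r4)   ; r3 = *(r4 + 4), and r4 is updated to r4 + 4
  // with lbzu/lhzu/ldu and stwu/stdu etc. covering the other types above.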

  // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }
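  // For instance (illustrative), an i64 add on PPC32 splits into
  //   addc r5, r5, r7   ; low halves, sets the CA bit
  //   adde r6, r6, r8   ; high halves, consumes the CA bit
  // which is exactly the ADDC/ADDE pairing kept Legal here.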

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::STRICT_SINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::STRICT_UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);

      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::SINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);

      setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i1, Promote);
      AddPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i1, Promote);
      AddPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);

      setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
      AddPromotedToType(ISD::FP_TO_SINT, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
      AddPromotedToType(ISD::FP_TO_UINT, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
  // PPC (the libcall is not available).
  setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL,  MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT,  MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions unless we are on P9, where a
  // hardware instruction can compute the remainder. Even there, when both
  // the quotient and the remainder are needed, it is more efficient to
  // compute the remainder from the result of the division rather than to
  // use the remainder instruction. The instructions are legalized directly
  // because the DivRemPairsPass performs the transformation at the IR level.
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::SREM, MVT::i32, Legal);
    setOperationAction(ISD::UREM, MVT::i32, Legal);
    setOperationAction(ISD::SREM, MVT::i64, Legal);
    setOperationAction(ISD::UREM, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::SREM, MVT::i32, Expand);
    setOperationAction(ISD::UREM, MVT::i32, Expand);
    setOperationAction(ISD::SREM, MVT::i64, Expand);
    setOperationAction(ISD::UREM, MVT::i64, Expand);
  }
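  // Illustratively, on ISA 3.0 the remainder selects to a single modulo
  // instruction, e.g.
  //   srem i32 %a, %b  ->  modsw 3, 3, 4
  //   urem i64 %a, %b  ->  modud 3, 3, 4
  // (register choices are up to the allocator); pre-ISA-3.0 subtargets get
  // the divide/multiply/subtract expansion instead.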

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // Handle constrained floating-point operations on scalar types.
  // TODO: Handle SPE-specific operations.
  setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);

  setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);

  if (!Subtarget.hasSPE()) {
    setOperationAction(ISD::STRICT_FMA, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FMA, MVT::f64, Legal);
  }

  if (Subtarget.hasVSX()) {
    setOperationAction(ISD::STRICT_FRINT, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FRINT, MVT::f64, Legal);
  }

  if (Subtarget.hasFSQRT()) {
    setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::STRICT_FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FCEIL,  MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::f32, Legal);

    setOperationAction(ISD::STRICT_FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FCEIL,  MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::f64, Legal);
  }

  // We don't have native sin/cos/fmod/pow; expand them. (FSQRT is handled
  // separately below based on subtarget features.)
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  if (Subtarget.hasSPE()) {
    setOperationAction(ISD::FMA, MVT::f64, Expand);
    setOperationAction(ISD::FMA, MVT::f32, Expand);
  } else {
    setOperationAction(ISD::FMA, MVT::f64, Legal);
    setOperationAction(ISD::FMA, MVT::f32, Legal);
  }
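  // Without SPE, FMA maps directly onto the fused fmadd/fmadds instructions
  // (illustrative); SPE has no fused multiply-add, hence the Expand above.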

  if (Subtarget.hasSPE())
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }
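  // Illustrative mapping for the FPRND cases above:
  //   FFLOOR -> frim, FCEIL -> frip, FTRUNC -> friz, FROUND -> frin
  // (frin rounds to nearest with ties away from zero, matching FROUND's
  // round() semantics; the same mnemonics handle the f32 forms.)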

  // PowerPC does not have BSWAP, but we can use the vector BSWAP instruction
  // xxbrd to speed up scalar BSWAP64.
  // CTPOP and CTTZ were introduced in P8 and P9, respectively.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  if (Subtarget.hasP9Vector() && Subtarget.isPPC64())
    setOperationAction(ISD::BSWAP, MVT::i64, Custom);
  else
    setOperationAction(ISD::BSWAP, MVT::i64, Expand);
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::CTTZ, MVT::i32, Legal);
    setOperationAction(ISD::CTTZ, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTTZ, MVT::i32, Expand);
    setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  }

  if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }
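  // Illustratively, the Legal cases select straight to single instructions:
  // CTTZ to cnttzw/cnttzd (ISA 3.0) and CTPOP to popcntw/popcntd (when
  // POPCNTD is fast); the Expand cases fall back to the generic bit tricks.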

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have SELECT.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  if (Subtarget.hasFPU()) {
    setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Legal);

    setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Legal);
  }

  // PowerPC does not have BRCOND, which requires SETCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  if (Subtarget.hasSPE()) {
    // SPE has built-in conversions.
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);

    // SPE supports signaling compare of f32/f64.
    setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal);
  } else {
    // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

    // PowerPC does not have [U|S]INT_TO_FP.
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
  }
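  // The FCTIWZ path mentioned above looks roughly like this (illustrative,
  // big-endian stack slot):
  //   fctiwz f0, f1      ; convert f1 to i32, rounding toward zero
  //   stfd   f0, 8(r1)   ; store the FPR to a stack slot
  //   lwz    r3, 12(r1)  ; reload the low word holding the integer result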

  if (Subtarget.hasDirectMove() && isPPC64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i64, Legal);
    setOperationAction(ISD::BITCAST, MVT::f64, Legal);
    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::LRINT, MVT::f64, Legal);
      setOperationAction(ISD::LRINT, MVT::f32, Legal);
      setOperationAction(ISD::LLRINT, MVT::f64, Legal);
      setOperationAction(ISD::LLRINT, MVT::f32, Legal);
      setOperationAction(ISD::LROUND, MVT::f64, Legal);
      setOperationAction(ISD::LROUND, MVT::f32, Legal);
      setOperationAction(ISD::LLROUND, MVT::f64, Legal);
      setOperationAction(ISD::LLROUND, MVT::f32, Legal);
    }
  } else {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
  }

  // We cannot sextinreg(i1).  Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP here are NOT intended to support SjLj
  // exception handling; they are a lightweight setjmp/longjmp replacement
  // used for continuations, user-level threading, and the like. No other
  // SjLj exception interfaces are implemented, so please do not build your
  // own exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  if (Subtarget.is64BitELFABI()) {
    // VAARG always uses double-word chunks, so promote anything smaller.
    setOperationAction(ISD::VAARG, MVT::i1, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i8, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i16, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i32, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
  } else if (Subtarget.is32BitELFABI()) {
    // VAARG is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VAARG, MVT::i64, Custom);
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  // VACOPY is custom lowered with the 32-bit SVR4 ABI.
  if (Subtarget.is32BitELFABI())
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  // Comparisons that require checking two conditions.
  if (Subtarget.hasSPE()) {
    setCondCodeAction(ISD::SETO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::f64, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
  }
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);

  if (Subtarget.has64BitSupport()) {
    // Subtargets with 64-bit support also have instructions for converting
    // between i64 and fp.
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
    }
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    if (Subtarget.hasSPE()) {
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
    } else {
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Expand);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    }
  }

  // With the instructions enabled under FPCVT, we can do everything.
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  // PowerPC has better expansions for funnel shifts than the generic
  // TargetLowering::expandFunnelShift.
  if (Subtarget.has64BitSupport()) {
    setOperationAction(ISD::FSHL, MVT::i64, Custom);
    setOperationAction(ISD::FSHR, MVT::i64, Custom);
  }
  setOperationAction(ISD::FSHL, MVT::i32, Custom);
  setOperationAction(ISD::FSHR, MVT::i32, Custom);

  if (Subtarget.hasVSX()) {
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
  }

  if (Subtarget.hasAltivec()) {
    for (MVT VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
      setOperationAction(ISD::SADDSAT, VT, Legal);
      setOperationAction(ISD::SSUBSAT, VT, Legal);
      setOperationAction(ISD::UADDSAT, VT, Legal);
      setOperationAction(ISD::USUBSAT, VT, Legal);
    }
    // First, set the operation action for all vector types to Expand; then
    // selectively turn on the ones that can be effectively codegen'd.
    for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
      // add/sub are legal for all supported vector VTs.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // For v2i64, these are only valid with P8Vector. This is corrected after
      // the loop.
      if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) {
        setOperationAction(ISD::SMAX, VT, Legal);
        setOperationAction(ISD::SMIN, VT, Legal);
        setOperationAction(ISD::UMAX, VT, Legal);
        setOperationAction(ISD::UMIN, VT, Legal);
      } else {
        setOperationAction(ISD::SMAX, VT, Expand);
        setOperationAction(ISD::SMIN, VT, Expand);
        setOperationAction(ISD::UMAX, VT, Expand);
        setOperationAction(ISD::UMIN, VT, Expand);
      }

      if (Subtarget.hasVSX()) {
        setOperationAction(ISD::FMAXNUM, VT, Legal);
        setOperationAction(ISD::FMINNUM, VT, Legal);
      }

      // Vector instructions introduced in P8
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      } else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // Vector instructions introduced in P9
      if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
        setOperationAction(ISD::CTTZ, VT, Legal);
      else
        setOperationAction(ISD::CTTZ, VT, Expand);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType(ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v4i32);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v4i32);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v4i32);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType(ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType(ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL, VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL, VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }
    setOperationAction(ISD::SELECT_CC, MVT::v4i32, Expand);
    if (!Subtarget.hasP8Vector()) {
      setOperationAction(ISD::SMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::SMIN, MVT::v2i64, Expand);
      setOperationAction(ISD::UMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::UMIN, MVT::v2i64, Expand);
    }

    // We can custom-expand all VECTOR_SHUFFLEs to VPERM; others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    // Vector truncates to sub-word integers that fit in an Altivec/VSX
    // register are cheap, so handle them before they get expanded to scalar.
    setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);

    setOperationAction(ISD::AND, MVT::v4i32, Legal);
    setOperationAction(ISD::OR, MVT::v4i32, Legal);
    setOperationAction(ISD::XOR, MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    // Custom-lower ROTL of v1i128 to a VECTOR_SHUFFLE of v16i8.
    setOperationAction(ISD::ROTL, MVT::v1i128, Custom);
    // With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w).
    if (Subtarget.hasAltivec())
      for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})
        setOperationAction(ISD::ROTL, VT, Legal);
    // With hasP8Altivec set, we can lower ISD::ROTL to vrld.
    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::ROTL, MVT::v2i64, Legal);
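    // Illustratively, a v4i32 rotate by a per-element amount becomes a single
    //   vrlw v2, v2, v3
    // with vrlb/vrlh covering the byte/halfword forms and vrld (P8) the
    // doubleword form.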

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    if (Subtarget.isISA3_1()) {
      setOperationAction(ISD::MUL, MVT::v2i64, Legal);
      setOperationAction(ISD::MULHS, MVT::v2i64, Legal);
      setOperationAction(ISD::MULHU, MVT::v2i64, Legal);
      setOperationAction(ISD::MULHS, MVT::v4i32, Legal);
      setOperationAction(ISD::MULHU, MVT::v4i32, Legal);
      setOperationAction(ISD::UDIV, MVT::v2i64, Legal);
      setOperationAction(ISD::SDIV, MVT::v2i64, Legal);
      setOperationAction(ISD::UDIV, MVT::v4i32, Legal);
      setOperationAction(ISD::SDIV, MVT::v4i32, Legal);
      setOperationAction(ISD::UREM, MVT::v2i64, Legal);
      setOperationAction(ISD::SREM, MVT::v2i64, Legal);
      setOperationAction(ISD::UREM, MVT::v4i32, Legal);
      setOperationAction(ISD::SREM, MVT::v4i32, Legal);
      setOperationAction(ISD::UREM, MVT::v1i128, Legal);
      setOperationAction(ISD::SREM, MVT::v1i128, Legal);
      setOperationAction(ISD::UDIV, MVT::v1i128, Legal);
      setOperationAction(ISD::SDIV, MVT::v1i128, Legal);
      setOperationAction(ISD::ROTL, MVT::v1i128, Legal);
    }

    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO,   MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
      }
      if (Subtarget.hasDirectMove() && isPPC64) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
      }
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      // The nearbyint variants are not allowed to raise the inexact
      // exception, so we can only code-gen them with unsafe math.
      if (TM.Options.UnsafeFPMath) {
        setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
        setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
      }

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::f64, Legal);
      setOperationAction(ISD::FRINT, MVT::f64, Legal);

      setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
      setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::f32, Legal);
      setOperationAction(ISD::FRINT, MVT::f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO,   MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        // 128-bit shifts can be done in 3 instructions for SHL and SRL, but
        // not for SRA, because of the instructions available: VS{RL} and
        // VS{RL}O. However, due to direct-move costs it's not worth doing
        // here.
        setOperationAction(ISD::SHL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRA, MVT::v1i128, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      } else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      if (Subtarget.isISA3_1())
        setOperationAction(ISD::SETCC, MVT::v1i128, Legal);
      else
        setOperationAction(ISD::SETCC, MVT::v1i128, Expand);

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i64, Legal);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Custom handling for partial vectors of integers converted to
      // floating point. We already have optimal handling for v2i32 through
      // the DAG combine, so those aren't necessary.
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);

      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v2f64, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Legal);

      if (Subtarget.hasDirectMove())
        setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

      // Handle constrained floating-point operations on vectors. The
      // predicate is `hasVSX` because Altivec instructions do not raise
      // floating-point exceptions, whereas VSX vector instructions do.
      setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMA, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMAXNUM, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMINNUM, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FRINT, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FCEIL,  MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMA, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMAXNUM, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMINNUM, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FRINT, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FCEIL,  MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FROUND, MVT::v2f64, Legal);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
      addRegisterClass(MVT::f128, &PPC::VRRCRegClass);

      for (MVT FPT : MVT::fp_valuetypes())
        setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);

      // Expand the SELECT to SELECT_CC
      setOperationAction(ISD::SELECT, MVT::f128, Expand);

      setTruncStoreAction(MVT::f128, MVT::f64, Expand);
      setTruncStoreAction(MVT::f128, MVT::f32, Expand);

      // No implementation for these ops for PowerPC.
      setOperationAction(ISD::FSIN, MVT::f128, Expand);
      setOperationAction(ISD::FCOS, MVT::f128, Expand);
      setOperationAction(ISD::FPOW, MVT::f128, Expand);
      setOperationAction(ISD::FPOWI, MVT::f128, Expand);
      setOperationAction(ISD::FREM, MVT::f128, Expand);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }

    if (Subtarget.hasP9Vector()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

      // 128-bit shifts can be done in 3 instructions for SHL and SRL, but not
      // for SRA, because of the instructions available: VS{RL} and VS{RL}O.
      setOperationAction(ISD::SHL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRA, MVT::v1i128, Expand);
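      // Illustratively, a v1i128 SHL takes roughly three instructions: a
      // vspltb to splat the shift amount, then
      //   vslo vT, vA, vB   ; shift left by whole octets
      //   vsl  vT, vT, vB   ; then by the remaining 0-7 bits
      // No analogous octet/bit pair exists for arithmetic right shifts,
      // hence the Expand for SRA.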

      setOperationAction(ISD::FADD, MVT::f128, Legal);
      setOperationAction(ISD::FSUB, MVT::f128, Legal);
      setOperationAction(ISD::FDIV, MVT::f128, Legal);
      setOperationAction(ISD::FMUL, MVT::f128, Legal);
      setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);

      setOperationAction(ISD::FMA, MVT::f128, Legal);
      setCondCodeAction(ISD::SETULT, MVT::f128, Expand);
      setCondCodeAction(ISD::SETUGT, MVT::f128, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand);
      setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
      setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
      setCondCodeAction(ISD::SETONE, MVT::f128, Expand);

      setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
      setOperationAction(ISD::FRINT, MVT::f128, Legal);
      setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
      setOperationAction(ISD::FCEIL, MVT::f128, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
      setOperationAction(ISD::FROUND, MVT::f128, Legal);

      setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
      setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
      setOperationAction(ISD::BITCAST, MVT::i128, Custom);

      // Handle constrained floating-point operations of fp128
      setOperationAction(ISD::STRICT_FADD, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FSUB, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FMUL, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FDIV, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FMA, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FSQRT, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Legal);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
      setOperationAction(ISD::STRICT_FRINT, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FCEIL, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FROUND, MVT::f128, Legal);
      setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
      setOperationAction(ISD::BSWAP, MVT::v8i16, Legal);
      setOperationAction(ISD::BSWAP, MVT::v4i32, Legal);
      setOperationAction(ISD::BSWAP, MVT::v2i64, Legal);
      setOperationAction(ISD::BSWAP, MVT::v1i128, Legal);
1193     } else if (Subtarget.hasVSX()) {
1194       setOperationAction(ISD::LOAD, MVT::f128, Promote);
1195       setOperationAction(ISD::STORE, MVT::f128, Promote);
1196 
1197       AddPromotedToType(ISD::LOAD, MVT::f128, MVT::v4i32);
1198       AddPromotedToType(ISD::STORE, MVT::f128, MVT::v4i32);
1199 
      // Set FADD/FSUB as libcalls to keep the legalizer from expanding the
      // fp_to_uint and int_to_fp.
1202       setOperationAction(ISD::FADD, MVT::f128, LibCall);
1203       setOperationAction(ISD::FSUB, MVT::f128, LibCall);
1204 
1205       setOperationAction(ISD::FMUL, MVT::f128, Expand);
1206       setOperationAction(ISD::FDIV, MVT::f128, Expand);
1207       setOperationAction(ISD::FNEG, MVT::f128, Expand);
1208       setOperationAction(ISD::FABS, MVT::f128, Expand);
1209       setOperationAction(ISD::FSQRT, MVT::f128, Expand);
1210       setOperationAction(ISD::FMA, MVT::f128, Expand);
1211       setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
1212 
1213       // Expand the fp_extend if the target type is fp128.
1214       setOperationAction(ISD::FP_EXTEND, MVT::f128, Expand);
1215       setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Expand);
1216 
1217       // Expand the fp_round if the source type is fp128.
1218       for (MVT VT : {MVT::f32, MVT::f64}) {
1219         setOperationAction(ISD::FP_ROUND, VT, Custom);
1220         setOperationAction(ISD::STRICT_FP_ROUND, VT, Custom);
1221       }
1222 
1223       setOperationAction(ISD::SETCC, MVT::f128, Custom);
1224       setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Custom);
1225       setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Custom);
1226       setOperationAction(ISD::BR_CC, MVT::f128, Expand);
1227 
1228       // Lower following f128 select_cc pattern:
1229       // select_cc x, y, tv, fv, cc -> select_cc (setcc x, y, cc), 0, tv, fv, NE
1230       setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
1231 
1232       // We need to handle f128 SELECT_CC with integer result type.
1233       setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
1234       setOperationAction(ISD::SELECT_CC, MVT::i64, isPPC64 ? Custom : Expand);
1235     }
1236 
1237     if (Subtarget.hasP9Altivec()) {
1238       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
1239       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
1240 
1241       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8,  Legal);
1242       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
1243       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
1244       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8,  Legal);
1245       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal);
1246       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
1247       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
1248     }
1249 
1250     if (Subtarget.isISA3_1())
1251       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
1252   }
1253 
1254   if (Subtarget.pairedVectorMemops()) {
1255     addRegisterClass(MVT::v256i1, &PPC::VSRpRCRegClass);
1256     setOperationAction(ISD::LOAD, MVT::v256i1, Custom);
1257     setOperationAction(ISD::STORE, MVT::v256i1, Custom);
1258   }
1259   if (Subtarget.hasMMA()) {
1260     addRegisterClass(MVT::v512i1, &PPC::UACCRCRegClass);
1261     setOperationAction(ISD::LOAD, MVT::v512i1, Custom);
1262     setOperationAction(ISD::STORE, MVT::v512i1, Custom);
1263     setOperationAction(ISD::BUILD_VECTOR, MVT::v512i1, Custom);
1264   }
1265 
1266   if (Subtarget.has64BitSupport())
1267     setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
1268 
1269   if (Subtarget.isISA3_1())
1270     setOperationAction(ISD::SRA, MVT::v1i128, Legal);
1271 
1272   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);
1273 
1274   if (!isPPC64) {
1275     setOperationAction(ISD::ATOMIC_LOAD,  MVT::i64, Expand);
1276     setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
1277   }
1278 
1279   setBooleanContents(ZeroOrOneBooleanContent);
1280 
1281   if (Subtarget.hasAltivec()) {
1282     // Altivec instructions set fields to all zeros or all ones.
1283     setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
1284   }
1285 
1286   if (!isPPC64) {
1287     // These libcalls are not available in 32-bit.
1288     setLibcallName(RTLIB::SHL_I128, nullptr);
1289     setLibcallName(RTLIB::SRL_I128, nullptr);
1290     setLibcallName(RTLIB::SRA_I128, nullptr);
1291   }
1292 
1293   if (!isPPC64)
1294     setMaxAtomicSizeInBitsSupported(32);
1295 
1296   setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);
1297 
1298   // We have target-specific dag combine patterns for the following nodes:
1299   setTargetDAGCombine(ISD::ADD);
1300   setTargetDAGCombine(ISD::SHL);
1301   setTargetDAGCombine(ISD::SRA);
1302   setTargetDAGCombine(ISD::SRL);
1303   setTargetDAGCombine(ISD::MUL);
1304   setTargetDAGCombine(ISD::FMA);
1305   setTargetDAGCombine(ISD::SINT_TO_FP);
1306   setTargetDAGCombine(ISD::BUILD_VECTOR);
1307   if (Subtarget.hasFPCVT())
1308     setTargetDAGCombine(ISD::UINT_TO_FP);
1309   setTargetDAGCombine(ISD::LOAD);
1310   setTargetDAGCombine(ISD::STORE);
1311   setTargetDAGCombine(ISD::BR_CC);
1312   if (Subtarget.useCRBits())
1313     setTargetDAGCombine(ISD::BRCOND);
1314   setTargetDAGCombine(ISD::BSWAP);
1315   setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1316   setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
1317   setTargetDAGCombine(ISD::INTRINSIC_VOID);
1318 
1319   setTargetDAGCombine(ISD::SIGN_EXTEND);
1320   setTargetDAGCombine(ISD::ZERO_EXTEND);
1321   setTargetDAGCombine(ISD::ANY_EXTEND);
1322 
1323   setTargetDAGCombine(ISD::TRUNCATE);
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1326 
1327   if (Subtarget.useCRBits()) {
1328     setTargetDAGCombine(ISD::TRUNCATE);
1329     setTargetDAGCombine(ISD::SETCC);
1330     setTargetDAGCombine(ISD::SELECT_CC);
1331   }
1332 
1333   if (Subtarget.hasP9Altivec()) {
1334     setTargetDAGCombine(ISD::ABS);
1335     setTargetDAGCombine(ISD::VSELECT);
1336   }
1337 
1338   setLibcallName(RTLIB::LOG_F128, "logf128");
1339   setLibcallName(RTLIB::LOG2_F128, "log2f128");
1340   setLibcallName(RTLIB::LOG10_F128, "log10f128");
1341   setLibcallName(RTLIB::EXP_F128, "expf128");
1342   setLibcallName(RTLIB::EXP2_F128, "exp2f128");
1343   setLibcallName(RTLIB::SIN_F128, "sinf128");
1344   setLibcallName(RTLIB::COS_F128, "cosf128");
1345   setLibcallName(RTLIB::POW_F128, "powf128");
1346   setLibcallName(RTLIB::FMIN_F128, "fminf128");
1347   setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
1348   setLibcallName(RTLIB::REM_F128, "fmodf128");
1349   setLibcallName(RTLIB::SQRT_F128, "sqrtf128");
1350   setLibcallName(RTLIB::CEIL_F128, "ceilf128");
1351   setLibcallName(RTLIB::FLOOR_F128, "floorf128");
1352   setLibcallName(RTLIB::TRUNC_F128, "truncf128");
1353   setLibcallName(RTLIB::ROUND_F128, "roundf128");
1354   setLibcallName(RTLIB::LROUND_F128, "lroundf128");
1355   setLibcallName(RTLIB::LLROUND_F128, "llroundf128");
1356   setLibcallName(RTLIB::RINT_F128, "rintf128");
1357   setLibcallName(RTLIB::LRINT_F128, "lrintf128");
1358   setLibcallName(RTLIB::LLRINT_F128, "llrintf128");
1359   setLibcallName(RTLIB::NEARBYINT_F128, "nearbyintf128");
1360   setLibcallName(RTLIB::FMA_F128, "fmaf128");
1361 
1362   // With 32 condition bits, we don't need to sink (and duplicate) compares
1363   // aggressively in CodeGenPrep.
1364   if (Subtarget.useCRBits()) {
1365     setHasMultipleConditionRegisters();
1366     setJumpIsExpensive();
1367   }
1368 
1369   setMinFunctionAlignment(Align(4));
1370 
1371   switch (Subtarget.getCPUDirective()) {
1372   default: break;
1373   case PPC::DIR_970:
1374   case PPC::DIR_A2:
1375   case PPC::DIR_E500:
1376   case PPC::DIR_E500mc:
1377   case PPC::DIR_E5500:
1378   case PPC::DIR_PWR4:
1379   case PPC::DIR_PWR5:
1380   case PPC::DIR_PWR5X:
1381   case PPC::DIR_PWR6:
1382   case PPC::DIR_PWR6X:
1383   case PPC::DIR_PWR7:
1384   case PPC::DIR_PWR8:
1385   case PPC::DIR_PWR9:
1386   case PPC::DIR_PWR10:
1387   case PPC::DIR_PWR_FUTURE:
1388     setPrefLoopAlignment(Align(16));
1389     setPrefFunctionAlignment(Align(16));
1390     break;
1391   }
1392 
1393   if (Subtarget.enableMachineScheduler())
1394     setSchedulingPreference(Sched::Source);
1395   else
1396     setSchedulingPreference(Sched::Hybrid);
1397 
1398   computeRegisterProperties(STI.getRegisterInfo());
1399 
  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
1402   if (Subtarget.getCPUDirective() == PPC::DIR_E500mc ||
1403       Subtarget.getCPUDirective() == PPC::DIR_E5500) {
1404     MaxStoresPerMemset = 32;
1405     MaxStoresPerMemsetOptSize = 16;
1406     MaxStoresPerMemcpy = 32;
1407     MaxStoresPerMemcpyOptSize = 8;
1408     MaxStoresPerMemmove = 32;
1409     MaxStoresPerMemmoveOptSize = 8;
1410   } else if (Subtarget.getCPUDirective() == PPC::DIR_A2) {
    // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of the function call, even when warm, can be
    // over one hundred cycles.
1414     MaxStoresPerMemset = 128;
1415     MaxStoresPerMemcpy = 128;
1416     MaxStoresPerMemmove = 128;
1417     MaxLoadsPerMemcmp = 128;
1418   } else {
1419     MaxLoadsPerMemcmp = 8;
1420     MaxLoadsPerMemcmpOptSize = 4;
1421   }
1422 
1423   IsStrictFPEnabled = true;
1424 
1425   // Let the subtarget (CPU) decide if a predictable select is more expensive
1426   // than the corresponding branch. This information is used in CGP to decide
1427   // when to convert selects into branches.
1428   PredictableSelectIsExpensive = Subtarget.isPredictableSelectIsExpensive();
1429 }
1430 
1431 // *********************************** NOTE ************************************
1432 // For selecting load and store instructions, the addressing modes are defined
1433 // as ComplexPatterns in PPCInstrInfo.td, which are then utilized in the TD
// patterns to match the load and store instructions.
1435 //
1436 // The TD definitions for the addressing modes correspond to their respective
1437 // Select<AddrMode>Form() function in PPCISelDAGToDAG.cpp. These functions rely
1438 // on SelectOptimalAddrMode(), which calls computeMOFlags() to compute the
1439 // address mode flags of a particular node. Afterwards, the computed address
1440 // flags are passed into getAddrModeForFlags() in order to retrieve the optimal
1441 // addressing mode. SelectOptimalAddrMode() then sets the Base and Displacement
1442 // accordingly, based on the preferred addressing mode.
1443 //
1444 // Within PPCISelLowering.h, there are two enums: MemOpFlags and AddrMode.
1445 // MemOpFlags contains all the possible flags that can be used to compute the
1446 // optimal addressing mode for load and store instructions.
1447 // AddrMode contains all the possible load and store addressing modes available
1448 // on Power (such as DForm, DSForm, DQForm, XForm, etc.)
1449 //
// When adding new load and store instructions, it is possible that new address
// flags may need to be added into MemOpFlags, and a new addressing mode will
// need to be added to AddrMode. An entry for the new addressing mode
// (consisting of the minimal and main distinguishing address flags for the new
// load/store instructions) will need to be added to initializeAddrModeMap()
// below. Finally, when adding new addressing modes, getAddrModeForFlags() will
// need to be updated to account for selecting the optimal addressing mode.
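//
// As an illustrative (non-normative) example of this flow: a zero-extending
// i32 load whose address is a register plus a signed 16-bit immediate would
// be assigned the flags PPC::MOF_ZExt | PPC::MOF_RPlusSImm16 |
// PPC::MOF_WordInt by computeMOFlags(); getAddrModeForFlags() then finds
// this combination in the AM_DForm entry populated below, selecting the
// D-Form (e.g. LWZ) addressing mode.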
1457 // *****************************************************************************
1458 /// Initialize the map that relates the different addressing modes of the load
1459 /// and store instructions to a set of flags. This ensures the load/store
1460 /// instruction is correctly matched during instruction selection.
1461 void PPCTargetLowering::initializeAddrModeMap() {
1462   AddrModesMap[PPC::AM_DForm] = {
1463       // LWZ, STW
1464       PPC::MOF_ZExt | PPC::MOF_RPlusSImm16 | PPC::MOF_WordInt,
1465       PPC::MOF_ZExt | PPC::MOF_RPlusLo | PPC::MOF_WordInt,
1466       PPC::MOF_ZExt | PPC::MOF_NotAddNorCst | PPC::MOF_WordInt,
1467       PPC::MOF_ZExt | PPC::MOF_AddrIsSImm32 | PPC::MOF_WordInt,
1468       // LBZ, LHZ, STB, STH
1469       PPC::MOF_ZExt | PPC::MOF_RPlusSImm16 | PPC::MOF_SubWordInt,
1470       PPC::MOF_ZExt | PPC::MOF_RPlusLo | PPC::MOF_SubWordInt,
1471       PPC::MOF_ZExt | PPC::MOF_NotAddNorCst | PPC::MOF_SubWordInt,
1472       PPC::MOF_ZExt | PPC::MOF_AddrIsSImm32 | PPC::MOF_SubWordInt,
1473       // LHA
1474       PPC::MOF_SExt | PPC::MOF_RPlusSImm16 | PPC::MOF_SubWordInt,
1475       PPC::MOF_SExt | PPC::MOF_RPlusLo | PPC::MOF_SubWordInt,
1476       PPC::MOF_SExt | PPC::MOF_NotAddNorCst | PPC::MOF_SubWordInt,
1477       PPC::MOF_SExt | PPC::MOF_AddrIsSImm32 | PPC::MOF_SubWordInt,
1478       // LFS, LFD, STFS, STFD
1479       PPC::MOF_RPlusSImm16 | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetBeforeP9,
1480       PPC::MOF_RPlusLo | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetBeforeP9,
1481       PPC::MOF_NotAddNorCst | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetBeforeP9,
1482       PPC::MOF_AddrIsSImm32 | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetBeforeP9,
1483   };
1484   AddrModesMap[PPC::AM_DSForm] = {
1485       // LWA
1486       PPC::MOF_SExt | PPC::MOF_RPlusSImm16Mult4 | PPC::MOF_WordInt,
1487       PPC::MOF_SExt | PPC::MOF_NotAddNorCst | PPC::MOF_WordInt,
1488       PPC::MOF_SExt | PPC::MOF_AddrIsSImm32 | PPC::MOF_WordInt,
1489       // LD, STD
1490       PPC::MOF_RPlusSImm16Mult4 | PPC::MOF_DoubleWordInt,
1491       PPC::MOF_NotAddNorCst | PPC::MOF_DoubleWordInt,
1492       PPC::MOF_AddrIsSImm32 | PPC::MOF_DoubleWordInt,
1493       // DFLOADf32, DFLOADf64, DSTOREf32, DSTOREf64
1494       PPC::MOF_RPlusSImm16Mult4 | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetP9,
1495       PPC::MOF_NotAddNorCst | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetP9,
1496       PPC::MOF_AddrIsSImm32 | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetP9,
1497   };
1498   AddrModesMap[PPC::AM_DQForm] = {
1499       // LXV, STXV
1500       PPC::MOF_RPlusSImm16Mult16 | PPC::MOF_Vector | PPC::MOF_SubtargetP9,
1501       PPC::MOF_NotAddNorCst | PPC::MOF_Vector | PPC::MOF_SubtargetP9,
1502       PPC::MOF_AddrIsSImm32 | PPC::MOF_Vector | PPC::MOF_SubtargetP9,
1503       PPC::MOF_RPlusSImm16Mult16 | PPC::MOF_Vector256 | PPC::MOF_SubtargetP10,
1504       PPC::MOF_NotAddNorCst | PPC::MOF_Vector256 | PPC::MOF_SubtargetP10,
1505       PPC::MOF_AddrIsSImm32 | PPC::MOF_Vector256 | PPC::MOF_SubtargetP10,
1506   };
1507 }
1508 
1509 /// getMaxByValAlign - Helper for getByValTypeAlignment to determine
1510 /// the desired ByVal argument alignment.
1511 static void getMaxByValAlign(Type *Ty, Align &MaxAlign, Align MaxMaxAlign) {
1512   if (MaxAlign == MaxMaxAlign)
1513     return;
1514   if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1515     if (MaxMaxAlign >= 32 &&
1516         VTy->getPrimitiveSizeInBits().getFixedSize() >= 256)
1517       MaxAlign = Align(32);
1518     else if (VTy->getPrimitiveSizeInBits().getFixedSize() >= 128 &&
1519              MaxAlign < 16)
1520       MaxAlign = Align(16);
1521   } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1522     Align EltAlign;
1523     getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
1524     if (EltAlign > MaxAlign)
1525       MaxAlign = EltAlign;
1526   } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1527     for (auto *EltTy : STy->elements()) {
1528       Align EltAlign;
1529       getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
1530       if (EltAlign > MaxAlign)
1531         MaxAlign = EltAlign;
1532       if (MaxAlign == MaxMaxAlign)
1533         break;
1534     }
1535   }
1536 }
1537 
1538 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1539 /// function arguments in the caller parameter area.
1540 unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
1541                                                   const DataLayout &DL) const {
  // 16-byte and wider vectors are passed on a 16-byte boundary.
  // The rest are passed on an 8-byte boundary on PPC64 and a 4-byte
  // boundary on PPC32.
1544   Align Alignment = Subtarget.isPPC64() ? Align(8) : Align(4);
1545   if (Subtarget.hasAltivec())
1546     getMaxByValAlign(Ty, Alignment, Align(16));
1547   return Alignment.value();
1548 }
1549 
1550 bool PPCTargetLowering::useSoftFloat() const {
1551   return Subtarget.useSoftFloat();
1552 }
1553 
1554 bool PPCTargetLowering::hasSPE() const {
1555   return Subtarget.hasSPE();
1556 }
1557 
1558 bool PPCTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
1559   return VT.isScalarInteger();
1560 }
1561 
1562 const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
1563   switch ((PPCISD::NodeType)Opcode) {
1564   case PPCISD::FIRST_NUMBER:    break;
1565   case PPCISD::FSEL:            return "PPCISD::FSEL";
1566   case PPCISD::XSMAXCDP:        return "PPCISD::XSMAXCDP";
1567   case PPCISD::XSMINCDP:        return "PPCISD::XSMINCDP";
1568   case PPCISD::FCFID:           return "PPCISD::FCFID";
1569   case PPCISD::FCFIDU:          return "PPCISD::FCFIDU";
1570   case PPCISD::FCFIDS:          return "PPCISD::FCFIDS";
1571   case PPCISD::FCFIDUS:         return "PPCISD::FCFIDUS";
1572   case PPCISD::FCTIDZ:          return "PPCISD::FCTIDZ";
1573   case PPCISD::FCTIWZ:          return "PPCISD::FCTIWZ";
1574   case PPCISD::FCTIDUZ:         return "PPCISD::FCTIDUZ";
1575   case PPCISD::FCTIWUZ:         return "PPCISD::FCTIWUZ";
1576   case PPCISD::FP_TO_UINT_IN_VSR:
1577                                 return "PPCISD::FP_TO_UINT_IN_VSR,";
1578   case PPCISD::FP_TO_SINT_IN_VSR:
1579                                 return "PPCISD::FP_TO_SINT_IN_VSR";
1580   case PPCISD::FRE:             return "PPCISD::FRE";
1581   case PPCISD::FRSQRTE:         return "PPCISD::FRSQRTE";
1582   case PPCISD::FTSQRT:
1583     return "PPCISD::FTSQRT";
1584   case PPCISD::FSQRT:
1585     return "PPCISD::FSQRT";
1586   case PPCISD::STFIWX:          return "PPCISD::STFIWX";
1587   case PPCISD::VPERM:           return "PPCISD::VPERM";
1588   case PPCISD::XXSPLT:          return "PPCISD::XXSPLT";
1589   case PPCISD::XXSPLTI_SP_TO_DP:
1590     return "PPCISD::XXSPLTI_SP_TO_DP";
1591   case PPCISD::XXSPLTI32DX:
1592     return "PPCISD::XXSPLTI32DX";
1593   case PPCISD::VECINSERT:       return "PPCISD::VECINSERT";
1594   case PPCISD::XXPERMDI:        return "PPCISD::XXPERMDI";
1595   case PPCISD::VECSHL:          return "PPCISD::VECSHL";
1596   case PPCISD::CMPB:            return "PPCISD::CMPB";
1597   case PPCISD::Hi:              return "PPCISD::Hi";
1598   case PPCISD::Lo:              return "PPCISD::Lo";
1599   case PPCISD::TOC_ENTRY:       return "PPCISD::TOC_ENTRY";
1600   case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
1601   case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
1602   case PPCISD::DYNALLOC:        return "PPCISD::DYNALLOC";
1603   case PPCISD::DYNAREAOFFSET:   return "PPCISD::DYNAREAOFFSET";
1604   case PPCISD::PROBED_ALLOCA:   return "PPCISD::PROBED_ALLOCA";
1605   case PPCISD::GlobalBaseReg:   return "PPCISD::GlobalBaseReg";
1606   case PPCISD::SRL:             return "PPCISD::SRL";
1607   case PPCISD::SRA:             return "PPCISD::SRA";
1608   case PPCISD::SHL:             return "PPCISD::SHL";
1609   case PPCISD::SRA_ADDZE:       return "PPCISD::SRA_ADDZE";
1610   case PPCISD::CALL:            return "PPCISD::CALL";
1611   case PPCISD::CALL_NOP:        return "PPCISD::CALL_NOP";
1612   case PPCISD::CALL_NOTOC:      return "PPCISD::CALL_NOTOC";
1613   case PPCISD::MTCTR:           return "PPCISD::MTCTR";
1614   case PPCISD::BCTRL:           return "PPCISD::BCTRL";
1615   case PPCISD::BCTRL_LOAD_TOC:  return "PPCISD::BCTRL_LOAD_TOC";
1616   case PPCISD::RET_FLAG:        return "PPCISD::RET_FLAG";
1617   case PPCISD::READ_TIME_BASE:  return "PPCISD::READ_TIME_BASE";
1618   case PPCISD::EH_SJLJ_SETJMP:  return "PPCISD::EH_SJLJ_SETJMP";
1619   case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
1620   case PPCISD::MFOCRF:          return "PPCISD::MFOCRF";
1621   case PPCISD::MFVSR:           return "PPCISD::MFVSR";
1622   case PPCISD::MTVSRA:          return "PPCISD::MTVSRA";
1623   case PPCISD::MTVSRZ:          return "PPCISD::MTVSRZ";
1624   case PPCISD::SINT_VEC_TO_FP:  return "PPCISD::SINT_VEC_TO_FP";
1625   case PPCISD::UINT_VEC_TO_FP:  return "PPCISD::UINT_VEC_TO_FP";
1626   case PPCISD::SCALAR_TO_VECTOR_PERMUTED:
1627     return "PPCISD::SCALAR_TO_VECTOR_PERMUTED";
1628   case PPCISD::ANDI_rec_1_EQ_BIT:
1629     return "PPCISD::ANDI_rec_1_EQ_BIT";
1630   case PPCISD::ANDI_rec_1_GT_BIT:
1631     return "PPCISD::ANDI_rec_1_GT_BIT";
1632   case PPCISD::VCMP:            return "PPCISD::VCMP";
1633   case PPCISD::VCMP_rec:        return "PPCISD::VCMP_rec";
1634   case PPCISD::LBRX:            return "PPCISD::LBRX";
1635   case PPCISD::STBRX:           return "PPCISD::STBRX";
1636   case PPCISD::LFIWAX:          return "PPCISD::LFIWAX";
1637   case PPCISD::LFIWZX:          return "PPCISD::LFIWZX";
1638   case PPCISD::LXSIZX:          return "PPCISD::LXSIZX";
1639   case PPCISD::STXSIX:          return "PPCISD::STXSIX";
1640   case PPCISD::VEXTS:           return "PPCISD::VEXTS";
1641   case PPCISD::LXVD2X:          return "PPCISD::LXVD2X";
1642   case PPCISD::STXVD2X:         return "PPCISD::STXVD2X";
1643   case PPCISD::LOAD_VEC_BE:     return "PPCISD::LOAD_VEC_BE";
1644   case PPCISD::STORE_VEC_BE:    return "PPCISD::STORE_VEC_BE";
1645   case PPCISD::ST_VSR_SCAL_INT:
1646                                 return "PPCISD::ST_VSR_SCAL_INT";
1647   case PPCISD::COND_BRANCH:     return "PPCISD::COND_BRANCH";
1648   case PPCISD::BDNZ:            return "PPCISD::BDNZ";
1649   case PPCISD::BDZ:             return "PPCISD::BDZ";
1650   case PPCISD::MFFS:            return "PPCISD::MFFS";
1651   case PPCISD::FADDRTZ:         return "PPCISD::FADDRTZ";
1652   case PPCISD::TC_RETURN:       return "PPCISD::TC_RETURN";
1653   case PPCISD::CR6SET:          return "PPCISD::CR6SET";
1654   case PPCISD::CR6UNSET:        return "PPCISD::CR6UNSET";
1655   case PPCISD::PPC32_GOT:       return "PPCISD::PPC32_GOT";
1656   case PPCISD::PPC32_PICGOT:    return "PPCISD::PPC32_PICGOT";
1657   case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
1658   case PPCISD::LD_GOT_TPREL_L:  return "PPCISD::LD_GOT_TPREL_L";
1659   case PPCISD::ADD_TLS:         return "PPCISD::ADD_TLS";
1660   case PPCISD::ADDIS_TLSGD_HA:  return "PPCISD::ADDIS_TLSGD_HA";
1661   case PPCISD::ADDI_TLSGD_L:    return "PPCISD::ADDI_TLSGD_L";
1662   case PPCISD::GET_TLS_ADDR:    return "PPCISD::GET_TLS_ADDR";
1663   case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
1664   case PPCISD::TLSGD_AIX:       return "PPCISD::TLSGD_AIX";
1665   case PPCISD::ADDIS_TLSLD_HA:  return "PPCISD::ADDIS_TLSLD_HA";
1666   case PPCISD::ADDI_TLSLD_L:    return "PPCISD::ADDI_TLSLD_L";
1667   case PPCISD::GET_TLSLD_ADDR:  return "PPCISD::GET_TLSLD_ADDR";
1668   case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
1669   case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
1670   case PPCISD::ADDI_DTPREL_L:   return "PPCISD::ADDI_DTPREL_L";
1671   case PPCISD::PADDI_DTPREL:
1672     return "PPCISD::PADDI_DTPREL";
1673   case PPCISD::VADD_SPLAT:      return "PPCISD::VADD_SPLAT";
1674   case PPCISD::SC:              return "PPCISD::SC";
1675   case PPCISD::CLRBHRB:         return "PPCISD::CLRBHRB";
1676   case PPCISD::MFBHRBE:         return "PPCISD::MFBHRBE";
1677   case PPCISD::RFEBB:           return "PPCISD::RFEBB";
1678   case PPCISD::XXSWAPD:         return "PPCISD::XXSWAPD";
1679   case PPCISD::SWAP_NO_CHAIN:   return "PPCISD::SWAP_NO_CHAIN";
1680   case PPCISD::VABSD:           return "PPCISD::VABSD";
1681   case PPCISD::BUILD_FP128:     return "PPCISD::BUILD_FP128";
1682   case PPCISD::BUILD_SPE64:     return "PPCISD::BUILD_SPE64";
1683   case PPCISD::EXTRACT_SPE:     return "PPCISD::EXTRACT_SPE";
1684   case PPCISD::EXTSWSLI:        return "PPCISD::EXTSWSLI";
1685   case PPCISD::LD_VSX_LH:       return "PPCISD::LD_VSX_LH";
1686   case PPCISD::FP_EXTEND_HALF:  return "PPCISD::FP_EXTEND_HALF";
1687   case PPCISD::MAT_PCREL_ADDR:  return "PPCISD::MAT_PCREL_ADDR";
1688   case PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR:
1689     return "PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR";
1690   case PPCISD::TLS_LOCAL_EXEC_MAT_ADDR:
1691     return "PPCISD::TLS_LOCAL_EXEC_MAT_ADDR";
1692   case PPCISD::ACC_BUILD:       return "PPCISD::ACC_BUILD";
1693   case PPCISD::PAIR_BUILD:      return "PPCISD::PAIR_BUILD";
1694   case PPCISD::EXTRACT_VSX_REG: return "PPCISD::EXTRACT_VSX_REG";
1695   case PPCISD::XXMFACC:         return "PPCISD::XXMFACC";
1696   case PPCISD::LD_SPLAT:        return "PPCISD::LD_SPLAT";
1697   case PPCISD::FNMSUB:          return "PPCISD::FNMSUB";
1698   case PPCISD::STRICT_FADDRTZ:
1699     return "PPCISD::STRICT_FADDRTZ";
1700   case PPCISD::STRICT_FCTIDZ:
1701     return "PPCISD::STRICT_FCTIDZ";
1702   case PPCISD::STRICT_FCTIWZ:
1703     return "PPCISD::STRICT_FCTIWZ";
1704   case PPCISD::STRICT_FCTIDUZ:
1705     return "PPCISD::STRICT_FCTIDUZ";
1706   case PPCISD::STRICT_FCTIWUZ:
1707     return "PPCISD::STRICT_FCTIWUZ";
1708   case PPCISD::STRICT_FCFID:
1709     return "PPCISD::STRICT_FCFID";
1710   case PPCISD::STRICT_FCFIDU:
1711     return "PPCISD::STRICT_FCFIDU";
1712   case PPCISD::STRICT_FCFIDS:
1713     return "PPCISD::STRICT_FCFIDS";
1714   case PPCISD::STRICT_FCFIDUS:
1715     return "PPCISD::STRICT_FCFIDUS";
1716   case PPCISD::LXVRZX:          return "PPCISD::LXVRZX";
1717   }
1718   return nullptr;
1719 }
1720 
1721 EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
1722                                           EVT VT) const {
1723   if (!VT.isVector())
1724     return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;
1725 
1726   return VT.changeVectorElementTypeToInteger();
1727 }
1728 
1729 bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
1730   assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
1731   return true;
1732 }
1733 
1734 //===----------------------------------------------------------------------===//
1735 // Node matching predicates, for use by the tblgen matching code.
1736 //===----------------------------------------------------------------------===//
1737 
1738 /// isFloatingPointZero - Return true if this is 0.0 or -0.0.
1739 static bool isFloatingPointZero(SDValue Op) {
1740   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
1741     return CFP->getValueAPF().isZero();
1742   else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
1743     // Maybe this has already been legalized into the constant pool?
1744     if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
1745       if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
1746         return CFP->getValueAPF().isZero();
1747   }
1748   return false;
1749 }
1750 
/// isConstantOrUndef - Op is a shuffle mask element: either undef (negative)
/// or a constant index.  Return true if Op is undef or if it matches Val.
1753 static bool isConstantOrUndef(int Op, int Val) {
1754   return Op < 0 || Op == Val;
1755 }
1756 
1757 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
1758 /// VPKUHUM instruction.
1759 /// The ShuffleKind distinguishes between big-endian operations with
1760 /// two different inputs (0), either-endian operations with two identical
1761 /// inputs (1), and little-endian operations with two different inputs (2).
1762 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1763 bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1764                                SelectionDAG &DAG) {
1765   bool IsLE = DAG.getDataLayout().isLittleEndian();
1766   if (ShuffleKind == 0) {
1767     if (IsLE)
1768       return false;
1769     for (unsigned i = 0; i != 16; ++i)
1770       if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
1771         return false;
1772   } else if (ShuffleKind == 2) {
1773     if (!IsLE)
1774       return false;
1775     for (unsigned i = 0; i != 16; ++i)
1776       if (!isConstantOrUndef(N->getMaskElt(i), i*2))
1777         return false;
1778   } else if (ShuffleKind == 1) {
1779     unsigned j = IsLE ? 0 : 1;
1780     for (unsigned i = 0; i != 8; ++i)
1781       if (!isConstantOrUndef(N->getMaskElt(i),    i*2+j) ||
1782           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j))
1783         return false;
1784   }
1785   return true;
1786 }
1787 
1788 /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
1789 /// VPKUWUM instruction.
1790 /// The ShuffleKind distinguishes between big-endian operations with
1791 /// two different inputs (0), either-endian operations with two identical
1792 /// inputs (1), and little-endian operations with two different inputs (2).
1793 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1794 bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1795                                SelectionDAG &DAG) {
1796   bool IsLE = DAG.getDataLayout().isLittleEndian();
1797   if (ShuffleKind == 0) {
1798     if (IsLE)
1799       return false;
1800     for (unsigned i = 0; i != 16; i += 2)
1801       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+2) ||
1802           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+3))
1803         return false;
1804   } else if (ShuffleKind == 2) {
1805     if (!IsLE)
1806       return false;
1807     for (unsigned i = 0; i != 16; i += 2)
1808       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
1809           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1))
1810         return false;
1811   } else if (ShuffleKind == 1) {
1812     unsigned j = IsLE ? 0 : 2;
1813     for (unsigned i = 0; i != 8; i += 2)
1814       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
1815           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
1816           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
1817           !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1))
1818         return false;
1819   }
1820   return true;
1821 }
1822 
1823 /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
1824 /// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
1825 /// current subtarget.
1826 ///
1827 /// The ShuffleKind distinguishes between big-endian operations with
1828 /// two different inputs (0), either-endian operations with two identical
1829 /// inputs (1), and little-endian operations with two different inputs (2).
1830 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1831 bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1832                                SelectionDAG &DAG) {
1833   const PPCSubtarget& Subtarget =
1834       static_cast<const PPCSubtarget&>(DAG.getSubtarget());
1835   if (!Subtarget.hasP8Vector())
1836     return false;
1837 
1838   bool IsLE = DAG.getDataLayout().isLittleEndian();
1839   if (ShuffleKind == 0) {
1840     if (IsLE)
1841       return false;
1842     for (unsigned i = 0; i != 16; i += 4)
1843       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+4) ||
1844           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+5) ||
1845           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+6) ||
1846           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+7))
1847         return false;
1848   } else if (ShuffleKind == 2) {
1849     if (!IsLE)
1850       return false;
1851     for (unsigned i = 0; i != 16; i += 4)
1852       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
1853           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1) ||
1854           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+2) ||
1855           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+3))
1856         return false;
1857   } else if (ShuffleKind == 1) {
1858     unsigned j = IsLE ? 0 : 4;
1859     for (unsigned i = 0; i != 8; i += 4)
1860       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
1861           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
1862           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+j+2) ||
1863           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+j+3) ||
1864           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
1865           !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1) ||
1866           !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
1867           !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
1868         return false;
1869   }
1870   return true;
1871 }
1872 
1873 /// isVMerge - Common function, used to match vmrg* shuffles.
1874 ///
1875 static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
1876                      unsigned LHSStart, unsigned RHSStart) {
1877   if (N->getValueType(0) != MVT::v16i8)
1878     return false;
1879   assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
1880          "Unsupported merge size!");
1881 
1882   for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
1883     for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
1884       if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
1885                              LHSStart+j+i*UnitSize) ||
1886           !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
1887                              RHSStart+j+i*UnitSize))
1888         return false;
1889     }
1890   return true;
1891 }
1892 
1893 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
1894 /// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
1895 /// The ShuffleKind distinguishes between big-endian merges with two
1896 /// different inputs (0), either-endian merges with two identical inputs (1),
1897 /// and little-endian merges with two different inputs (2).  For the latter,
1898 /// the input operands are swapped (see PPCInstrAltivec.td).
1899 bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1900                              unsigned ShuffleKind, SelectionDAG &DAG) {
1901   if (DAG.getDataLayout().isLittleEndian()) {
1902     if (ShuffleKind == 1) // unary
1903       return isVMerge(N, UnitSize, 0, 0);
1904     else if (ShuffleKind == 2) // swapped
1905       return isVMerge(N, UnitSize, 0, 16);
1906     else
1907       return false;
1908   } else {
1909     if (ShuffleKind == 1) // unary
1910       return isVMerge(N, UnitSize, 8, 8);
1911     else if (ShuffleKind == 0) // normal
1912       return isVMerge(N, UnitSize, 8, 24);
1913     else
1914       return false;
1915   }
1916 }
1917 
1918 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
1919 /// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
1920 /// The ShuffleKind distinguishes between big-endian merges with two
1921 /// different inputs (0), either-endian merges with two identical inputs (1),
1922 /// and little-endian merges with two different inputs (2).  For the latter,
1923 /// the input operands are swapped (see PPCInstrAltivec.td).
1924 bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1925                              unsigned ShuffleKind, SelectionDAG &DAG) {
1926   if (DAG.getDataLayout().isLittleEndian()) {
1927     if (ShuffleKind == 1) // unary
1928       return isVMerge(N, UnitSize, 8, 8);
1929     else if (ShuffleKind == 2) // swapped
1930       return isVMerge(N, UnitSize, 8, 24);
1931     else
1932       return false;
1933   } else {
1934     if (ShuffleKind == 1) // unary
1935       return isVMerge(N, UnitSize, 0, 0);
1936     else if (ShuffleKind == 0) // normal
1937       return isVMerge(N, UnitSize, 0, 16);
1938     else
1939       return false;
1940   }
1941 }
1942 
1943 /**
1944  * Common function used to match vmrgew and vmrgow shuffles
1945  *
1946  * The indexOffset determines whether to look for even or odd words in
 * the shuffle mask. This is based on the endianness of the target
 * machine.
1949  *   - Little Endian:
1950  *     - Use offset of 0 to check for odd elements
1951  *     - Use offset of 4 to check for even elements
1952  *   - Big Endian:
1953  *     - Use offset of 0 to check for even elements
1954  *     - Use offset of 4 to check for odd elements
 * A detailed description of the vector element ordering for little endian and
 * big endian can be found in "Targeting your applications - what little endian
 * and big endian IBM XL C/C++ compiler differences mean to you" at
 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
1960  *
1961  * The mask to the shuffle vector instruction specifies the indices of the
1962  * elements from the two input vectors to place in the result. The elements are
 * numbered in array-access order, starting with the first vector. These vectors
 * are always of type v16i8, thus each vector will contain sixteen 8-bit
 * elements. More information on the shufflevector instruction can be found in
 * the Language Reference:
 * http://llvm.org/docs/LangRef.html#shufflevector-instruction
1968  *
1969  * The RHSStartValue indicates whether the same input vectors are used (unary)
1970  * or two different input vectors are used, based on the following:
1971  *   - If the instruction uses the same vector for both inputs, the range of the
1972  *     indices will be 0 to 15. In this case, the RHSStart value passed should
1973  *     be 0.
1974  *   - If the instruction has two different vectors then the range of the
1975  *     indices will be 0 to 31. In this case, the RHSStart value passed should
1976  *     be 16 (indices 0-15 specify elements in the first vector while indices 16
1977  *     to 31 specify elements in the second vector).
1978  *
1979  * \param[in] N The shuffle vector SD Node to analyze
1980  * \param[in] IndexOffset Specifies whether to look for even or odd elements
1981  * \param[in] RHSStartValue Specifies the starting index for the righthand input
1982  * vector to the shuffle_vector instruction
1983  * \return true iff this shuffle vector represents an even or odd word merge
1984  */
1985 static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
1986                      unsigned RHSStartValue) {
1987   if (N->getValueType(0) != MVT::v16i8)
1988     return false;
1989 
1990   for (unsigned i = 0; i < 2; ++i)
1991     for (unsigned j = 0; j < 4; ++j)
1992       if (!isConstantOrUndef(N->getMaskElt(i*4+j),
1993                              i*RHSStartValue+j+IndexOffset) ||
1994           !isConstantOrUndef(N->getMaskElt(i*4+j+8),
1995                              i*RHSStartValue+j+IndexOffset+8))
1996         return false;
1997   return true;
1998 }
1999 
2000 /**
2001  * Determine if the specified shuffle mask is suitable for the vmrgew or
2002  * vmrgow instructions.
2003  *
2004  * \param[in] N The shuffle vector SD Node to analyze
2005  * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
2006  * \param[in] ShuffleKind Identify the type of merge:
2007  *   - 0 = big-endian merge with two different inputs;
2008  *   - 1 = either-endian merge with two identical inputs;
2009  *   - 2 = little-endian merge with two different inputs (inputs are swapped for
2010  *     little-endian merges).
2011  * \param[in] DAG The current SelectionDAG
 * \return true iff this shuffle mask is suitable for the vmrgew or vmrgow
 * instruction
2013  */
2014 bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
2015                               unsigned ShuffleKind, SelectionDAG &DAG) {
2016   if (DAG.getDataLayout().isLittleEndian()) {
2017     unsigned indexOffset = CheckEven ? 4 : 0;
2018     if (ShuffleKind == 1) // Unary
2019       return isVMerge(N, indexOffset, 0);
2020     else if (ShuffleKind == 2) // swapped
2021       return isVMerge(N, indexOffset, 16);
2022     else
2023       return false;
2024   }
2025   else {
2026     unsigned indexOffset = CheckEven ? 0 : 4;
2027     if (ShuffleKind == 1) // Unary
2028       return isVMerge(N, indexOffset, 0);
2029     else if (ShuffleKind == 0) // Normal
2030       return isVMerge(N, indexOffset, 16);
2031     else
2032       return false;
2033   }
2035 }
2036 
2037 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
2038 /// amount, otherwise return -1.
2039 /// The ShuffleKind distinguishes between big-endian operations with two
2040 /// different inputs (0), either-endian operations with two identical inputs
2041 /// (1), and little-endian operations with two different inputs (2).  For the
2042 /// latter, the input operands are swapped (see PPCInstrAltivec.td).
2043 int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
2044                              SelectionDAG &DAG) {
2045   if (N->getValueType(0) != MVT::v16i8)
2046     return -1;
2047 
2048   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2049 
2050   // Find the first non-undef value in the shuffle mask.
2051   unsigned i;
2052   for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
2053     /*search*/;
2054 
2055   if (i == 16) return -1;  // all undef.
2056 
2057   // Otherwise, check to see if the rest of the elements are consecutively
2058   // numbered from this value.
2059   unsigned ShiftAmt = SVOp->getMaskElt(i);
2060   if (ShiftAmt < i) return -1;
2061 
2062   ShiftAmt -= i;
2063   bool isLE = DAG.getDataLayout().isLittleEndian();
2064 
2065   if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
2066     // Check the rest of the elements to see if they are consecutive.
2067     for (++i; i != 16; ++i)
2068       if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
2069         return -1;
2070   } else if (ShuffleKind == 1) {
2071     // Check the rest of the elements to see if they are consecutive.
2072     for (++i; i != 16; ++i)
2073       if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
2074         return -1;
2075   } else
2076     return -1;
2077 
2078   if (isLE)
2079     ShiftAmt = 16 - ShiftAmt;
2080 
2081   return ShiftAmt;
2082 }
2083 
2084 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
2085 /// specifies a splat of a single element that is suitable for input to
2086 /// one of the splat operations (VSPLTB/VSPLTH/VSPLTW/XXSPLTW/LXVDSX/etc.).
2087 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
2088   assert(N->getValueType(0) == MVT::v16i8 && isPowerOf2_32(EltSize) &&
2089          EltSize <= 8 && "Can only handle 1,2,4,8 byte element sizes");
2090 
2091   // The consecutive indices need to specify an element, not part of two
2092   // different elements.  So abandon ship early if this isn't the case.
2093   if (N->getMaskElt(0) % EltSize != 0)
2094     return false;
2095 
2096   // This is a splat operation if each element of the permute is the same, and
2097   // if the value doesn't reference the second vector.
2098   unsigned ElementBase = N->getMaskElt(0);
2099 
2100   // FIXME: Handle UNDEF elements too!
2101   if (ElementBase >= 16)
2102     return false;
2103 
2104   // Check that the indices are consecutive, in the case of a multi-byte element
2105   // splatted with a v16i8 mask.
2106   for (unsigned i = 1; i != EltSize; ++i)
2107     if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
2108       return false;
2109 
2110   for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
2111     if (N->getMaskElt(i) < 0) continue;
2112     for (unsigned j = 0; j != EltSize; ++j)
2113       if (N->getMaskElt(i+j) != N->getMaskElt(j))
2114         return false;
2115   }
2116   return true;
2117 }
2118 
2119 /// Check that the mask is shuffling N byte elements. Within each N byte
2120 /// element of the mask, the indices could be either in increasing or
2121 /// decreasing order as long as they are consecutive.
2122 /// \param[in] N the shuffle vector SD Node to analyze
/// \param[in] Width the element width in bytes, which can be 2/4/8/16
/// (HalfWord/Word/DoubleWord/QuadWord).
/// \param[in] StepLen the index delta between adjacent mask indices within an
/// element: 1 if the mask is in increasing order, -1 if it is decreasing.
2127 /// \return true iff the mask is shuffling N byte elements.
2128 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
2129                                    int StepLen) {
2130   assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
2131          "Unexpected element width.");
  assert((StepLen == 1 || StepLen == -1) && "Unexpected step length.");
2133 
2134   unsigned NumOfElem = 16 / Width;
2135   unsigned MaskVal[16]; //  Width is never greater than 16
2136   for (unsigned i = 0; i < NumOfElem; ++i) {
2137     MaskVal[0] = N->getMaskElt(i * Width);
2138     if ((StepLen == 1) && (MaskVal[0] % Width)) {
2139       return false;
2140     } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
2141       return false;
2142     }
2143 
2144     for (unsigned int j = 1; j < Width; ++j) {
2145       MaskVal[j] = N->getMaskElt(i * Width + j);
2146       if (MaskVal[j] != MaskVal[j-1] + StepLen) {
2147         return false;
2148       }
2149     }
2150   }
2151 
2152   return true;
2153 }
2154 
2155 bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
2156                           unsigned &InsertAtByte, bool &Swap, bool IsLE) {
2157   if (!isNByteElemShuffleMask(N, 4, 1))
2158     return false;
2159 
2160   // Now we look at mask elements 0,4,8,12
2161   unsigned M0 = N->getMaskElt(0) / 4;
2162   unsigned M1 = N->getMaskElt(4) / 4;
2163   unsigned M2 = N->getMaskElt(8) / 4;
2164   unsigned M3 = N->getMaskElt(12) / 4;
2165   unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
2166   unsigned BigEndianShifts[] = { 3, 0, 1, 2 };
2167 
2168   // Below, let H and L be arbitrary elements of the shuffle mask
2169   // where H is in the range [4,7] and L is in the range [0,3].
2170   // H, 1, 2, 3 or L, 5, 6, 7
2171   if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
2172       (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
2173     ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
2174     InsertAtByte = IsLE ? 12 : 0;
2175     Swap = M0 < 4;
2176     return true;
2177   }
2178   // 0, H, 2, 3 or 4, L, 6, 7
2179   if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
2180       (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
2181     ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
2182     InsertAtByte = IsLE ? 8 : 4;
2183     Swap = M1 < 4;
2184     return true;
2185   }
2186   // 0, 1, H, 3 or 4, 5, L, 7
2187   if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
2188       (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
2189     ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
2190     InsertAtByte = IsLE ? 4 : 8;
2191     Swap = M2 < 4;
2192     return true;
2193   }
2194   // 0, 1, 2, H or 4, 5, 6, L
2195   if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
2196       (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
2197     ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
2198     InsertAtByte = IsLE ? 0 : 12;
2199     Swap = M3 < 4;
2200     return true;
2201   }
2202 
2203   // If both vector operands for the shuffle are the same vector, the mask will
2204   // contain only elements from the first one and the second one will be undef.
2205   if (N->getOperand(1).isUndef()) {
2206     ShiftElts = 0;
2207     Swap = true;
2208     unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
2209     if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
2210       InsertAtByte = IsLE ? 12 : 0;
2211       return true;
2212     }
2213     if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
2214       InsertAtByte = IsLE ? 8 : 4;
2215       return true;
2216     }
2217     if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
2218       InsertAtByte = IsLE ? 4 : 8;
2219       return true;
2220     }
2221     if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
2222       InsertAtByte = IsLE ? 0 : 12;
2223       return true;
2224     }
2225   }
2226 
2227   return false;
2228 }
2229 
2230 bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
2231                                bool &Swap, bool IsLE) {
2232   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2233   // Ensure each byte index of the word is consecutive.
2234   if (!isNByteElemShuffleMask(N, 4, 1))
2235     return false;
2236 
2237   // Now we look at mask elements 0,4,8,12, which are the beginning of words.
2238   unsigned M0 = N->getMaskElt(0) / 4;
2239   unsigned M1 = N->getMaskElt(4) / 4;
2240   unsigned M2 = N->getMaskElt(8) / 4;
2241   unsigned M3 = N->getMaskElt(12) / 4;
2242 
2243   // If both vector operands for the shuffle are the same vector, the mask will
2244   // contain only elements from the first one and the second one will be undef.
2245   if (N->getOperand(1).isUndef()) {
2246     assert(M0 < 4 && "Indexing into an undef vector?");
2247     if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
2248       return false;
2249 
2250     ShiftElts = IsLE ? (4 - M0) % 4 : M0;
2251     Swap = false;
2252     return true;
2253   }
2254 
2255   // Ensure each word index of the ShuffleVector Mask is consecutive.
2256   if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)
2257     return false;
2258 
2259   if (IsLE) {
2260     if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
2261       // Input vectors don't need to be swapped if the leading element
2262       // of the result is one of the 3 left elements of the second vector
2263       // (or if there is no shift to be done at all).
2264       Swap = false;
2265       ShiftElts = (8 - M0) % 8;
2266     } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
2267       // Input vectors need to be swapped if the leading element
2268       // of the result is one of the 3 left elements of the first vector
2269       // (or if we're shifting by 4 - thereby simply swapping the vectors).
2270       Swap = true;
2271       ShiftElts = (4 - M0) % 4;
2272     }
2273 
2274     return true;
2275   } else {                                          // BE
2276     if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
2277       // Input vectors don't need to be swapped if the leading element
2278       // of the result is one of the 4 elements of the first vector.
2279       Swap = false;
2280       ShiftElts = M0;
2281     } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
2282       // Input vectors need to be swapped if the leading element
2283       // of the result is one of the 4 elements of the right vector.
2284       Swap = true;
2285       ShiftElts = M0 - 4;
2286     }
2287 
2288     return true;
2289   }
2290 }
2291 
static bool isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) {
2293   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2294 
2295   if (!isNByteElemShuffleMask(N, Width, -1))
2296     return false;
2297 
2298   for (int i = 0; i < 16; i += Width)
2299     if (N->getMaskElt(i) != i + Width - 1)
2300       return false;
2301 
2302   return true;
2303 }
2304 
2305 bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) {
2306   return isXXBRShuffleMaskHelper(N, 2);
2307 }
2308 
2309 bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) {
2310   return isXXBRShuffleMaskHelper(N, 4);
2311 }
2312 
2313 bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) {
2314   return isXXBRShuffleMaskHelper(N, 8);
2315 }
2316 
2317 bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) {
2318   return isXXBRShuffleMaskHelper(N, 16);
2319 }
2320 
2321 /// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap
2322 /// if the inputs to the instruction should be swapped and set \p DM to the
2323 /// value for the immediate.
2324 /// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI
2325 /// AND element 0 of the result comes from the first input (LE) or second input
2326 /// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered.
2327 /// \return true iff the given mask of shuffle node \p N is a XXPERMDI shuffle
2328 /// mask.
2329 bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
2330                                bool &Swap, bool IsLE) {
2331   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2332 
2333   // Ensure each byte index of the double word is consecutive.
2334   if (!isNByteElemShuffleMask(N, 8, 1))
2335     return false;
2336 
2337   unsigned M0 = N->getMaskElt(0) / 8;
2338   unsigned M1 = N->getMaskElt(8) / 8;
2339   assert(((M0 | M1) < 4) && "A mask element out of bounds?");
2340 
2341   // If both vector operands for the shuffle are the same vector, the mask will
2342   // contain only elements from the first one and the second one will be undef.
2343   if (N->getOperand(1).isUndef()) {
2344     if ((M0 | M1) < 2) {
2345       DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
2346       Swap = false;
2347       return true;
2348     } else
2349       return false;
2350   }
2351 
2352   if (IsLE) {
2353     if (M0 > 1 && M1 < 2) {
2354       Swap = false;
2355     } else if (M0 < 2 && M1 > 1) {
2356       M0 = (M0 + 2) % 4;
2357       M1 = (M1 + 2) % 4;
2358       Swap = true;
2359     } else
2360       return false;
2361 
2362     // Note: if control flow comes here that means Swap is already set above
2363     DM = (((~M1) & 1) << 1) + ((~M0) & 1);
2364     return true;
2365   } else { // BE
2366     if (M0 < 2 && M1 > 1) {
2367       Swap = false;
2368     } else if (M0 > 1 && M1 < 2) {
2369       M0 = (M0 + 2) % 4;
2370       M1 = (M1 + 2) % 4;
2371       Swap = true;
2372     } else
2373       return false;
2374 
2375     // Note: if control flow comes here that means Swap is already set above
2376     DM = (M0 << 1) + (M1 & 1);
2377     return true;
2378   }
2379 }
2380 
2381 
2382 /// getSplatIdxForPPCMnemonics - Return the splat index as a value that is
2383 /// appropriate for PPC mnemonics (which have a big endian bias - namely
2384 /// elements are counted from the left of the vector register).
2385 unsigned PPC::getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
2386                                          SelectionDAG &DAG) {
2387   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2388   assert(isSplatShuffleMask(SVOp, EltSize));
2389   if (DAG.getDataLayout().isLittleEndian())
2390     return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
2391   else
2392     return SVOp->getMaskElt(0) / EltSize;
2393 }
2394 
2395 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
2396 /// by using a vspltis[bhw] instruction of the specified element size, return
2397 /// the constant being splatted.  The ByteSize field indicates the number of
2398 /// bytes of each element [124] -> [bhw].
2399 SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
2400   SDValue OpVal(nullptr, 0);
2401 
2402   // If ByteSize of the splat is bigger than the element size of the
2403   // build_vector, then we have a case where we are checking for a splat where
2404   // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
2406   unsigned EltSize = 16/N->getNumOperands();
2407   if (EltSize < ByteSize) {
2408     unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
2409     SDValue UniquedVals[4];
2410     assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
2411 
    // See if all of the elements in the buildvector agree across each chunk.
2413     for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2414       if (N->getOperand(i).isUndef()) continue;
2415       // If the element isn't a constant, bail fully out.
2416       if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();
2417 
2418       if (!UniquedVals[i&(Multiple-1)].getNode())
2419         UniquedVals[i&(Multiple-1)] = N->getOperand(i);
2420       else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
2421         return SDValue();  // no match.
2422     }
2423 
2424     // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
2425     // either constant or undef values that are identical for each chunk.  See
2426     // if these chunks can form into a larger vspltis*.
2427 
2428     // Check to see if all of the leading entries are either 0 or -1.  If
2429     // neither, then this won't fit into the immediate field.
2430     bool LeadingZero = true;
2431     bool LeadingOnes = true;
2432     for (unsigned i = 0; i != Multiple-1; ++i) {
2433       if (!UniquedVals[i].getNode()) continue;  // Must have been undefs.
2434 
2435       LeadingZero &= isNullConstant(UniquedVals[i]);
2436       LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
2437     }
2438     // Finally, check the least significant entry.
2439     if (LeadingZero) {
2440       if (!UniquedVals[Multiple-1].getNode())
2441         return DAG.getTargetConstant(0, SDLoc(N), MVT::i32);  // 0,0,0,undef
2442       int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
2443       if (Val < 16)                                   // 0,0,0,4 -> vspltisw(4)
2444         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2445     }
2446     if (LeadingOnes) {
2447       if (!UniquedVals[Multiple-1].getNode())
2448         return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
2450       if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
2451         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2452     }
2453 
2454     return SDValue();
2455   }
2456 
2457   // Check to see if this buildvec has a single non-undef value in its elements.
2458   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2459     if (N->getOperand(i).isUndef()) continue;
2460     if (!OpVal.getNode())
2461       OpVal = N->getOperand(i);
2462     else if (OpVal != N->getOperand(i))
2463       return SDValue();
2464   }
2465 
2466   if (!OpVal.getNode()) return SDValue();  // All UNDEF: use implicit def.
2467 
2468   unsigned ValSizeInBytes = EltSize;
2469   uint64_t Value = 0;
2470   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
2471     Value = CN->getZExtValue();
2472   } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
2473     assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
2474     Value = FloatToBits(CN->getValueAPF().convertToFloat());
2475   }
2476 
  // If the splat value is larger than the element value, then we can never do
  // this splat.  The only replicated bit pattern that would fit in our
  // immediate field is zero, and we prefer to use vxor for that.
2480   if (ValSizeInBytes < ByteSize) return SDValue();
2481 
2482   // If the element value is larger than the splat value, check if it consists
2483   // of a repeated bit pattern of size ByteSize.
2484   if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
2485     return SDValue();
2486 
2487   // Properly sign extend the value.
2488   int MaskVal = SignExtend32(Value, ByteSize * 8);
2489 
2490   // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
2491   if (MaskVal == 0) return SDValue();
2492 
2493   // Finally, if this value fits in a 5 bit sext field, return it
2494   if (SignExtend32<5>(MaskVal) == MaskVal)
2495     return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
2496   return SDValue();
2497 }
2498 
2499 //===----------------------------------------------------------------------===//
2500 //  Addressing Mode Selection
2501 //===----------------------------------------------------------------------===//
2502 
/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value.  If so, this returns true and sets Imm
/// to the immediate value.
2507 bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
2508   if (!isa<ConstantSDNode>(N))
2509     return false;
2510 
2511   Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
2512   if (N->getValueType(0) == MVT::i32)
2513     return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
2514   else
2515     return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
2516 }
2517 bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
2518   return isIntS16Immediate(Op.getNode(), Imm);
2519 }
2520 
2521 /// Used when computing address flags for selecting loads and stores.
2522 /// If we have an OR, check if the LHS and RHS are provably disjoint.
2523 /// An OR of two provably disjoint values is equivalent to an ADD.
2524 /// Most PPC load/store instructions compute the effective address as a sum,
2525 /// so doing this conversion is useful.
2526 static bool provablyDisjointOr(SelectionDAG &DAG, const SDValue &N) {
2527   if (N.getOpcode() != ISD::OR)
2528     return false;
2529   KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2530   if (!LHSKnown.Zero.getBoolValue())
2531     return false;
2532   KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
2533   return (~(LHSKnown.Zero | RHSKnown.Zero) == 0);
2534 }
2535 
2536 /// SelectAddressEVXRegReg - Given the specified address, check to see if it can
2537 /// be represented as an indexed [r+r] operation.
2538 bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base,
2539                                                SDValue &Index,
2540                                                SelectionDAG &DAG) const {
2541   for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
2542       UI != E; ++UI) {
2543     if (MemSDNode *Memop = dyn_cast<MemSDNode>(*UI)) {
      if (Memop->getMemoryVT() == MVT::f64) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
2549     }
2550   }
2551   return false;
2552 }
2553 
/// isIntS34Immediate - This method tests whether the value of the given node
/// can be accurately represented as a sign extension from a 34-bit value.  If
/// so, this returns true and sets Imm to the immediate value.
2557 bool llvm::isIntS34Immediate(SDNode *N, int64_t &Imm) {
2558   if (!isa<ConstantSDNode>(N))
2559     return false;
2560 
2561   Imm = (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
2562   return isInt<34>(Imm);
2563 }
2564 bool llvm::isIntS34Immediate(SDValue Op, int64_t &Imm) {
2565   return isIntS34Immediate(Op.getNode(), Imm);
2566 }
2567 
/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation.  Returns false if it
2570 /// can be more efficiently represented as [r+imm]. If \p EncodingAlignment is
2571 /// non-zero and N can be represented by a base register plus a signed 16-bit
2572 /// displacement, make a more precise judgement by checking (displacement % \p
2573 /// EncodingAlignment).
2574 bool PPCTargetLowering::SelectAddressRegReg(
2575     SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG,
2576     MaybeAlign EncodingAlignment) const {
2577   // If we have a PC Relative target flag don't select as [reg+reg]. It will be
2578   // a [pc+imm].
2579   if (SelectAddressPCRel(N, Base))
2580     return false;
2581 
2582   int16_t Imm = 0;
2583   if (N.getOpcode() == ISD::ADD) {
    // Is there any SPE (f64) load/store that cannot handle a 16-bit offset?
    // SPE load/store instructions can only handle 8-bit offsets.
    if (hasSPE() && SelectAddressEVXRegReg(N, Base, Index, DAG))
      return true;
2588     if (isIntS16Immediate(N.getOperand(1), Imm) &&
2589         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
2590       return false; // r+i
2591     if (N.getOperand(1).getOpcode() == PPCISD::Lo)
2592       return false;    // r+i
2593 
2594     Base = N.getOperand(0);
2595     Index = N.getOperand(1);
2596     return true;
2597   } else if (N.getOpcode() == ISD::OR) {
2598     if (isIntS16Immediate(N.getOperand(1), Imm) &&
2599         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
      return false; // r+i; fold the immediate if we can.
2601 
2602     // If this is an or of disjoint bitfields, we can codegen this as an add
2603     // (for better address arithmetic) if the LHS and RHS of the OR are provably
2604     // disjoint.
2605     KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2606 
2607     if (LHSKnown.Zero.getBoolValue()) {
2608       KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
2609       // If all of the bits are known zero on the LHS or RHS, the add won't
2610       // carry.
2611       if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
2612         Base = N.getOperand(0);
2613         Index = N.getOperand(1);
2614         return true;
2615       }
2616     }
2617   }
2618 
2619   return false;
2620 }
2621 
2622 // If we happen to be doing an i64 load or store into a stack slot that has
2623 // less than a 4-byte alignment, then the frame-index elimination may need to
2624 // use an indexed load or store instruction (because the offset may not be a
2625 // multiple of 4). The extra register needed to hold the offset comes from the
2626 // register scavenger, and it is possible that the scavenger will need to use
2627 // an emergency spill slot. As a result, we need to make sure that a spill slot
2628 // is allocated when doing an i64 load/store into a less-than-4-byte-aligned
2629 // stack slot.
2630 static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
2631   // FIXME: This does not handle the LWA case.
2632   if (VT != MVT::i64)
2633     return;
2634 
2635   // NOTE: We'll exclude negative FIs here, which come from argument
2636   // lowering, because there are no known test cases triggering this problem
2637   // using packed structures (or similar). We can remove this exclusion if
2638   // we find such a test case. The reason why this is so test-case driven is
2639   // because this entire 'fixup' is only to prevent crashes (from the
2640   // register scavenger) on not-really-valid inputs. For example, if we have:
2641   //   %a = alloca i1
2642   //   %b = bitcast i1* %a to i64*
  //   store i64 %v, i64* %b
2644   // then the store should really be marked as 'align 1', but is not. If it
2645   // were marked as 'align 1' then the indexed form would have been
2646   // instruction-selected initially, and the problem this 'fixup' is preventing
2647   // won't happen regardless.
2648   if (FrameIdx < 0)
2649     return;
2650 
2651   MachineFunction &MF = DAG.getMachineFunction();
2652   MachineFrameInfo &MFI = MF.getFrameInfo();
2653 
2654   if (MFI.getObjectAlign(FrameIdx) >= Align(4))
2655     return;
2656 
2657   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2658   FuncInfo->setHasNonRISpills();
2659 }
2660 
2661 /// Returns true if the address N can be represented by a base register plus
2662 /// a signed 16-bit displacement [r+imm], and if it is not better
2663 /// represented as reg+reg.  If \p EncodingAlignment is non-zero, only accept
2664 /// displacements that are multiples of that value.
2665 bool PPCTargetLowering::SelectAddressRegImm(
2666     SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG,
2667     MaybeAlign EncodingAlignment) const {
2668   // FIXME dl should come from parent load or store, not from address
2669   SDLoc dl(N);
2670 
2671   // If we have a PC Relative target flag don't select as [reg+imm]. It will be
2672   // a [pc+imm].
2673   if (SelectAddressPCRel(N, Base))
2674     return false;
2675 
2676   // If this can be more profitably realized as r+r, fail.
2677   if (SelectAddressRegReg(N, Disp, Base, DAG, EncodingAlignment))
2678     return false;
2679 
2680   if (N.getOpcode() == ISD::ADD) {
2681     int16_t imm = 0;
2682     if (isIntS16Immediate(N.getOperand(1), imm) &&
2683         (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
2684       Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2685       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2686         Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2687         fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2688       } else {
2689         Base = N.getOperand(0);
2690       }
2691       return true; // [r+i]
2692     } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
2693       // Match LOAD (ADD (X, Lo(G))).
2694       assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
2695              && "Cannot handle constant offsets yet!");
2696       Disp = N.getOperand(1).getOperand(0);  // The global address.
2697       assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
2698              Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
2699              Disp.getOpcode() == ISD::TargetConstantPool ||
2700              Disp.getOpcode() == ISD::TargetJumpTable);
2701       Base = N.getOperand(0);
2702       return true;  // [&g+r]
2703     }
2704   } else if (N.getOpcode() == ISD::OR) {
2705     int16_t imm = 0;
2706     if (isIntS16Immediate(N.getOperand(1), imm) &&
2707         (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
2708       // If this is an or of disjoint bitfields, we can codegen this as an add
2709       // (for better address arithmetic) if the LHS and RHS of the OR are
2710       // provably disjoint.
2711       KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2712 
2713       if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
2714         // If all of the bits are known zero on the LHS or RHS, the add won't
2715         // carry.
2716         if (FrameIndexSDNode *FI =
2717               dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2718           Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2719           fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2720         } else {
2721           Base = N.getOperand(0);
2722         }
2723         Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2724         return true;
2725       }
2726     }
2727   } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
2728     // Loading from a constant address.
2729 
2730     // If this address fits entirely in a 16-bit sext immediate field, codegen
2731     // this as "d, 0"
2732     int16_t Imm;
2733     if (isIntS16Immediate(CN, Imm) &&
2734         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm))) {
2735       Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
2736       Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2737                              CN->getValueType(0));
2738       return true;
2739     }
2740 
2741     // Handle 32-bit sext immediates with LIS + addr mode.
2742     if ((CN->getValueType(0) == MVT::i32 ||
2743          (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
2744         (!EncodingAlignment ||
2745          isAligned(*EncodingAlignment, CN->getZExtValue()))) {
2746       int Addr = (int)CN->getZExtValue();
2747 
2748       // Otherwise, break this down into an LIS + disp.
2749       Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);
2750 
2751       Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
2752                                    MVT::i32);
2753       unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
2754       Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
2755       return true;
2756     }
2757   }
2758 
2759   Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
2760   if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
2761     Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2762     fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2763   } else
2764     Base = N;
2765   return true;      // [r+0]
2766 }
2767 
2768 /// Similar to the 16-bit case but for instructions that take a 34-bit
2769 /// displacement field (prefixed loads/stores).
2770 bool PPCTargetLowering::SelectAddressRegImm34(SDValue N, SDValue &Disp,
2771                                               SDValue &Base,
2772                                               SelectionDAG &DAG) const {
2773   // Only on 64-bit targets.
2774   if (N.getValueType() != MVT::i64)
2775     return false;
2776 
2777   SDLoc dl(N);
2778   int64_t Imm = 0;
2779 
2780   if (N.getOpcode() == ISD::ADD) {
2781     if (!isIntS34Immediate(N.getOperand(1), Imm))
2782       return false;
2783     Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
2784     if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0)))
2785       Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2786     else
2787       Base = N.getOperand(0);
2788     return true;
2789   }
2790 
2791   if (N.getOpcode() == ISD::OR) {
2792     if (!isIntS34Immediate(N.getOperand(1), Imm))
2793       return false;
2794     // If this is an or of disjoint bitfields, we can codegen this as an add
2795     // (for better address arithmetic) if the LHS and RHS of the OR are
2796     // provably disjoint.
2797     KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2798     if ((LHSKnown.Zero.getZExtValue() | ~(uint64_t)Imm) != ~0ULL)
2799       return false;
2800     if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0)))
2801       Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2802     else
2803       Base = N.getOperand(0);
2804     Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
2805     return true;
2806   }
2807 
2808   if (isIntS34Immediate(N, Imm)) { // If the address is a 34-bit const.
2809     Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
2810     Base = DAG.getRegister(PPC::ZERO8, N.getValueType());
2811     return true;
2812   }
2813 
2814   return false;
2815 }
2816 
/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
2819 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
2820                                                 SDValue &Index,
2821                                                 SelectionDAG &DAG) const {
2822   // Check to see if we can easily represent this as an [r+r] address.  This
2823   // will fail if it thinks that the address is more profitably represented as
2824   // reg+imm, e.g. where imm = 0.
2825   if (SelectAddressRegReg(N, Base, Index, DAG))
2826     return true;
2827 
  // If the address is the result of an add, we will utilize the fact that the
  // address calculation includes an implicit add.  However, we can reduce
  // register pressure if we do not materialize a constant just for use as the
  // index register.  We only fold away the add if it is not an add of a
  // value and a 16-bit signed constant where both operands have a single use.
2833   int16_t imm = 0;
2834   if (N.getOpcode() == ISD::ADD &&
2835       (!isIntS16Immediate(N.getOperand(1), imm) ||
2836        !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
2837     Base = N.getOperand(0);
2838     Index = N.getOperand(1);
2839     return true;
2840   }
2841 
2842   // Otherwise, do it the hard way, using R0 as the base register.
2843   Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2844                          N.getValueType());
2845   Index = N;
2846   return true;
2847 }
2848 
2849 template <typename Ty> static bool isValidPCRelNode(SDValue N) {
2850   Ty *PCRelCand = dyn_cast<Ty>(N);
2851   return PCRelCand && (PCRelCand->getTargetFlags() & PPCII::MO_PCREL_FLAG);
2852 }
2853 
2854 /// Returns true if this address is a PC Relative address.
2855 /// PC Relative addresses are marked with the flag PPCII::MO_PCREL_FLAG
2856 /// or if the node opcode is PPCISD::MAT_PCREL_ADDR.
2857 bool PPCTargetLowering::SelectAddressPCRel(SDValue N, SDValue &Base) const {
2858   // This is a materialize PC Relative node. Always select this as PC Relative.
2859   Base = N;
2860   if (N.getOpcode() == PPCISD::MAT_PCREL_ADDR)
2861     return true;
2862   if (isValidPCRelNode<ConstantPoolSDNode>(N) ||
2863       isValidPCRelNode<GlobalAddressSDNode>(N) ||
2864       isValidPCRelNode<JumpTableSDNode>(N) ||
2865       isValidPCRelNode<BlockAddressSDNode>(N))
2866     return true;
2867   return false;
2868 }
2869 
2870 /// Returns true if we should use a direct load into vector instruction
2871 /// (such as lxsd or lfd), instead of a load into gpr + direct move sequence.
static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget &ST) {
  // If there are any uses other than scalar-to-vector, then we should keep
  // the load as a scalar load -> direct move pattern to prevent multiple
  // loads.
2877   LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
2878   if (!LD)
2879     return false;
2880 
2881   EVT MemVT = LD->getMemoryVT();
2882   if (!MemVT.isSimple())
2883     return false;
2884   switch(MemVT.getSimpleVT().SimpleTy) {
2885   case MVT::i64:
2886     break;
2887   case MVT::i32:
2888     if (!ST.hasP8Vector())
2889       return false;
2890     break;
2891   case MVT::i16:
2892   case MVT::i8:
2893     if (!ST.hasP9Vector())
2894       return false;
2895     break;
2896   default:
2897     return false;
2898   }
2899 
2900   SDValue LoadedVal(N, 0);
2901   if (!LoadedVal.hasOneUse())
2902     return false;
2903 
2904   for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end();
2905        UI != UE; ++UI)
2906     if (UI.getUse().get().getResNo() == 0 &&
2907         UI->getOpcode() != ISD::SCALAR_TO_VECTOR &&
2908         UI->getOpcode() != PPCISD::SCALAR_TO_VECTOR_PERMUTED)
2909       return false;
2910 
2911   return true;
2912 }
2913 
/// getPreIndexedAddressParts - Returns true, and sets the base pointer,
/// offset pointer, and addressing mode by reference, if the node's address
/// can be legally represented as a pre-indexed load / store address.
2917 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
2918                                                   SDValue &Offset,
2919                                                   ISD::MemIndexedMode &AM,
2920                                                   SelectionDAG &DAG) const {
2921   if (DisablePPCPreinc) return false;
2922 
2923   bool isLoad = true;
2924   SDValue Ptr;
2925   EVT VT;
2926   unsigned Alignment;
2927   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2928     Ptr = LD->getBasePtr();
2929     VT = LD->getMemoryVT();
2930     Alignment = LD->getAlignment();
2931   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
2932     Ptr = ST->getBasePtr();
2933     VT  = ST->getMemoryVT();
2934     Alignment = ST->getAlignment();
2935     isLoad = false;
2936   } else
2937     return false;
2938 
  // Do not generate pre-inc forms for specific loads that feed
  // scalar_to_vector instructions because we can fold these into a more
  // efficient instruction (such as LXSD) instead.
2942   if (isLoad && usePartialVectorLoads(N, Subtarget)) {
2943     return false;
2944   }
2945 
2946   // PowerPC doesn't have preinc load/store instructions for vectors
2947   if (VT.isVector())
2948     return false;
2949 
2950   if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
2951     // Common code will reject creating a pre-inc form if the base pointer
2952     // is a frame index, or if N is a store and the base pointer is either
2953     // the same as or a predecessor of the value being stored.  Check for
2954     // those situations here, and try with swapped Base/Offset instead.
2955     bool Swap = false;
2956 
2957     if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
2958       Swap = true;
2959     else if (!isLoad) {
2960       SDValue Val = cast<StoreSDNode>(N)->getValue();
2961       if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
2962         Swap = true;
2963     }
2964 
2965     if (Swap)
2966       std::swap(Base, Offset);
2967 
2968     AM = ISD::PRE_INC;
2969     return true;
2970   }
2971 
2972   // LDU/STU can only handle immediates that are a multiple of 4.
2973   if (VT != MVT::i64) {
2974     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, None))
2975       return false;
2976   } else {
2977     // LDU/STU need an address with at least 4-byte alignment.
2978     if (Alignment < 4)
2979       return false;
2980 
2981     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, Align(4)))
2982       return false;
2983   }
2984 
2985   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2986     // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
2987     // sext i32 to i64 when addr mode is r+i.
2988     if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
2989         LD->getExtensionType() == ISD::SEXTLOAD &&
2990         isa<ConstantSDNode>(Offset))
2991       return false;
2992   }
2993 
2994   AM = ISD::PRE_INC;
2995   return true;
2996 }
2997 
2998 //===----------------------------------------------------------------------===//
2999 //  LowerOperation implementation
3000 //===----------------------------------------------------------------------===//
3001 
/// Set HiOpFlags and LoOpFlags to the target MO flags used when referencing
/// labels, adding the PIC flag when compiling position-independent code.
3004 static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
3005                                unsigned &HiOpFlags, unsigned &LoOpFlags,
3006                                const GlobalValue *GV = nullptr) {
3007   HiOpFlags = PPCII::MO_HA;
3008   LoOpFlags = PPCII::MO_LO;
3009 
  // Only add the PIC flag when in the PIC relocation model.
3011   if (IsPIC) {
3012     HiOpFlags |= PPCII::MO_PIC_FLAG;
3013     LoOpFlags |= PPCII::MO_PIC_FLAG;
3014   }
3015 }
3016 
3017 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
3018                              SelectionDAG &DAG) {
3019   SDLoc DL(HiPart);
3020   EVT PtrVT = HiPart.getValueType();
3021   SDValue Zero = DAG.getConstant(0, DL, PtrVT);
3022 
3023   SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
3024   SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);
3025 
3026   // With PIC, the first instruction is actually "GR+hi(&G)".
3027   if (isPIC)
3028     Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
3029                      DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);
3030 
3031   // Generate non-pic code that has direct accesses to the constant pool.
3032   // The address of the global is just (hi(&g)+lo(&g)).
3033   return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
3034 }
3035 
3036 static void setUsesTOCBasePtr(MachineFunction &MF) {
3037   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3038   FuncInfo->setUsesTOCBasePtr();
3039 }
3040 
3041 static void setUsesTOCBasePtr(SelectionDAG &DAG) {
3042   setUsesTOCBasePtr(DAG.getMachineFunction());
3043 }
3044 
3045 SDValue PPCTargetLowering::getTOCEntry(SelectionDAG &DAG, const SDLoc &dl,
3046                                        SDValue GA) const {
3047   const bool Is64Bit = Subtarget.isPPC64();
3048   EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
3049   SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT)
3050                         : Subtarget.isAIXABI()
3051                               ? DAG.getRegister(PPC::R2, VT)
3052                               : DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);
3053   SDValue Ops[] = { GA, Reg };
3054   return DAG.getMemIntrinsicNode(
3055       PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
3056       MachinePointerInfo::getGOT(DAG.getMachineFunction()), None,
3057       MachineMemOperand::MOLoad);
3058 }
3059 
3060 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
3061                                              SelectionDAG &DAG) const {
3062   EVT PtrVT = Op.getValueType();
3063   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
3064   const Constant *C = CP->getConstVal();
3065 
3066   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
3067   // The actual address of the GlobalValue is stored in the TOC.
3068   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3069     if (Subtarget.isUsingPCRelativeCalls()) {
3070       SDLoc DL(CP);
3071       EVT Ty = getPointerTy(DAG.getDataLayout());
3072       SDValue ConstPool = DAG.getTargetConstantPool(
3073           C, Ty, CP->getAlign(), CP->getOffset(), PPCII::MO_PCREL_FLAG);
3074       return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, ConstPool);
3075     }
3076     setUsesTOCBasePtr(DAG);
3077     SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0);
3078     return getTOCEntry(DAG, SDLoc(CP), GA);
3079   }
3080 
3081   unsigned MOHiFlag, MOLoFlag;
3082   bool IsPIC = isPositionIndependent();
3083   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
3084 
3085   if (IsPIC && Subtarget.isSVR4ABI()) {
3086     SDValue GA =
3087         DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), PPCII::MO_PIC_FLAG);
3088     return getTOCEntry(DAG, SDLoc(CP), GA);
3089   }
3090 
3091   SDValue CPIHi =
3092       DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOHiFlag);
3093   SDValue CPILo =
3094       DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOLoFlag);
3095   return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG);
3096 }
3097 
3098 // For 64-bit PowerPC, prefer the more compact relative encodings.
3099 // This trades 32 bits per jump table entry for one or two instructions
// at the jump site.
3101 unsigned PPCTargetLowering::getJumpTableEncoding() const {
3102   if (isJumpTableRelative())
3103     return MachineJumpTableInfo::EK_LabelDifference32;
3104 
3105   return TargetLowering::getJumpTableEncoding();
3106 }
3107 
3108 bool PPCTargetLowering::isJumpTableRelative() const {
3109   if (UseAbsoluteJumpTables)
3110     return false;
3111   if (Subtarget.isPPC64() || Subtarget.isAIXABI())
3112     return true;
3113   return TargetLowering::isJumpTableRelative();
3114 }
3115 
3116 SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table,
3117                                                     SelectionDAG &DAG) const {
3118   if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
3119     return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
3120 
3121   switch (getTargetMachine().getCodeModel()) {
3122   case CodeModel::Small:
3123   case CodeModel::Medium:
3124     return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
3125   default:
3126     return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(),
3127                        getPointerTy(DAG.getDataLayout()));
3128   }
3129 }
3130 
3131 const MCExpr *
3132 PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
3133                                                 unsigned JTI,
3134                                                 MCContext &Ctx) const {
3135   if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
3136     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
3137 
3138   switch (getTargetMachine().getCodeModel()) {
3139   case CodeModel::Small:
3140   case CodeModel::Medium:
3141     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
3142   default:
3143     return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
3144   }
3145 }
3146 
3147 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
3148   EVT PtrVT = Op.getValueType();
3149   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
3150 
3151   // isUsingPCRelativeCalls() returns true when PCRelative is enabled
3152   if (Subtarget.isUsingPCRelativeCalls()) {
3153     SDLoc DL(JT);
3154     EVT Ty = getPointerTy(DAG.getDataLayout());
3155     SDValue GA =
3156         DAG.getTargetJumpTable(JT->getIndex(), Ty, PPCII::MO_PCREL_FLAG);
3157     SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3158     return MatAddr;
3159   }
3160 
3161   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
3162   // The actual address of the GlobalValue is stored in the TOC.
3163   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3164     setUsesTOCBasePtr(DAG);
3165     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
3166     return getTOCEntry(DAG, SDLoc(JT), GA);
3167   }
3168 
3169   unsigned MOHiFlag, MOLoFlag;
3170   bool IsPIC = isPositionIndependent();
3171   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
3172 
3173   if (IsPIC && Subtarget.isSVR4ABI()) {
3174     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
3175                                         PPCII::MO_PIC_FLAG);
3176     return getTOCEntry(DAG, SDLoc(GA), GA);
3177   }
3178 
3179   SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
3180   SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
3181   return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG);
3182 }
3183 
3184 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
3185                                              SelectionDAG &DAG) const {
3186   EVT PtrVT = Op.getValueType();
3187   BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
3188   const BlockAddress *BA = BASDN->getBlockAddress();
3189 
3190   // isUsingPCRelativeCalls() returns true when PCRelative is enabled
3191   if (Subtarget.isUsingPCRelativeCalls()) {
3192     SDLoc DL(BASDN);
3193     EVT Ty = getPointerTy(DAG.getDataLayout());
3194     SDValue GA = DAG.getTargetBlockAddress(BA, Ty, BASDN->getOffset(),
3195                                            PPCII::MO_PCREL_FLAG);
3196     SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3197     return MatAddr;
3198   }
3199 
3200   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
3201   // The actual BlockAddress is stored in the TOC.
3202   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3203     setUsesTOCBasePtr(DAG);
3204     SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
3205     return getTOCEntry(DAG, SDLoc(BASDN), GA);
3206   }
3207 
3208   // 32-bit position-independent ELF stores the BlockAddress in the .got.
3209   if (Subtarget.is32BitELFABI() && isPositionIndependent())
3210     return getTOCEntry(
3211         DAG, SDLoc(BASDN),
3212         DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()));
3213 
3214   unsigned MOHiFlag, MOLoFlag;
3215   bool IsPIC = isPositionIndependent();
3216   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
3217   SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
3218   SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
3219   return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG);
3220 }
3221 
3222 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
3223                                               SelectionDAG &DAG) const {
3224   if (Subtarget.isAIXABI())
3225     return LowerGlobalTLSAddressAIX(Op, DAG);
3226 
3227   return LowerGlobalTLSAddressLinux(Op, DAG);
3228 }
3229 
3230 SDValue PPCTargetLowering::LowerGlobalTLSAddressAIX(SDValue Op,
3231                                                     SelectionDAG &DAG) const {
3232   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
3233 
3234   if (DAG.getTarget().useEmulatedTLS())
3235     report_fatal_error("Emulated TLS is not yet supported on AIX");
3236 
3237   SDLoc dl(GA);
3238   const GlobalValue *GV = GA->getGlobal();
3239   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3240 
3241   // The general-dynamic model is the only access model supported for now, so
3242   // all the GlobalTLSAddress nodes are lowered with this model.
3243   // We need to generate two TOC entries, one for the variable offset, one for
3244   // the region handle. The global address for the TOC entry of the region
3245   // handle is created with the MO_TLSGDM_FLAG flag and the global address
3246   // for the TOC entry of the variable offset is created with MO_TLSGD_FLAG.
3247   SDValue VariableOffsetTGA =
3248       DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, PPCII::MO_TLSGD_FLAG);
3249   SDValue RegionHandleTGA =
3250       DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, PPCII::MO_TLSGDM_FLAG);
3251   SDValue VariableOffset = getTOCEntry(DAG, dl, VariableOffsetTGA);
3252   SDValue RegionHandle = getTOCEntry(DAG, dl, RegionHandleTGA);
3253   return DAG.getNode(PPCISD::TLSGD_AIX, dl, PtrVT, VariableOffset,
3254                      RegionHandle);
3255 }
3256 
3257 SDValue PPCTargetLowering::LowerGlobalTLSAddressLinux(SDValue Op,
3258                                                       SelectionDAG &DAG) const {
3259   // FIXME: TLS addresses currently use medium model code sequences,
3260   // which is the most useful form.  Eventually support for small and
3261   // large models could be added if users need it, at the cost of
3262   // additional complexity.
3263   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
3264   if (DAG.getTarget().useEmulatedTLS())
3265     return LowerToTLSEmulatedModel(GA, DAG);
3266 
3267   SDLoc dl(GA);
3268   const GlobalValue *GV = GA->getGlobal();
3269   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3270   bool is64bit = Subtarget.isPPC64();
3271   const Module *M = DAG.getMachineFunction().getFunction().getParent();
3272   PICLevel::Level picLevel = M->getPICLevel();
3273 
3274   const TargetMachine &TM = getTargetMachine();
3275   TLSModel::Model Model = TM.getTLSModel(GV);
3276 
3277   if (Model == TLSModel::LocalExec) {
3278     if (Subtarget.isUsingPCRelativeCalls()) {
3279       SDValue TLSReg = DAG.getRegister(PPC::X13, MVT::i64);
3280       SDValue TGA = DAG.getTargetGlobalAddress(
3281           GV, dl, PtrVT, 0, (PPCII::MO_PCREL_FLAG | PPCII::MO_TPREL_FLAG));
3282       SDValue MatAddr =
3283           DAG.getNode(PPCISD::TLS_LOCAL_EXEC_MAT_ADDR, dl, PtrVT, TGA);
3284       return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TLSReg, MatAddr);
3285     }
3286 
3287     SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3288                                                PPCII::MO_TPREL_HA);
3289     SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3290                                                PPCII::MO_TPREL_LO);
3291     SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64)
3292                              : DAG.getRegister(PPC::R2, MVT::i32);
3293 
3294     SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
3295     return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
3296   }
3297 
3298   if (Model == TLSModel::InitialExec) {
3299     bool IsPCRel = Subtarget.isUsingPCRelativeCalls();
3300     SDValue TGA = DAG.getTargetGlobalAddress(
3301         GV, dl, PtrVT, 0, IsPCRel ? PPCII::MO_GOT_TPREL_PCREL_FLAG : 0);
3302     SDValue TGATLS = DAG.getTargetGlobalAddress(
3303         GV, dl, PtrVT, 0,
3304         IsPCRel ? (PPCII::MO_TLS | PPCII::MO_PCREL_FLAG) : PPCII::MO_TLS);
3305     SDValue TPOffset;
3306     if (IsPCRel) {
3307       SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, dl, PtrVT, TGA);
3308       TPOffset = DAG.getLoad(MVT::i64, dl, DAG.getEntryNode(), MatPCRel,
3309                              MachinePointerInfo());
3310     } else {
3311       SDValue GOTPtr;
3312       if (is64bit) {
3313         setUsesTOCBasePtr(DAG);
3314         SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3315         GOTPtr =
3316             DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, PtrVT, GOTReg, TGA);
3317       } else {
3318         if (!TM.isPositionIndependent())
3319           GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT);
3320         else if (picLevel == PICLevel::SmallPIC)
3321           GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3322         else
3323           GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3324       }
3325       TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, PtrVT, TGA, GOTPtr);
3326     }
3327     return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
3328   }
3329 
3330   if (Model == TLSModel::GeneralDynamic) {
3331     if (Subtarget.isUsingPCRelativeCalls()) {
3332       SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3333                                                PPCII::MO_GOT_TLSGD_PCREL_FLAG);
3334       return DAG.getNode(PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR, dl, PtrVT, TGA);
3335     }
3336 
3337     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
3338     SDValue GOTPtr;
3339     if (is64bit) {
3340       setUsesTOCBasePtr(DAG);
3341       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3342       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
3343                                    GOTReg, TGA);
3344     } else {
3345       if (picLevel == PICLevel::SmallPIC)
3346         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3347       else
3348         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3349     }
3350     return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT,
3351                        GOTPtr, TGA, TGA);
3352   }
3353 
3354   if (Model == TLSModel::LocalDynamic) {
3355     if (Subtarget.isUsingPCRelativeCalls()) {
3356       SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3357                                                PPCII::MO_GOT_TLSLD_PCREL_FLAG);
3358       SDValue MatPCRel =
3359           DAG.getNode(PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR, dl, PtrVT, TGA);
3360       return DAG.getNode(PPCISD::PADDI_DTPREL, dl, PtrVT, MatPCRel, TGA);
3361     }
3362 
3363     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
3364     SDValue GOTPtr;
3365     if (is64bit) {
3366       setUsesTOCBasePtr(DAG);
3367       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3368       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
3369                            GOTReg, TGA);
3370     } else {
3371       if (picLevel == PICLevel::SmallPIC)
3372         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3373       else
3374         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3375     }
3376     SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
3377                                   PtrVT, GOTPtr, TGA, TGA);
3378     SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
3379                                       PtrVT, TLSAddr, TGA);
3380     return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
3381   }
3382 
3383   llvm_unreachable("Unknown TLS model!");
3384 }
3385 
3386 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
3387                                               SelectionDAG &DAG) const {
3388   EVT PtrVT = Op.getValueType();
3389   GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
3390   SDLoc DL(GSDN);
3391   const GlobalValue *GV = GSDN->getGlobal();
3392 
3393   // 64-bit SVR4 ABI & AIX ABI code is always position-independent.
3394   // The actual address of the GlobalValue is stored in the TOC.
3395   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3396     if (Subtarget.isUsingPCRelativeCalls()) {
3397       EVT Ty = getPointerTy(DAG.getDataLayout());
3398       if (isAccessedAsGotIndirect(Op)) {
3399         SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
3400                                                 PPCII::MO_PCREL_FLAG |
3401                                                     PPCII::MO_GOT_FLAG);
3402         SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3403         SDValue Load = DAG.getLoad(MVT::i64, DL, DAG.getEntryNode(), MatPCRel,
3404                                    MachinePointerInfo());
3405         return Load;
3406       } else {
3407         SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
3408                                                 PPCII::MO_PCREL_FLAG);
3409         return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3410       }
3411     }
3412     setUsesTOCBasePtr(DAG);
3413     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
3414     return getTOCEntry(DAG, DL, GA);
3415   }
3416 
3417   unsigned MOHiFlag, MOLoFlag;
3418   bool IsPIC = isPositionIndependent();
3419   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV);
3420 
3421   if (IsPIC && Subtarget.isSVR4ABI()) {
3422     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
3423                                             GSDN->getOffset(),
3424                                             PPCII::MO_PIC_FLAG);
3425     return getTOCEntry(DAG, DL, GA);
3426   }
3427 
3428   SDValue GAHi =
3429     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
3430   SDValue GALo =
3431     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);
3432 
3433   return LowerLabelRef(GAHi, GALo, IsPIC, DAG);
3434 }
3435 
3436 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
3437   bool IsStrict = Op->isStrictFPOpcode();
3438   ISD::CondCode CC =
3439       cast<CondCodeSDNode>(Op.getOperand(IsStrict ? 3 : 2))->get();
3440   SDValue LHS = Op.getOperand(IsStrict ? 1 : 0);
3441   SDValue RHS = Op.getOperand(IsStrict ? 2 : 1);
3442   SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
3443   EVT LHSVT = LHS.getValueType();
3444   SDLoc dl(Op);
3445 
3446   // Soften the setcc with libcall if it is fp128.
3447   if (LHSVT == MVT::f128) {
3448     assert(!Subtarget.hasP9Vector() &&
3449            "SETCC for f128 is already legal under Power9!");
3450     softenSetCCOperands(DAG, LHSVT, LHS, RHS, CC, dl, LHS, RHS, Chain,
3451                         Op->getOpcode() == ISD::STRICT_FSETCCS);
3452     if (RHS.getNode())
3453       LHS = DAG.getNode(ISD::SETCC, dl, Op.getValueType(), LHS, RHS,
3454                         DAG.getCondCode(CC));
3455     if (IsStrict)
3456       return DAG.getMergeValues({LHS, Chain}, dl);
3457     return LHS;
3458   }
3459 
3460   assert(!IsStrict && "Don't know how to handle STRICT_FSETCC!");
3461 
3462   if (Op.getValueType() == MVT::v2i64) {
3463     // When the operands themselves are v2i64 values, we need to do something
3464     // special because VSX has no underlying comparison operations for these.
3465     if (LHS.getValueType() == MVT::v2i64) {
3466       // Equality can be handled by casting to the legal type for Altivec
3467       // comparisons, everything else needs to be expanded.
3468       if (CC == ISD::SETEQ || CC == ISD::SETNE) {
3469         return DAG.getNode(
3470             ISD::BITCAST, dl, MVT::v2i64,
3471             DAG.getSetCC(dl, MVT::v4i32,
3472                          DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, LHS),
3473                          DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, RHS), CC));
3474       }
3475 
3476       return SDValue();
3477     }
3478 
3479     // We handle most of these in the usual way.
3480     return Op;
3481   }
3482 
3483   // If we're comparing for equality to zero, expose the fact that this is
3484   // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
3485   // fold the new nodes.
3486   if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG))
3487     return V;
3488 
3489   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
3490     // Leave comparisons against 0 and -1 alone for now, since they're usually
3491     // optimized.  FIXME: revisit this when we can custom lower all setcc
3492     // optimizations.
3493     if (C->isAllOnesValue() || C->isNullValue())
3494       return SDValue();
3495   }
3496 
3497   // If we have an integer seteq/setne, turn it into a compare against zero
3498   // by xor'ing the rhs with the lhs, which is faster than setting a
3499   // condition register, reading it back out, and masking the correct bit.  The
3500   // normal approach here uses sub to do this instead of xor.  Using xor exposes
3501   // the result to other bit-twiddling opportunities.
3502   if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
3503     EVT VT = Op.getValueType();
3504     SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, LHS, RHS);
3505     return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
3506   }
3507   return SDValue();
3508 }
3509 
3510 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
3511   SDNode *Node = Op.getNode();
3512   EVT VT = Node->getValueType(0);
3513   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3514   SDValue InChain = Node->getOperand(0);
3515   SDValue VAListPtr = Node->getOperand(1);
3516   const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
3517   SDLoc dl(Node);
3518 
3519   assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
3520 
3521   // gpr_index
3522   SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3523                                     VAListPtr, MachinePointerInfo(SV), MVT::i8);
3524   InChain = GprIndex.getValue(1);
3525 
3526   if (VT == MVT::i64) {
3527     // Check if GprIndex is even
3528     SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
3529                                  DAG.getConstant(1, dl, MVT::i32));
3530     SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
3531                                 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
3532     SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
3533                                           DAG.getConstant(1, dl, MVT::i32));
3534     // Align GprIndex to be even if it isn't
3535     GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
3536                            GprIndex);
3537   }
3538 
3539   // fpr index is 1 byte after gpr
3540   SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3541                                DAG.getConstant(1, dl, MVT::i32));
3542 
3543   // fpr
3544   SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3545                                     FprPtr, MachinePointerInfo(SV), MVT::i8);
3546   InChain = FprIndex.getValue(1);
3547 
3548   SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3549                                        DAG.getConstant(8, dl, MVT::i32));
3550 
3551   SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3552                                         DAG.getConstant(4, dl, MVT::i32));
3553 
3554   // areas
3555   SDValue OverflowArea =
3556       DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
3557   InChain = OverflowArea.getValue(1);
3558 
3559   SDValue RegSaveArea =
3560       DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
3561   InChain = RegSaveArea.getValue(1);
3562 
  // select overflow_area if index >= 8
3564   SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
3565                             DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);
3566 
3567   // adjustment constant gpr_index * 4/8
3568   SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
3569                                     VT.isInteger() ? GprIndex : FprIndex,
3570                                     DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
3571                                                     MVT::i32));
3572 
3573   // OurReg = RegSaveArea + RegConstant
3574   SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
3575                                RegConstant);
3576 
3577   // Floating types are 32 bytes into RegSaveArea
3578   if (VT.isFloatingPoint())
3579     OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
3580                          DAG.getConstant(32, dl, MVT::i32));
3581 
3582   // increase {f,g}pr_index by 1 (or 2 if VT is i64)
3583   SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3584                                    VT.isInteger() ? GprIndex : FprIndex,
3585                                    DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
3586                                                    MVT::i32));
3587 
3588   InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
3589                               VT.isInteger() ? VAListPtr : FprPtr,
3590                               MachinePointerInfo(SV), MVT::i8);
3591 
3592   // determine if we should load from reg_save_area or overflow_area
3593   SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
3594 
  // increase overflow_area by 4/8 if gpr/fpr >= 8
3596   SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
3597                                           DAG.getConstant(VT.isInteger() ? 4 : 8,
3598                                           dl, MVT::i32));
3599 
3600   OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
3601                              OverflowAreaPlusN);
3602 
3603   InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
3604                               MachinePointerInfo(), MVT::i32);
3605 
3606   return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
3607 }
3608 
3609 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
3610   assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
3611 
  // We have to copy the entire va_list struct:
  // 2*sizeof(char) + 2 bytes of padding + 2*sizeof(char*) = 12 bytes
3614   return DAG.getMemcpy(Op.getOperand(0), Op, Op.getOperand(1), Op.getOperand(2),
3615                        DAG.getConstant(12, SDLoc(Op), MVT::i32), Align(8),
3616                        false, true, false, MachinePointerInfo(),
3617                        MachinePointerInfo());
3618 }
3619 
3620 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
3621                                                   SelectionDAG &DAG) const {
3622   if (Subtarget.isAIXABI())
3623     report_fatal_error("ADJUST_TRAMPOLINE operation is not supported on AIX.");
3624 
3625   return Op.getOperand(0);
3626 }
3627 
3628 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
3629                                                 SelectionDAG &DAG) const {
3630   if (Subtarget.isAIXABI())
3631     report_fatal_error("INIT_TRAMPOLINE operation is not supported on AIX.");
3632 
3633   SDValue Chain = Op.getOperand(0);
3634   SDValue Trmp = Op.getOperand(1); // trampoline
3635   SDValue FPtr = Op.getOperand(2); // nested function
3636   SDValue Nest = Op.getOperand(3); // 'nest' parameter value
3637   SDLoc dl(Op);
3638 
3639   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3640   bool isPPC64 = (PtrVT == MVT::i64);
3641   Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
3642 
3643   TargetLowering::ArgListTy Args;
3644   TargetLowering::ArgListEntry Entry;
3645 
3646   Entry.Ty = IntPtrTy;
3647   Entry.Node = Trmp; Args.push_back(Entry);
3648 
3649   // TrampSize == (isPPC64 ? 48 : 40);
3650   Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
3651                                isPPC64 ? MVT::i64 : MVT::i32);
3652   Args.push_back(Entry);
3653 
3654   Entry.Node = FPtr; Args.push_back(Entry);
3655   Entry.Node = Nest; Args.push_back(Entry);
3656 
3657   // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
3658   TargetLowering::CallLoweringInfo CLI(DAG);
3659   CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
3660       CallingConv::C, Type::getVoidTy(*DAG.getContext()),
3661       DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args));
3662 
3663   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
3664   return CallResult.second;
3665 }
3666 
3667 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3668   MachineFunction &MF = DAG.getMachineFunction();
3669   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3670   EVT PtrVT = getPointerTy(MF.getDataLayout());
3671 
3672   SDLoc dl(Op);
3673 
3674   if (Subtarget.isPPC64() || Subtarget.isAIXABI()) {
3675     // vastart just stores the address of the VarArgsFrameIndex slot into the
3676     // memory location argument.
3677     SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3678     const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3679     return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
3680                         MachinePointerInfo(SV));
3681   }
3682 
3683   // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
  // We assume the given va_list is already allocated.
3685   //
3686   // typedef struct {
3687   //  char gpr;     /* index into the array of 8 GPRs
3688   //                 * stored in the register save area
3689   //                 * gpr=0 corresponds to r3,
3690   //                 * gpr=1 to r4, etc.
3691   //                 */
3692   //  char fpr;     /* index into the array of 8 FPRs
3693   //                 * stored in the register save area
3694   //                 * fpr=0 corresponds to f1,
3695   //                 * fpr=1 to f2, etc.
3696   //                 */
3697   //  char *overflow_arg_area;
3698   //                /* location on stack that holds
3699   //                 * the next overflow argument
3700   //                 */
3701   //  char *reg_save_area;
3702   //               /* where r3:r10 and f1:f8 (if saved)
3703   //                * are stored
3704   //                */
3705   // } va_list[1];
3706 
3707   SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
3708   SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);
3709   SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
3710                                             PtrVT);
3711   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3712                                  PtrVT);
3713 
3714   uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
3715   SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT);
3716 
3717   uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
3718   SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT);
3719 
3720   uint64_t FPROffset = 1;
3721   SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT);
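  // With a 32-bit pointer these evaluate to FrameOffset = 4, StackOffset = 3,
  // and FPROffset = 1, placing gpr at offset 0, fpr at 1, overflow_arg_area
  // at 4, and reg_save_area at 8 within the va_list.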
3722 
3723   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3724 
  // Store first byte: number of int regs.
3726   SDValue firstStore =
3727       DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1),
3728                         MachinePointerInfo(SV), MVT::i8);
3729   uint64_t nextOffset = FPROffset;
3730   SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
3731                                   ConstFPROffset);
3732 
  // Store second byte: number of float regs.
3734   SDValue secondStore =
3735       DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
3736                         MachinePointerInfo(SV, nextOffset), MVT::i8);
3737   nextOffset += StackOffset;
3738   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
3739 
  // Store second word: arguments given on stack.
3741   SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
3742                                     MachinePointerInfo(SV, nextOffset));
3743   nextOffset += FrameOffset;
3744   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
3745 
  // Store third word: arguments given in registers.
3747   return DAG.getStore(thirdStore, dl, FR, nextPtr,
3748                       MachinePointerInfo(SV, nextOffset));
3749 }
3750 
3751 /// FPR - The set of FP registers that should be allocated for arguments
3752 /// on Darwin and AIX.
3753 static const MCPhysReg FPR[] = {PPC::F1,  PPC::F2,  PPC::F3, PPC::F4, PPC::F5,
3754                                 PPC::F6,  PPC::F7,  PPC::F8, PPC::F9, PPC::F10,
3755                                 PPC::F11, PPC::F12, PPC::F13};
3756 
3757 /// CalculateStackSlotSize - Calculates the size reserved for this argument on
3758 /// the stack.
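/// For example, with an 8-byte pointer size a 12-byte byval argument
/// reserves 16 bytes, unless it is an array member, which stays packed.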
3759 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
3760                                        unsigned PtrByteSize) {
3761   unsigned ArgSize = ArgVT.getStoreSize();
3762   if (Flags.isByVal())
3763     ArgSize = Flags.getByValSize();
3764 
3765   // Round up to multiples of the pointer size, except for array members,
3766   // which are always packed.
3767   if (!Flags.isInConsecutiveRegs())
3768     ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3769 
3770   return ArgSize;
3771 }
3772 
3773 /// CalculateStackSlotAlignment - Calculates the alignment of this argument
3774 /// on the stack.
3775 static Align CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
3776                                          ISD::ArgFlagsTy Flags,
3777                                          unsigned PtrByteSize) {
3778   Align Alignment(PtrByteSize);
3779 
3780   // Altivec parameters are padded to a 16 byte boundary.
3781   if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3782       ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3783       ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3784       ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3785     Alignment = Align(16);
3786 
3787   // ByVal parameters are aligned as requested.
3788   if (Flags.isByVal()) {
3789     auto BVAlign = Flags.getNonZeroByValAlign();
3790     if (BVAlign > PtrByteSize) {
3791       if (BVAlign.value() % PtrByteSize != 0)
3792         llvm_unreachable(
3793             "ByVal alignment is not a multiple of the pointer size");
3794 
3795       Alignment = BVAlign;
3796     }
3797   }
3798 
3799   // Array members are always packed to their original alignment.
3800   if (Flags.isInConsecutiveRegs()) {
3801     // If the array member was split into multiple registers, the first
3802     // needs to be aligned to the size of the full type.  (Except for
3803     // ppcf128, which is only aligned as its f64 components.)
3804     if (Flags.isSplit() && OrigVT != MVT::ppcf128)
3805       Alignment = Align(OrigVT.getStoreSize());
3806     else
3807       Alignment = Align(ArgVT.getStoreSize());
3808   }
3809 
3810   return Alignment;
3811 }
3812 
3813 /// CalculateStackSlotUsed - Return whether this argument will use its
3814 /// stack slot (instead of being passed in registers).  ArgOffset,
3815 /// AvailableFPRs, and AvailableVRs must hold the current argument
3816 /// position, and will be updated to account for this argument.
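/// For example, on ELFv2 (LinkageSize 32, ParamAreaSize 64) the ninth i64
/// argument starts at offset 96 and therefore uses its stack slot.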
3817 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, ISD::ArgFlagsTy Flags,
3818                                    unsigned PtrByteSize, unsigned LinkageSize,
3819                                    unsigned ParamAreaSize, unsigned &ArgOffset,
3820                                    unsigned &AvailableFPRs,
3821                                    unsigned &AvailableVRs) {
3822   bool UseMemory = false;
3823 
3824   // Respect alignment of argument on the stack.
3825   Align Alignment =
3826       CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
3827   ArgOffset = alignTo(ArgOffset, Alignment);
3828   // If there's no space left in the argument save area, we must
3829   // use memory (this check also catches zero-sized arguments).
3830   if (ArgOffset >= LinkageSize + ParamAreaSize)
3831     UseMemory = true;
3832 
3833   // Allocate argument on the stack.
3834   ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
3835   if (Flags.isInConsecutiveRegsLast())
3836     ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3837   // If we overran the argument save area, we must use memory
3838   // (this check catches arguments passed partially in memory)
3839   if (ArgOffset > LinkageSize + ParamAreaSize)
3840     UseMemory = true;
3841 
3842   // However, if the argument is actually passed in an FPR or a VR,
3843   // we don't use memory after all.
3844   if (!Flags.isByVal()) {
3845     if (ArgVT == MVT::f32 || ArgVT == MVT::f64)
3846       if (AvailableFPRs > 0) {
3847         --AvailableFPRs;
3848         return false;
3849       }
3850     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3851         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3852         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3853         ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3854       if (AvailableVRs > 0) {
3855         --AvailableVRs;
3856         return false;
3857       }
3858   }
3859 
3860   return UseMemory;
3861 }
3862 
3863 /// EnsureStackAlignment - Round stack frame size up from NumBytes to
3864 /// ensure minimum alignment required for target.
3865 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
3866                                      unsigned NumBytes) {
3867   return alignTo(NumBytes, Lowering->getStackAlign());
3868 }
3869 
3870 SDValue PPCTargetLowering::LowerFormalArguments(
3871     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3872     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3873     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3874   if (Subtarget.isAIXABI())
3875     return LowerFormalArguments_AIX(Chain, CallConv, isVarArg, Ins, dl, DAG,
3876                                     InVals);
3877   if (Subtarget.is64BitELFABI())
3878     return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3879                                        InVals);
3880   assert(Subtarget.is32BitELFABI());
3881   return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3882                                      InVals);
3883 }
3884 
3885 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
3886     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3887     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3888     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3889 
3890   // 32-bit SVR4 ABI Stack Frame Layout:
3891   //              +-----------------------------------+
3892   //        +-->  |            Back chain             |
3893   //        |     +-----------------------------------+
3894   //        |     | Floating-point register save area |
3895   //        |     +-----------------------------------+
3896   //        |     |    General register save area     |
3897   //        |     +-----------------------------------+
3898   //        |     |          CR save word             |
3899   //        |     +-----------------------------------+
3900   //        |     |         VRSAVE save word          |
3901   //        |     +-----------------------------------+
3902   //        |     |         Alignment padding         |
3903   //        |     +-----------------------------------+
3904   //        |     |     Vector register save area     |
3905   //        |     +-----------------------------------+
3906   //        |     |       Local variable space        |
3907   //        |     +-----------------------------------+
3908   //        |     |        Parameter list area        |
3909   //        |     +-----------------------------------+
3910   //        |     |           LR save word            |
3911   //        |     +-----------------------------------+
3912   // SP-->  +---  |            Back chain             |
3913   //              +-----------------------------------+
3914   //
3915   // Specifications:
3916   //   System V Application Binary Interface PowerPC Processor Supplement
3917   //   AltiVec Technology Programming Interface Manual
3918 
3919   MachineFunction &MF = DAG.getMachineFunction();
3920   MachineFrameInfo &MFI = MF.getFrameInfo();
3921   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3922 
3923   EVT PtrVT = getPointerTy(MF.getDataLayout());
3924   // Potential tail calls could cause overwriting of argument stack slots.
3925   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3926                        (CallConv == CallingConv::Fast));
3927   const Align PtrAlign(4);
3928 
3929   // Assign locations to all of the incoming arguments.
3930   SmallVector<CCValAssign, 16> ArgLocs;
  PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                    *DAG.getContext());
3933 
3934   // Reserve space for the linkage area on the stack.
3935   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3936   CCInfo.AllocateStack(LinkageSize, PtrAlign);
3937   if (useSoftFloat())
3938     CCInfo.PreAnalyzeFormalArguments(Ins);
3939 
3940   CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
3941   CCInfo.clearWasPPCF128();
3942 
3943   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3944     CCValAssign &VA = ArgLocs[i];
3945 
3946     // Arguments stored in registers.
3947     if (VA.isRegLoc()) {
3948       const TargetRegisterClass *RC;
3949       EVT ValVT = VA.getValVT();
3950 
3951       switch (ValVT.getSimpleVT().SimpleTy) {
3952         default:
3953           llvm_unreachable("ValVT not supported by formal arguments Lowering");
3954         case MVT::i1:
3955         case MVT::i32:
3956           RC = &PPC::GPRCRegClass;
3957           break;
3958         case MVT::f32:
3959           if (Subtarget.hasP8Vector())
3960             RC = &PPC::VSSRCRegClass;
3961           else if (Subtarget.hasSPE())
3962             RC = &PPC::GPRCRegClass;
3963           else
3964             RC = &PPC::F4RCRegClass;
3965           break;
3966         case MVT::f64:
3967           if (Subtarget.hasVSX())
3968             RC = &PPC::VSFRCRegClass;
3969           else if (Subtarget.hasSPE())
3970             // SPE passes doubles in GPR pairs.
3971             RC = &PPC::GPRCRegClass;
3972           else
3973             RC = &PPC::F8RCRegClass;
3974           break;
        case MVT::v16i8:
        case MVT::v8i16:
        case MVT::v4i32:
        case MVT::v4f32:
        case MVT::v2f64:
        case MVT::v2i64:
          RC = &PPC::VRRCRegClass;
          break;
3987       }
3988 
3989       SDValue ArgValue;
3990       // Transform the arguments stored in physical registers into
3991       // virtual ones.
3992       if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) {
3993         assert(i + 1 < e && "No second half of double precision argument");
3994         unsigned RegLo = MF.addLiveIn(VA.getLocReg(), RC);
3995         unsigned RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC);
3996         SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32);
3997         SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32);
3998         if (!Subtarget.isLittleEndian())
          std::swap(ArgValueLo, ArgValueHi);
4000         ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo,
4001                                ArgValueHi);
4002       } else {
4003         unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
4004         ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
4005                                       ValVT == MVT::i1 ? MVT::i32 : ValVT);
4006         if (ValVT == MVT::i1)
4007           ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);
4008       }
4009 
4010       InVals.push_back(ArgValue);
4011     } else {
4012       // Argument stored in memory.
4013       assert(VA.isMemLoc());
4014 
      // Get the extended size of the argument type on the stack.
4016       unsigned ArgSize = VA.getLocVT().getStoreSize();
4017       // Get the actual size of the argument type
4018       unsigned ObjSize = VA.getValVT().getStoreSize();
4019       unsigned ArgOffset = VA.getLocMemOffset();
      // Stack objects in PPC32 are right-justified.
4021       ArgOffset += ArgSize - ObjSize;
4022       int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, isImmutable);
4023 
4024       // Create load nodes to retrieve arguments from the stack.
4025       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4026       InVals.push_back(
4027           DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo()));
4028     }
4029   }
4030 
4031   // Assign locations to all of the incoming aggregate by value arguments.
4032   // Aggregates passed by value are stored in the local variable space of the
4033   // caller's stack frame, right above the parameter list area.
4034   SmallVector<CCValAssign, 16> ByValArgLocs;
4035   CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
4036                       ByValArgLocs, *DAG.getContext());
4037 
4038   // Reserve stack space for the allocations in CCInfo.
4039   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
4040 
4041   CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);
4042 
4043   // Area that is at least reserved in the caller of this function.
4044   unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
4045   MinReservedArea = std::max(MinReservedArea, LinkageSize);
4046 
4047   // Set the size that is at least reserved in caller of this function.  Tail
4048   // call optimized function's reserved stack space needs to be aligned so that
4049   // taking the difference between two stack areas will result in an aligned
4050   // stack.
4051   MinReservedArea =
4052       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4053   FuncInfo->setMinReservedArea(MinReservedArea);
4054 
4055   SmallVector<SDValue, 8> MemOps;
4056 
  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
4059   if (isVarArg) {
4060     static const MCPhysReg GPArgRegs[] = {
4061       PPC::R3, PPC::R4, PPC::R5, PPC::R6,
4062       PPC::R7, PPC::R8, PPC::R9, PPC::R10,
4063     };
4064     const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
4065 
4066     static const MCPhysReg FPArgRegs[] = {
4067       PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
4068       PPC::F8
4069     };
4070     unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
4071 
    if (useSoftFloat() || hasSPE())
      NumFPArgRegs = 0;
4074 
4075     FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
4076     FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));
4077 
4078     // Make room for NumGPArgRegs and NumFPArgRegs.
4079     int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
4080                 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;
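    // For the 32-bit ABI this is 8 * 4 + 8 * 8 = 96 bytes (or 32 bytes when
    // no FPRs are saved).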
4081 
4082     FuncInfo->setVarArgsStackOffset(
4083       MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
4084                             CCInfo.getNextStackOffset(), true));
4085 
4086     FuncInfo->setVarArgsFrameIndex(
4087         MFI.CreateStackObject(Depth, Align(8), false));
4088     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4089 
4090     // The fixed integer arguments of a variadic function are stored to the
4091     // VarArgsFrameIndex on the stack so that they may be loaded by
4092     // dereferencing the result of va_next.
4093     for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
4094       // Get an existing live-in vreg, or add a new one.
4095       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
4096       if (!VReg)
4097         VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
4098 
4099       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4100       SDValue Store =
4101           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4102       MemOps.push_back(Store);
4103       // Increment the address by four for the next argument to store
4104       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
4105       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4106     }
4107 
4108     // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
4109     // is set.
4110     // The double arguments are stored to the VarArgsFrameIndex
4111     // on the stack.
4112     for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
4113       // Get an existing live-in vreg, or add a new one.
4114       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
4115       if (!VReg)
4116         VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
4117 
4118       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
4119       SDValue Store =
4120           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4121       MemOps.push_back(Store);
4122       // Increment the address by eight for the next argument to store
4123       SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
4124                                          PtrVT);
4125       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4126     }
4127   }
4128 
4129   if (!MemOps.empty())
4130     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4131 
4132   return Chain;
4133 }
4134 
4135 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4136 // value to MVT::i64 and then truncate to the correct register size.
4137 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
4138                                              EVT ObjectVT, SelectionDAG &DAG,
4139                                              SDValue ArgVal,
4140                                              const SDLoc &dl) const {
4141   if (Flags.isSExt())
4142     ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
4143                          DAG.getValueType(ObjectVT));
4144   else if (Flags.isZExt())
4145     ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
4146                          DAG.getValueType(ObjectVT));
4147 
4148   return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
4149 }
4150 
4151 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
4152     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
4153     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4154     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4155   // TODO: add description of PPC stack frame format, or at least some docs.
4156   //
4157   bool isELFv2ABI = Subtarget.isELFv2ABI();
4158   bool isLittleEndian = Subtarget.isLittleEndian();
4159   MachineFunction &MF = DAG.getMachineFunction();
4160   MachineFrameInfo &MFI = MF.getFrameInfo();
4161   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
4162 
4163   assert(!(CallConv == CallingConv::Fast && isVarArg) &&
4164          "fastcc not supported on varargs functions");
4165 
4166   EVT PtrVT = getPointerTy(MF.getDataLayout());
4167   // Potential tail calls could cause overwriting of argument stack slots.
4168   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
4169                        (CallConv == CallingConv::Fast));
4170   unsigned PtrByteSize = 8;
4171   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4172 
4173   static const MCPhysReg GPR[] = {
4174     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4175     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4176   };
4177   static const MCPhysReg VR[] = {
4178     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4179     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4180   };
4181 
4182   const unsigned Num_GPR_Regs = array_lengthof(GPR);
4183   const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
4184   const unsigned Num_VR_Regs  = array_lengthof(VR);
4185 
4186   // Do a first pass over the arguments to determine whether the ABI
4187   // guarantees that our caller has allocated the parameter save area
4188   // on its stack frame.  In the ELFv1 ABI, this is always the case;
4189   // in the ELFv2 ABI, it is true if this is a vararg function or if
4190   // any parameter is located in a stack slot.
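  // For example, an ELFv2 function that takes a single i64 argument in r3
  // and is not variadic need not assume its caller allocated the parameter
  // save area.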
4191 
4192   bool HasParameterArea = !isELFv2ABI || isVarArg;
4193   unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
4194   unsigned NumBytes = LinkageSize;
4195   unsigned AvailableFPRs = Num_FPR_Regs;
4196   unsigned AvailableVRs = Num_VR_Regs;
4197   for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
4198     if (Ins[i].Flags.isNest())
4199       continue;
4200 
4201     if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
4202                                PtrByteSize, LinkageSize, ParamAreaSize,
4203                                NumBytes, AvailableFPRs, AvailableVRs))
4204       HasParameterArea = true;
4205   }
4206 
4207   // Add DAG nodes to load the arguments or copy them out of registers.  On
4208   // entry to a function on PPC, the arguments start after the linkage area,
4209   // although the first ones are often in registers.
4210 
4211   unsigned ArgOffset = LinkageSize;
4212   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
4213   SmallVector<SDValue, 8> MemOps;
4214   Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
4215   unsigned CurArgIdx = 0;
4216   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
4217     SDValue ArgVal;
4218     bool needsLoad = false;
4219     EVT ObjectVT = Ins[ArgNo].VT;
4220     EVT OrigVT = Ins[ArgNo].ArgVT;
4221     unsigned ObjSize = ObjectVT.getStoreSize();
4222     unsigned ArgSize = ObjSize;
4223     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4224     if (Ins[ArgNo].isOrigArg()) {
4225       std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
4226       CurArgIdx = Ins[ArgNo].getOrigArgIndex();
4227     }
    // We re-align the argument offset for each argument, except when using
    // the fast calling convention, in which case we only do so when the
    // argument will actually be placed in a stack slot.
4231     unsigned CurArgOffset;
4232     Align Alignment;
4233     auto ComputeArgOffset = [&]() {
4234       /* Respect alignment of argument on the stack.  */
4235       Alignment =
4236           CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
4237       ArgOffset = alignTo(ArgOffset, Alignment);
4238       CurArgOffset = ArgOffset;
4239     };
4240 
4241     if (CallConv != CallingConv::Fast) {
4242       ComputeArgOffset();
4243 
4244       /* Compute GPR index associated with argument offset.  */
4245       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4246       GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
4247     }
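    // For instance, with the 32-byte ELFv2 linkage area, an argument at
    // offset 48 maps to GPR index 2, i.e. register X5.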
4248 
4249     // FIXME the codegen can be much improved in some cases.
4250     // We do not have to keep everything in memory.
4251     if (Flags.isByVal()) {
4252       assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
4253 
4254       if (CallConv == CallingConv::Fast)
4255         ComputeArgOffset();
4256 
      // ObjSize is the true size; ArgSize is ObjSize rounded up to a
      // multiple of the register size.
4258       ObjSize = Flags.getByValSize();
4259       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4260       // Empty aggregate parameters do not take up registers.  Examples:
4261       //   struct { } a;
4262       //   union  { } b;
4263       //   int c[0];
4264       // etc.  However, we have to provide a place-holder in InVals, so
4265       // pretend we have an 8-byte item at the current address for that
4266       // purpose.
4267       if (!ObjSize) {
4268         int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
4269         SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4270         InVals.push_back(FIN);
4271         continue;
4272       }
4273 
4274       // Create a stack object covering all stack doublewords occupied
4275       // by the argument.  If the argument is (fully or partially) on
4276       // the stack, or if the argument is fully in registers but the
      // caller has allocated the parameter save area anyway, we can refer
4278       // directly to the caller's stack frame.  Otherwise, create a
4279       // local copy in our own frame.
4280       int FI;
4281       if (HasParameterArea ||
4282           ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
4283         FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true);
4284       else
4285         FI = MFI.CreateStackObject(ArgSize, Alignment, false);
4286       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4287 
4288       // Handle aggregates smaller than 8 bytes.
4289       if (ObjSize < PtrByteSize) {
4290         // The value of the object is its address, which differs from the
4291         // address of the enclosing doubleword on big-endian systems.
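        // For example, a 2-byte aggregate occupies bytes 6 and 7 of its
        // doubleword on big-endian, so its address is the slot address
        // plus 6.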
4292         SDValue Arg = FIN;
4293         if (!isLittleEndian) {
4294           SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
4295           Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
4296         }
4297         InVals.push_back(Arg);
4298 
4299         if (GPR_idx != Num_GPR_Regs) {
4300           unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4301           FuncInfo->addLiveInAttr(VReg, Flags);
4302           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4303           SDValue Store;
4304 
4305           if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
4306             EVT ObjType = (ObjSize == 1 ? MVT::i8 :
4307                            (ObjSize == 2 ? MVT::i16 : MVT::i32));
4308             Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
4309                                       MachinePointerInfo(&*FuncArg), ObjType);
4310           } else {
4311             // For sizes that don't fit a truncating store (3, 5, 6, 7),
4312             // store the whole register as-is to the parameter save area
4313             // slot.
4314             Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
4315                                  MachinePointerInfo(&*FuncArg));
4316           }
4317 
4318           MemOps.push_back(Store);
4319         }
4320         // Whether we copied from a register or not, advance the offset
4321         // into the parameter save area by a full doubleword.
4322         ArgOffset += PtrByteSize;
4323         continue;
4324       }
4325 
4326       // The value of the object is its address, which is the address of
4327       // its first stack doubleword.
4328       InVals.push_back(FIN);
4329 
4330       // Store whatever pieces of the object are in registers to memory.
4331       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
4332         if (GPR_idx == Num_GPR_Regs)
4333           break;
4334 
4335         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4336         FuncInfo->addLiveInAttr(VReg, Flags);
4337         SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4338         SDValue Addr = FIN;
4339         if (j) {
4340           SDValue Off = DAG.getConstant(j, dl, PtrVT);
4341           Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
4342         }
4343         SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
4344                                      MachinePointerInfo(&*FuncArg, j));
4345         MemOps.push_back(Store);
4346         ++GPR_idx;
4347       }
4348       ArgOffset += ArgSize;
4349       continue;
4350     }
4351 
4352     switch (ObjectVT.getSimpleVT().SimpleTy) {
4353     default: llvm_unreachable("Unhandled argument type!");
4354     case MVT::i1:
4355     case MVT::i32:
4356     case MVT::i64:
4357       if (Flags.isNest()) {
4358         // The 'nest' parameter, if any, is passed in R11.
4359         unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
4360         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4361 
4362         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4363           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4364 
4365         break;
4366       }
4367 
4368       // These can be scalar arguments or elements of an integer array type
4369       // passed directly.  Clang may use those instead of "byval" aggregate
4370       // types to avoid forcing arguments to memory unnecessarily.
4371       if (GPR_idx != Num_GPR_Regs) {
4372         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4373         FuncInfo->addLiveInAttr(VReg, Flags);
4374         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4375 
4376         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4377           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4378           // value to MVT::i64 and then truncate to the correct register size.
4379           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4380       } else {
4381         if (CallConv == CallingConv::Fast)
4382           ComputeArgOffset();
4383 
4384         needsLoad = true;
4385         ArgSize = PtrByteSize;
4386       }
4387       if (CallConv != CallingConv::Fast || needsLoad)
4388         ArgOffset += 8;
4389       break;
4390 
4391     case MVT::f32:
4392     case MVT::f64:
4393       // These can be scalar arguments or elements of a float array type
      // passed directly.  The latter are used to implement ELFv2 homogeneous
4395       // float aggregates.
4396       if (FPR_idx != Num_FPR_Regs) {
4397         unsigned VReg;
4398 
4399         if (ObjectVT == MVT::f32)
4400           VReg = MF.addLiveIn(FPR[FPR_idx],
4401                               Subtarget.hasP8Vector()
4402                                   ? &PPC::VSSRCRegClass
4403                                   : &PPC::F4RCRegClass);
4404         else
4405           VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
4406                                                 ? &PPC::VSFRCRegClass
4407                                                 : &PPC::F8RCRegClass);
4408 
4409         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4410         ++FPR_idx;
4411       } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
4412         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
4413         // once we support fp <-> gpr moves.
4414 
4415         // This can only ever happen in the presence of f32 array types,
4416         // since otherwise we never run out of FPRs before running out
4417         // of GPRs.
4418         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4419         FuncInfo->addLiveInAttr(VReg, Flags);
4420         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4421 
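        // An f32 arriving in a GPR occupies either the high or the low
        // 32 bits of the doubleword, depending on the argument offset and
        // the endianness; shift it into the low half before truncating.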
4422         if (ObjectVT == MVT::f32) {
4423           if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
4424             ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
4425                                  DAG.getConstant(32, dl, MVT::i32));
4426           ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
4427         }
4428 
4429         ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
4430       } else {
4431         if (CallConv == CallingConv::Fast)
4432           ComputeArgOffset();
4433 
4434         needsLoad = true;
4435       }
4436 
4437       // When passing an array of floats, the array occupies consecutive
4438       // space in the argument area; only round up to the next doubleword
4439       // at the end of the array.  Otherwise, each float takes 8 bytes.
4440       if (CallConv != CallingConv::Fast || needsLoad) {
4441         ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
4442         ArgOffset += ArgSize;
4443         if (Flags.isInConsecutiveRegsLast())
4444           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4445       }
4446       break;
4447     case MVT::v4f32:
4448     case MVT::v4i32:
4449     case MVT::v8i16:
4450     case MVT::v16i8:
4451     case MVT::v2f64:
4452     case MVT::v2i64:
4453     case MVT::v1i128:
4454     case MVT::f128:
4455       // These can be scalar arguments or elements of a vector array type
      // passed directly.  The latter are used to implement ELFv2 homogeneous
4457       // vector aggregates.
4458       if (VR_idx != Num_VR_Regs) {
4459         unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
4460         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4461         ++VR_idx;
4462       } else {
4463         if (CallConv == CallingConv::Fast)
4464           ComputeArgOffset();
4465         needsLoad = true;
4466       }
4467       if (CallConv != CallingConv::Fast || needsLoad)
4468         ArgOffset += 16;
4469       break;
4470     }
4471 
4472     // We need to load the argument to a virtual register if we determined
4473     // above that we ran out of physical registers of the appropriate type.
4474     if (needsLoad) {
4475       if (ObjSize < ArgSize && !isLittleEndian)
4476         CurArgOffset += ArgSize - ObjSize;
4477       int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
4478       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4479       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4480     }
4481 
4482     InVals.push_back(ArgVal);
4483   }
4484 
4485   // Area that is at least reserved in the caller of this function.
4486   unsigned MinReservedArea;
4487   if (HasParameterArea)
4488     MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
4489   else
4490     MinReservedArea = LinkageSize;
4491 
4492   // Set the size that is at least reserved in caller of this function.  Tail
4493   // call optimized functions' reserved stack space needs to be aligned so that
4494   // taking the difference between two stack areas will result in an aligned
4495   // stack.
4496   MinReservedArea =
4497       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4498   FuncInfo->setMinReservedArea(MinReservedArea);
4499 
4500   // If the function takes variable number of arguments, make a frame index for
4501   // the start of the first vararg value... for expansion of llvm.va_start.
  // The ELFv2 ABI spec says:
  // C programs that are intended to be *portable* across different compilers
  // and architectures must use the header file <stdarg.h> to deal with
  // variable argument lists.
4506   if (isVarArg && MFI.hasVAStart()) {
4507     int Depth = ArgOffset;
4508 
4509     FuncInfo->setVarArgsFrameIndex(
4510       MFI.CreateFixedObject(PtrByteSize, Depth, true));
4511     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4512 
4513     // If this function is vararg, store any remaining integer argument regs
4514     // to their spots on the stack so that they may be loaded by dereferencing
4515     // the result of va_next.
4516     for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4517          GPR_idx < Num_GPR_Regs; ++GPR_idx) {
4518       unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4519       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4520       SDValue Store =
4521           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4522       MemOps.push_back(Store);
      // Increment the address by eight for the next argument to store.
4524       SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
4525       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4526     }
4527   }
4528 
4529   if (!MemOps.empty())
4530     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4531 
4532   return Chain;
4533 }
4534 
4535 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
4536 /// adjusted to accommodate the arguments for the tailcall.
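/// For example, if the caller reserved 112 bytes and the tail call's
/// parameters need only 64, SPDiff is 48; a negative SPDiff means the
/// callee needs more argument space than the caller reserved.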
4537 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
4538                                    unsigned ParamSize) {
4539 
4540   if (!isTailCall) return 0;
4541 
4542   PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
4543   unsigned CallerMinReservedArea = FI->getMinReservedArea();
4544   int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
4545   // Remember only if the new adjustment is bigger.
4546   if (SPDiff < FI->getTailCallSPDelta())
4547     FI->setTailCallSPDelta(SPDiff);
4548 
4549   return SPDiff;
4550 }
4551 
4552 static bool isFunctionGlobalAddress(SDValue Callee);
4553 
4554 static bool callsShareTOCBase(const Function *Caller, SDValue Callee,
4555                               const TargetMachine &TM) {
4556   // It does not make sense to call callsShareTOCBase() with a caller that
4557   // is PC Relative since PC Relative callers do not have a TOC.
4558 #ifndef NDEBUG
4559   const PPCSubtarget *STICaller = &TM.getSubtarget<PPCSubtarget>(*Caller);
4560   assert(!STICaller->isUsingPCRelativeCalls() &&
4561          "PC Relative callers do not have a TOC and cannot share a TOC Base");
4562 #endif
4563 
4564   // Callee is either a GlobalAddress or an ExternalSymbol. ExternalSymbols
4565   // don't have enough information to determine if the caller and callee share
  // the same TOC base, so we have to pessimistically assume they don't for
4567   // correctness.
4568   GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4569   if (!G)
4570     return false;
4571 
4572   const GlobalValue *GV = G->getGlobal();
4573 
4574   // If the callee is preemptable, then the static linker will use a plt-stub
4575   // which saves the toc to the stack, and needs a nop after the call
4576   // instruction to convert to a toc-restore.
4577   if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
4578     return false;
4579 
4580   // Functions with PC Relative enabled may clobber the TOC in the same DSO.
4581   // We may need a TOC restore in the situation where the caller requires a
4582   // valid TOC but the callee is PC Relative and does not.
4583   const Function *F = dyn_cast<Function>(GV);
4584   const GlobalAlias *Alias = dyn_cast<GlobalAlias>(GV);
4585 
4586   // If we have an Alias we can try to get the function from there.
4587   if (Alias) {
4588     const GlobalObject *GlobalObj = Alias->getBaseObject();
4589     F = dyn_cast<Function>(GlobalObj);
4590   }
4591 
4592   // If we still have no valid function pointer we do not have enough
4593   // information to determine if the callee uses PC Relative calls so we must
4594   // assume that it does.
4595   if (!F)
4596     return false;
4597 
4598   // If the callee uses PC Relative we cannot guarantee that the callee won't
4599   // clobber the TOC of the caller and so we must assume that the two
4600   // functions do not share a TOC base.
4601   const PPCSubtarget *STICallee = &TM.getSubtarget<PPCSubtarget>(*F);
4602   if (STICallee->isUsingPCRelativeCalls())
4603     return false;
4604 
4605   // If the GV is not a strong definition then we need to assume it can be
4606   // replaced by another function at link time. The function that replaces
4607   // it may not share the same TOC as the caller since the callee may be
4608   // replaced by a PC Relative version of the same function.
4609   if (!GV->isStrongDefinitionForLinker())
4610     return false;
4611 
4612   // The medium and large code models are expected to provide a sufficiently
4613   // large TOC to provide all data addressing needs of a module with a
4614   // single TOC.
4615   if (CodeModel::Medium == TM.getCodeModel() ||
4616       CodeModel::Large == TM.getCodeModel())
4617     return true;
4618 
4619   // Any explicitly-specified sections and section prefixes must also match.
4620   // Also, if we're using -ffunction-sections, then each function is always in
4621   // a different section (the same is true for COMDAT functions).
4622   if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
4623       GV->getSection() != Caller->getSection())
4624     return false;
4625   if (const auto *F = dyn_cast<Function>(GV)) {
4626     if (F->getSectionPrefix() != Caller->getSectionPrefix())
4627       return false;
4628   }
4629 
4630   return true;
4631 }
4632 
4633 static bool
4634 needStackSlotPassParameters(const PPCSubtarget &Subtarget,
4635                             const SmallVectorImpl<ISD::OutputArg> &Outs) {
4636   assert(Subtarget.is64BitELFABI());
4637 
4638   const unsigned PtrByteSize = 8;
4639   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4640 
4641   static const MCPhysReg GPR[] = {
4642     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4643     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4644   };
4645   static const MCPhysReg VR[] = {
4646     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4647     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4648   };
4649 
4650   const unsigned NumGPRs = array_lengthof(GPR);
4651   const unsigned NumFPRs = 13;
4652   const unsigned NumVRs = array_lengthof(VR);
4653   const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
4654 
4655   unsigned NumBytes = LinkageSize;
4656   unsigned AvailableFPRs = NumFPRs;
4657   unsigned AvailableVRs = NumVRs;
4658 
4659   for (const ISD::OutputArg& Param : Outs) {
4660     if (Param.Flags.isNest()) continue;
4661 
4662     if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags, PtrByteSize,
4663                                LinkageSize, ParamAreaSize, NumBytes,
4664                                AvailableFPRs, AvailableVRs))
4665       return true;
4666   }
4667   return false;
4668 }
4669 
4670 static bool hasSameArgumentList(const Function *CallerFn, const CallBase &CB) {
4671   if (CB.arg_size() != CallerFn->arg_size())
4672     return false;
4673 
4674   auto CalleeArgIter = CB.arg_begin();
4675   auto CalleeArgEnd = CB.arg_end();
4676   Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();
4677 
4678   for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
4679     const Value* CalleeArg = *CalleeArgIter;
4680     const Value* CallerArg = &(*CallerArgIter);
4681     if (CalleeArg == CallerArg)
4682       continue;
4683 
4684     // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
4685     //        tail call @callee([4 x i64] undef, [4 x i64] %b)
4686     //      }
4687     // 1st argument of callee is undef and has the same type as caller.
4688     if (CalleeArg->getType() == CallerArg->getType() &&
4689         isa<UndefValue>(CalleeArg))
4690       continue;
4691 
4692     return false;
4693   }
4694 
4695   return true;
4696 }
4697 
// Returns true if TCO is possible between the caller's and callee's
// calling conventions.
4700 static bool
4701 areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
4702                                     CallingConv::ID CalleeCC) {
4703   // Tail calls are possible with fastcc and ccc.
  auto isTailCallableCC = [](CallingConv::ID CC) {
    return CC == CallingConv::C || CC == CallingConv::Fast;
  };
4707   if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
4708     return false;
4709 
  // We can safely tail call both fastcc and ccc callees from a C calling
  // convention caller. If the caller is fastcc, we may have less stack space
4712   // than a non-fastcc caller with the same signature so disable tail-calls in
4713   // that case.
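  // For example, a ccc caller may tail-call both ccc and fastcc callees,
  // whereas a fastcc caller may only tail-call fastcc callees.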
4714   return CallerCC == CallingConv::C || CallerCC == CalleeCC;
4715 }
4716 
4717 bool PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
4718     SDValue Callee, CallingConv::ID CalleeCC, const CallBase *CB, bool isVarArg,
4719     const SmallVectorImpl<ISD::OutputArg> &Outs,
4720     const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
4721   bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
4722 
4723   if (DisableSCO && !TailCallOpt) return false;
4724 
4725   // Variadic argument functions are not supported.
4726   if (isVarArg) return false;
4727 
4728   auto &Caller = DAG.getMachineFunction().getFunction();
4729   // Check that the calling conventions are compatible for tco.
4730   if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
4731     return false;
4732 
  // A caller containing any byval parameter is not supported.
4734   if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
4735     return false;
4736 
  // A callee containing any byval parameter is not supported either.
  // Note: This is a quick workaround, because in some cases, e.g.
4739   // caller's stack size > callee's stack size, we are still able to apply
4740   // sibling call optimization. For example, gcc is able to do SCO for caller1
4741   // in the following example, but not for caller2.
4742   //   struct test {
4743   //     long int a;
4744   //     char ary[56];
4745   //   } gTest;
4746   //   __attribute__((noinline)) int callee(struct test v, struct test *b) {
4747   //     b->a = v.a;
4748   //     return 0;
4749   //   }
4750   //   void caller1(struct test a, struct test c, struct test *b) {
4751   //     callee(gTest, b); }
4752   //   void caller2(struct test *b) { callee(gTest, b); }
4753   if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
4754     return false;
4755 
  // If callee and caller use different calling conventions, we cannot pass
  // parameters on the stack since offsets for the parameter area may differ.
4758   if (Caller.getCallingConv() != CalleeCC &&
4759       needStackSlotPassParameters(Subtarget, Outs))
4760     return false;
4761 
4762   // All variants of 64-bit ELF ABIs without PC-Relative addressing require that
4763   // the caller and callee share the same TOC for TCO/SCO. If the caller and
4764   // callee potentially have different TOC bases then we cannot tail call since
4765   // we need to restore the TOC pointer after the call.
4766   // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
4767   // We cannot guarantee this for indirect calls or calls to external functions.
4768   // When PC-Relative addressing is used, the concept of the TOC is no longer
4769   // applicable so this check is not required.
4770   // Check first for indirect calls.
4771   if (!Subtarget.isUsingPCRelativeCalls() &&
4772       !isFunctionGlobalAddress(Callee) && !isa<ExternalSymbolSDNode>(Callee))
4773     return false;
4774 
4775   // Check if we share the TOC base.
4776   if (!Subtarget.isUsingPCRelativeCalls() &&
4777       !callsShareTOCBase(&Caller, Callee, getTargetMachine()))
4778     return false;
4779 
4780   // TCO allows altering callee ABI, so we don't have to check further.
4781   if (CalleeCC == CallingConv::Fast && TailCallOpt)
4782     return true;
4783 
4784   if (DisableSCO) return false;
4785 
  // If the callee uses the same argument list as the caller, we can apply
  // SCO in this case. If not, we need to check whether the callee needs
  // stack space for passing arguments.
4789   // PC Relative tail calls may not have a CallBase.
4790   // If there is no CallBase we cannot verify if we have the same argument
4791   // list so assume that we don't have the same argument list.
  if (CB && !hasSameArgumentList(&Caller, *CB) &&
      needStackSlotPassParameters(Subtarget, Outs))
    return false;
  if (!CB && needStackSlotPassParameters(Subtarget, Outs))
    return false;
4797 
4798   return true;
4799 }
4800 
4801 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
4802 /// for tail call optimization. Targets which want to do tail call
4803 /// optimization should implement this function.
4804 bool
4805 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
4806                                                      CallingConv::ID CalleeCC,
4807                                                      bool isVarArg,
4808                                       const SmallVectorImpl<ISD::InputArg> &Ins,
4809                                                      SelectionDAG& DAG) const {
4810   if (!getTargetMachine().Options.GuaranteedTailCallOpt)
4811     return false;
4812 
4813   // Variable argument functions are not supported.
4814   if (isVarArg)
4815     return false;
4816 
4817   MachineFunction &MF = DAG.getMachineFunction();
4818   CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
4819   if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
    // Functions containing byval parameters are not supported.
4821     for (unsigned i = 0; i != Ins.size(); i++) {
4822        ISD::ArgFlagsTy Flags = Ins[i].Flags;
4823        if (Flags.isByVal()) return false;
4824     }
4825 
4826     // Non-PIC/GOT tail calls are supported.
4827     if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
4828       return true;
4829 
4830     // At the moment we can only do local tail calls (in same module, hidden
4831     // or protected) if we are generating PIC.
4832     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
4833       return G->getGlobal()->hasHiddenVisibility()
4834           || G->getGlobal()->hasProtectedVisibility();
4835   }
4836 
4837   return false;
4838 }
4839 
/// isBLACompatibleAddress - Return the immediate to use if the specified
/// 32-bit value is representable in the immediate field of a BxA instruction.
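/// For example, an address of 0x1000 is representable and yields the
/// immediate field value 0x400 (the address shifted right by two).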
4842 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
4843   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
4844   if (!C) return nullptr;
4845 
4846   int Addr = C->getZExtValue();
4847   if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
4848       SignExtend32<26>(Addr) != Addr)
4849     return nullptr;  // Top 6 bits have to be sext of immediate.
4850 
4851   return DAG
4852       .getConstant(
4853           (int)C->getZExtValue() >> 2, SDLoc(Op),
4854           DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
4855       .getNode();
4856 }
4857 
4858 namespace {
4859 
4860 struct TailCallArgumentInfo {
4861   SDValue Arg;
4862   SDValue FrameIdxOp;
4863   int FrameIdx = 0;
4864 
4865   TailCallArgumentInfo() = default;
4866 };
4867 
4868 } // end anonymous namespace
4869 
4870 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
4871 static void StoreTailCallArgumentsToStackSlot(
4872     SelectionDAG &DAG, SDValue Chain,
4873     const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
4874     SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
4875   for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
4876     SDValue Arg = TailCallArgs[i].Arg;
4877     SDValue FIN = TailCallArgs[i].FrameIdxOp;
4878     int FI = TailCallArgs[i].FrameIdx;
4879     // Store relative to framepointer.
4880     MemOpChains.push_back(DAG.getStore(
4881         Chain, dl, Arg, FIN,
4882         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
4883   }
4884 }
4885 
/// EmitTailCallStoreFPAndRetAddr - Move the return address to the appropriate
/// stack slot for the tail call optimized function call.
4888 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
4889                                              SDValue OldRetAddr, SDValue OldFP,
4890                                              int SPDiff, const SDLoc &dl) {
4891   if (SPDiff) {
4892     // Calculate the new stack slot for the return address.
4893     MachineFunction &MF = DAG.getMachineFunction();
4894     const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
4895     const PPCFrameLowering *FL = Subtarget.getFrameLowering();
4896     bool isPPC64 = Subtarget.isPPC64();
4897     int SlotSize = isPPC64 ? 8 : 4;
4898     int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
4899     int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize,
4900                                                          NewRetAddrLoc, true);
4901     EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4902     SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
4903     Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
4904                          MachinePointerInfo::getFixedStack(MF, NewRetAddr));
4905   }
4906   return Chain;
4907 }
4908 
/// CalculateTailCallArgDest - Remember the argument for later processing and
/// calculate its position on the stack.
4911 static void
4912 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
4913                          SDValue Arg, int SPDiff, unsigned ArgOffset,
4914                      SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
4915   int Offset = ArgOffset + SPDiff;
4916   uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
4917   int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
4918   EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4919   SDValue FIN = DAG.getFrameIndex(FI, VT);
4920   TailCallArgumentInfo Info;
4921   Info.Arg = Arg;
4922   Info.FrameIdxOp = FIN;
4923   Info.FrameIdx = FI;
4924   TailCallArguments.push_back(Info);
4925 }
4926 
/// EmitTailCallLoadFPAndRetAddr - Emit a load from the return address stack
/// slot. Returns the chain as result and the loaded return address in
/// LROpOut. Used when tail calling.
4930 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
4931     SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
4932     SDValue &FPOpOut, const SDLoc &dl) const {
4933   if (SPDiff) {
4934     // Load the LR and FP stack slot for later adjusting.
4935     EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
4936     LROpOut = getReturnAddrFrameIndex(DAG);
4937     LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
4938     Chain = SDValue(LROpOut.getNode(), 1);
4939   }
4940   return Chain;
4941 }
4942 
4943 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
4944 /// by "Src" to address "Dst" of size "Size".  Alignment information is
4945 /// specified by the specific parameter attribute. The copy will be passed as
4946 /// a byval function parameter.
4947 /// Sometimes what we are copying is the end of a larger object, the part that
4948 /// does not fit in registers.
4949 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
4950                                          SDValue Chain, ISD::ArgFlagsTy Flags,
4951                                          SelectionDAG &DAG, const SDLoc &dl) {
4952   SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
4953   return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode,
4954                        Flags.getNonZeroByValAlign(), false, false, false,
4955                        MachinePointerInfo(), MachinePointerInfo());
4956 }
4957 
4958 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
4959 /// tail calls.
4960 static void LowerMemOpCallTo(
4961     SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
4962     SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
4963     bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
4964     SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
4965   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4966   if (!isTailCall) {
4967     if (isVector) {
4968       SDValue StackPtr;
4969       if (isPPC64)
4970         StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
4971       else
4972         StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
4973       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
4974                            DAG.getConstant(ArgOffset, dl, PtrVT));
4975     }
4976     MemOpChains.push_back(
4977         DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
  } else {
    // Calculate and remember argument location.
    CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
                             TailCallArguments);
  }
4981 }
4982 
4983 static void
4984 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
4985                 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp,
4986                 SDValue FPOp,
4987                 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
4988   // Emit a sequence of copyto/copyfrom virtual registers for arguments that
4989   // might overwrite each other in case of tail call optimization.
4990   SmallVector<SDValue, 8> MemOpChains2;
4991   // Do not flag preceding copytoreg stuff together with the following stuff.
4992   InFlag = SDValue();
4993   StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
4994                                     MemOpChains2, dl);
4995   if (!MemOpChains2.empty())
4996     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
4997 
4998   // Store the return address to the appropriate stack slot.
4999   Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl);
5000 
5001   // Emit callseq_end just before tailcall node.
5002   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
5003                              DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
5004   InFlag = Chain.getValue(1);
5005 }
5006 
// Is this global address that of a function that can be called by name (as
// opposed to something that must hold a descriptor for an indirect call)?
5009 static bool isFunctionGlobalAddress(SDValue Callee) {
5010   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
5011     if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
5012         Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
5013       return false;
5014 
5015     return G->getGlobal()->getValueType()->isFunctionTy();
5016   }
5017 
5018   return false;
5019 }
5020 
5021 SDValue PPCTargetLowering::LowerCallResult(
5022     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
5023     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5024     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
5025   SmallVector<CCValAssign, 16> RVLocs;
5026   CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
5027                     *DAG.getContext());
5028 
5029   CCRetInfo.AnalyzeCallResult(
5030       Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
5031                ? RetCC_PPC_Cold
5032                : RetCC_PPC);
5033 
5034   // Copy all of the result registers out of their specified physreg.
5035   for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
5036     CCValAssign &VA = RVLocs[i];
5037     assert(VA.isRegLoc() && "Can only return in registers!");
5038 
5039     SDValue Val;
5040 
5041     if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
5042       SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
5043                                       InFlag);
5044       Chain = Lo.getValue(1);
5045       InFlag = Lo.getValue(2);
5046       VA = RVLocs[++i]; // skip ahead to next loc
5047       SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
5048                                       InFlag);
5049       Chain = Hi.getValue(1);
5050       InFlag = Hi.getValue(2);
5051       if (!Subtarget.isLittleEndian())
        std::swap(Lo, Hi);
5053       Val = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, Lo, Hi);
5054     } else {
5055       Val = DAG.getCopyFromReg(Chain, dl,
5056                                VA.getLocReg(), VA.getLocVT(), InFlag);
5057       Chain = Val.getValue(1);
5058       InFlag = Val.getValue(2);
5059     }
5060 
5061     switch (VA.getLocInfo()) {
5062     default: llvm_unreachable("Unknown loc info!");
5063     case CCValAssign::Full: break;
5064     case CCValAssign::AExt:
5065       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5066       break;
5067     case CCValAssign::ZExt:
5068       Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
5069                         DAG.getValueType(VA.getValVT()));
5070       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5071       break;
5072     case CCValAssign::SExt:
5073       Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
5074                         DAG.getValueType(VA.getValVT()));
5075       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5076       break;
5077     }
5078 
5079     InVals.push_back(Val);
5080   }
5081 
5082   return Chain;
5083 }
5084 
5085 static bool isIndirectCall(const SDValue &Callee, SelectionDAG &DAG,
5086                            const PPCSubtarget &Subtarget, bool isPatchPoint) {
5087   // PatchPoint calls are not indirect.
5088   if (isPatchPoint)
5089     return false;
5090 
5091   if (isFunctionGlobalAddress(Callee) || isa<ExternalSymbolSDNode>(Callee))
5092     return false;
5093 
  // Darwin and 32-bit ELF can use a BLA. The descriptor-based ABIs cannot
  // because the immediate function pointer points to a descriptor instead of
  // a function entry point. The ELFv2 ABI cannot use a BLA because the function
5097   // pointer immediate points to the global entry point, while the BLA would
5098   // need to jump to the local entry point (see rL211174).
5099   if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI() &&
5100       isBLACompatibleAddress(Callee, DAG))
5101     return false;
5102 
5103   return true;
5104 }
5105 
5106 // AIX and 64-bit ELF ABIs w/o PCRel require a TOC save/restore around calls.
5107 static inline bool isTOCSaveRestoreRequired(const PPCSubtarget &Subtarget) {
5108   return Subtarget.isAIXABI() ||
5109          (Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls());
5110 }
5111 
5112 static unsigned getCallOpcode(PPCTargetLowering::CallFlags CFlags,
5113                               const Function &Caller,
5114                               const SDValue &Callee,
5115                               const PPCSubtarget &Subtarget,
5116                               const TargetMachine &TM) {
5117   if (CFlags.IsTailCall)
5118     return PPCISD::TC_RETURN;
5119 
5120   // This is a call through a function pointer.
5121   if (CFlags.IsIndirect) {
    // AIX and the 64-bit ELF ABIs need to maintain the TOC pointer across
    // indirect calls. The save of the caller's TOC pointer to the stack will be
    // inserted into the DAG as part of call lowering. The restore of the TOC
    // pointer is modeled by using a pseudo instruction for the call opcode that
    // represents the 2 instruction sequence of an indirect branch and link,
    // immediately followed by a load of the TOC pointer from the stack save
    // slot into gpr2. For 64-bit ELFv2 ABI with PCRel, do not restore the TOC
    // as it is not saved or used.
5130     return isTOCSaveRestoreRequired(Subtarget) ? PPCISD::BCTRL_LOAD_TOC
5131                                                : PPCISD::BCTRL;
5132   }
5133 
5134   if (Subtarget.isUsingPCRelativeCalls()) {
5135     assert(Subtarget.is64BitELFABI() && "PC Relative is only on ELF ABI.");
5136     return PPCISD::CALL_NOTOC;
5137   }
5138 
  // The ABIs that maintain a TOC pointer accross calls need to have a nop
5140   // immediately following the call instruction if the caller and callee may
5141   // have different TOC bases. At link time if the linker determines the calls
5142   // may not share a TOC base, the call is redirected to a trampoline inserted
  // by the linker. The trampoline will (among other things) save the caller's
  // TOC pointer at an ABI-designated offset in the linkage area and the linker
5145   // will rewrite the nop to be a load of the TOC pointer from the linkage area
5146   // into gpr2.
5147   if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI())
5148     return callsShareTOCBase(&Caller, Callee, TM) ? PPCISD::CALL
5149                                                   : PPCISD::CALL_NOP;
5150 
5151   return PPCISD::CALL;
5152 }
5153 
5154 static SDValue transformCallee(const SDValue &Callee, SelectionDAG &DAG,
5155                                const SDLoc &dl, const PPCSubtarget &Subtarget) {
5156   if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI())
5157     if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
5158       return SDValue(Dest, 0);
5159 
5160   // Returns true if the callee is local, and false otherwise.
5161   auto isLocalCallee = [&]() {
5162     const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
5163     const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
5164     const GlobalValue *GV = G ? G->getGlobal() : nullptr;
5165 
5166     return DAG.getTarget().shouldAssumeDSOLocal(*Mod, GV) &&
5167            !dyn_cast_or_null<GlobalIFunc>(GV);
5168   };
5169 
5170   // The PLT is only used in 32-bit ELF PIC mode.  Attempting to use the PLT in
5171   // a static relocation model causes some versions of GNU LD (2.17.50, at
5172   // least) to force BSS-PLT, instead of secure-PLT, even if all objects are
5173   // built with secure-PLT.
5174   bool UsePlt =
5175       Subtarget.is32BitELFABI() && !isLocalCallee() &&
5176       Subtarget.getTargetMachine().getRelocationModel() == Reloc::PIC_;
5177 
5178   const auto getAIXFuncEntryPointSymbolSDNode = [&](const GlobalValue *GV) {
5179     const TargetMachine &TM = Subtarget.getTargetMachine();
5180     const TargetLoweringObjectFile *TLOF = TM.getObjFileLowering();
5181     MCSymbolXCOFF *S =
5182         cast<MCSymbolXCOFF>(TLOF->getFunctionEntryPointSymbol(GV, TM));
5183 
5184     MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
5185     return DAG.getMCSymbol(S, PtrVT);
5186   };
5187 
5188   if (isFunctionGlobalAddress(Callee)) {
5189     const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
5190 
5191     if (Subtarget.isAIXABI()) {
5192       assert(!isa<GlobalIFunc>(GV) && "IFunc is not supported on AIX.");
5193       return getAIXFuncEntryPointSymbolSDNode(GV);
5194     }
5195     return DAG.getTargetGlobalAddress(GV, dl, Callee.getValueType(), 0,
5196                                       UsePlt ? PPCII::MO_PLT : 0);
5197   }
5198 
5199   if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
5200     const char *SymName = S->getSymbol();
5201     if (Subtarget.isAIXABI()) {
5202       // If there exists a user-declared function whose name is the same as the
5203       // ExternalSymbol's, then we pick up the user-declared version.
5204       const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
5205       if (const Function *F =
5206               dyn_cast_or_null<Function>(Mod->getNamedValue(SymName)))
5207         return getAIXFuncEntryPointSymbolSDNode(F);
5208 
5209       // On AIX, direct function calls reference the symbol for the function's
5210       // entry point, which is named by prepending a "." before the function's
5211       // C-linkage name. A Qualname is returned here because an external
5212       // function entry point is a csect with XTY_ER property.
5213       const auto getExternalFunctionEntryPointSymbol = [&](StringRef SymName) {
5214         auto &Context = DAG.getMachineFunction().getMMI().getContext();
5215         MCSectionXCOFF *Sec = Context.getXCOFFSection(
5216             (Twine(".") + Twine(SymName)).str(), SectionKind::getMetadata(),
5217             XCOFF::CsectProperties(XCOFF::XMC_PR, XCOFF::XTY_ER));
5218         return Sec->getQualNameSymbol();
5219       };
5220 
5221       SymName = getExternalFunctionEntryPointSymbol(SymName)->getName().data();
5222     }
5223     return DAG.getTargetExternalSymbol(SymName, Callee.getValueType(),
5224                                        UsePlt ? PPCII::MO_PLT : 0);
5225   }
5226 
5227   // No transformation needed.
5228   assert(Callee.getNode() && "What no callee?");
5229   return Callee;
5230 }
5231 
5232 static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart) {
5233   assert(CallSeqStart.getOpcode() == ISD::CALLSEQ_START &&
5234          "Expected a CALLSEQ_STARTSDNode.");
5235 
  // The last value is the chain, except when the node has glue. If the node
  // has glue, then the last value is the glue, and the chain is the second
  // to last value.
5239   SDValue LastValue = CallSeqStart.getValue(CallSeqStart->getNumValues() - 1);
5240   if (LastValue.getValueType() != MVT::Glue)
5241     return LastValue;
5242 
5243   return CallSeqStart.getValue(CallSeqStart->getNumValues() - 2);
5244 }
5245 
// Creates the node that moves a function's address into the count register
// to prepare for an indirect call instruction.
5248 static void prepareIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5249                                 SDValue &Glue, SDValue &Chain,
5250                                 const SDLoc &dl) {
5251   SDValue MTCTROps[] = {Chain, Callee, Glue};
5252   EVT ReturnTypes[] = {MVT::Other, MVT::Glue};
5253   Chain = DAG.getNode(PPCISD::MTCTR, dl, makeArrayRef(ReturnTypes, 2),
5254                       makeArrayRef(MTCTROps, Glue.getNode() ? 3 : 2));
5255   // The glue is the second value produced.
5256   Glue = Chain.getValue(1);
5257 }
5258 
5259 static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5260                                           SDValue &Glue, SDValue &Chain,
5261                                           SDValue CallSeqStart,
5262                                           const CallBase *CB, const SDLoc &dl,
5263                                           bool hasNest,
5264                                           const PPCSubtarget &Subtarget) {
5265   // Function pointers in the 64-bit SVR4 ABI do not point to the function
5266   // entry point, but to the function descriptor (the function entry point
5267   // address is part of the function descriptor though).
5268   // The function descriptor is a three doubleword structure with the
5269   // following fields: function entry point, TOC base address and
5270   // environment pointer.
5271   // Thus for a call through a function pointer, the following actions need
5272   // to be performed:
5273   //   1. Save the TOC of the caller in the TOC save area of its stack
  //      frame (this is done in LowerCall_64SVR4() or LowerCall_AIX()).
5275   //   2. Load the address of the function entry point from the function
5276   //      descriptor.
5277   //   3. Load the TOC of the callee from the function descriptor into r2.
5278   //   4. Load the environment pointer from the function descriptor into
5279   //      r11.
5280   //   5. Branch to the function entry point address.
5281   //   6. On return of the callee, the TOC of the caller needs to be
5282   //      restored (this is done in FinishCall()).
5283   //
5284   // The loads are scheduled at the beginning of the call sequence, and the
5285   // register copies are flagged together to ensure that no other
5286   // operations can be scheduled in between. E.g. without flagging the
5287   // copies together, a TOC access in the caller could be scheduled between
5288   // the assignment of the callee TOC and the branch to the callee, which leads
5289   // to incorrect code.
5290 
5291   // Start by loading the function address from the descriptor.
5292   SDValue LDChain = getOutputChainFromCallSeq(CallSeqStart);
5293   auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
5294                       ? (MachineMemOperand::MODereferenceable |
5295                          MachineMemOperand::MOInvariant)
5296                       : MachineMemOperand::MONone;
5297 
5298   MachinePointerInfo MPI(CB ? CB->getCalledOperand() : nullptr);
5299 
5300   // Registers used in building the DAG.
5301   const MCRegister EnvPtrReg = Subtarget.getEnvironmentPointerRegister();
5302   const MCRegister TOCReg = Subtarget.getTOCPointerRegister();
5303 
5304   // Offsets of descriptor members.
5305   const unsigned TOCAnchorOffset = Subtarget.descriptorTOCAnchorOffset();
5306   const unsigned EnvPtrOffset = Subtarget.descriptorEnvironmentPointerOffset();
5307 
5308   const MVT RegVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
5309   const unsigned Alignment = Subtarget.isPPC64() ? 8 : 4;
5310 
  // One load for the function's entry point address.
5312   SDValue LoadFuncPtr = DAG.getLoad(RegVT, dl, LDChain, Callee, MPI,
5313                                     Alignment, MMOFlags);
5314 
5315   // One for loading the TOC anchor for the module that contains the called
5316   // function.
5317   SDValue TOCOff = DAG.getIntPtrConstant(TOCAnchorOffset, dl);
5318   SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, Callee, TOCOff);
5319   SDValue TOCPtr =
5320       DAG.getLoad(RegVT, dl, LDChain, AddTOC,
5321                   MPI.getWithOffset(TOCAnchorOffset), Alignment, MMOFlags);
5322 
5323   // One for loading the environment pointer.
5324   SDValue PtrOff = DAG.getIntPtrConstant(EnvPtrOffset, dl);
5325   SDValue AddPtr = DAG.getNode(ISD::ADD, dl, RegVT, Callee, PtrOff);
5326   SDValue LoadEnvPtr =
5327       DAG.getLoad(RegVT, dl, LDChain, AddPtr,
5328                   MPI.getWithOffset(EnvPtrOffset), Alignment, MMOFlags);
5329 
5330 
5331   // Then copy the newly loaded TOC anchor to the TOC pointer.
5332   SDValue TOCVal = DAG.getCopyToReg(Chain, dl, TOCReg, TOCPtr, Glue);
5333   Chain = TOCVal.getValue(0);
5334   Glue = TOCVal.getValue(1);
5335 
5336   // If the function call has an explicit 'nest' parameter, it takes the
5337   // place of the environment pointer.
5338   assert((!hasNest || !Subtarget.isAIXABI()) &&
5339          "Nest parameter is not supported on AIX.");
5340   if (!hasNest) {
5341     SDValue EnvVal = DAG.getCopyToReg(Chain, dl, EnvPtrReg, LoadEnvPtr, Glue);
5342     Chain = EnvVal.getValue(0);
5343     Glue = EnvVal.getValue(1);
5344   }
5345 
5346   // The rest of the indirect call sequence is the same as the non-descriptor
5347   // DAG.
5348   prepareIndirectCall(DAG, LoadFuncPtr, Glue, Chain, dl);
5349 }
5350 
5351 static void
5352 buildCallOperands(SmallVectorImpl<SDValue> &Ops,
5353                   PPCTargetLowering::CallFlags CFlags, const SDLoc &dl,
5354                   SelectionDAG &DAG,
5355                   SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
5356                   SDValue Glue, SDValue Chain, SDValue &Callee, int SPDiff,
5357                   const PPCSubtarget &Subtarget) {
5358   const bool IsPPC64 = Subtarget.isPPC64();
5359   // MVT for a general purpose register.
5360   const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
5361 
5362   // First operand is always the chain.
5363   Ops.push_back(Chain);
5364 
5365   // If it's a direct call pass the callee as the second operand.
5366   if (!CFlags.IsIndirect)
5367     Ops.push_back(Callee);
5368   else {
5369     assert(!CFlags.IsPatchPoint && "Patch point calls are not indirect.");
5370 
5371     // For the TOC based ABIs, we have saved the TOC pointer to the linkage area
5372     // on the stack (this would have been done in `LowerCall_64SVR4` or
5373     // `LowerCall_AIX`). The call instruction is a pseudo instruction that
5374     // represents both the indirect branch and a load that restores the TOC
5375     // pointer from the linkage area. The operand for the TOC restore is an add
5376     // of the TOC save offset to the stack pointer. This must be the second
5377     // operand: after the chain input but before any other variadic arguments.
5378     // For 64-bit ELFv2 ABI with PCRel, do not restore the TOC as it is not
5379     // saved or used.
5380     if (isTOCSaveRestoreRequired(Subtarget)) {
5381       const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
5382 
5383       SDValue StackPtr = DAG.getRegister(StackPtrReg, RegVT);
5384       unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
5385       SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
5386       SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, StackPtr, TOCOff);
5387       Ops.push_back(AddTOC);
5388     }
5389 
5390     // Add the register used for the environment pointer.
5391     if (Subtarget.usesFunctionDescriptors() && !CFlags.HasNest)
5392       Ops.push_back(DAG.getRegister(Subtarget.getEnvironmentPointerRegister(),
5393                                     RegVT));
5394 
5395 
5396     // Add CTR register as callee so a bctr can be emitted later.
5397     if (CFlags.IsTailCall)
5398       Ops.push_back(DAG.getRegister(IsPPC64 ? PPC::CTR8 : PPC::CTR, RegVT));
5399   }
5400 
5401   // If this is a tail call add stack pointer delta.
5402   if (CFlags.IsTailCall)
5403     Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32));
5404 
5405   // Add argument registers to the end of the list so that they are known live
5406   // into the call.
5407   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
5408     Ops.push_back(DAG.getRegister(RegsToPass[i].first,
5409                                   RegsToPass[i].second.getValueType()));
5410 
5411   // We cannot add R2/X2 as an operand here for PATCHPOINT, because there is
5412   // no way to mark dependencies as implicit here.
5413   // We will add the R2/X2 dependency in EmitInstrWithCustomInserter.
5414   if ((Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) &&
5415        !CFlags.IsPatchPoint && !Subtarget.isUsingPCRelativeCalls())
5416     Ops.push_back(DAG.getRegister(Subtarget.getTOCPointerRegister(), RegVT));
5417 
5418   // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls
5419   if (CFlags.IsVarArg && Subtarget.is32BitELFABI())
5420     Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));
5421 
5422   // Add a register mask operand representing the call-preserved registers.
5423   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
5424   const uint32_t *Mask =
5425       TRI->getCallPreservedMask(DAG.getMachineFunction(), CFlags.CallConv);
5426   assert(Mask && "Missing call preserved mask for calling convention");
5427   Ops.push_back(DAG.getRegisterMask(Mask));
5428 
5429   // If the glue is valid, it is the last operand.
5430   if (Glue.getNode())
5431     Ops.push_back(Glue);
5432 }
5433 
5434 SDValue PPCTargetLowering::FinishCall(
5435     CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG,
5436     SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue Glue,
5437     SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
5438     unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
5439     SmallVectorImpl<SDValue> &InVals, const CallBase *CB) const {
5440 
5441   if ((Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls()) ||
5442       Subtarget.isAIXABI())
5443     setUsesTOCBasePtr(DAG);
5444 
5445   unsigned CallOpc =
5446       getCallOpcode(CFlags, DAG.getMachineFunction().getFunction(), Callee,
5447                     Subtarget, DAG.getTarget());
5448 
5449   if (!CFlags.IsIndirect)
5450     Callee = transformCallee(Callee, DAG, dl, Subtarget);
5451   else if (Subtarget.usesFunctionDescriptors())
5452     prepareDescriptorIndirectCall(DAG, Callee, Glue, Chain, CallSeqStart, CB,
5453                                   dl, CFlags.HasNest, Subtarget);
5454   else
5455     prepareIndirectCall(DAG, Callee, Glue, Chain, dl);
5456 
5457   // Build the operand list for the call instruction.
5458   SmallVector<SDValue, 8> Ops;
5459   buildCallOperands(Ops, CFlags, dl, DAG, RegsToPass, Glue, Chain, Callee,
5460                     SPDiff, Subtarget);
5461 
5462   // Emit tail call.
5463   if (CFlags.IsTailCall) {
    // Indirect tail calls when using PC Relative calls do not have the same
    // constraints.
5466     assert(((Callee.getOpcode() == ISD::Register &&
5467              cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
5468             Callee.getOpcode() == ISD::TargetExternalSymbol ||
5469             Callee.getOpcode() == ISD::TargetGlobalAddress ||
5470             isa<ConstantSDNode>(Callee) ||
5471             (CFlags.IsIndirect && Subtarget.isUsingPCRelativeCalls())) &&
5472            "Expecting a global address, external symbol, absolute value, "
5473            "register or an indirect tail call when PC Relative calls are "
5474            "used.");
5475     // PC Relative calls also use TC_RETURN as the way to mark tail calls.
5476     assert(CallOpc == PPCISD::TC_RETURN &&
5477            "Unexpected call opcode for a tail call.");
5478     DAG.getMachineFunction().getFrameInfo().setHasTailCall();
5479     return DAG.getNode(CallOpc, dl, MVT::Other, Ops);
5480   }
5481 
5482   std::array<EVT, 2> ReturnTypes = {{MVT::Other, MVT::Glue}};
5483   Chain = DAG.getNode(CallOpc, dl, ReturnTypes, Ops);
5484   DAG.addNoMergeSiteInfo(Chain.getNode(), CFlags.NoMerge);
5485   Glue = Chain.getValue(1);
5486 
5487   // When performing tail call optimization the callee pops its arguments off
5488   // the stack. Account for this here so these bytes can be pushed back on in
5489   // PPCFrameLowering::eliminateCallFramePseudoInstr.
5490   int BytesCalleePops = (CFlags.CallConv == CallingConv::Fast &&
5491                          getTargetMachine().Options.GuaranteedTailCallOpt)
5492                             ? NumBytes
5493                             : 0;
5494 
5495   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
5496                              DAG.getIntPtrConstant(BytesCalleePops, dl, true),
5497                              Glue, dl);
5498   Glue = Chain.getValue(1);
5499 
5500   return LowerCallResult(Chain, Glue, CFlags.CallConv, CFlags.IsVarArg, Ins, dl,
5501                          DAG, InVals);
5502 }
5503 
5504 SDValue
5505 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
5506                              SmallVectorImpl<SDValue> &InVals) const {
5507   SelectionDAG &DAG                     = CLI.DAG;
5508   SDLoc &dl                             = CLI.DL;
5509   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
5510   SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
5511   SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
5512   SDValue Chain                         = CLI.Chain;
5513   SDValue Callee                        = CLI.Callee;
5514   bool &isTailCall                      = CLI.IsTailCall;
5515   CallingConv::ID CallConv              = CLI.CallConv;
5516   bool isVarArg                         = CLI.IsVarArg;
5517   bool isPatchPoint                     = CLI.IsPatchPoint;
5518   const CallBase *CB                    = CLI.CB;
5519 
5520   if (isTailCall) {
5521     if (Subtarget.useLongCalls() && !(CB && CB->isMustTailCall()))
5522       isTailCall = false;
5523     else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
5524       isTailCall = IsEligibleForTailCallOptimization_64SVR4(
5525           Callee, CallConv, CB, isVarArg, Outs, Ins, DAG);
5526     else
5527       isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
5528                                                      Ins, DAG);
5529     if (isTailCall) {
5530       ++NumTailCalls;
5531       if (!getTargetMachine().Options.GuaranteedTailCallOpt)
5532         ++NumSiblingCalls;
5533 
5534       // PC Relative calls no longer guarantee that the callee is a Global
5535       // Address Node. The callee could be an indirect tail call in which
5536       // case the SDValue for the callee could be a load (to load the address
5537       // of a function pointer) or it may be a register copy (to move the
5538       // address of the callee from a function parameter into a virtual
      // register). It may also be an ExternalSymbolSDNode (e.g. memcpy).
5540       assert((Subtarget.isUsingPCRelativeCalls() ||
5541               isa<GlobalAddressSDNode>(Callee)) &&
5542              "Callee should be an llvm::Function object.");
5543 
5544       LLVM_DEBUG(dbgs() << "TCO caller: " << DAG.getMachineFunction().getName()
5545                         << "\nTCO callee: ");
5546       LLVM_DEBUG(Callee.dump());
5547     }
5548   }
5549 
5550   if (!isTailCall && CB && CB->isMustTailCall())
5551     report_fatal_error("failed to perform tail call elimination on a call "
5552                        "site marked musttail");
5553 
  // When long calls (i.e. indirect calls) are always used, calls are always
  // made via a function pointer. If we have a function name, first translate
  // it into a pointer.
5557   if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
5558       !isTailCall)
5559     Callee = LowerGlobalAddress(Callee, DAG);
5560 
5561   CallFlags CFlags(
5562       CallConv, isTailCall, isVarArg, isPatchPoint,
5563       isIndirectCall(Callee, DAG, Subtarget, isPatchPoint),
5564       // hasNest
5565       Subtarget.is64BitELFABI() &&
5566           any_of(Outs, [](ISD::OutputArg Arg) { return Arg.Flags.isNest(); }),
5567       CLI.NoMerge);
5568 
5569   if (Subtarget.isAIXABI())
5570     return LowerCall_AIX(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5571                          InVals, CB);
5572 
5573   assert(Subtarget.isSVR4ABI());
5574   if (Subtarget.isPPC64())
5575     return LowerCall_64SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5576                             InVals, CB);
5577   return LowerCall_32SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5578                           InVals, CB);
5579 }
5580 
5581 SDValue PPCTargetLowering::LowerCall_32SVR4(
5582     SDValue Chain, SDValue Callee, CallFlags CFlags,
5583     const SmallVectorImpl<ISD::OutputArg> &Outs,
5584     const SmallVectorImpl<SDValue> &OutVals,
5585     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5586     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5587     const CallBase *CB) const {
5588   // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
5589   // of the 32-bit SVR4 ABI stack frame layout.
5590 
5591   const CallingConv::ID CallConv = CFlags.CallConv;
5592   const bool IsVarArg = CFlags.IsVarArg;
5593   const bool IsTailCall = CFlags.IsTailCall;
5594 
5595   assert((CallConv == CallingConv::C ||
5596           CallConv == CallingConv::Cold ||
5597           CallConv == CallingConv::Fast) && "Unknown calling convention!");
5598 
5599   const Align PtrAlign(4);
5600 
5601   MachineFunction &MF = DAG.getMachineFunction();
5602 
  // Mark this function as potentially containing a function that contains a
  // tail call. As a consequence the frame pointer will be used for dynamic
  // stack allocation and for restoring the caller's stack pointer in this
  // function's epilogue. This is done because by tail calling, the called
  // function might overwrite the value in this function's (MF) stack pointer
  // stack slot 0(SP).
5608   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5609       CallConv == CallingConv::Fast)
5610     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5611 
5612   // Count how many bytes are to be pushed on the stack, including the linkage
5613   // area, parameter list area and the part of the local variable space which
5614   // contains copies of aggregates which are passed by value.
5615 
5616   // Assign locations to all of the outgoing arguments.
5617   SmallVector<CCValAssign, 16> ArgLocs;
5618   PPCCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
5619 
5620   // Reserve space for the linkage area on the stack.
5621   CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
5622                        PtrAlign);
5623   if (useSoftFloat())
5624     CCInfo.PreAnalyzeCallOperands(Outs);
5625 
5626   if (IsVarArg) {
5627     // Handle fixed and variable vector arguments differently.
5628     // Fixed vector arguments go into registers as long as registers are
5629     // available. Variable vector arguments always go into memory.
5630     unsigned NumArgs = Outs.size();
5631 
5632     for (unsigned i = 0; i != NumArgs; ++i) {
5633       MVT ArgVT = Outs[i].VT;
5634       ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
5635       bool Result;
5636 
5637       if (Outs[i].IsFixed) {
5638         Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
5639                                CCInfo);
5640       } else {
5641         Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
5642                                       ArgFlags, CCInfo);
5643       }
5644 
5645       if (Result) {
5646 #ifndef NDEBUG
5647         errs() << "Call operand #" << i << " has unhandled type "
5648              << EVT(ArgVT).getEVTString() << "\n";
5649 #endif
5650         llvm_unreachable(nullptr);
5651       }
5652     }
5653   } else {
5654     // All arguments are treated the same.
5655     CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
5656   }
5657   CCInfo.clearWasPPCF128();
5658 
5659   // Assign locations to all of the outgoing aggregate by value arguments.
5660   SmallVector<CCValAssign, 16> ByValArgLocs;
5661   CCState CCByValInfo(CallConv, IsVarArg, MF, ByValArgLocs, *DAG.getContext());
5662 
5663   // Reserve stack space for the allocations in CCInfo.
5664   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
5665 
5666   CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
5667 
5668   // Size of the linkage area, parameter list area and the part of the local
5669   // space variable where copies of aggregates which are passed by value are
5670   // stored.
5671   unsigned NumBytes = CCByValInfo.getNextStackOffset();
5672 
5673   // Calculate by how many bytes the stack has to be adjusted in case of tail
5674   // call optimization.
5675   int SPDiff = CalculateTailCallSPDiff(DAG, IsTailCall, NumBytes);
5676 
5677   // Adjust the stack pointer for the new arguments...
5678   // These operations are automatically eliminated by the prolog/epilog pass
5679   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5680   SDValue CallSeqStart = Chain;
5681 
5682   // Load the return address and frame pointer so it can be moved somewhere else
5683   // later.
5684   SDValue LROp, FPOp;
5685   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5686 
5687   // Set up a copy of the stack pointer for use loading and storing any
5688   // arguments that may not fit in the registers available for argument
5689   // passing.
5690   SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5691 
5692   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5693   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5694   SmallVector<SDValue, 8> MemOpChains;
5695 
5696   bool seenFloatArg = false;
5697   // Walk the register/memloc assignments, inserting copies/loads.
5698   // i - Tracks the index into the list of registers allocated for the call
5699   // RealArgIdx - Tracks the index into the list of actual function arguments
5700   // j - Tracks the index into the list of byval arguments
5701   for (unsigned i = 0, RealArgIdx = 0, j = 0, e = ArgLocs.size();
5702        i != e;
5703        ++i, ++RealArgIdx) {
5704     CCValAssign &VA = ArgLocs[i];
5705     SDValue Arg = OutVals[RealArgIdx];
5706     ISD::ArgFlagsTy Flags = Outs[RealArgIdx].Flags;
5707 
5708     if (Flags.isByVal()) {
5709       // Argument is an aggregate which is passed by value, thus we need to
5710       // create a copy of it in the local variable space of the current stack
5711       // frame (which is the stack frame of the caller) and pass the address of
5712       // this copy to the callee.
5713       assert((j < ByValArgLocs.size()) && "Index out of bounds!");
5714       CCValAssign &ByValVA = ByValArgLocs[j++];
5715       assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
5716 
5717       // Memory reserved in the local variable space of the callers stack frame.
5718       unsigned LocMemOffset = ByValVA.getLocMemOffset();
5719 
5720       SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5721       PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5722                            StackPtr, PtrOff);
5723 
5724       // Create a copy of the argument in the local area of the current
5725       // stack frame.
5726       SDValue MemcpyCall =
5727         CreateCopyOfByValArgument(Arg, PtrOff,
5728                                   CallSeqStart.getNode()->getOperand(0),
5729                                   Flags, DAG, dl);
5730 
5731       // This must go outside the CALLSEQ_START..END.
5732       SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0,
5733                                                      SDLoc(MemcpyCall));
5734       DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5735                              NewCallSeqStart.getNode());
5736       Chain = CallSeqStart = NewCallSeqStart;
5737 
5738       // Pass the address of the aggregate copy on the stack either in a
5739       // physical register or in the parameter list area of the current stack
5740       // frame to the callee.
5741       Arg = PtrOff;
5742     }
5743 
5744     // When useCRBits() is true, there can be i1 arguments.
5745     // It is because getRegisterType(MVT::i1) => MVT::i1,
5746     // and for other integer types getRegisterType() => MVT::i32.
5747     // Extend i1 and ensure callee will get i32.
5748     if (Arg.getValueType() == MVT::i1)
5749       Arg = DAG.getNode(Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
5750                         dl, MVT::i32, Arg);
5751 
5752     if (VA.isRegLoc()) {
5753       seenFloatArg |= VA.getLocVT().isFloatingPoint();
5754       // Put argument in a physical register.
5755       if (Subtarget.hasSPE() && Arg.getValueType() == MVT::f64) {
5756         bool IsLE = Subtarget.isLittleEndian();
5757         SDValue SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5758                         DAG.getIntPtrConstant(IsLE ? 0 : 1, dl));
5759         RegsToPass.push_back(std::make_pair(VA.getLocReg(), SVal.getValue(0)));
5760         SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5761                            DAG.getIntPtrConstant(IsLE ? 1 : 0, dl));
5762         RegsToPass.push_back(std::make_pair(ArgLocs[++i].getLocReg(),
5763                              SVal.getValue(0)));
5764       } else
5765         RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
5766     } else {
5767       // Put argument in the parameter list area of the current stack frame.
5768       assert(VA.isMemLoc());
5769       unsigned LocMemOffset = VA.getLocMemOffset();
5770 
5771       if (!IsTailCall) {
5772         SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5773         PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5774                              StackPtr, PtrOff);
5775 
5776         MemOpChains.push_back(
5777             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
5778       } else {
5779         // Calculate and remember argument location.
5780         CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
5781                                  TailCallArguments);
5782       }
5783     }
5784   }
5785 
5786   if (!MemOpChains.empty())
5787     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
5788 
5789   // Build a sequence of copy-to-reg nodes chained together with token chain
5790   // and flag operands which copy the outgoing args into the appropriate regs.
5791   SDValue InFlag;
5792   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
5793     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
5794                              RegsToPass[i].second, InFlag);
5795     InFlag = Chain.getValue(1);
5796   }
5797 
5798   // Set CR bit 6 to true if this is a vararg call with floating args passed in
5799   // registers.
5800   if (IsVarArg) {
5801     SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
5802     SDValue Ops[] = { Chain, InFlag };
5803 
5804     Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
5805                         dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));
5806 
5807     InFlag = Chain.getValue(1);
5808   }
5809 
5810   if (IsTailCall)
5811     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
5812                     TailCallArguments);
5813 
5814   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
5815                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
5816 }
5817 
5818 // Copy an argument into memory, being careful to do this outside the
5819 // call sequence for the call to which the argument belongs.
5820 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
5821     SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
5822     SelectionDAG &DAG, const SDLoc &dl) const {
5823   SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
5824                         CallSeqStart.getNode()->getOperand(0),
5825                         Flags, DAG, dl);
5826   // The MEMCPY must go outside the CALLSEQ_START..END.
5827   int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
5828   SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
5829                                                  SDLoc(MemcpyCall));
5830   DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5831                          NewCallSeqStart.getNode());
5832   return NewCallSeqStart;
5833 }
5834 
5835 SDValue PPCTargetLowering::LowerCall_64SVR4(
5836     SDValue Chain, SDValue Callee, CallFlags CFlags,
5837     const SmallVectorImpl<ISD::OutputArg> &Outs,
5838     const SmallVectorImpl<SDValue> &OutVals,
5839     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5840     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5841     const CallBase *CB) const {
5842   bool isELFv2ABI = Subtarget.isELFv2ABI();
5843   bool isLittleEndian = Subtarget.isLittleEndian();
5844   unsigned NumOps = Outs.size();
5845   bool IsSibCall = false;
5846   bool IsFastCall = CFlags.CallConv == CallingConv::Fast;
5847 
5848   EVT PtrVT = getPointerTy(DAG.getDataLayout());
5849   unsigned PtrByteSize = 8;
5850 
5851   MachineFunction &MF = DAG.getMachineFunction();
5852 
5853   if (CFlags.IsTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
5854     IsSibCall = true;
5855 
  // Mark this function as potentially containing a function that contains a
  // tail call. As a consequence the frame pointer will be used for dynamic
  // stack allocation and for restoring the caller's stack pointer in this
  // function's epilogue. This is done because by tail calling, the called
  // function might overwrite the value in this function's (MF) stack pointer
  // stack slot 0(SP).
5861   if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
5862     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5863 
5864   assert(!(IsFastCall && CFlags.IsVarArg) &&
5865          "fastcc not supported on varargs functions");
5866 
5867   // Count how many bytes are to be pushed on the stack, including the linkage
5868   // area, and parameter passing area.  On ELFv1, the linkage area is 48 bytes
5869   // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
5870   // area is 32 bytes reserved space for [SP][CR][LR][TOC].
5871   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
5872   unsigned NumBytes = LinkageSize;
5873   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
5874 
5875   static const MCPhysReg GPR[] = {
5876     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
5877     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
5878   };
5879   static const MCPhysReg VR[] = {
5880     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
5881     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
5882   };
5883 
5884   const unsigned NumGPRs = array_lengthof(GPR);
5885   const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
5886   const unsigned NumVRs  = array_lengthof(VR);
5887 
5888   // On ELFv2, we can avoid allocating the parameter area if all the arguments
5889   // can be passed to the callee in registers.
5890   // For the fast calling convention, there is another check below.
5891   // Note: We should keep consistent with LowerFormalArguments_64SVR4()
5892   bool HasParameterArea = !isELFv2ABI || CFlags.IsVarArg || IsFastCall;
5893   if (!HasParameterArea) {
5894     unsigned ParamAreaSize = NumGPRs * PtrByteSize;
5895     unsigned AvailableFPRs = NumFPRs;
5896     unsigned AvailableVRs = NumVRs;
5897     unsigned NumBytesTmp = NumBytes;
5898     for (unsigned i = 0; i != NumOps; ++i) {
5899       if (Outs[i].Flags.isNest()) continue;
5900       if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags,
5901                                  PtrByteSize, LinkageSize, ParamAreaSize,
5902                                  NumBytesTmp, AvailableFPRs, AvailableVRs))
5903         HasParameterArea = true;
5904     }
5905   }
5906 
5907   // When using the fast calling convention, we don't provide backing for
5908   // arguments that will be in registers.
5909   unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
5910 
5911   // Avoid allocating parameter area for fastcc functions if all the arguments
5912   // can be passed in the registers.
5913   if (IsFastCall)
5914     HasParameterArea = false;
5915 
5916   // Add up all the space actually used.
5917   for (unsigned i = 0; i != NumOps; ++i) {
5918     ISD::ArgFlagsTy Flags = Outs[i].Flags;
5919     EVT ArgVT = Outs[i].VT;
5920     EVT OrigVT = Outs[i].ArgVT;
5921 
5922     if (Flags.isNest())
5923       continue;
5924 
5925     if (IsFastCall) {
5926       if (Flags.isByVal()) {
5927         NumGPRsUsed += (Flags.getByValSize()+7)/8;
5928         if (NumGPRsUsed > NumGPRs)
5929           HasParameterArea = true;
5930       } else {
5931         switch (ArgVT.getSimpleVT().SimpleTy) {
5932         default: llvm_unreachable("Unexpected ValueType for argument!");
5933         case MVT::i1:
5934         case MVT::i32:
5935         case MVT::i64:
5936           if (++NumGPRsUsed <= NumGPRs)
5937             continue;
5938           break;
5939         case MVT::v4i32:
5940         case MVT::v8i16:
5941         case MVT::v16i8:
5942         case MVT::v2f64:
5943         case MVT::v2i64:
5944         case MVT::v1i128:
5945         case MVT::f128:
5946           if (++NumVRsUsed <= NumVRs)
5947             continue;
5948           break;
5949         case MVT::v4f32:
5950           if (++NumVRsUsed <= NumVRs)
5951             continue;
5952           break;
5953         case MVT::f32:
5954         case MVT::f64:
5955           if (++NumFPRsUsed <= NumFPRs)
5956             continue;
5957           break;
5958         }
5959         HasParameterArea = true;
5960       }
5961     }
5962 
    // Respect alignment of argument on the stack.
    auto Alignment =
        CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
    NumBytes = alignTo(NumBytes, Alignment);
5967 
5968     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
5969     if (Flags.isInConsecutiveRegsLast())
5970       NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
5971   }
5972 
5973   unsigned NumBytesActuallyUsed = NumBytes;
5974 
  // In the old ELFv1 ABI, the prolog code of the callee may store up to 8 GPR
  // argument registers to the stack, allowing va_start to index over them in
  // memory if the callee is varargs.
5978   // Because we cannot tell if this is needed on the caller side, we have to
5979   // conservatively assume that it is needed.  As such, make sure we have at
5980   // least enough stack space for the caller to store the 8 GPRs.
5981   // In the ELFv2 ABI, we allocate the parameter area iff a callee
5982   // really requires memory operands, e.g. a vararg function.
5983   if (HasParameterArea)
5984     NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
5985   else
5986     NumBytes = LinkageSize;
5987 
5988   // Tail call needs the stack to be aligned.
5989   if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
5990     NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
5991 
5992   int SPDiff = 0;
5993 
5994   // Calculate by how many bytes the stack has to be adjusted in case of tail
5995   // call optimization.
5996   if (!IsSibCall)
5997     SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);
5998 
5999   // To protect arguments on the stack from being clobbered in a tail call,
6000   // force all the loads to happen before doing any other lowering.
6001   if (CFlags.IsTailCall)
6002     Chain = DAG.getStackArgumentTokenFactor(Chain);
6003 
6004   // Adjust the stack pointer for the new arguments...
6005   // These operations are automatically eliminated by the prolog/epilog pass
6006   if (!IsSibCall)
6007     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6008   SDValue CallSeqStart = Chain;
6009 
  // Load the return address and frame pointer so it can be moved somewhere
  // else later.
6012   SDValue LROp, FPOp;
6013   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
6014 
6015   // Set up a copy of the stack pointer for use loading and storing any
6016   // arguments that may not fit in the registers available for argument
6017   // passing.
6018   SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
6019 
  // Figure out which arguments are going to go in registers, and which in
  // memory.  Also, if this is a vararg function, floating point arguments
  // must be stored to the stack, and loaded into integer regs as well, if
  // any integer regs are available for argument passing.
6024   unsigned ArgOffset = LinkageSize;
6025 
6026   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6027   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
6028 
6029   SmallVector<SDValue, 8> MemOpChains;
6030   for (unsigned i = 0; i != NumOps; ++i) {
6031     SDValue Arg = OutVals[i];
6032     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6033     EVT ArgVT = Outs[i].VT;
6034     EVT OrigVT = Outs[i].ArgVT;
6035 
6036     // PtrOff will be used to store the current argument to the stack if a
6037     // register cannot be found for it.
6038     SDValue PtrOff;
6039 
6040     // We re-align the argument offset for each argument, except when using the
6041     // fast calling convention, when we need to make sure we do that only when
6042     // we'll actually use a stack slot.
6043     auto ComputePtrOff = [&]() {
      // Respect alignment of argument on the stack.
6045       auto Alignment =
6046           CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
6047       ArgOffset = alignTo(ArgOffset, Alignment);
6048 
6049       PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
6050 
6051       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6052     };
6053 
6054     if (!IsFastCall) {
6055       ComputePtrOff();
6056 
      // Compute GPR index associated with argument offset.
6058       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
6059       GPR_idx = std::min(GPR_idx, NumGPRs);
6060     }
6061 
6062     // Promote integers to 64-bit values.
6063     if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
6064       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
6065       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6066       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
6067     }
6068 
6069     // FIXME memcpy is used way more than necessary.  Correctness first.
6070     // Note: "by value" is code for passing a structure by value, not
6071     // basic types.
6072     if (Flags.isByVal()) {
6073       // Note: Size includes alignment padding, so
6074       //   struct x { short a; char b; }
6075       // will have Size = 4.  With #pragma pack(1), it will have Size = 3.
6076       // These are the proper values we need for right-justifying the
6077       // aggregate in a parameter register.
6078       unsigned Size = Flags.getByValSize();
6079 
6080       // An empty aggregate parameter takes up no storage and no
6081       // registers.
6082       if (Size == 0)
6083         continue;
6084 
6085       if (IsFastCall)
6086         ComputePtrOff();
6087 
6088       // All aggregates smaller than 8 bytes must be passed right-justified.
6089       if (Size==1 || Size==2 || Size==4) {
6090         EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
6091         if (GPR_idx != NumGPRs) {
6092           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
6093                                         MachinePointerInfo(), VT);
6094           MemOpChains.push_back(Load.getValue(1));
6095           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6096 
6097           ArgOffset += PtrByteSize;
6098           continue;
6099         }
6100       }
6101 
6102       if (GPR_idx == NumGPRs && Size < 8) {
6103         SDValue AddPtr = PtrOff;
6104         if (!isLittleEndian) {
6105           SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
6106                                           PtrOff.getValueType());
6107           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6108         }
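        // Illustration: with PtrByteSize == 8, a 3-byte aggregate on a
        // big-endian target gets AddPtr == PtrOff + 5, so the copy lands in
        // the low-order bytes of its doubleword slot.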
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
                                                          CallSeqStart,
                                                          Flags, DAG, dl);
        ArgOffset += PtrByteSize;
        continue;
      }
      // Copy entire object into memory.  There are cases where gcc-generated
      // code assumes it is there, even if it could be put entirely into
      // registers.  (This is not what the doc says.)

      // FIXME: The above statement is likely due to a misunderstanding of the
      // documents.  All arguments must be copied into the parameter area BY
      // THE CALLEE in the event that the callee takes the address of any
      // formal argument.  That has not yet been implemented.  However, it is
      // reasonable to use the stack area as a staging area for the register
      // load.

      // Skip this for small aggregates, as we will use the same slot for a
      // right-justified copy, below.
      if (Size >= 8)
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
                                                          CallSeqStart,
                                                          Flags, DAG, dl);

      // When a register is available, pass a small aggregate right-justified.
      if (Size < 8 && GPR_idx != NumGPRs) {
        // The easiest way to get this right-justified in a register
        // is to copy the structure into the rightmost portion of a
        // local variable slot, then load the whole slot into the
        // register.
        // FIXME: The memcpy seems to produce pretty awful code for
        // small aggregates, particularly for packed ones.
        // FIXME: It would be preferable to use the slot in the
        // parameter save area instead of a new local variable.
        SDValue AddPtr = PtrOff;
        if (!isLittleEndian) {
          SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType());
          AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
        }
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
                                                          CallSeqStart,
                                                          Flags, DAG, dl);

        // Load the slot into the register.
        SDValue Load =
            DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo());
        MemOpChains.push_back(Load.getValue(1));
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));

        // Done with this argument.
        ArgOffset += PtrByteSize;
        continue;
      }

      // For aggregates larger than PtrByteSize, copy the pieces of the
      // object that fit into registers from the parameter save area.
      for (unsigned j=0; j<Size; j+=PtrByteSize) {
        SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
        SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
        if (GPR_idx != NumGPRs) {
          SDValue Load =
              DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
          break;
        }
      }
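      // Worked example: a 24-byte aggregate with only two GPRs remaining
      // loads bytes 0..15 into those GPRs (ArgOffset += 16), then accounts
      // for the final 8 bytes in one step before breaking out of the loop.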
      continue;
    }

    switch (Arg.getSimpleValueType().SimpleTy) {
    default: llvm_unreachable("Unexpected ValueType for argument!");
    case MVT::i1:
    case MVT::i32:
    case MVT::i64:
      if (Flags.isNest()) {
        // The 'nest' parameter, if any, is passed in R11.
        RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
        break;
      }

      // These can be scalar arguments or elements of an integer array type
      // passed directly.  Clang may use those instead of "byval" aggregate
      // types to avoid forcing arguments to memory unnecessarily.
      if (GPR_idx != NumGPRs) {
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
      } else {
        if (IsFastCall)
          ComputePtrOff();

        assert(HasParameterArea &&
               "Parameter area must exist to pass an argument in memory.");
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         true, CFlags.IsTailCall, false, MemOpChains,
                         TailCallArguments, dl);
        if (IsFastCall)
          ArgOffset += PtrByteSize;
      }
      if (!IsFastCall)
        ArgOffset += PtrByteSize;
      break;
    case MVT::f32:
    case MVT::f64: {
      // These can be scalar arguments or elements of a float array type
      // passed directly.  The latter are used to implement ELFv2 homogeneous
      // float aggregates.

      // Named arguments go into FPRs first, and once they overflow, the
      // remaining arguments go into GPRs and then the parameter save area.
      // Unnamed arguments for vararg functions always go to GPRs and
      // then the parameter save area.  For now, put all arguments to vararg
      // routines always in both locations (FPR *and* GPR or stack slot).
      bool NeedGPROrStack = CFlags.IsVarArg || FPR_idx == NumFPRs;
      bool NeededLoad = false;

      // First load the argument into the next available FPR.
      if (FPR_idx != NumFPRs)
        RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));

      // Next, load the argument into GPR or stack slot if needed.
      if (!NeedGPROrStack)
        ;
      else if (GPR_idx != NumGPRs && !IsFastCall) {
        // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
        // once we support fp <-> gpr moves.

        // In the non-vararg case, this can only ever happen in the
        // presence of f32 array types, since otherwise we never run
        // out of FPRs before running out of GPRs.
        SDValue ArgVal;

        // Double values are always passed in a single GPR.
        if (Arg.getValueType() != MVT::f32) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);

        // Non-array float values are extended and passed in a GPR.
        } else if (!Flags.isInConsecutiveRegs()) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);

        // If we have an array of floats, we collect every odd element
        // together with its predecessor into one GPR.
        } else if (ArgOffset % PtrByteSize != 0) {
          SDValue Lo, Hi;
          Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
          Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          if (!isLittleEndian)
            std::swap(Lo, Hi);
          ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);

        // The final element, if even, goes into the first half of a GPR.
        } else if (Flags.isInConsecutiveRegsLast()) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
          if (!isLittleEndian)
            ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
                                 DAG.getConstant(32, dl, MVT::i32));

        // Non-final even elements are skipped; they will be handled
        // together with the subsequent argument on the next go-around.
        } else
          ArgVal = SDValue();

        if (ArgVal.getNode())
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
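        // Putting it together (illustrative, assuming the array starts
        // doubleword-aligned): for a homogeneous float array {f0, f1, f2}
        // with the FPRs exhausted on big-endian, f0 is skipped, f1 is paired
        // with f0 via BUILD_PAIR into one GPR, and f2 (the final, even
        // element) is shifted into the high half of the next GPR.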
      } else {
        if (IsFastCall)
          ComputePtrOff();

        // Single-precision floating-point values are mapped to the
        // second (rightmost) word of the stack doubleword.
        if (Arg.getValueType() == MVT::f32 &&
            !isLittleEndian && !Flags.isInConsecutiveRegs()) {
          SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
          PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
        }

        assert(HasParameterArea &&
               "Parameter area must exist to pass an argument in memory.");
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         true, CFlags.IsTailCall, false, MemOpChains,
                         TailCallArguments, dl);

        NeededLoad = true;
      }
      // When passing an array of floats, the array occupies consecutive
      // space in the argument area; only round up to the next doubleword
      // at the end of the array.  Otherwise, each float takes 8 bytes.
      if (!IsFastCall || NeededLoad) {
        ArgOffset += (Arg.getValueType() == MVT::f32 &&
                      Flags.isInConsecutiveRegs()) ? 4 : 8;
        if (Flags.isInConsecutiveRegsLast())
          ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      }
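      // E.g. a three-element float array consumes 4 + 4 + 4 = 12 bytes of
      // argument space, and the final element rounds ArgOffset up to 16.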
      break;
    }
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
    case MVT::v2f64:
    case MVT::v2i64:
    case MVT::v1i128:
    case MVT::f128:
      // These can be scalar arguments or elements of a vector array type
      // passed directly.  The latter are used to implement ELFv2 homogeneous
      // vector aggregates.

      // For a varargs call, named arguments go into VRs or on the stack as
      // usual; unnamed arguments always go to the stack or the corresponding
      // GPRs when within range.  For now, we always put the value in both
      // locations (or even all three).
      if (CFlags.IsVarArg) {
        assert(HasParameterArea &&
               "Parameter area must exist if we have a varargs call.");
        // We could elide this store in the case where the object fits
        // entirely in R registers.  Maybe later.
        SDValue Store =
            DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
        MemOpChains.push_back(Store);
        if (VR_idx != NumVRs) {
          SDValue Load =
              DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
        }
        ArgOffset += 16;
        for (unsigned i=0; i<16; i+=PtrByteSize) {
          if (GPR_idx == NumGPRs)
            break;
          SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
                                   DAG.getConstant(i, dl, PtrVT));
          SDValue Load =
              DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
        }
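        // Net effect (illustrative): a vararg v4i32 is stored to its 16-byte
        // parameter-area slot, reloaded into a VR if one remains, and its
        // doublewords reloaded into GPRs while any of those remain.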
        break;
      }

      // Non-varargs Altivec params go into VRs or on the stack.
      if (VR_idx != NumVRs) {
        RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
      } else {
        if (IsFastCall)
          ComputePtrOff();

        assert(HasParameterArea &&
               "Parameter area must exist to pass an argument in memory.");
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         true, CFlags.IsTailCall, true, MemOpChains,
                         TailCallArguments, dl);
        if (IsFastCall)
          ArgOffset += 16;
      }

      if (!IsFastCall)
        ArgOffset += 16;
      break;
    }
  }

  assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
         "mismatch in size of parameter area");
  (void)NumBytesActuallyUsed;

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Check if this is an indirect call (MTCTR/BCTRL).
  // See prepareDescriptorIndirectCall and buildCallOperands for more
  // information about calls through function pointers in the 64-bit SVR4 ABI.
  if (CFlags.IsIndirect) {
    // For 64-bit ELFv2 ABI with PCRel, do not save the TOC of the
    // caller in the TOC save area.
    if (isTOCSaveRestoreRequired(Subtarget)) {
      assert(!CFlags.IsTailCall && "Indirect tail calls not supported");
      // Load r2 into a virtual register and store it to the TOC save area.
      setUsesTOCBasePtr(DAG);
      SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
      // TOC save area offset.
      unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
      SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
      SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
      Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr,
                           MachinePointerInfo::getStack(
                               DAG.getMachineFunction(), TOCSaveOffset));
    }
    // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
    // This does not mean the MTCTR instruction must use R12; it's easier
    // to model this as an extra parameter, so do that.
    if (isELFv2ABI && !CFlags.IsPatchPoint)
      RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
  }

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  if (CFlags.IsTailCall && !IsSibCall)
    PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
                    TailCallArguments);

  return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
                    Callee, SPDiff, NumBytes, Ins, InVals, CB);
}

// Returns true when the shadow of a general purpose argument register
// in the parameter save area is aligned to at least 'RequiredAlign'.
static bool isGPRShadowAligned(MCPhysReg Reg, Align RequiredAlign) {
  assert(RequiredAlign.value() <= 16 &&
         "Required alignment greater than stack alignment.");
  switch (Reg) {
  default:
    report_fatal_error("called on invalid register.");
  case PPC::R5:
  case PPC::R9:
  case PPC::X3:
  case PPC::X5:
  case PPC::X7:
  case PPC::X9:
    // These registers' shadows in the PSA are 16-byte aligned, which is the
    // strictest alignment we can support.
    return true;
  case PPC::R3:
  case PPC::R7:
  case PPC::X4:
  case PPC::X6:
  case PPC::X8:
  case PPC::X10:
    // The shadow of these registers in the PSA is 8-byte aligned.
    return RequiredAlign <= 8;
  case PPC::R4:
  case PPC::R6:
  case PPC::R8:
  case PPC::R10:
    return RequiredAlign <= 4;
  }
}
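
// For intuition about isGPRShadowAligned, using the linkage-area sizes noted
// in LowerCall_AIX below (24 bytes on PPC32, 48 bytes on PPC64): R3 shadows
// PSA offset 24 (8-byte aligned), R4 offset 28 (4-byte aligned), R5 offset 32
// (16-byte aligned), while X3 shadows offset 48 (16-byte aligned) and X4
// offset 56 (8-byte aligned).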

static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
                   CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                   CCState &S) {
  AIXCCState &State = static_cast<AIXCCState &>(S);
  const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>(
      State.getMachineFunction().getSubtarget());
  const bool IsPPC64 = Subtarget.isPPC64();
  const Align PtrAlign = IsPPC64 ? Align(8) : Align(4);
  const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;

  if (ValVT == MVT::f128)
    report_fatal_error("f128 is unimplemented on AIX.");

  if (ArgFlags.isNest())
    report_fatal_error("Nest arguments are unimplemented.");

  static const MCPhysReg GPR_32[] = {// 32-bit registers.
                                     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
                                     PPC::R7, PPC::R8, PPC::R9, PPC::R10};
  static const MCPhysReg GPR_64[] = {// 64-bit registers.
                                     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
                                     PPC::X7, PPC::X8, PPC::X9, PPC::X10};

  static const MCPhysReg VR[] = {// Vector registers.
                                 PPC::V2,  PPC::V3,  PPC::V4,  PPC::V5,
                                 PPC::V6,  PPC::V7,  PPC::V8,  PPC::V9,
                                 PPC::V10, PPC::V11, PPC::V12, PPC::V13};

  if (ArgFlags.isByVal()) {
    if (ArgFlags.getNonZeroByValAlign() > PtrAlign)
      report_fatal_error("Pass-by-value arguments with alignment greater than "
                         "register width are not supported.");

    const unsigned ByValSize = ArgFlags.getByValSize();

    // An empty aggregate parameter takes up no storage and no registers,
    // but needs a MemLoc for a stack slot for the formal arguments side.
    if (ByValSize == 0) {
      State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
                                       State.getNextStackOffset(), RegVT,
                                       LocInfo));
      return false;
    }

    const unsigned StackSize = alignTo(ByValSize, PtrAlign);
    unsigned Offset = State.AllocateStack(StackSize, PtrAlign);
    for (const unsigned E = Offset + StackSize; Offset < E;
         Offset += PtrAlign.value()) {
      if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
        State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
      else {
        State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
                                         Offset, MVT::INVALID_SIMPLE_VALUE_TYPE,
                                         LocInfo));
        break;
      }
    }
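    // E.g. a 12-byte by-val argument on PPC64 reserves a 16-byte slot and,
    // registers permitting, two GPRs: one RegLoc per doubleword, the second
    // covering the 4 bytes of tail padding.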
    return false;
  }

  // Arguments always reserve parameter save area.
  switch (ValVT.SimpleTy) {
  default:
    report_fatal_error("Unhandled value type for argument.");
  case MVT::i64:
    // i64 arguments should have been split to i32 for PPC32.
    assert(IsPPC64 && "PPC32 should have split i64 values.");
    LLVM_FALLTHROUGH;
  case MVT::i1:
  case MVT::i32: {
    const unsigned Offset = State.AllocateStack(PtrAlign.value(), PtrAlign);
    // AIX integer arguments are always passed in register width.
    if (ValVT.getFixedSizeInBits() < RegVT.getFixedSizeInBits())
      LocInfo = ArgFlags.isSExt() ? CCValAssign::LocInfo::SExt
                                  : CCValAssign::LocInfo::ZExt;
    if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
    else
      State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, RegVT, LocInfo));

    return false;
  }
  case MVT::f32:
  case MVT::f64: {
    // Parameter save area (PSA) is reserved even if the float passes in fpr.
    const unsigned StoreSize = LocVT.getStoreSize();
    // Floats are always 4-byte aligned in the PSA on AIX.
    // This includes f64 in 64-bit mode for ABI compatibility.
    const unsigned Offset =
        State.AllocateStack(IsPPC64 ? 8 : StoreSize, Align(4));
    unsigned FReg = State.AllocateReg(FPR);
    if (FReg)
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, FReg, LocVT, LocInfo));

    // Reserve and initialize GPRs or initialize the PSA as required.
    for (unsigned I = 0; I < StoreSize; I += PtrAlign.value()) {
      if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) {
        assert(FReg && "An FPR should be available when a GPR is reserved.");
        if (State.isVarArg()) {
          // Successfully reserved GPRs are only initialized for vararg calls.
          // Custom handling is required for:
          //   f64 in PPC32 needs to be split into 2 GPRs.
          //   f32 in PPC64 needs to occupy only lower 32 bits of 64-bit GPR.
          State.addLoc(
              CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo));
        }
      } else {
        // If there are insufficient GPRs, the PSA needs to be initialized.
        // Initialization occurs even if an FPR was initialized for
        // compatibility with the AIX XL compiler. The full memory for the
        // argument will be initialized even if a prior word is saved in GPR.
        // A custom memLoc is used when the argument also passes in FPR so
        // that the callee handling can skip over it easily.
        State.addLoc(
            FReg ? CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT,
                                             LocInfo)
                 : CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
        break;
      }
    }
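    // Walk-through (illustrative): a vararg f64 on PPC32 with two GPRs free
    // yields two custom RegLocs; with only one GPR free it yields one custom
    // RegLoc plus a MemLoc for the full 8-byte PSA slot (custom when an FPR
    // was also assigned).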

    return false;
  }
  case MVT::v4f32:
  case MVT::v4i32:
  case MVT::v8i16:
  case MVT::v16i8:
  case MVT::v2i64:
  case MVT::v2f64:
  case MVT::v1i128: {
    const unsigned VecSize = 16;
    const Align VecAlign(VecSize);

    if (!State.isVarArg()) {
      // If there are vector registers remaining we don't consume any stack
      // space.
      if (unsigned VReg = State.AllocateReg(VR)) {
        State.addLoc(CCValAssign::getReg(ValNo, ValVT, VReg, LocVT, LocInfo));
        return false;
      }
      // Vectors passed on the stack do not shadow GPRs or FPRs even though they
      // might be allocated in the portion of the PSA that is shadowed by the
      // GPRs.
      const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
      State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
      return false;
    }

    const unsigned PtrSize = IsPPC64 ? 8 : 4;
    ArrayRef<MCPhysReg> GPRs = IsPPC64 ? GPR_64 : GPR_32;

    unsigned NextRegIndex = State.getFirstUnallocated(GPRs);
    // Burn any underaligned registers and their shadowed stack space until
    // we reach the required alignment.
    while (NextRegIndex != GPRs.size() &&
           !isGPRShadowAligned(GPRs[NextRegIndex], VecAlign)) {
      // Shadow allocate register and its stack shadow.
      unsigned Reg = State.AllocateReg(GPRs);
      State.AllocateStack(PtrSize, PtrAlign);
      assert(Reg && "Allocating register unexpectedly failed.");
      (void)Reg;
      NextRegIndex = State.getFirstUnallocated(GPRs);
    }

    // Vectors that are passed as fixed arguments are handled differently.
    // They are passed in VRs if any are available (unlike arguments passed
    // through ellipses) and shadow GPRs (unlike arguments to non-vararg
    // functions).
    if (State.isFixed(ValNo)) {
      if (unsigned VReg = State.AllocateReg(VR)) {
        State.addLoc(CCValAssign::getReg(ValNo, ValVT, VReg, LocVT, LocInfo));
        // Shadow allocate GPRs and stack space even though we pass in a VR.
        for (unsigned I = 0; I != VecSize; I += PtrSize)
          State.AllocateReg(GPRs);
        State.AllocateStack(VecSize, VecAlign);
        return false;
      }
      // No vector registers remain so pass on the stack.
      const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
      State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
      return false;
    }

    // If all GPRs are consumed then we pass the argument fully on the stack.
    if (NextRegIndex == GPRs.size()) {
      const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
      State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
      return false;
    }

    // Corner case for 32-bit codegen. We have 2 registers to pass the first
    // half of the argument, and then need to pass the remaining half on the
    // stack.
    if (GPRs[NextRegIndex] == PPC::R9) {
      const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
      State.addLoc(
          CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT, LocInfo));

      const unsigned FirstReg = State.AllocateReg(PPC::R9);
      const unsigned SecondReg = State.AllocateReg(PPC::R10);
      assert(FirstReg && SecondReg &&
             "Allocating R9 or R10 unexpectedly failed.");
      State.addLoc(
          CCValAssign::getCustomReg(ValNo, ValVT, FirstReg, RegVT, LocInfo));
      State.addLoc(
          CCValAssign::getCustomReg(ValNo, ValVT, SecondReg, RegVT, LocInfo));
      return false;
    }

    // We have enough GPRs to fully pass the vector argument, and we have
    // already consumed any underaligned registers. Start with the custom
    // MemLoc and then the custom RegLocs.
    const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
    State.addLoc(
        CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT, LocInfo));
    for (unsigned I = 0; I != VecSize; I += PtrSize) {
      const unsigned Reg = State.AllocateReg(GPRs);
      assert(Reg &&
             "Failed to allocate a register for vararg vector argument.");
      State.addLoc(
          CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo));
    }
    return false;
  }
  }
  return true;
}

static const TargetRegisterClass *getRegClassForSVT(MVT::SimpleValueType SVT,
                                                    bool IsPPC64) {
  assert((IsPPC64 || SVT != MVT::i64) &&
         "i64 should have been split for 32-bit codegen.");

  switch (SVT) {
  default:
    report_fatal_error("Unexpected value type for formal argument");
  case MVT::i1:
  case MVT::i32:
  case MVT::i64:
    return IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
  case MVT::f32:
    return &PPC::F4RCRegClass;
  case MVT::f64:
    return &PPC::F8RCRegClass;
  case MVT::v4f32:
  case MVT::v4i32:
  case MVT::v8i16:
  case MVT::v16i8:
  case MVT::v2i64:
  case MVT::v2f64:
  case MVT::v1i128:
    return &PPC::VRRCRegClass;
  }
}

static SDValue truncateScalarIntegerArg(ISD::ArgFlagsTy Flags, EVT ValVT,
                                        SelectionDAG &DAG, SDValue ArgValue,
                                        MVT LocVT, const SDLoc &dl) {
  assert(ValVT.isScalarInteger() && LocVT.isScalarInteger());
  assert(ValVT.getFixedSizeInBits() < LocVT.getFixedSizeInBits());

  if (Flags.isSExt())
    ArgValue = DAG.getNode(ISD::AssertSext, dl, LocVT, ArgValue,
                           DAG.getValueType(ValVT));
  else if (Flags.isZExt())
    ArgValue = DAG.getNode(ISD::AssertZext, dl, LocVT, ArgValue,
                           DAG.getValueType(ValVT));

  return DAG.getNode(ISD::TRUNCATE, dl, ValVT, ArgValue);
}

static unsigned mapArgRegToOffsetAIX(unsigned Reg, const PPCFrameLowering *FL) {
  const unsigned LASize = FL->getLinkageSize();

  if (PPC::GPRCRegClass.contains(Reg)) {
    assert(Reg >= PPC::R3 && Reg <= PPC::R10 &&
           "Reg must be a valid argument register!");
    return LASize + 4 * (Reg - PPC::R3);
  }

  if (PPC::G8RCRegClass.contains(Reg)) {
    assert(Reg >= PPC::X3 && Reg <= PPC::X10 &&
           "Reg must be a valid argument register!");
    return LASize + 8 * (Reg - PPC::X3);
  }

  llvm_unreachable("Only general purpose registers expected.");
}
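
// For example, mapArgRegToOffsetAIX maps R5 to 24 + 4 * 2 = 32 given a
// 24-byte linkage area, and X5 to 48 + 8 * 2 = 64 given a 48-byte one.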

//   AIX ABI Stack Frame Layout:
//
//   Low Memory +--------------------------------------------+
//   SP   +---> | Back chain                                 | ---+
//        |     +--------------------------------------------+    |
//        |     | Saved Condition Register                   |    |
//        |     +--------------------------------------------+    |
//        |     | Saved Linkage Register                     |    |
//        |     +--------------------------------------------+    | Linkage Area
//        |     | Reserved for compilers                     |    |
//        |     +--------------------------------------------+    |
//        |     | Reserved for binders                       |    |
//        |     +--------------------------------------------+    |
//        |     | Saved TOC pointer                          | ---+
//        |     +--------------------------------------------+
//        |     | Parameter save area                        |
//        |     +--------------------------------------------+
//        |     | Alloca space                               |
//        |     +--------------------------------------------+
//        |     | Local variable space                       |
//        |     +--------------------------------------------+
//        |     | Float/int conversion temporary             |
//        |     +--------------------------------------------+
//        |     | Save area for AltiVec registers            |
//        |     +--------------------------------------------+
//        |     | AltiVec alignment padding                  |
//        |     +--------------------------------------------+
//        |     | Save area for VRSAVE register              |
//        |     +--------------------------------------------+
//        |     | Save area for General Purpose registers    |
//        |     +--------------------------------------------+
//        |     | Save area for Floating Point registers     |
//        |     +--------------------------------------------+
//        +---- | Back chain                                 |
// High Memory  +--------------------------------------------+
//
//  Specifications:
//  AIX 7.2 Assembler Language Reference
//  Subroutine linkage convention

SDValue PPCTargetLowering::LowerFormalArguments_AIX(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  assert((CallConv == CallingConv::C || CallConv == CallingConv::Cold ||
          CallConv == CallingConv::Fast) &&
         "Unexpected calling convention!");

  if (getTargetMachine().Options.GuaranteedTailCallOpt)
    report_fatal_error("Tail call support is unimplemented on AIX.");

  if (useSoftFloat())
    report_fatal_error("Soft float support is unimplemented on AIX.");

  const PPCSubtarget &Subtarget =
      static_cast<const PPCSubtarget &>(DAG.getSubtarget());

  const bool IsPPC64 = Subtarget.isPPC64();
  const unsigned PtrByteSize = IsPPC64 ? 8 : 4;

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  AIXCCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());

  const EVT PtrVT = getPointerTy(MF.getDataLayout());
  // Reserve space for the linkage area on the stack.
  const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
  CCInfo.AnalyzeFormalArguments(Ins, CC_AIX);

  SmallVector<SDValue, 8> MemOps;

  for (size_t I = 0, End = ArgLocs.size(); I != End; /* No increment here */) {
    CCValAssign &VA = ArgLocs[I++];
    MVT LocVT = VA.getLocVT();
    MVT ValVT = VA.getValVT();
    ISD::ArgFlagsTy Flags = Ins[VA.getValNo()].Flags;
    // For compatibility with the AIX XL compiler, the float args in the
    // parameter save area are initialized even if the argument is available
    // in register.  The caller is required to initialize both the register
    // and memory, however, the callee can choose to expect it in either.
    // The memloc is dismissed here because the argument is retrieved from
    // the register.
    if (VA.isMemLoc() && VA.needsCustom() && ValVT.isFloatingPoint())
      continue;

    auto HandleMemLoc = [&]() {
      const unsigned LocSize = LocVT.getStoreSize();
      const unsigned ValSize = ValVT.getStoreSize();
      assert((ValSize <= LocSize) &&
             "Object size is larger than size of MemLoc");
      int CurArgOffset = VA.getLocMemOffset();
      // Objects are right-justified because AIX is big-endian.
      if (LocSize > ValSize)
        CurArgOffset += LocSize - ValSize;
      // Potential tail calls could cause overwriting of argument stack slots.
      const bool IsImmutable =
          !(getTargetMachine().Options.GuaranteedTailCallOpt &&
            (CallConv == CallingConv::Fast));
      int FI = MFI.CreateFixedObject(ValSize, CurArgOffset, IsImmutable);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      SDValue ArgValue =
          DAG.getLoad(ValVT, dl, Chain, FIN, MachinePointerInfo());
      InVals.push_back(ArgValue);
    };
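    // E.g. an i32 formal passed in an 8-byte MemLoc on 64-bit AIX is loaded
    // from LocMemOffset + 4, the right-justified word of its doubleword slot.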

    // Vector arguments to VaArg functions are passed both on the stack, and
    // in any available GPRs. Load the value from the stack and add the GPRs
    // as live ins.
    if (VA.isMemLoc() && VA.needsCustom()) {
      assert(ValVT.isVector() && "Unexpected Custom MemLoc type.");
      assert(isVarArg && "Only use custom memloc for vararg.");
      // Remember the ValNo of the custom MemLoc, so we can compare it to the
      // ValNo of the matching custom RegLocs.
      const unsigned OriginalValNo = VA.getValNo();
      (void)OriginalValNo;

      auto HandleCustomVecRegLoc = [&]() {
        assert(I != End && ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() &&
               "Missing custom RegLoc.");
        VA = ArgLocs[I++];
        assert(VA.getValVT().isVector() &&
               "Unexpected Val type for custom RegLoc.");
        assert(VA.getValNo() == OriginalValNo &&
               "ValNo mismatch between custom MemLoc and RegLoc.");
        MVT::SimpleValueType SVT = VA.getLocVT().SimpleTy;
        MF.addLiveIn(VA.getLocReg(), getRegClassForSVT(SVT, IsPPC64));
      };

      HandleMemLoc();
      // In 64-bit there will be exactly 2 custom RegLocs that follow, and
      // in 32-bit there will be 2 custom RegLocs if we are passing in R9 and
      // R10.
      HandleCustomVecRegLoc();
      HandleCustomVecRegLoc();

      // If we are targeting 32-bit, there might be 2 extra custom RegLocs if
      // we passed the vector in R5, R6, R7 and R8.
      if (I != End && ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom()) {
        assert(!IsPPC64 &&
               "Only 2 custom RegLocs expected for 64-bit codegen.");
        HandleCustomVecRegLoc();
        HandleCustomVecRegLoc();
      }

      continue;
    }

    if (VA.isRegLoc()) {
      if (VA.getValVT().isScalarInteger())
        FuncInfo->appendParameterType(PPCFunctionInfo::FixedType);
      else if (VA.getValVT().isFloatingPoint() && !VA.getValVT().isVector())
        FuncInfo->appendParameterType(VA.getValVT().SimpleTy == MVT::f32
                                          ? PPCFunctionInfo::ShortFloatPoint
                                          : PPCFunctionInfo::LongFloatPoint);
    }

    if (Flags.isByVal() && VA.isMemLoc()) {
      const unsigned Size =
          alignTo(Flags.getByValSize() ? Flags.getByValSize() : PtrByteSize,
                  PtrByteSize);
      const int FI = MF.getFrameInfo().CreateFixedObject(
          Size, VA.getLocMemOffset(), /* IsImmutable */ false,
          /* IsAliased */ true);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      InVals.push_back(FIN);

      continue;
    }

    if (Flags.isByVal()) {
      assert(VA.isRegLoc() && "MemLocs should already be handled.");

      const MCPhysReg ArgReg = VA.getLocReg();
      const PPCFrameLowering *FL = Subtarget.getFrameLowering();

      if (Flags.getNonZeroByValAlign() > PtrByteSize)
        report_fatal_error("Over aligned byvals not supported yet.");

      const unsigned StackSize = alignTo(Flags.getByValSize(), PtrByteSize);
      const int FI = MF.getFrameInfo().CreateFixedObject(
          StackSize, mapArgRegToOffsetAIX(ArgReg, FL), /* IsImmutable */ false,
          /* IsAliased */ true);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      InVals.push_back(FIN);

      // Add live ins for all the RegLocs for the same ByVal.
      const TargetRegisterClass *RegClass =
          IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;

      auto HandleRegLoc = [&, RegClass, LocVT](const MCPhysReg PhysReg,
                                               unsigned Offset) {
        const unsigned VReg = MF.addLiveIn(PhysReg, RegClass);
        // Since the caller side has left-justified the aggregate in the
        // register, we can simply store the entire register into the stack
        // slot.
        SDValue CopyFrom = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
        // The store to the fixed-stack object is needed because accessing a
        // field of the ByVal will use a GEP and load. Ideally we will optimize
        // to extracting the value from the register directly, and elide the
        // stores when the argument's address is not taken, but that will need
        // to be future work.
        SDValue Store = DAG.getStore(
            CopyFrom.getValue(1), dl, CopyFrom,
            DAG.getObjectPtrOffset(dl, FIN, TypeSize::Fixed(Offset)),
            MachinePointerInfo::getFixedStack(MF, FI, Offset));

        MemOps.push_back(Store);
      };

      unsigned Offset = 0;
      HandleRegLoc(VA.getLocReg(), Offset);
      Offset += PtrByteSize;
      for (; Offset != StackSize && ArgLocs[I].isRegLoc();
           Offset += PtrByteSize) {
        assert(ArgLocs[I].getValNo() == VA.getValNo() &&
               "RegLocs should be for ByVal argument.");

        const CCValAssign RL = ArgLocs[I++];
        HandleRegLoc(RL.getLocReg(), Offset);
        FuncInfo->appendParameterType(PPCFunctionInfo::FixedType);
      }
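      // E.g. a 12-byte byval that landed in X3 and X4 on 64-bit AIX stores
      // both doubleword registers to the fixed object, at offsets 0 and 8.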

      if (Offset != StackSize) {
        assert(ArgLocs[I].getValNo() == VA.getValNo() &&
               "Expected MemLoc for remaining bytes.");
        assert(ArgLocs[I].isMemLoc() && "Expected MemLoc for remaining bytes.");
        // Consume the MemLoc.  The InVal has already been emitted, so nothing
        // more needs to be done.
        ++I;
      }

      continue;
    }

    if (VA.isRegLoc() && !VA.needsCustom()) {
      MVT::SimpleValueType SVT = ValVT.SimpleTy;
      unsigned VReg =
          MF.addLiveIn(VA.getLocReg(), getRegClassForSVT(SVT, IsPPC64));
      SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
      if (ValVT.isScalarInteger() &&
          (ValVT.getFixedSizeInBits() < LocVT.getFixedSizeInBits())) {
        ArgValue =
            truncateScalarIntegerArg(Flags, ValVT, DAG, ArgValue, LocVT, dl);
      }
      InVals.push_back(ArgValue);
      continue;
    }
    if (VA.isMemLoc()) {
      HandleMemLoc();
      continue;
    }
  }

  // On AIX a minimum of 8 words is saved to the parameter save area.
  const unsigned MinParameterSaveArea = 8 * PtrByteSize;
  // Area that is at least reserved in the caller of this function.
  unsigned CallerReservedArea =
      std::max(CCInfo.getNextStackOffset(), LinkageSize + MinParameterSaveArea);

  // Set the size that is at least reserved in caller of this function. Tail
  // call optimized function's reserved stack space needs to be aligned so
  // that taking the difference between two stack areas will result in an
  // aligned stack.
  CallerReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), CallerReservedArea);
  FuncInfo->setMinReservedArea(CallerReservedArea);

  if (isVarArg) {
    FuncInfo->setVarArgsFrameIndex(
        MFI.CreateFixedObject(PtrByteSize, CCInfo.getNextStackOffset(), true));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    static const MCPhysReg GPR_32[] = {PPC::R3, PPC::R4, PPC::R5, PPC::R6,
                                       PPC::R7, PPC::R8, PPC::R9, PPC::R10};

    static const MCPhysReg GPR_64[] = {PPC::X3, PPC::X4, PPC::X5, PPC::X6,
                                       PPC::X7, PPC::X8, PPC::X9, PPC::X10};
    const unsigned NumGPArgRegs = array_lengthof(IsPPC64 ? GPR_64 : GPR_32);

    // The fixed integer arguments of a variadic function are stored to the
    // VarArgsFrameIndex on the stack so that they may be loaded by
    // dereferencing the result of va_next.
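    // E.g. if the named arguments consumed R3..R5 on 32-bit AIX, GPRIndex
    // starts at 3 below and R6..R10 are spilled to consecutive words.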
    for (unsigned GPRIndex =
             (CCInfo.getNextStackOffset() - LinkageSize) / PtrByteSize;
         GPRIndex < NumGPArgRegs; ++GPRIndex) {

      const unsigned VReg =
          IsPPC64 ? MF.addLiveIn(GPR_64[GPRIndex], &PPC::G8RCRegClass)
                  : MF.addLiveIn(GPR_32[GPRIndex], &PPC::GPRCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address for the next argument to store.
      SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}

SDValue PPCTargetLowering::LowerCall_AIX(
    SDValue Chain, SDValue Callee, CallFlags CFlags,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    const CallBase *CB) const {
  // See PPCTargetLowering::LowerFormalArguments_AIX() for a description of the
  // AIX ABI stack frame layout.

  assert((CFlags.CallConv == CallingConv::C ||
          CFlags.CallConv == CallingConv::Cold ||
          CFlags.CallConv == CallingConv::Fast) &&
         "Unexpected calling convention!");

  if (CFlags.IsPatchPoint)
    report_fatal_error("This call type is unimplemented on AIX.");

  const PPCSubtarget& Subtarget =
      static_cast<const PPCSubtarget&>(DAG.getSubtarget());

  MachineFunction &MF = DAG.getMachineFunction();
  SmallVector<CCValAssign, 16> ArgLocs;
  AIXCCState CCInfo(CFlags.CallConv, CFlags.IsVarArg, MF, ArgLocs,
                    *DAG.getContext());

  // Reserve space for the linkage save area (LSA) on the stack.
  // In both PPC32 and PPC64 there are 6 reserved slots in the LSA:
  //   [SP][CR][LR][2 x reserved][TOC].
  // The LSA is 24 bytes (6x4) in PPC32 and 48 bytes (6x8) in PPC64.
  const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  const bool IsPPC64 = Subtarget.isPPC64();
  const EVT PtrVT = getPointerTy(DAG.getDataLayout());
  const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
  CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
  CCInfo.AnalyzeCallOperands(Outs, CC_AIX);

  // The prolog code of the callee may store up to 8 GPR argument registers to
  // the stack, allowing va_start to index over them in memory if the callee
  // is variadic.
  // Because we cannot tell if this is needed on the caller side, we have to
  // conservatively assume that it is needed.  As such, make sure we have at
  // least enough stack space for the caller to store the 8 GPRs.
  const unsigned MinParameterSaveAreaSize = 8 * PtrByteSize;
  const unsigned NumBytes = std::max(LinkageSize + MinParameterSaveAreaSize,
                                     CCInfo.getNextStackOffset());

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
  SDValue CallSeqStart = Chain;

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  // Set up a copy of the stack pointer for loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  const SDValue StackPtr = IsPPC64 ? DAG.getRegister(PPC::X1, MVT::i64)
                                   : DAG.getRegister(PPC::R1, MVT::i32);

  for (unsigned I = 0, E = ArgLocs.size(); I != E;) {
    const unsigned ValNo = ArgLocs[I].getValNo();
    SDValue Arg = OutVals[ValNo];
    ISD::ArgFlagsTy Flags = Outs[ValNo].Flags;

    if (Flags.isByVal()) {
      const unsigned ByValSize = Flags.getByValSize();

      // Nothing to do for zero-sized ByVals on the caller side.
      if (!ByValSize) {
        ++I;
        continue;
      }

      auto GetLoad = [&](EVT VT, unsigned LoadOffset) {
        return DAG.getExtLoad(
            ISD::ZEXTLOAD, dl, PtrVT, Chain,
            (LoadOffset != 0)
                ? DAG.getObjectPtrOffset(dl, Arg, TypeSize::Fixed(LoadOffset))
                : Arg,
            MachinePointerInfo(), VT);
      };

      unsigned LoadOffset = 0;

      // Initialize registers, which are fully occupied by the by-val argument.
      while (LoadOffset + PtrByteSize <= ByValSize && ArgLocs[I].isRegLoc()) {
        SDValue Load = GetLoad(PtrVT, LoadOffset);
        MemOpChains.push_back(Load.getValue(1));
        LoadOffset += PtrByteSize;
        const CCValAssign &ByValVA = ArgLocs[I++];
        assert(ByValVA.getValNo() == ValNo &&
               "Unexpected location for pass-by-value argument.");
        RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), Load));
      }

      if (LoadOffset == ByValSize)
        continue;

      // There must be one more loc to handle the remainder.
      assert(ArgLocs[I].getValNo() == ValNo &&
             "Expected additional location for by-value argument.");

      if (ArgLocs[I].isMemLoc()) {
        assert(LoadOffset < ByValSize && "Unexpected memloc for by-val arg.");
        const CCValAssign &ByValVA = ArgLocs[I++];
        ISD::ArgFlagsTy MemcpyFlags = Flags;
        // Only memcpy the bytes that don't pass in registers.
        MemcpyFlags.setByValSize(ByValSize - LoadOffset);
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(
            (LoadOffset != 0)
                ? DAG.getObjectPtrOffset(dl, Arg, TypeSize::Fixed(LoadOffset))
                : Arg,
            DAG.getObjectPtrOffset(dl, StackPtr,
                                   TypeSize::Fixed(ByValVA.getLocMemOffset())),
            CallSeqStart, MemcpyFlags, DAG, dl);
        continue;
      }

      // Initialize the final register residue.
      // Any residue that occupies the final by-val arg register must be
      // left-justified on AIX. Loads must be a power-of-2 size and cannot be
      // larger than the ByValSize. For example: a 7 byte by-val arg requires 4,
      // 2 and 1 byte loads.
      const unsigned ResidueBytes = ByValSize % PtrByteSize;
      assert(ResidueBytes != 0 && LoadOffset + PtrByteSize > ByValSize &&
             "Unexpected register residue for by-value argument.");
      SDValue ResidueVal;
      for (unsigned Bytes = 0; Bytes != ResidueBytes;) {
        const unsigned N = PowerOf2Floor(ResidueBytes - Bytes);
        const MVT VT =
            N == 1 ? MVT::i8
                   : ((N == 2) ? MVT::i16 : (N == 4 ? MVT::i32 : MVT::i64));
        SDValue Load = GetLoad(VT, LoadOffset);
        MemOpChains.push_back(Load.getValue(1));
        LoadOffset += N;
        Bytes += N;

        // By-val arguments are passed left-justified in register.
        // Every load here needs to be shifted, otherwise a full register load
        // should have been used.
        assert(PtrVT.getSimpleVT().getSizeInBits() > (Bytes * 8) &&
               "Unexpected load emitted during handling of pass-by-value "
               "argument.");
        unsigned NumSHLBits = PtrVT.getSimpleVT().getSizeInBits() - (Bytes * 8);
        EVT ShiftAmountTy =
            getShiftAmountTy(Load->getValueType(0), DAG.getDataLayout());
        SDValue SHLAmt = DAG.getConstant(NumSHLBits, dl, ShiftAmountTy);
        SDValue ShiftedLoad =
            DAG.getNode(ISD::SHL, dl, Load.getValueType(), Load, SHLAmt);
        ResidueVal = ResidueVal ? DAG.getNode(ISD::OR, dl, PtrVT, ResidueVal,
                                              ShiftedLoad)
                                : ShiftedLoad;
      }
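      // Worked example on 64-bit: a 7-byte residue becomes a 4-byte load
      // shifted left by 32, a 2-byte load shifted left by 16, and a 1-byte
      // load shifted left by 8, OR'd together so the payload sits
      // left-justified in the register.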

      const CCValAssign &ByValVA = ArgLocs[I++];
      RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), ResidueVal));
      continue;
    }

    CCValAssign &VA = ArgLocs[I++];
    const MVT LocVT = VA.getLocVT();
    const MVT ValVT = VA.getValVT();

    switch (VA.getLocInfo()) {
    default:
      report_fatal_error("Unexpected argument extension type.");
    case CCValAssign::Full:
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc() && !VA.needsCustom()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      continue;
    }

    // Vector arguments passed to VarArg functions need custom handling when
    // they are passed (at least partially) in GPRs.
    if (VA.isMemLoc() && VA.needsCustom() && ValVT.isVector()) {
      assert(CFlags.IsVarArg && "Custom MemLocs only used for Vector args.");
      // Store value to its stack slot.
      SDValue PtrOff =
          DAG.getConstant(VA.getLocMemOffset(), dl, StackPtr.getValueType());
      PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
      SDValue Store =
          DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
      MemOpChains.push_back(Store);
      const unsigned OriginalValNo = VA.getValNo();
      // Then load the GPRs from the stack.
      unsigned LoadOffset = 0;
      auto HandleCustomVecRegLoc = [&]() {
        assert(I != E && "Unexpected end of CCValAssigns.");
        assert(ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() &&
               "Expected custom RegLoc.");
        CCValAssign RegVA = ArgLocs[I++];
        assert(RegVA.getValNo() == OriginalValNo &&
               "Custom MemLoc ValNo and custom RegLoc ValNo must match.");
        SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
                                  DAG.getConstant(LoadOffset, dl, PtrVT));
        SDValue Load = DAG.getLoad(PtrVT, dl, Store, Add, MachinePointerInfo());
        MemOpChains.push_back(Load.getValue(1));
        RegsToPass.push_back(std::make_pair(RegVA.getLocReg(), Load));
        LoadOffset += PtrByteSize;
      };

      // In 64-bit there will be exactly 2 custom RegLocs that follow, and
      // in 32-bit there will be 2 custom RegLocs if we are passing in R9 and
      // R10.
      HandleCustomVecRegLoc();
      HandleCustomVecRegLoc();

      if (I != E && ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() &&
          ArgLocs[I].getValNo() == OriginalValNo) {
        assert(!IsPPC64 &&
               "Only 2 custom RegLocs expected for 64-bit codegen.");
        HandleCustomVecRegLoc();
        HandleCustomVecRegLoc();
      }

      continue;
    }

    if (VA.isMemLoc()) {
      SDValue PtrOff =
          DAG.getConstant(VA.getLocMemOffset(), dl, StackPtr.getValueType());
      PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
      MemOpChains.push_back(
          DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));

      continue;
    }

    if (!ValVT.isFloatingPoint())
      report_fatal_error(
          "Unexpected register handling for calling convention.");

    // Custom handling is used for GPR initializations for vararg float
    // arguments.
    assert(VA.isRegLoc() && VA.needsCustom() && CFlags.IsVarArg &&
           LocVT.isInteger() &&
           "Custom register handling only expected for VarArg.");
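    // Concretely (illustrative): an f32 vararg on PPC32 is bitcast to i32 and
    // passed as-is; an f32 on PPC64 is zero-extended into the 64-bit GPR; an
    // f64 on PPC32 is split below, high word first, across two GPRs.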
7304 
7305     SDValue ArgAsInt =
7306         DAG.getBitcast(MVT::getIntegerVT(ValVT.getSizeInBits()), Arg);
7307 
7308     if (Arg.getValueType().getStoreSize() == LocVT.getStoreSize())
7309       // f32 in 32-bit GPR
7310       // f64 in 64-bit GPR
7311       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgAsInt));
7312     else if (Arg.getValueType().getFixedSizeInBits() <
7313              LocVT.getFixedSizeInBits())
7314       // f32 in 64-bit GPR.
7315       RegsToPass.push_back(std::make_pair(
7316           VA.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, LocVT)));
7317     else {
7318       // f64 in two 32-bit GPRs
7319       // The 2 GPRs are marked custom and expected to be adjacent in ArgLocs.
7320       assert(Arg.getValueType() == MVT::f64 && CFlags.IsVarArg && !IsPPC64 &&
7321              "Unexpected custom register for argument!");
7322       CCValAssign &GPR1 = VA;
7323       SDValue MSWAsI64 = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgAsInt,
7324                                      DAG.getConstant(32, dl, MVT::i8));
7325       RegsToPass.push_back(std::make_pair(
7326           GPR1.getLocReg(), DAG.getZExtOrTrunc(MSWAsI64, dl, MVT::i32)));
7327 
7328       if (I != E) {
        // If only 1 GPR was available, there will only be one custom GPR and
        // the argument will also be passed in memory.
        CCValAssign &PeekArg = ArgLocs[I];
        if (PeekArg.isRegLoc() && PeekArg.getValNo() == VA.getValNo()) {
7333           assert(PeekArg.needsCustom() && "A second custom GPR is expected.");
7334           CCValAssign &GPR2 = ArgLocs[I++];
7335           RegsToPass.push_back(std::make_pair(
7336               GPR2.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, MVT::i32)));
7337         }
7338       }
7339     }
7340   }
7341 
7342   if (!MemOpChains.empty())
7343     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
7344 
7345   // For indirect calls, we need to save the TOC base to the stack for
7346   // restoration after the call.
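  // (For reference: on AIX the TOC save slot lives in the linkage area, e.g.
  // at offset 20 from the stack pointer in 32-bit mode and 40 in 64-bit
  // mode; the exact value is whatever getTOCSaveOffset() returns below.)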
7347   if (CFlags.IsIndirect) {
7348     assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
7349     const MCRegister TOCBaseReg = Subtarget.getTOCPointerRegister();
7350     const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
7351     const MVT PtrVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
7352     const unsigned TOCSaveOffset =
7353         Subtarget.getFrameLowering()->getTOCSaveOffset();
7354 
7355     setUsesTOCBasePtr(DAG);
7356     SDValue Val = DAG.getCopyFromReg(Chain, dl, TOCBaseReg, PtrVT);
7357     SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
7358     SDValue StackPtr = DAG.getRegister(StackPtrReg, PtrVT);
7359     SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7360     Chain = DAG.getStore(
7361         Val.getValue(1), dl, Val, AddPtr,
7362         MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
7363   }
7364 
7365   // Build a sequence of copy-to-reg nodes chained together with token chain
7366   // and flag operands which copy the outgoing args into the appropriate regs.
7367   SDValue InFlag;
7368   for (auto Reg : RegsToPass) {
7369     Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, InFlag);
7370     InFlag = Chain.getValue(1);
7371   }
7372 
7373   const int SPDiff = 0;
7374   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
7375                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
7376 }
7377 
7378 bool
7379 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
7380                                   MachineFunction &MF, bool isVarArg,
7381                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
7382                                   LLVMContext &Context) const {
7383   SmallVector<CCValAssign, 16> RVLocs;
7384   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
7385   return CCInfo.CheckReturn(
7386       Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7387                 ? RetCC_PPC_Cold
7388                 : RetCC_PPC);
7389 }
7390 
7391 SDValue
7392 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
7393                                bool isVarArg,
7394                                const SmallVectorImpl<ISD::OutputArg> &Outs,
7395                                const SmallVectorImpl<SDValue> &OutVals,
7396                                const SDLoc &dl, SelectionDAG &DAG) const {
7397   SmallVector<CCValAssign, 16> RVLocs;
7398   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
7399                  *DAG.getContext());
7400   CCInfo.AnalyzeReturn(Outs,
7401                        (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7402                            ? RetCC_PPC_Cold
7403                            : RetCC_PPC);
7404 
7405   SDValue Flag;
7406   SmallVector<SDValue, 4> RetOps(1, Chain);
7407 
7408   // Copy the result values into the output registers.
7409   for (unsigned i = 0, RealResIdx = 0; i != RVLocs.size(); ++i, ++RealResIdx) {
7410     CCValAssign &VA = RVLocs[i];
7411     assert(VA.isRegLoc() && "Can only return in registers!");
7412 
7413     SDValue Arg = OutVals[RealResIdx];
7414 
7415     switch (VA.getLocInfo()) {
7416     default: llvm_unreachable("Unknown loc info!");
7417     case CCValAssign::Full: break;
7418     case CCValAssign::AExt:
7419       Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
7420       break;
7421     case CCValAssign::ZExt:
7422       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
7423       break;
7424     case CCValAssign::SExt:
7425       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
7426       break;
7427     }
7428     if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
7429       bool isLittleEndian = Subtarget.isLittleEndian();
7430       // Legalize ret f64 -> ret 2 x i32.
7431       SDValue SVal =
7432           DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7433                       DAG.getIntPtrConstant(isLittleEndian ? 0 : 1, dl));
7434       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7435       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7436       SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7437                          DAG.getIntPtrConstant(isLittleEndian ? 1 : 0, dl));
7438       Flag = Chain.getValue(1);
7439       VA = RVLocs[++i]; // skip ahead to next loc
7440       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7441     } else
7442       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
7443     Flag = Chain.getValue(1);
7444     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7445   }
7446 
7447   RetOps[0] = Chain;  // Update chain.
7448 
7449   // Add the flag if we have it.
7450   if (Flag.getNode())
7451     RetOps.push_back(Flag);
7452 
7453   return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
7454 }
7455 
7456 SDValue
7457 PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
7458                                                 SelectionDAG &DAG) const {
7459   SDLoc dl(Op);
7460 
7461   // Get the correct type for integers.
7462   EVT IntVT = Op.getValueType();
7463 
7464   // Get the inputs.
7465   SDValue Chain = Op.getOperand(0);
7466   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7467   // Build a DYNAREAOFFSET node.
7468   SDValue Ops[2] = {Chain, FPSIdx};
7469   SDVTList VTs = DAG.getVTList(IntVT);
7470   return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
7471 }
7472 
7473 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
7474                                              SelectionDAG &DAG) const {
7475   // When we pop the dynamic allocation we need to restore the SP link.
7476   SDLoc dl(Op);
7477 
7478   // Get the correct type for pointers.
7479   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7480 
7481   // Construct the stack pointer operand.
7482   bool isPPC64 = Subtarget.isPPC64();
7483   unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
7484   SDValue StackPtr = DAG.getRegister(SP, PtrVT);
7485 
7486   // Get the operands for the STACKRESTORE.
7487   SDValue Chain = Op.getOperand(0);
7488   SDValue SaveSP = Op.getOperand(1);
7489 
7490   // Load the old link SP.
7491   SDValue LoadLinkSP =
7492       DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());
7493 
7494   // Restore the stack pointer.
7495   Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
7496 
7497   // Store the old link SP.
7498   return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
7499 }
7500 
7501 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
7502   MachineFunction &MF = DAG.getMachineFunction();
7503   bool isPPC64 = Subtarget.isPPC64();
7504   EVT PtrVT = getPointerTy(MF.getDataLayout());
7505 
  // Get the current return address save index.
7508   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7509   int RASI = FI->getReturnAddrSaveIndex();
7510 
  // If the return address save index hasn't been defined yet.
  if (!RASI) {
    // Find out the fixed offset of the return address save area.
    int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
    // Allocate the frame index for the return address save area.
7516     RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
7517     // Save the result.
7518     FI->setReturnAddrSaveIndex(RASI);
7519   }
7520   return DAG.getFrameIndex(RASI, PtrVT);
7521 }
7522 
7523 SDValue
7524 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
7525   MachineFunction &MF = DAG.getMachineFunction();
7526   bool isPPC64 = Subtarget.isPPC64();
7527   EVT PtrVT = getPointerTy(MF.getDataLayout());
7528 
7529   // Get current frame pointer save index.  The users of this index will be
7530   // primarily DYNALLOC instructions.
7531   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7532   int FPSI = FI->getFramePointerSaveIndex();
7533 
7534   // If the frame pointer save index hasn't been defined yet.
7535   if (!FPSI) {
    // Find out the fixed offset of the frame pointer save area.
    int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
    // Allocate the frame index for the frame pointer save area.
7539     FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
7540     // Save the result.
7541     FI->setFramePointerSaveIndex(FPSI);
7542   }
7543   return DAG.getFrameIndex(FPSI, PtrVT);
7544 }
7545 
7546 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
7547                                                    SelectionDAG &DAG) const {
7548   MachineFunction &MF = DAG.getMachineFunction();
7549   // Get the inputs.
7550   SDValue Chain = Op.getOperand(0);
7551   SDValue Size  = Op.getOperand(1);
7552   SDLoc dl(Op);
7553 
7554   // Get the correct type for pointers.
7555   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7556   // Negate the size.
7557   SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
7558                                 DAG.getConstant(0, dl, PtrVT), Size);
7559   // Construct a node for the frame pointer save index.
7560   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7561   SDValue Ops[3] = { Chain, NegSize, FPSIdx };
7562   SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
7563   if (hasInlineStackProbe(MF))
7564     return DAG.getNode(PPCISD::PROBED_ALLOCA, dl, VTs, Ops);
7565   return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
7566 }
7567 
7568 SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
7569                                                      SelectionDAG &DAG) const {
7570   MachineFunction &MF = DAG.getMachineFunction();
7571 
7572   bool isPPC64 = Subtarget.isPPC64();
7573   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7574 
7575   int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false);
7576   return DAG.getFrameIndex(FI, PtrVT);
7577 }
7578 
7579 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
7580                                                SelectionDAG &DAG) const {
7581   SDLoc DL(Op);
7582   return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
7583                      DAG.getVTList(MVT::i32, MVT::Other),
7584                      Op.getOperand(0), Op.getOperand(1));
7585 }
7586 
7587 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
7588                                                 SelectionDAG &DAG) const {
7589   SDLoc DL(Op);
7590   return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
7591                      Op.getOperand(0), Op.getOperand(1));
7592 }
7593 
7594 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
7595   if (Op.getValueType().isVector())
7596     return LowerVectorLoad(Op, DAG);
7597 
7598   assert(Op.getValueType() == MVT::i1 &&
7599          "Custom lowering only for i1 loads");
7600 
  // First, extend-load the 8-bit value into a pointer-width integer, then
  // truncate to 1 bit.
7602 
7603   SDLoc dl(Op);
7604   LoadSDNode *LD = cast<LoadSDNode>(Op);
7605 
7606   SDValue Chain = LD->getChain();
7607   SDValue BasePtr = LD->getBasePtr();
7608   MachineMemOperand *MMO = LD->getMemOperand();
7609 
7610   SDValue NewLD =
7611       DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain,
7612                      BasePtr, MVT::i8, MMO);
7613   SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);
7614 
7615   SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
7616   return DAG.getMergeValues(Ops, dl);
7617 }
7618 
7619 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
7620   if (Op.getOperand(1).getValueType().isVector())
7621     return LowerVectorStore(Op, DAG);
7622 
7623   assert(Op.getOperand(1).getValueType() == MVT::i1 &&
7624          "Custom lowering only for i1 stores");
7625 
  // First, zero-extend to a pointer-width integer, then use a truncating
  // store to 8 bits.
7627 
7628   SDLoc dl(Op);
7629   StoreSDNode *ST = cast<StoreSDNode>(Op);
7630 
7631   SDValue Chain = ST->getChain();
7632   SDValue BasePtr = ST->getBasePtr();
7633   SDValue Value = ST->getValue();
7634   MachineMemOperand *MMO = ST->getMemOperand();
7635 
7636   Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()),
7637                       Value);
7638   return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
7639 }
7640 
7641 // FIXME: Remove this once the ANDI glue bug is fixed:
7642 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
7643   assert(Op.getValueType() == MVT::i1 &&
7644          "Custom lowering only for i1 results");
7645 
7646   SDLoc DL(Op);
7647   return DAG.getNode(PPCISD::ANDI_rec_1_GT_BIT, DL, MVT::i1, Op.getOperand(0));
7648 }
7649 
7650 SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op,
7651                                                SelectionDAG &DAG) const {
7652 
7653   // Implements a vector truncate that fits in a vector register as a shuffle.
7654   // We want to legalize vector truncates down to where the source fits in
7655   // a vector register (and target is therefore smaller than vector register
7656   // size).  At that point legalization will try to custom lower the sub-legal
7657   // result and get here - where we can contain the truncate as a single target
7658   // operation.
7659 
7660   // For example a trunc <2 x i16> to <2 x i8> could be visualized as follows:
7661   //   <MSB1|LSB1, MSB2|LSB2> to <LSB1, LSB2>
7662   //
  // We will implement it for big-endian ordering as this (where u denotes
  // undefined):
7665   //   < MSB1|LSB1, MSB2|LSB2, uu, uu, uu, uu, uu, uu> to
7666   //   < LSB1, LSB2, u, u, u, u, u, u, u, u, u, u, u, u, u, u>
7667   //
7668   // The same operation in little-endian ordering will be:
7669   //   <uu, uu, uu, uu, uu, uu, LSB2|MSB2, LSB1|MSB1> to
7670   //   <u, u, u, u, u, u, u, u, u, u, u, u, u, u, LSB2, LSB1>
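  //
  // Concretely (a sketch derived from the code below): for the little-endian
  // <2 x i16> -> <2 x i8> example, the source is widened to v8i16, both
  // shuffle operands are bitcast to v16i8, and the mask built below becomes
  // <0, 2, 17, 17, ...>: bytes 0 and 2 hold LSB1 and LSB2, while index 17
  // selects from the undef second operand and leaves those lanes undefined.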
7671 
7672   EVT TrgVT = Op.getValueType();
7673   assert(TrgVT.isVector() && "Vector type expected.");
7674   unsigned TrgNumElts = TrgVT.getVectorNumElements();
7675   EVT EltVT = TrgVT.getVectorElementType();
7676   if (!isOperationCustom(Op.getOpcode(), TrgVT) ||
7677       TrgVT.getSizeInBits() > 128 || !isPowerOf2_32(TrgNumElts) ||
7678       !isPowerOf2_32(EltVT.getSizeInBits()))
7679     return SDValue();
7680 
7681   SDValue N1 = Op.getOperand(0);
7682   EVT SrcVT = N1.getValueType();
7683   unsigned SrcSize = SrcVT.getSizeInBits();
7684   if (SrcSize > 256 ||
7685       !isPowerOf2_32(SrcVT.getVectorNumElements()) ||
7686       !isPowerOf2_32(SrcVT.getVectorElementType().getSizeInBits()))
7687     return SDValue();
7688   if (SrcSize == 256 && SrcVT.getVectorNumElements() < 2)
7689     return SDValue();
7690 
7691   unsigned WideNumElts = 128 / EltVT.getSizeInBits();
7692   EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
7693 
7694   SDLoc DL(Op);
7695   SDValue Op1, Op2;
7696   if (SrcSize == 256) {
7697     EVT VecIdxTy = getVectorIdxTy(DAG.getDataLayout());
7698     EVT SplitVT =
7699         N1.getValueType().getHalfNumVectorElementsVT(*DAG.getContext());
7700     unsigned SplitNumElts = SplitVT.getVectorNumElements();
7701     Op1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, N1,
7702                       DAG.getConstant(0, DL, VecIdxTy));
7703     Op2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, N1,
7704                       DAG.getConstant(SplitNumElts, DL, VecIdxTy));
7705   }
7706   else {
7707     Op1 = SrcSize == 128 ? N1 : widenVec(DAG, N1, DL);
7708     Op2 = DAG.getUNDEF(WideVT);
7709   }
7710 
7711   // First list the elements we want to keep.
7712   unsigned SizeMult = SrcSize / TrgVT.getSizeInBits();
7713   SmallVector<int, 16> ShuffV;
7714   if (Subtarget.isLittleEndian())
7715     for (unsigned i = 0; i < TrgNumElts; ++i)
7716       ShuffV.push_back(i * SizeMult);
7717   else
7718     for (unsigned i = 1; i <= TrgNumElts; ++i)
7719       ShuffV.push_back(i * SizeMult - 1);
7720 
  // Populate the remaining elements with undefs.
  for (unsigned i = TrgNumElts; i < WideNumElts; ++i)
    ShuffV.push_back(WideNumElts + 1);
7725 
7726   Op1 = DAG.getNode(ISD::BITCAST, DL, WideVT, Op1);
7727   Op2 = DAG.getNode(ISD::BITCAST, DL, WideVT, Op2);
7728   return DAG.getVectorShuffle(WideVT, DL, Op1, Op2, ShuffV);
7729 }
7730 
7731 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when
7732 /// possible.
7733 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
7734   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
7735   EVT ResVT = Op.getValueType();
7736   EVT CmpVT = Op.getOperand(0).getValueType();
7737   SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
7738   SDValue TV  = Op.getOperand(2), FV  = Op.getOperand(3);
7739   SDLoc dl(Op);
7740 
  // Without power9-vector, we don't have a native instruction for f128
  // comparison. The following transformation to a libcall is needed for
  // setcc:
  // select_cc lhs, rhs, tv, fv, cc -> select_cc (setcc cc, x, y), 0, tv, fv, NE
7744   if (!Subtarget.hasP9Vector() && CmpVT == MVT::f128) {
7745     SDValue Z = DAG.getSetCC(
7746         dl, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), CmpVT),
7747         LHS, RHS, CC);
7748     SDValue Zero = DAG.getConstant(0, dl, Z.getValueType());
7749     return DAG.getSelectCC(dl, Z, Zero, TV, FV, ISD::SETNE);
7750   }
7751 
  // Not FP, or using SPE? Not an fsel.
7753   if (!CmpVT.isFloatingPoint() || !TV.getValueType().isFloatingPoint() ||
7754       Subtarget.hasSPE())
7755     return Op;
7756 
7757   SDNodeFlags Flags = Op.getNode()->getFlags();
7758 
7759   // We have xsmaxcdp/xsmincdp which are OK to emit even in the
7760   // presence of infinities.
7761   if (Subtarget.hasP9Vector() && LHS == TV && RHS == FV) {
7762     switch (CC) {
7763     default:
7764       break;
7765     case ISD::SETOGT:
7766     case ISD::SETGT:
7767       return DAG.getNode(PPCISD::XSMAXCDP, dl, Op.getValueType(), LHS, RHS);
7768     case ISD::SETOLT:
7769     case ISD::SETLT:
7770       return DAG.getNode(PPCISD::XSMINCDP, dl, Op.getValueType(), LHS, RHS);
7771     }
7772   }
7773 
7774   // We might be able to do better than this under some circumstances, but in
7775   // general, fsel-based lowering of select is a finite-math-only optimization.
7776   // For more information, see section F.3 of the 2.06 ISA specification.
  // (The ISA 3.0 xsmaxcdp/xsmincdp lowering above is an exception, as those
  // instructions are safe to emit even in the presence of infinities.)
7778   if ((!DAG.getTarget().Options.NoInfsFPMath && !Flags.hasNoInfs()) ||
7779       (!DAG.getTarget().Options.NoNaNsFPMath && !Flags.hasNoNaNs()))
7780     return Op;
7781 
7782   // If the RHS of the comparison is a 0.0, we don't need to do the
7783   // subtraction at all.
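  // As an illustration: fsel computes "A >= 0.0 ? B : C", so
  // (select_cc LHS, 0.0, TV, FV, SETGE) maps directly onto
  // (FSEL LHS, TV, FV), while SETLE is handled by negating LHS first.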
7784   SDValue Sel1;
7785   if (isFloatingPointZero(RHS))
7786     switch (CC) {
7787     default: break;       // SETUO etc aren't handled by fsel.
7788     case ISD::SETNE:
7789       std::swap(TV, FV);
7790       LLVM_FALLTHROUGH;
7791     case ISD::SETEQ:
7792       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
7793         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7794       Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
7795       if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
7796         Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
7797       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7798                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
7799     case ISD::SETULT:
7800     case ISD::SETLT:
7801       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
7802       LLVM_FALLTHROUGH;
7803     case ISD::SETOGE:
7804     case ISD::SETGE:
7805       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
7806         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7807       return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
7808     case ISD::SETUGT:
7809     case ISD::SETGT:
      std::swap(TV, FV);  // fsel is natively setge, swap operands for setgt
7811       LLVM_FALLTHROUGH;
7812     case ISD::SETOLE:
7813     case ISD::SETLE:
7814       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
7815         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7816       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7817                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
7818     }
7819 
7820   SDValue Cmp;
7821   switch (CC) {
7822   default: break;       // SETUO etc aren't handled by fsel.
7823   case ISD::SETNE:
7824     std::swap(TV, FV);
7825     LLVM_FALLTHROUGH;
7826   case ISD::SETEQ:
7827     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
7828     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7829       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7830     Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
7831     if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
7832       Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
7833     return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7834                        DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
7835   case ISD::SETULT:
7836   case ISD::SETLT:
7837     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
7838     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7839       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7840     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
7841   case ISD::SETOGE:
7842   case ISD::SETGE:
7843     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
7844     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7845       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7846     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
7847   case ISD::SETUGT:
7848   case ISD::SETGT:
7849     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
7850     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7851       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7852     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
7853   case ISD::SETOLE:
7854   case ISD::SETLE:
7855     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
7856     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7857       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7858     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
7859   }
7860   return Op;
7861 }
7862 
7863 static unsigned getPPCStrictOpcode(unsigned Opc) {
7864   switch (Opc) {
7865   default:
7866     llvm_unreachable("No strict version of this opcode!");
7867   case PPCISD::FCTIDZ:
7868     return PPCISD::STRICT_FCTIDZ;
7869   case PPCISD::FCTIWZ:
7870     return PPCISD::STRICT_FCTIWZ;
7871   case PPCISD::FCTIDUZ:
7872     return PPCISD::STRICT_FCTIDUZ;
7873   case PPCISD::FCTIWUZ:
7874     return PPCISD::STRICT_FCTIWUZ;
7875   case PPCISD::FCFID:
7876     return PPCISD::STRICT_FCFID;
7877   case PPCISD::FCFIDU:
7878     return PPCISD::STRICT_FCFIDU;
7879   case PPCISD::FCFIDS:
7880     return PPCISD::STRICT_FCFIDS;
7881   case PPCISD::FCFIDUS:
7882     return PPCISD::STRICT_FCFIDUS;
7883   }
7884 }
7885 
7886 static SDValue convertFPToInt(SDValue Op, SelectionDAG &DAG,
7887                               const PPCSubtarget &Subtarget) {
7888   SDLoc dl(Op);
7889   bool IsStrict = Op->isStrictFPOpcode();
7890   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
7891                   Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
7892 
7893   // TODO: Any other flags to propagate?
7894   SDNodeFlags Flags;
7895   Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
7896 
7897   // For strict nodes, source is the second operand.
7898   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
7899   SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
7900   assert(Src.getValueType().isFloatingPoint());
7901   if (Src.getValueType() == MVT::f32) {
7902     if (IsStrict) {
7903       Src =
7904           DAG.getNode(ISD::STRICT_FP_EXTEND, dl,
7905                       DAG.getVTList(MVT::f64, MVT::Other), {Chain, Src}, Flags);
7906       Chain = Src.getValue(1);
7907     } else
7908       Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
7909   }
7910   SDValue Conv;
7911   unsigned Opc = ISD::DELETED_NODE;
7912   switch (Op.getSimpleValueType().SimpleTy) {
7913   default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
7914   case MVT::i32:
7915     Opc = IsSigned ? PPCISD::FCTIWZ
7916                    : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ);
7917     break;
7918   case MVT::i64:
7919     assert((IsSigned || Subtarget.hasFPCVT()) &&
7920            "i64 FP_TO_UINT is supported only with FPCVT");
7921     Opc = IsSigned ? PPCISD::FCTIDZ : PPCISD::FCTIDUZ;
7922   }
7923   if (IsStrict) {
7924     Opc = getPPCStrictOpcode(Opc);
7925     Conv = DAG.getNode(Opc, dl, DAG.getVTList(MVT::f64, MVT::Other),
7926                        {Chain, Src}, Flags);
7927   } else {
7928     Conv = DAG.getNode(Opc, dl, MVT::f64, Src);
7929   }
7930   return Conv;
7931 }
7932 
7933 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
7934                                                SelectionDAG &DAG,
7935                                                const SDLoc &dl) const {
7936   SDValue Tmp = convertFPToInt(Op, DAG, Subtarget);
7937   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
7938                   Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
7939   bool IsStrict = Op->isStrictFPOpcode();
7940 
7941   // Convert the FP value to an int value through memory.
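  // (Illustratively, an i32 result on an STFIWX-capable subtarget becomes
  // roughly "fctiwz; stfiwx; lwz" - a sketch of the sequence rather than the
  // exact emitted code.)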
7942   bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() &&
7943                   (IsSigned || Subtarget.hasFPCVT());
7944   SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64);
7945   int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
7946   MachinePointerInfo MPI =
7947       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
7948 
7949   // Emit a store to the stack slot.
7950   SDValue Chain = IsStrict ? Tmp.getValue(1) : DAG.getEntryNode();
7951   Align Alignment(DAG.getEVTAlign(Tmp.getValueType()));
7952   if (i32Stack) {
7953     MachineFunction &MF = DAG.getMachineFunction();
7954     Alignment = Align(4);
7955     MachineMemOperand *MMO =
7956         MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Alignment);
7957     SDValue Ops[] = { Chain, Tmp, FIPtr };
7958     Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
7959               DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
7960   } else
7961     Chain = DAG.getStore(Chain, dl, Tmp, FIPtr, MPI, Alignment);
7962 
7963   // Result is a load from the stack slot.  If loading 4 bytes, make sure to
7964   // add in a bias on big endian.
7965   if (Op.getValueType() == MVT::i32 && !i32Stack) {
7966     FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
7967                         DAG.getConstant(4, dl, FIPtr.getValueType()));
7968     MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4);
7969   }
7970 
7971   RLI.Chain = Chain;
7972   RLI.Ptr = FIPtr;
7973   RLI.MPI = MPI;
7974   RLI.Alignment = Alignment;
7975 }
7976 
7977 /// Custom lowers floating point to integer conversions to use
7978 /// the direct move instructions available in ISA 2.07 to avoid the
7979 /// need for load/store combinations.
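/// (Illustratively, an f64 to i32 signed conversion becomes roughly a
/// truncating convert plus "mfvsrwz", avoiding the stack round trip - a
/// sketch; the exact opcodes depend on the subtarget.)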
7980 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op,
7981                                                     SelectionDAG &DAG,
7982                                                     const SDLoc &dl) const {
7983   SDValue Conv = convertFPToInt(Op, DAG, Subtarget);
7984   SDValue Mov = DAG.getNode(PPCISD::MFVSR, dl, Op.getValueType(), Conv);
7985   if (Op->isStrictFPOpcode())
7986     return DAG.getMergeValues({Mov, Conv.getValue(1)}, dl);
7987   else
7988     return Mov;
7989 }
7990 
7991 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
7992                                           const SDLoc &dl) const {
7993   bool IsStrict = Op->isStrictFPOpcode();
7994   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
7995                   Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
7996   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
7997   EVT SrcVT = Src.getValueType();
7998   EVT DstVT = Op.getValueType();
7999 
  // FP to INT conversions are legal for f128 only when P9 vector
  // instructions are available.
8001   if (SrcVT == MVT::f128)
8002     return Subtarget.hasP9Vector() ? Op : SDValue();
8003 
8004   // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
8005   // PPC (the libcall is not available).
8006   if (SrcVT == MVT::ppcf128) {
8007     if (DstVT == MVT::i32) {
8008       // TODO: Conservatively pass only nofpexcept flag here. Need to check and
8009       // set other fast-math flags to FP operations in both strict and
8010       // non-strict cases. (FP_TO_SINT, FSUB)
8011       SDNodeFlags Flags;
8012       Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
8013 
8014       if (IsSigned) {
8015         SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Src,
8016                                  DAG.getIntPtrConstant(0, dl));
8017         SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Src,
8018                                  DAG.getIntPtrConstant(1, dl));
8019 
8020         // Add the two halves of the long double in round-to-zero mode, and use
8021         // a smaller FP_TO_SINT.
8022         if (IsStrict) {
8023           SDValue Res = DAG.getNode(PPCISD::STRICT_FADDRTZ, dl,
8024                                     DAG.getVTList(MVT::f64, MVT::Other),
8025                                     {Op.getOperand(0), Lo, Hi}, Flags);
8026           return DAG.getNode(ISD::STRICT_FP_TO_SINT, dl,
8027                              DAG.getVTList(MVT::i32, MVT::Other),
8028                              {Res.getValue(1), Res}, Flags);
8029         } else {
8030           SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi);
8031           return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res);
8032         }
8033       } else {
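        // The constant below encodes 2^31: the IEEE double
        // 0x41e0000000000000 has biased exponent 0x41e (1054), unbiased
        // exponent 1054 - 1023 = 31, and a zero mantissa.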
8034         const uint64_t TwoE31[] = {0x41e0000000000000LL, 0};
8035         APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31));
8036         SDValue Cst = DAG.getConstantFP(APF, dl, SrcVT);
8037         SDValue SignMask = DAG.getConstant(0x80000000, dl, DstVT);
8038         if (IsStrict) {
8039           // Sel = Src < 0x80000000
8040           // FltOfs = select Sel, 0.0, 0x80000000
8041           // IntOfs = select Sel, 0, 0x80000000
8042           // Result = fp_to_sint(Src - FltOfs) ^ IntOfs
8043           SDValue Chain = Op.getOperand(0);
8044           EVT SetCCVT =
8045               getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), SrcVT);
8046           EVT DstSetCCVT =
8047               getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), DstVT);
8048           SDValue Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT,
8049                                      Chain, true);
8050           Chain = Sel.getValue(1);
8051 
8052           SDValue FltOfs = DAG.getSelect(
8053               dl, SrcVT, Sel, DAG.getConstantFP(0.0, dl, SrcVT), Cst);
8054           Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT);
8055 
8056           SDValue Val = DAG.getNode(ISD::STRICT_FSUB, dl,
8057                                     DAG.getVTList(SrcVT, MVT::Other),
8058                                     {Chain, Src, FltOfs}, Flags);
8059           Chain = Val.getValue(1);
8060           SDValue SInt = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl,
8061                                      DAG.getVTList(DstVT, MVT::Other),
8062                                      {Chain, Val}, Flags);
8063           Chain = SInt.getValue(1);
8064           SDValue IntOfs = DAG.getSelect(
8065               dl, DstVT, Sel, DAG.getConstant(0, dl, DstVT), SignMask);
8066           SDValue Result = DAG.getNode(ISD::XOR, dl, DstVT, SInt, IntOfs);
8067           return DAG.getMergeValues({Result, Chain}, dl);
8068         } else {
8069           // X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X
8070           // FIXME: generated code sucks.
8071           SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128, Src, Cst);
8072           True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True);
8073           True = DAG.getNode(ISD::ADD, dl, MVT::i32, True, SignMask);
8074           SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
8075           return DAG.getSelectCC(dl, Src, Cst, True, False, ISD::SETGE);
8076         }
8077       }
8078     }
8079 
8080     return SDValue();
8081   }
8082 
8083   if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
8084     return LowerFP_TO_INTDirectMove(Op, DAG, dl);
8085 
8086   ReuseLoadInfo RLI;
8087   LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
8088 
8089   return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI,
8090                      RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
8091 }
8092 
8093 // We're trying to insert a regular store, S, and then a load, L. If the
8094 // incoming value, O, is a load, we might just be able to have our load use the
8095 // address used by O. However, we don't know if anything else will store to
8096 // that address before we can load from it. To prevent this situation, we need
8097 // to insert our load, L, into the chain as a peer of O. To do this, we give L
8098 // the same chain operand as O, we create a token factor from the chain results
8099 // of O and L, and we replace all uses of O's chain result with that token
8100 // factor (see spliceIntoChain below for this last part).
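//
// A sketch of the transformation (with made-up node names):
//   before: O = load ch0;  other users consume O's output chain
//   after:  L = load ch0;  TF = TokenFactor(O.chain, L.chain);
//           all former users of O's chain now consume TF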
8101 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
8102                                             ReuseLoadInfo &RLI,
8103                                             SelectionDAG &DAG,
8104                                             ISD::LoadExtType ET) const {
8105   // Conservatively skip reusing for constrained FP nodes.
8106   if (Op->isStrictFPOpcode())
8107     return false;
8108 
8109   SDLoc dl(Op);
8110   bool ValidFPToUint = Op.getOpcode() == ISD::FP_TO_UINT &&
8111                        (Subtarget.hasFPCVT() || Op.getValueType() == MVT::i32);
8112   if (ET == ISD::NON_EXTLOAD &&
8113       (ValidFPToUint || Op.getOpcode() == ISD::FP_TO_SINT) &&
8114       isOperationLegalOrCustom(Op.getOpcode(),
8115                                Op.getOperand(0).getValueType())) {
8116 
8117     LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
8118     return true;
8119   }
8120 
8121   LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
8122   if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
8123       LD->isNonTemporal())
8124     return false;
8125   if (LD->getMemoryVT() != MemVT)
8126     return false;
8127 
  // If the result of the load is an illegal type, then we can't build a
  // valid chain for reuse since the legalised loads and the token factor
  // node that ties them together use a different output chain than the
  // illegal load.
8132   if (!isTypeLegal(LD->getValueType(0)))
8133     return false;
8134 
8135   RLI.Ptr = LD->getBasePtr();
8136   if (LD->isIndexed() && !LD->getOffset().isUndef()) {
8137     assert(LD->getAddressingMode() == ISD::PRE_INC &&
8138            "Non-pre-inc AM on PPC?");
8139     RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
8140                           LD->getOffset());
8141   }
8142 
8143   RLI.Chain = LD->getChain();
8144   RLI.MPI = LD->getPointerInfo();
8145   RLI.IsDereferenceable = LD->isDereferenceable();
8146   RLI.IsInvariant = LD->isInvariant();
8147   RLI.Alignment = LD->getAlign();
8148   RLI.AAInfo = LD->getAAInfo();
8149   RLI.Ranges = LD->getRanges();
8150 
8151   RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
8152   return true;
8153 }
8154 
8155 // Given the head of the old chain, ResChain, insert a token factor containing
8156 // it and NewResChain, and make users of ResChain now be users of that token
8157 // factor.
8158 // TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead.
8159 void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
8160                                         SDValue NewResChain,
8161                                         SelectionDAG &DAG) const {
8162   if (!ResChain)
8163     return;
8164 
8165   SDLoc dl(NewResChain);
8166 
8167   SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
8168                            NewResChain, DAG.getUNDEF(MVT::Other));
8169   assert(TF.getNode() != NewResChain.getNode() &&
8170          "A new TF really is required here");
8171 
8172   DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
8173   DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
8174 }
8175 
/// Analyze the profitability of a direct move: prefer a float load over an
/// int load plus direct move when the loaded integer value has no integer
/// uses.
8179 bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
8180   SDNode *Origin = Op.getOperand(0).getNode();
8181   if (Origin->getOpcode() != ISD::LOAD)
8182     return true;
8183 
  // If there is no LXSIBZX/LXSIHZX (e.g. on Power8), prefer a direct move
  // if the memory access is only 1 or 2 bytes.
8186   MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand();
8187   if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2)
8188     return true;
8189 
8190   for (SDNode::use_iterator UI = Origin->use_begin(),
8191                             UE = Origin->use_end();
8192        UI != UE; ++UI) {
8193 
8194     // Only look at the users of the loaded value.
8195     if (UI.getUse().get().getResNo() != 0)
8196       continue;
8197 
8198     if (UI->getOpcode() != ISD::SINT_TO_FP &&
8199         UI->getOpcode() != ISD::UINT_TO_FP &&
8200         UI->getOpcode() != ISD::STRICT_SINT_TO_FP &&
8201         UI->getOpcode() != ISD::STRICT_UINT_TO_FP)
8202       return true;
8203   }
8204 
8205   return false;
8206 }
8207 
8208 static SDValue convertIntToFP(SDValue Op, SDValue Src, SelectionDAG &DAG,
8209                               const PPCSubtarget &Subtarget,
8210                               SDValue Chain = SDValue()) {
8211   bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP ||
8212                   Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
8213   SDLoc dl(Op);
8214 
8215   // TODO: Any other flags to propagate?
8216   SDNodeFlags Flags;
8217   Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
8218 
8219   // If we have FCFIDS, then use it when converting to single-precision.
8220   // Otherwise, convert to double-precision and then round.
8221   bool IsSingle = Op.getValueType() == MVT::f32 && Subtarget.hasFPCVT();
8222   unsigned ConvOpc = IsSingle ? (IsSigned ? PPCISD::FCFIDS : PPCISD::FCFIDUS)
8223                               : (IsSigned ? PPCISD::FCFID : PPCISD::FCFIDU);
8224   EVT ConvTy = IsSingle ? MVT::f32 : MVT::f64;
8225   if (Op->isStrictFPOpcode()) {
8226     if (!Chain)
8227       Chain = Op.getOperand(0);
8228     return DAG.getNode(getPPCStrictOpcode(ConvOpc), dl,
8229                        DAG.getVTList(ConvTy, MVT::Other), {Chain, Src}, Flags);
8230   } else
8231     return DAG.getNode(ConvOpc, dl, ConvTy, Src);
8232 }
8233 
8234 /// Custom lowers integer to floating point conversions to use
8235 /// the direct move instructions available in ISA 2.07 to avoid the
8236 /// need for load/store combinations.
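/// (Illustratively, a signed i64 to f64 conversion becomes roughly
/// "mtvsrd + fcfid" instead of a store/reload pair - a sketch; the exact
/// opcodes depend on the subtarget and value types.)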
8237 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
8238                                                     SelectionDAG &DAG,
8239                                                     const SDLoc &dl) const {
8240   assert((Op.getValueType() == MVT::f32 ||
8241           Op.getValueType() == MVT::f64) &&
8242          "Invalid floating point type as target of conversion");
8243   assert(Subtarget.hasFPCVT() &&
8244          "Int to FP conversions with direct moves require FPCVT");
8245   SDValue Src = Op.getOperand(Op->isStrictFPOpcode() ? 1 : 0);
8246   bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
8247   bool Signed = Op.getOpcode() == ISD::SINT_TO_FP ||
8248                 Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
8249   unsigned MovOpc = (WordInt && !Signed) ? PPCISD::MTVSRZ : PPCISD::MTVSRA;
8250   SDValue Mov = DAG.getNode(MovOpc, dl, MVT::f64, Src);
8251   return convertIntToFP(Op, Mov, DAG, Subtarget);
8252 }
8253 
8254 static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl) {
8255 
8256   EVT VecVT = Vec.getValueType();
8257   assert(VecVT.isVector() && "Expected a vector type.");
8258   assert(VecVT.getSizeInBits() < 128 && "Vector is already full width.");
8259 
8260   EVT EltVT = VecVT.getVectorElementType();
8261   unsigned WideNumElts = 128 / EltVT.getSizeInBits();
8262   EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
8263 
8264   unsigned NumConcat = WideNumElts / VecVT.getVectorNumElements();
8265   SmallVector<SDValue, 16> Ops(NumConcat);
8266   Ops[0] = Vec;
8267   SDValue UndefVec = DAG.getUNDEF(VecVT);
8268   for (unsigned i = 1; i < NumConcat; ++i)
8269     Ops[i] = UndefVec;
8270 
8271   return DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Ops);
8272 }
8273 
8274 SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
8275                                                 const SDLoc &dl) const {
8276   bool IsStrict = Op->isStrictFPOpcode();
8277   unsigned Opc = Op.getOpcode();
8278   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
8279   assert((Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP ||
8280           Opc == ISD::STRICT_UINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP) &&
8281          "Unexpected conversion type");
8282   assert((Op.getValueType() == MVT::v2f64 || Op.getValueType() == MVT::v4f32) &&
8283          "Supports conversions to v2f64/v4f32 only.");
8284 
8285   // TODO: Any other flags to propagate?
8286   SDNodeFlags Flags;
8287   Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
8288 
8289   bool SignedConv = Opc == ISD::SINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP;
8290   bool FourEltRes = Op.getValueType() == MVT::v4f32;
8291 
8292   SDValue Wide = widenVec(DAG, Src, dl);
8293   EVT WideVT = Wide.getValueType();
8294   unsigned WideNumElts = WideVT.getVectorNumElements();
8295   MVT IntermediateVT = FourEltRes ? MVT::v4i32 : MVT::v2i64;
8296 
8297   SmallVector<int, 16> ShuffV;
8298   for (unsigned i = 0; i < WideNumElts; ++i)
8299     ShuffV.push_back(i + WideNumElts);
8300 
8301   int Stride = FourEltRes ? WideNumElts / 4 : WideNumElts / 2;
8302   int SaveElts = FourEltRes ? 4 : 2;
8303   if (Subtarget.isLittleEndian())
8304     for (int i = 0; i < SaveElts; i++)
8305       ShuffV[i * Stride] = i;
8306   else
8307     for (int i = 1; i <= SaveElts; i++)
8308       ShuffV[i * Stride - 1] = i - 1;
8309 
8310   SDValue ShuffleSrc2 =
8311       SignedConv ? DAG.getUNDEF(WideVT) : DAG.getConstant(0, dl, WideVT);
8312   SDValue Arrange = DAG.getVectorShuffle(WideVT, dl, Wide, ShuffleSrc2, ShuffV);
8313 
8314   SDValue Extend;
8315   if (SignedConv) {
8316     Arrange = DAG.getBitcast(IntermediateVT, Arrange);
8317     EVT ExtVT = Src.getValueType();
8318     if (Subtarget.hasP9Altivec())
8319       ExtVT = EVT::getVectorVT(*DAG.getContext(), WideVT.getVectorElementType(),
8320                                IntermediateVT.getVectorNumElements());
8321 
8322     Extend = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, IntermediateVT, Arrange,
8323                          DAG.getValueType(ExtVT));
8324   } else
8325     Extend = DAG.getNode(ISD::BITCAST, dl, IntermediateVT, Arrange);
8326 
8327   if (IsStrict)
8328     return DAG.getNode(Opc, dl, DAG.getVTList(Op.getValueType(), MVT::Other),
8329                        {Op.getOperand(0), Extend}, Flags);
8330 
8331   return DAG.getNode(Opc, dl, Op.getValueType(), Extend);
8332 }
8333 
8334 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
8335                                           SelectionDAG &DAG) const {
8336   SDLoc dl(Op);
8337   bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP ||
8338                   Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
8339   bool IsStrict = Op->isStrictFPOpcode();
8340   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
8341   SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
8342 
8343   // TODO: Any other flags to propagate?
8344   SDNodeFlags Flags;
8345   Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
8346 
8347   EVT InVT = Src.getValueType();
8348   EVT OutVT = Op.getValueType();
8349   if (OutVT.isVector() && OutVT.isFloatingPoint() &&
8350       isOperationCustom(Op.getOpcode(), InVT))
8351     return LowerINT_TO_FPVector(Op, DAG, dl);
8352 
  // Conversions to f128 are legal only with P9 vector support.
8354   if (Op.getValueType() == MVT::f128)
8355     return Subtarget.hasP9Vector() ? Op : SDValue();
8356 
8357   // Don't handle ppc_fp128 here; let it be lowered to a libcall.
8358   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
8359     return SDValue();
8360 
8361   if (Src.getValueType() == MVT::i1) {
8362     SDValue Sel = DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Src,
8363                               DAG.getConstantFP(1.0, dl, Op.getValueType()),
8364                               DAG.getConstantFP(0.0, dl, Op.getValueType()));
8365     if (IsStrict)
8366       return DAG.getMergeValues({Sel, Chain}, dl);
8367     else
8368       return Sel;
8369   }
8370 
  // If we have direct moves, we can do all the conversion and skip the
  // store/load; however, without FPCVT we can't do most conversions.
8373   if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
8374       Subtarget.isPPC64() && Subtarget.hasFPCVT())
8375     return LowerINT_TO_FPDirectMove(Op, DAG, dl);
8376 
8377   assert((IsSigned || Subtarget.hasFPCVT()) &&
8378          "UINT_TO_FP is supported only with FPCVT");
8379 
8380   if (Src.getValueType() == MVT::i64) {
8381     SDValue SINT = Src;
8382     // When converting to single-precision, we actually need to convert
8383     // to double-precision first and then round to single-precision.
8384     // To avoid double-rounding effects during that operation, we have
8385     // to prepare the input operand.  Bits that might be truncated when
8386     // converting to double-precision are replaced by a bit that won't
8387     // be lost at this stage, but is below the single-precision rounding
8388     // position.
8389     //
8390     // However, if -enable-unsafe-fp-math is in effect, accept double
8391     // rounding to avoid the extra overhead.
8392     if (Op.getValueType() == MVT::f32 &&
8393         !Subtarget.hasFPCVT() &&
8394         !DAG.getTarget().Options.UnsafeFPMath) {
8395 
8396       // Twiddle input to make sure the low 11 bits are zero.  (If this
8397       // is the case, we are guaranteed the value will fit into the 53 bit
8398       // mantissa of an IEEE double-precision value without rounding.)
8399       // If any of those low 11 bits were not zero originally, make sure
8400       // bit 12 (value 2048) is set instead, so that the final rounding
8401       // to single-precision gets the correct result.
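      // (Worked example: if any of the low 11 bits of SINT are set,
      // (SINT & 2047) + 2047 is at least 2048, so the OR below plants bit 11
      // and the final AND with -2048 clears bits 0-10. If the low 11 bits
      // are all zero, the intermediate value is 2047 and the final AND
      // leaves SINT unchanged.)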
8402       SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64,
8403                                   SINT, DAG.getConstant(2047, dl, MVT::i64));
8404       Round = DAG.getNode(ISD::ADD, dl, MVT::i64,
8405                           Round, DAG.getConstant(2047, dl, MVT::i64));
8406       Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT);
8407       Round = DAG.getNode(ISD::AND, dl, MVT::i64,
8408                           Round, DAG.getConstant(-2048, dl, MVT::i64));
8409 
8410       // However, we cannot use that value unconditionally: if the magnitude
8411       // of the input value is small, the bit-twiddling we did above might
8412       // end up visibly changing the output.  Fortunately, in that case, we
8413       // don't need to twiddle bits since the original input will convert
8414       // exactly to double-precision floating-point already.  Therefore,
8415       // construct a conditional to use the original value if the top 11
8416       // bits are all sign-bit copies, and use the rounded value computed
8417       // above otherwise.
8418       SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64,
8419                                  SINT, DAG.getConstant(53, dl, MVT::i32));
8420       Cond = DAG.getNode(ISD::ADD, dl, MVT::i64,
8421                          Cond, DAG.getConstant(1, dl, MVT::i64));
8422       Cond = DAG.getSetCC(
8423           dl,
8424           getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
8425           Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT);
8426 
8427       SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT);
8428     }
8429 
8430     ReuseLoadInfo RLI;
8431     SDValue Bits;
8432 
8433     MachineFunction &MF = DAG.getMachineFunction();
8434     if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
8435       Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI,
8436                          RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
8437       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8438     } else if (Subtarget.hasLFIWAX() &&
8439                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) {
8440       MachineMemOperand *MMO =
8441         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8442                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8443       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8444       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl,
8445                                      DAG.getVTList(MVT::f64, MVT::Other),
8446                                      Ops, MVT::i32, MMO);
8447       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8448     } else if (Subtarget.hasFPCVT() &&
8449                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) {
8450       MachineMemOperand *MMO =
8451         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8452                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8453       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8454       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl,
8455                                      DAG.getVTList(MVT::f64, MVT::Other),
8456                                      Ops, MVT::i32, MMO);
8457       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8458     } else if (((Subtarget.hasLFIWAX() &&
8459                  SINT.getOpcode() == ISD::SIGN_EXTEND) ||
8460                 (Subtarget.hasFPCVT() &&
8461                  SINT.getOpcode() == ISD::ZERO_EXTEND)) &&
8462                SINT.getOperand(0).getValueType() == MVT::i32) {
8463       MachineFrameInfo &MFI = MF.getFrameInfo();
8464       EVT PtrVT = getPointerTy(DAG.getDataLayout());
8465 
8466       int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
8467       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8468 
8469       SDValue Store = DAG.getStore(Chain, dl, SINT.getOperand(0), FIdx,
8470                                    MachinePointerInfo::getFixedStack(
8471                                        DAG.getMachineFunction(), FrameIdx));
8472       Chain = Store;
8473 
8474       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8475              "Expected an i32 store");
8476 
8477       RLI.Ptr = FIdx;
8478       RLI.Chain = Chain;
8479       RLI.MPI =
8480           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8481       RLI.Alignment = Align(4);
8482 
8483       MachineMemOperand *MMO =
8484         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8485                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8486       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8487       Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ?
8488                                      PPCISD::LFIWZX : PPCISD::LFIWAX,
8489                                      dl, DAG.getVTList(MVT::f64, MVT::Other),
8490                                      Ops, MVT::i32, MMO);
8491       Chain = Bits.getValue(1);
8492     } else
8493       Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);
8494 
8495     SDValue FP = convertIntToFP(Op, Bits, DAG, Subtarget, Chain);
8496     if (IsStrict)
8497       Chain = FP.getValue(1);
8498 
8499     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
8500       if (IsStrict)
8501         FP = DAG.getNode(ISD::STRICT_FP_ROUND, dl,
8502                          DAG.getVTList(MVT::f32, MVT::Other),
8503                          {Chain, FP, DAG.getIntPtrConstant(0, dl)}, Flags);
8504       else
8505         FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
8506                          DAG.getIntPtrConstant(0, dl));
8507     }
8508     return FP;
8509   }
8510 
8511   assert(Src.getValueType() == MVT::i32 &&
8512          "Unhandled INT_TO_FP type in custom expander!");
  // Since we only generate this in 64-bit mode, we can take advantage of
  // 64-bit registers.  In particular, sign-extend the input value into a
  // 64-bit register with extsw, store the WHOLE 64-bit value onto the stack,
  // then lfd it and fcfid it.
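  // (Illustratively: "extsw; std; lfd; fcfid" - a sketch of the idea rather
  // than the exact emitted sequence.)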
8517   MachineFunction &MF = DAG.getMachineFunction();
8518   MachineFrameInfo &MFI = MF.getFrameInfo();
8519   EVT PtrVT = getPointerTy(MF.getDataLayout());
8520 
8521   SDValue Ld;
8522   if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
8523     ReuseLoadInfo RLI;
8524     bool ReusingLoad;
8525     if (!(ReusingLoad = canReuseLoadAddress(Src, MVT::i32, RLI, DAG))) {
8526       int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
8527       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8528 
8529       SDValue Store = DAG.getStore(Chain, dl, Src, FIdx,
8530                                    MachinePointerInfo::getFixedStack(
8531                                        DAG.getMachineFunction(), FrameIdx));
8532       Chain = Store;
8533 
8534       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8535              "Expected an i32 store");
8536 
8537       RLI.Ptr = FIdx;
8538       RLI.Chain = Chain;
8539       RLI.MPI =
8540           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8541       RLI.Alignment = Align(4);
8542     }
8543 
8544     MachineMemOperand *MMO =
8545       MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8546                               RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8547     SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8548     Ld = DAG.getMemIntrinsicNode(IsSigned ? PPCISD::LFIWAX : PPCISD::LFIWZX, dl,
8549                                  DAG.getVTList(MVT::f64, MVT::Other), Ops,
8550                                  MVT::i32, MMO);
8551     Chain = Ld.getValue(1);
8552     if (ReusingLoad)
8553       spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
8554   } else {
8555     assert(Subtarget.isPPC64() &&
8556            "i32->FP without LFIWAX supported only on PPC64");
8557 
8558     int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
8559     SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8560 
8561     SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, Src);
8562 
8563     // STD the extended value into the stack slot.
8564     SDValue Store = DAG.getStore(
8565         Chain, dl, Ext64, FIdx,
8566         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8567     Chain = Store;
8568 
8569     // Load the value as a double.
8570     Ld = DAG.getLoad(
8571         MVT::f64, dl, Chain, FIdx,
8572         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8573     Chain = Ld.getValue(1);
8574   }
8575 
8576   // FCFID it and return it.
8577   SDValue FP = convertIntToFP(Op, Ld, DAG, Subtarget, Chain);
8578   if (IsStrict)
8579     Chain = FP.getValue(1);
8580   if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
8581     if (IsStrict)
8582       FP = DAG.getNode(ISD::STRICT_FP_ROUND, dl,
8583                        DAG.getVTList(MVT::f32, MVT::Other),
8584                        {Chain, FP, DAG.getIntPtrConstant(0, dl)}, Flags);
8585     else
8586       FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
8587                        DAG.getIntPtrConstant(0, dl));
8588   }
8589   return FP;
8590 }
8591 
8592 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
8593                                             SelectionDAG &DAG) const {
8594   SDLoc dl(Op);
8595   /*
   The rounding mode is in bits 30:31 of the FPSCR, and has the following
8597    settings:
8598      00 Round to nearest
8599      01 Round to 0
8600      10 Round to +inf
8601      11 Round to -inf
8602 
8603   FLT_ROUNDS, on the other hand, expects the following:
8604     -1 Undefined
8605      0 Round to 0
8606      1 Round to nearest
8607      2 Round to +inf
8608      3 Round to -inf
8609 
8610   To perform the conversion, we do:
8611     ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
8612   */
8613 
8614   MachineFunction &MF = DAG.getMachineFunction();
8615   EVT VT = Op.getValueType();
8616   EVT PtrVT = getPointerTy(MF.getDataLayout());
8617 
8618   // Save FP Control Word to register
8619   SDValue Chain = Op.getOperand(0);
8620   SDValue MFFS = DAG.getNode(PPCISD::MFFS, dl, {MVT::f64, MVT::Other}, Chain);
8621   Chain = MFFS.getValue(1);
8622 
8623   SDValue CWD;
8624   if (isTypeLegal(MVT::i64)) {
8625     CWD = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
8626                       DAG.getNode(ISD::BITCAST, dl, MVT::i64, MFFS));
8627   } else {
8628     // Save FP register to stack slot
8629     int SSFI = MF.getFrameInfo().CreateStackObject(8, Align(8), false);
8630     SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
8631     Chain = DAG.getStore(Chain, dl, MFFS, StackSlot, MachinePointerInfo());
8632 
8633     // Load FP Control Word from low 32 bits of stack slot.
8634     assert(hasBigEndianPartOrdering(MVT::i64, MF.getDataLayout()) &&
8635            "Stack slot adjustment is valid only on big endian subtargets!");
8636     SDValue Four = DAG.getConstant(4, dl, PtrVT);
8637     SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
8638     CWD = DAG.getLoad(MVT::i32, dl, Chain, Addr, MachinePointerInfo());
8639     Chain = CWD.getValue(1);
8640   }
8641 
8642   // Transform as necessary
8643   SDValue CWD1 =
8644     DAG.getNode(ISD::AND, dl, MVT::i32,
8645                 CWD, DAG.getConstant(3, dl, MVT::i32));
8646   SDValue CWD2 =
8647     DAG.getNode(ISD::SRL, dl, MVT::i32,
8648                 DAG.getNode(ISD::AND, dl, MVT::i32,
8649                             DAG.getNode(ISD::XOR, dl, MVT::i32,
8650                                         CWD, DAG.getConstant(3, dl, MVT::i32)),
8651                             DAG.getConstant(3, dl, MVT::i32)),
8652                 DAG.getConstant(1, dl, MVT::i32));
8653 
8654   SDValue RetVal =
8655     DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
8656 
8657   RetVal =
8658       DAG.getNode((VT.getSizeInBits() < 16 ? ISD::TRUNCATE : ISD::ZERO_EXTEND),
8659                   dl, VT, RetVal);
8660 
8661   return DAG.getMergeValues({RetVal, Chain}, dl);
8662 }
8663 
8664 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
8665   EVT VT = Op.getValueType();
8666   unsigned BitWidth = VT.getSizeInBits();
8667   SDLoc dl(Op);
8668   assert(Op.getNumOperands() == 3 &&
8669          VT == Op.getOperand(1).getValueType() &&
8670          "Unexpected SHL!");
8671 
8672   // Expand into a bunch of logical ops.  Note that these ops
8673   // depend on the PPC behavior for oversized shift amounts.
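  // The sequence below computes (writing BW for BitWidth):
  //   OutHi = (Hi << Amt) | (Lo >> (BW - Amt)) | (Lo << (Amt - BW))
  //   OutLo = Lo << Amt
  // relying on PPC shifts producing zero for amounts in [BW, 2*BW), so the
  // terms that do not apply for a given Amt contribute nothing to the OR.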
8674   SDValue Lo = Op.getOperand(0);
8675   SDValue Hi = Op.getOperand(1);
8676   SDValue Amt = Op.getOperand(2);
8677   EVT AmtVT = Amt.getValueType();
8678 
8679   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8680                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8681   SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
8682   SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
8683   SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3);
8684   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8685                              DAG.getConstant(-BitWidth, dl, AmtVT));
8686   SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
8687   SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
8688   SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
8689   SDValue OutOps[] = { OutLo, OutHi };
8690   return DAG.getMergeValues(OutOps, dl);
8691 }
8692 
8693 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
8694   EVT VT = Op.getValueType();
8695   SDLoc dl(Op);
8696   unsigned BitWidth = VT.getSizeInBits();
8697   assert(Op.getNumOperands() == 3 &&
8698          VT == Op.getOperand(1).getValueType() &&
8699          "Unexpected SRL!");
8700 
8701   // Expand into a bunch of logical ops.  Note that these ops
8702   // depend on the PPC behavior for oversized shift amounts.
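  // Mirror image of LowerSHL_PARTS (writing BW for BitWidth):
  //   OutLo = (Lo >> Amt) | (Hi << (BW - Amt)) | (Hi >> (Amt - BW))
  //   OutHi = Hi >> Amt
  // again relying on PPC shifts returning zero for out-of-range amounts.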
8703   SDValue Lo = Op.getOperand(0);
8704   SDValue Hi = Op.getOperand(1);
8705   SDValue Amt = Op.getOperand(2);
8706   EVT AmtVT = Amt.getValueType();
8707 
8708   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8709                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8710   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
8711   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
8712   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
8713   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8714                              DAG.getConstant(-BitWidth, dl, AmtVT));
8715   SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
8716   SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
8717   SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
8718   SDValue OutOps[] = { OutLo, OutHi };
8719   return DAG.getMergeValues(OutOps, dl);
8720 }
8721 
8722 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
8723   SDLoc dl(Op);
8724   EVT VT = Op.getValueType();
8725   unsigned BitWidth = VT.getSizeInBits();
8726   assert(Op.getNumOperands() == 3 &&
8727          VT == Op.getOperand(1).getValueType() &&
8728          "Unexpected SRA!");
8729 
8730   // Expand into a bunch of logical ops, followed by a select_cc.
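  // The difference from LowerSRL_PARTS is OutLo: for Amt > BW the low part
  // must be sign-filled from Hi, which an OR of shifts cannot express, hence
  // the select on (Amt - BW):
  //   Amt - BW <= 0: OutLo = (Lo >> Amt) | (Hi << (BW - Amt))
  //   Amt - BW >  0: OutLo = Hi >>a (Amt - BW)   (arithmetic shift)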
8731   SDValue Lo = Op.getOperand(0);
8732   SDValue Hi = Op.getOperand(1);
8733   SDValue Amt = Op.getOperand(2);
8734   EVT AmtVT = Amt.getValueType();
8735 
8736   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8737                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8738   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
8739   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
8740   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
8741   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8742                              DAG.getConstant(-BitWidth, dl, AmtVT));
8743   SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
8744   SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
8745   SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT),
8746                                   Tmp4, Tmp6, ISD::SETLE);
8747   SDValue OutOps[] = { OutLo, OutHi };
8748   return DAG.getMergeValues(OutOps, dl);
8749 }
8750 
8751 SDValue PPCTargetLowering::LowerFunnelShift(SDValue Op,
8752                                             SelectionDAG &DAG) const {
8753   SDLoc dl(Op);
8754   EVT VT = Op.getValueType();
8755   unsigned BitWidth = VT.getSizeInBits();
8756 
8757   bool IsFSHL = Op.getOpcode() == ISD::FSHL;
8758   SDValue X = Op.getOperand(0);
8759   SDValue Y = Op.getOperand(1);
8760   SDValue Z = Op.getOperand(2);
8761   EVT AmtVT = Z.getValueType();
8762 
8763   // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
8764   // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
8765   // This is simpler than TargetLowering::expandFunnelShift because we can rely
8766   // on PowerPC shift by BW being well defined.
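  // For example, an i32 fshl with Z == 40 masks the amount to 8 and computes
  // (X << 8) | (Y >> 24); when the masked amount is 0, SubZ == BitWidth and
  // the shift by BitWidth contributes zero, leaving just X.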
8767   Z = DAG.getNode(ISD::AND, dl, AmtVT, Z,
8768                   DAG.getConstant(BitWidth - 1, dl, AmtVT));
8769   SDValue SubZ =
8770       DAG.getNode(ISD::SUB, dl, AmtVT, DAG.getConstant(BitWidth, dl, AmtVT), Z);
8771   X = DAG.getNode(PPCISD::SHL, dl, VT, X, IsFSHL ? Z : SubZ);
8772   Y = DAG.getNode(PPCISD::SRL, dl, VT, Y, IsFSHL ? SubZ : Z);
8773   return DAG.getNode(ISD::OR, dl, VT, X, Y);
8774 }
8775 
8776 //===----------------------------------------------------------------------===//
8777 // Vector related lowering.
8778 //
8779 
8780 /// getCanonicalConstSplat - Build a canonical splat immediate of Val with an
8781 /// element size of SplatSize. Cast the result to VT.
8782 static SDValue getCanonicalConstSplat(uint64_t Val, unsigned SplatSize, EVT VT,
8783                                       SelectionDAG &DAG, const SDLoc &dl) {
8784   static const MVT VTys[] = { // canonical VT to use for each size.
8785     MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
8786   };
8787 
8788   EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
8789 
8790   // For a splat with all ones, turn it to vspltisb 0xFF to canonicalize.
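  // E.g. a v8i16 all-ones splat (Val == 0xFFFF, SplatSize == 2) becomes a
  // one-byte splat of 0xFF, selecting a single vspltisb -1.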
8791   if (Val == ((1LLU << (SplatSize * 8)) - 1)) {
8792     SplatSize = 1;
8793     Val = 0xFF;
8794   }
8795 
8796   EVT CanonicalVT = VTys[SplatSize-1];
8797 
8798   // Build a canonical splat for this value.
8799   return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT));
8800 }
8801 
8802 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the
8803 /// specified intrinsic ID.
8804 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG,
8805                                 const SDLoc &dl, EVT DestVT = MVT::Other) {
8806   if (DestVT == MVT::Other) DestVT = Op.getValueType();
8807   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8808                      DAG.getConstant(IID, dl, MVT::i32), Op);
8809 }
8810 
8811 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the
8812 /// specified intrinsic ID.
8813 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
8814                                 SelectionDAG &DAG, const SDLoc &dl,
8815                                 EVT DestVT = MVT::Other) {
8816   if (DestVT == MVT::Other) DestVT = LHS.getValueType();
8817   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8818                      DAG.getConstant(IID, dl, MVT::i32), LHS, RHS);
8819 }
8820 
8821 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
8822 /// specified intrinsic ID.
8823 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
8824                                 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl,
8825                                 EVT DestVT = MVT::Other) {
8826   if (DestVT == MVT::Other) DestVT = Op0.getValueType();
8827   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8828                      DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
8829 }
8830 
8831 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
8832 /// amount.  The result has the specified value type.
8833 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT,
8834                            SelectionDAG &DAG, const SDLoc &dl) {
8835   // Force LHS/RHS to be the right type.
8836   LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
8837   RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);
8838 
8839   int Ops[16];
8840   for (unsigned i = 0; i != 16; ++i)
8841     Ops[i] = i + Amt;
8842   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
8843   return DAG.getNode(ISD::BITCAST, dl, VT, T);
8844 }
8845 
8846 /// Do we have an efficient pattern in a .td file for this node?
8847 ///
8848 /// \param V - pointer to the BuildVectorSDNode being matched
/// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves?
/// \param HasP8Vector - does this subtarget have the P8 Vector feature?
8850 ///
8851 /// There are some patterns where it is beneficial to keep a BUILD_VECTOR
8852 /// node as a BUILD_VECTOR node rather than expanding it. The patterns where
8853 /// the opposite is true (expansion is beneficial) are:
/// - The node builds a vector out of integers that are not 32 or 64 bits wide
8855 /// - The node builds a vector out of constants
8856 /// - The node is a "load-and-splat"
8857 /// In all other cases, we will choose to keep the BUILD_VECTOR.
8858 static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V,
8859                                             bool HasDirectMove,
8860                                             bool HasP8Vector) {
8861   EVT VecVT = V->getValueType(0);
8862   bool RightType = VecVT == MVT::v2f64 ||
8863     (HasP8Vector && VecVT == MVT::v4f32) ||
8864     (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32));
8865   if (!RightType)
8866     return false;
8867 
8868   bool IsSplat = true;
8869   bool IsLoad = false;
8870   SDValue Op0 = V->getOperand(0);
8871 
8872   // This function is called in a block that confirms the node is not a constant
8873   // splat. So a constant BUILD_VECTOR here means the vector is built out of
8874   // different constants.
8875   if (V->isConstant())
8876     return false;
8877   for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
8878     if (V->getOperand(i).isUndef())
8879       return false;
8880     // We want to expand nodes that represent load-and-splat even if the
8881     // loaded value is a floating point truncation or conversion to int.
8882     if (V->getOperand(i).getOpcode() == ISD::LOAD ||
8883         (V->getOperand(i).getOpcode() == ISD::FP_ROUND &&
8884          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
8885         (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT &&
8886          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
8887         (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT &&
8888          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD))
8889       IsLoad = true;
8890     // If the operands are different or the input is not a load and has more
8891     // uses than just this BV node, then it isn't a splat.
8892     if (V->getOperand(i) != Op0 ||
8893         (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode())))
8894       IsSplat = false;
8895   }
8896   return !(IsSplat && IsLoad);
8897 }
8898 
8899 // Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128.
8900 SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
8901 
8902   SDLoc dl(Op);
8903   SDValue Op0 = Op->getOperand(0);
8904 
8905   if ((Op.getValueType() != MVT::f128) ||
8906       (Op0.getOpcode() != ISD::BUILD_PAIR) ||
8907       (Op0.getOperand(0).getValueType() != MVT::i64) ||
8908       (Op0.getOperand(1).getValueType() != MVT::i64))
8909     return SDValue();
8910 
8911   return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0),
8912                      Op0.getOperand(1));
8913 }
8914 
8915 static const SDValue *getNormalLoadInput(const SDValue &Op, bool &IsPermuted) {
8916   const SDValue *InputLoad = &Op;
8917   if (InputLoad->getOpcode() == ISD::BITCAST)
8918     InputLoad = &InputLoad->getOperand(0);
8919   if (InputLoad->getOpcode() == ISD::SCALAR_TO_VECTOR ||
8920       InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED) {
8921     IsPermuted = InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED;
8922     InputLoad = &InputLoad->getOperand(0);
8923   }
8924   if (InputLoad->getOpcode() != ISD::LOAD)
8925     return nullptr;
8926   LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
8927   return ISD::isNormalLoad(LD) ? InputLoad : nullptr;
8928 }
8929 
8930 // Convert the argument APFloat to a single precision APFloat if there is no
8931 // loss in information during the conversion to single precision APFloat and the
8932 // resulting number is not a denormal number. Return true if successful.
8933 bool llvm::convertToNonDenormSingle(APFloat &ArgAPFloat) {
8934   APFloat APFloatToConvert = ArgAPFloat;
8935   bool LosesInfo = true;
8936   APFloatToConvert.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
8937                            &LosesInfo);
8938   bool Success = (!LosesInfo && !APFloatToConvert.isDenormal());
8939   if (Success)
8940     ArgAPFloat = APFloatToConvert;
8941   return Success;
8942 }
8943 
8944 // Bitcast the argument APInt to a double and convert it to a single precision
8945 // APFloat, bitcast the APFloat to an APInt and assign it to the original
8946 // argument if there is no loss in information during the conversion from
8947 // double to single precision APFloat and the resulting number is not a denormal
8948 // number. Return true if successful.
8949 bool llvm::convertToNonDenormSingle(APInt &ArgAPInt) {
8950   double DpValue = ArgAPInt.bitsToDouble();
8951   APFloat APFloatDp(DpValue);
8952   bool Success = convertToNonDenormSingle(APFloatDp);
8953   if (Success)
8954     ArgAPInt = APFloatDp.bitcastToAPInt();
8955   return Success;
8956 }
8957 
// Nondestructive check for convertToNonDenormSingle.
8959 bool llvm::checkConvertToNonDenormSingle(APFloat &ArgAPFloat) {
  // Only report success when the conversion loses no information, since
  // that is the case XXSPLTIDP can handle.
8962   APFloat APFloatToConvert = ArgAPFloat;
8963   bool LosesInfo = true;
8964   APFloatToConvert.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
8965                            &LosesInfo);
8966 
8967   return (!LosesInfo && !APFloatToConvert.isDenormal());
8968 }
8969 
8970 // If this is a case we can't handle, return null and let the default
8971 // expansion code take care of it.  If we CAN select this case, and if it
8972 // selects to a single instruction, return Op.  Otherwise, if we can codegen
8973 // this case more efficiently than a constant pool load, lower it to the
8974 // sequence of ops that should be used.
8975 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
8976                                              SelectionDAG &DAG) const {
8977   SDLoc dl(Op);
8978   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
8979   assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
8980 
8981   // Check if this is a splat of a constant value.
8982   APInt APSplatBits, APSplatUndef;
8983   unsigned SplatBitSize;
8984   bool HasAnyUndefs;
8985   bool BVNIsConstantSplat =
8986       BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
8987                            HasAnyUndefs, 0, !Subtarget.isLittleEndian());
8988 
8989   // If it is a splat of a double, check if we can shrink it to a 32 bit
8990   // non-denormal float which when converted back to double gives us the same
8991   // double. This is to exploit the XXSPLTIDP instruction.
8992   // If we lose precision, we use XXSPLTI32DX.
8993   if (BVNIsConstantSplat && (SplatBitSize == 64) &&
8994       Subtarget.hasPrefixInstrs()) {
8995     // Check the type first to short-circuit so we don't modify APSplatBits if
8996     // this block isn't executed.
8997     if ((Op->getValueType(0) == MVT::v2f64) &&
8998         convertToNonDenormSingle(APSplatBits)) {
8999       SDValue SplatNode = DAG.getNode(
9000           PPCISD::XXSPLTI_SP_TO_DP, dl, MVT::v2f64,
9001           DAG.getTargetConstant(APSplatBits.getZExtValue(), dl, MVT::i32));
9002       return DAG.getBitcast(Op.getValueType(), SplatNode);
9003     } else {
9004       // We may lose precision, so we have to use XXSPLTI32DX.
9005 
9006       uint32_t Hi =
9007           (uint32_t)((APSplatBits.getZExtValue() & 0xFFFFFFFF00000000LL) >> 32);
9008       uint32_t Lo =
9009           (uint32_t)(APSplatBits.getZExtValue() & 0xFFFFFFFF);
9010       SDValue SplatNode = DAG.getUNDEF(MVT::v2i64);
9011 
9012       if (!Hi || !Lo)
        // If either word is 0, generate XXLXOR to set that half to 0.
9014         SplatNode = DAG.getTargetConstant(0, dl, MVT::v2i64);
9015 
9016       if (Hi)
9017         SplatNode = DAG.getNode(
9018             PPCISD::XXSPLTI32DX, dl, MVT::v2i64, SplatNode,
9019             DAG.getTargetConstant(0, dl, MVT::i32),
9020             DAG.getTargetConstant(Hi, dl, MVT::i32));
9021 
9022       if (Lo)
9023         SplatNode =
9024             DAG.getNode(PPCISD::XXSPLTI32DX, dl, MVT::v2i64, SplatNode,
9025                         DAG.getTargetConstant(1, dl, MVT::i32),
9026                         DAG.getTargetConstant(Lo, dl, MVT::i32));
9027 
9028       return DAG.getBitcast(Op.getValueType(), SplatNode);
9029     }
9030   }
9031 
9032   if (!BVNIsConstantSplat || SplatBitSize > 32) {
9033 
9034     bool IsPermutedLoad = false;
9035     const SDValue *InputLoad =
9036         getNormalLoadInput(Op.getOperand(0), IsPermutedLoad);
9037     // Handle load-and-splat patterns as we have instructions that will do this
9038     // in one go.
9039     if (InputLoad && DAG.isSplatValue(Op, true)) {
9040       LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9041 
9042       // We have handling for 4 and 8 byte elements.
9043       unsigned ElementSize = LD->getMemoryVT().getScalarSizeInBits();
9044 
9045       // Checking for a single use of this load, we have to check for vector
9046       // width (128 bits) / ElementSize uses (since each operand of the
      // BUILD_VECTOR is a separate use of the value).
9048       unsigned NumUsesOfInputLD = 128 / ElementSize;
9049       for (SDValue BVInOp : Op->ops())
9050         if (BVInOp.isUndef())
9051           NumUsesOfInputLD--;
9052       assert(NumUsesOfInputLD > 0 && "No uses of input LD of a build_vector?");
9053       if (InputLoad->getNode()->hasNUsesOfValue(NumUsesOfInputLD, 0) &&
9054           ((Subtarget.hasVSX() && ElementSize == 64) ||
9055            (Subtarget.hasP9Vector() && ElementSize == 32))) {
9056         SDValue Ops[] = {
9057           LD->getChain(),    // Chain
9058           LD->getBasePtr(),  // Ptr
9059           DAG.getValueType(Op.getValueType()) // VT
9060         };
9061         SDValue LdSplt = DAG.getMemIntrinsicNode(
9062             PPCISD::LD_SPLAT, dl, DAG.getVTList(Op.getValueType(), MVT::Other),
9063             Ops, LD->getMemoryVT(), LD->getMemOperand());
9064         // Replace all uses of the output chain of the original load with the
9065         // output chain of the new load.
9066         DAG.ReplaceAllUsesOfValueWith(InputLoad->getValue(1),
9067                                       LdSplt.getValue(1));
9068         return LdSplt;
9069       }
9070     }
9071 
    // In 64-bit mode BUILD_VECTOR nodes that are not constant splats of up to
9073     // 32-bits can be lowered to VSX instructions under certain conditions.
9074     // Without VSX, there is no pattern more efficient than expanding the node.
9075     if (Subtarget.hasVSX() && Subtarget.isPPC64() &&
9076         haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
9077                                         Subtarget.hasP8Vector()))
9078       return Op;
9079     return SDValue();
9080   }
9081 
9082   uint64_t SplatBits = APSplatBits.getZExtValue();
9083   uint64_t SplatUndef = APSplatUndef.getZExtValue();
9084   unsigned SplatSize = SplatBitSize / 8;
9085 
9086   // First, handle single instruction cases.
9087 
9088   // All zeros?
9089   if (SplatBits == 0) {
9090     // Canonicalize all zero vectors to be v4i32.
9091     if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
9092       SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
9093       Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
9094     }
9095     return Op;
9096   }
9097 
9098   // We have XXSPLTIW for constant splats four bytes wide.
  // Since the vector length is a multiple of 4, 2-byte splats can be replaced
  // with 4-byte splats: we replicate SplatBits to form a 4-byte splat
  // element. For example, a 2-byte splat of 0xABAB becomes a 4-byte splat of
  // 0xABABABAB.
9103   if (Subtarget.hasPrefixInstrs() && SplatSize == 2)
9104     return getCanonicalConstSplat(SplatBits | (SplatBits << 16), SplatSize * 2,
9105                                   Op.getValueType(), DAG, dl);
9106 
9107   if (Subtarget.hasPrefixInstrs() && SplatSize == 4)
9108     return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
9109                                   dl);
9110 
9111   // We have XXSPLTIB for constant splats one byte wide.
9112   if (Subtarget.hasP9Vector() && SplatSize == 1)
9113     return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
9114                                   dl);
9115 
9116   // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
9117   int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >>
9118                     (32-SplatBitSize));
9119   if (SextVal >= -16 && SextVal <= 15)
9120     return getCanonicalConstSplat(SextVal, SplatSize, Op.getValueType(), DAG,
9121                                   dl);
9122 
9123   // Two instruction sequences.
9124 
9125   // If this value is in the range [-32,30] and is even, use:
9126   //     VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
9127   // If this value is in the range [17,31] and is odd, use:
9128   //     VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
9129   // If this value is in the range [-31,-17] and is odd, use:
9130   //     VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
9131   // Note the last two are three-instruction sequences.
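  // For example, SextVal == 30 becomes vspltisw(15) + vspltisw(15), and
  // SextVal == 27 becomes vspltisw(11) - vspltisw(-16), i.e. 11 + 16.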
9132   if (SextVal >= -32 && SextVal <= 31) {
9133     // To avoid having these optimizations undone by constant folding,
9134     // we convert to a pseudo that will be expanded later into one of
9135     // the above forms.
9136     SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
9137     EVT VT = (SplatSize == 1 ? MVT::v16i8 :
9138               (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
9139     SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
9140     SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
9141     if (VT == Op.getValueType())
9142       return RetVal;
9143     else
9144       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
9145   }
9146 
9147   // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
9148   // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
9149   // for fneg/fabs.
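  // This works because vspltisw -1 puts 0xFFFF_FFFF in each word, vslw only
  // uses the low 5 bits of each shift amount (so shifting by -1 shifts by
  // 31, producing 0x8000_0000), and the final XOR with the all-ones vector
  // flips that to 0x7FFF_FFFF.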
9150   if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
    // Make an all-ones vector with vspltisw -1:
9152     SDValue OnesV = getCanonicalConstSplat(-1, 4, MVT::v4i32, DAG, dl);
9153 
9154     // Make the VSLW intrinsic, computing 0x8000_0000.
9155     SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
9156                                    OnesV, DAG, dl);
9157 
9158     // xor by OnesV to invert it.
9159     Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
9160     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9161   }
9162 
  // Check to see if this matches one of a wide variety of 'vsplti*' +
  // binop-on-self cases.
9164   static const signed char SplatCsts[] = {
9165     -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
9166     -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
9167   };
9168 
9169   for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
9170     // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
    // cases which are ambiguous (e.g. formation of 0x8000_0000).
9172     int i = SplatCsts[idx];
9173 
9174     // Figure out what shift amount will be used by altivec if shifted by i in
9175     // this splat size.
9176     unsigned TypeShiftAmt = i & (SplatBitSize-1);
9177 
9178     // vsplti + shl self.
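    // E.g. a v16i8 splat of 0x40 matches here at i == 4: vspltisb(4) shifted
    // left by itself (vslb amount 4 & 7) gives 4 << 4 == 0x40 in each byte.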
9179     if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
9180       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9181       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9182         Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
9183         Intrinsic::ppc_altivec_vslw
9184       };
9185       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9186       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9187     }
9188 
9189     // vsplti + srl self.
9190     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
9191       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9192       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9193         Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
9194         Intrinsic::ppc_altivec_vsrw
9195       };
9196       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9197       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9198     }
9199 
9200     // vsplti + rol self.
9201     if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
9202                          ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
9203       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9204       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9205         Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
9206         Intrinsic::ppc_altivec_vrlw
9207       };
9208       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9209       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9210     }
9211 
9212     // t = vsplti c, result = vsldoi t, t, 1
9213     if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
9214       SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9215       unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
9216       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9217     }
9218     // t = vsplti c, result = vsldoi t, t, 2
9219     if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
9220       SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9221       unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
9222       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9223     }
9224     // t = vsplti c, result = vsldoi t, t, 3
9225     if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
9226       SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9227       unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
9228       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9229     }
9230   }
9231 
9232   return SDValue();
9233 }
9234 
9235 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
9236 /// the specified operations to build the shuffle.
9237 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
9238                                       SDValue RHS, SelectionDAG &DAG,
9239                                       const SDLoc &dl) {
9240   unsigned OpNum = (PFEntry >> 26) & 0x0F;
9241   unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
9242   unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
9243 
9244   enum {
9245     OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
9246     OP_VMRGHW,
9247     OP_VMRGLW,
9248     OP_VSPLTISW0,
9249     OP_VSPLTISW1,
9250     OP_VSPLTISW2,
9251     OP_VSPLTISW3,
9252     OP_VSLDOI4,
9253     OP_VSLDOI8,
9254     OP_VSLDOI12
9255   };
9256 
9257   if (OpNum == OP_COPY) {
9258     if (LHSID == (1*9+2)*9+3) return LHS;
9259     assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
9260     return RHS;
9261   }
9262 
9263   SDValue OpLHS, OpRHS;
9264   OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
9265   OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
9266 
9267   int ShufIdxs[16];
9268   switch (OpNum) {
9269   default: llvm_unreachable("Unknown i32 permute!");
9270   case OP_VMRGHW:
9271     ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
9272     ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
9273     ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
9274     ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
9275     break;
9276   case OP_VMRGLW:
9277     ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
9278     ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
9279     ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
9280     ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
9281     break;
9282   case OP_VSPLTISW0:
9283     for (unsigned i = 0; i != 16; ++i)
9284       ShufIdxs[i] = (i&3)+0;
9285     break;
9286   case OP_VSPLTISW1:
9287     for (unsigned i = 0; i != 16; ++i)
9288       ShufIdxs[i] = (i&3)+4;
9289     break;
9290   case OP_VSPLTISW2:
9291     for (unsigned i = 0; i != 16; ++i)
9292       ShufIdxs[i] = (i&3)+8;
9293     break;
9294   case OP_VSPLTISW3:
9295     for (unsigned i = 0; i != 16; ++i)
9296       ShufIdxs[i] = (i&3)+12;
9297     break;
9298   case OP_VSLDOI4:
9299     return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
9300   case OP_VSLDOI8:
9301     return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
9302   case OP_VSLDOI12:
9303     return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
9304   }
9305   EVT VT = OpLHS.getValueType();
9306   OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
9307   OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
9308   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
9309   return DAG.getNode(ISD::BITCAST, dl, VT, T);
9310 }
9311 
9312 /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
9313 /// by the VINSERTB instruction introduced in ISA 3.0, else just return default
9314 /// SDValue.
9315 SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
9316                                            SelectionDAG &DAG) const {
9317   const unsigned BytesInVector = 16;
9318   bool IsLE = Subtarget.isLittleEndian();
9319   SDLoc dl(N);
9320   SDValue V1 = N->getOperand(0);
9321   SDValue V2 = N->getOperand(1);
9322   unsigned ShiftElts = 0, InsertAtByte = 0;
9323   bool Swap = false;
9324 
  // Shifts required to get the byte we want at the VINSERTB source element
  // (8 on LE, 7 on BE).
9326   unsigned LittleEndianShifts[] = {8, 7,  6,  5,  4,  3,  2,  1,
9327                                    0, 15, 14, 13, 12, 11, 10, 9};
9328   unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
9329                                 1, 2,  3,  4,  5,  6,  7,  8};
9330 
9331   ArrayRef<int> Mask = N->getMask();
9332   int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
9333 
9334   // For each mask element, find out if we're just inserting something
9335   // from V2 into V1 or vice versa.
9336   // Possible permutations inserting an element from V2 into V1:
9337   //   X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9338   //   0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9339   //   ...
9340   //   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
9341   // Inserting from V1 into V2 will be similar, except mask range will be
9342   // [16,31].
9343 
9344   bool FoundCandidate = false;
9345   // If both vector operands for the shuffle are the same vector, the mask
9346   // will contain only elements from the first one and the second one will be
9347   // undef.
9348   unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
  // Go through the mask of bytes to find an element that's being moved from
  // one vector to the other.
9351   for (unsigned i = 0; i < BytesInVector; ++i) {
9352     unsigned CurrentElement = Mask[i];
    // If the 2nd operand is undefined, we should only look for the VINSERTB
    // source element (8 on LE, 7 on BE) in the Mask.
9355     if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
9356       continue;
9357 
9358     bool OtherElementsInOrder = true;
9359     // Examine the other elements in the Mask to see if they're in original
9360     // order.
9361     for (unsigned j = 0; j < BytesInVector; ++j) {
9362       if (j == i)
9363         continue;
      // If CurrentElement is from V1 [0,15], then we expect the rest of the
      // Mask to be from V2 [16,31] and vice versa, unless the 2nd operand is
      // undefined, in which case we assume we're always picking from the 1st
      // operand.
9367       int MaskOffset =
9368           (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
9369       if (Mask[j] != OriginalOrder[j] + MaskOffset) {
9370         OtherElementsInOrder = false;
9371         break;
9372       }
9373     }
9374     // If other elements are in original order, we record the number of shifts
9375     // we need to get the element we want into element 7. Also record which byte
9376     // in the vector we should insert into.
9377     if (OtherElementsInOrder) {
9378       // If 2nd operand is undefined, we assume no shifts and no swapping.
9379       if (V2.isUndef()) {
9380         ShiftElts = 0;
9381         Swap = false;
9382       } else {
        // Only need the low 4 bits for the shift, because the operands will
        // be swapped if CurrentElement is >= 2^4.
9384         ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
9385                          : BigEndianShifts[CurrentElement & 0xF];
9386         Swap = CurrentElement < BytesInVector;
9387       }
9388       InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
9389       FoundCandidate = true;
9390       break;
9391     }
9392   }
9393 
9394   if (!FoundCandidate)
9395     return SDValue();
9396 
9397   // Candidate found, construct the proper SDAG sequence with VINSERTB,
9398   // optionally with VECSHL if shift is required.
9399   if (Swap)
9400     std::swap(V1, V2);
9401   if (V2.isUndef())
9402     V2 = V1;
9403   if (ShiftElts) {
9404     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9405                               DAG.getConstant(ShiftElts, dl, MVT::i32));
9406     return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl,
9407                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
9408   }
9409   return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2,
9410                      DAG.getConstant(InsertAtByte, dl, MVT::i32));
9411 }
9412 
9413 /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled
9414 /// by the VINSERTH instruction introduced in ISA 3.0, else just return default
9415 /// SDValue.
9416 SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
9417                                            SelectionDAG &DAG) const {
9418   const unsigned NumHalfWords = 8;
9419   const unsigned BytesInVector = NumHalfWords * 2;
9420   // Check that the shuffle is on half-words.
9421   if (!isNByteElemShuffleMask(N, 2, 1))
9422     return SDValue();
9423 
9424   bool IsLE = Subtarget.isLittleEndian();
9425   SDLoc dl(N);
9426   SDValue V1 = N->getOperand(0);
9427   SDValue V2 = N->getOperand(1);
9428   unsigned ShiftElts = 0, InsertAtByte = 0;
9429   bool Swap = false;
9430 
  // Shifts required to get the half-word we want at the VINSERTH source
  // element (4 on LE, 3 on BE).
9432   unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
9433   unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};
9434 
9435   uint32_t Mask = 0;
9436   uint32_t OriginalOrderLow = 0x1234567;
9437   uint32_t OriginalOrderHigh = 0x89ABCDEF;
9438   // Now we look at mask elements 0,2,4,6,8,10,12,14.  Pack the mask into a
9439   // 32-bit space, only need 4-bit nibbles per element.
9440   for (unsigned i = 0; i < NumHalfWords; ++i) {
9441     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9442     Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift);
9443   }
9444 
9445   // For each mask element, find out if we're just inserting something
9446   // from V2 into V1 or vice versa.  Possible permutations inserting an element
9447   // from V2 into V1:
9448   //   X, 1, 2, 3, 4, 5, 6, 7
9449   //   0, X, 2, 3, 4, 5, 6, 7
9450   //   0, 1, X, 3, 4, 5, 6, 7
9451   //   0, 1, 2, X, 4, 5, 6, 7
9452   //   0, 1, 2, 3, X, 5, 6, 7
9453   //   0, 1, 2, 3, 4, X, 6, 7
9454   //   0, 1, 2, 3, 4, 5, X, 7
9455   //   0, 1, 2, 3, 4, 5, 6, X
9456   // Inserting from V1 into V2 will be similar, except mask range will be [8,15].
9457 
9458   bool FoundCandidate = false;
9459   // Go through the mask of half-words to find an element that's being moved
9460   // from one vector to the other.
9461   for (unsigned i = 0; i < NumHalfWords; ++i) {
9462     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9463     uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF;
9464     uint32_t MaskOtherElts = ~(0xF << MaskShift);
9465     uint32_t TargetOrder = 0x0;
9466 
9467     // If both vector operands for the shuffle are the same vector, the mask
9468     // will contain only elements from the first one and the second one will be
9469     // undef.
9470     if (V2.isUndef()) {
9471       ShiftElts = 0;
9472       unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
9473       TargetOrder = OriginalOrderLow;
9474       Swap = false;
      // Skip if this is not the correct element or the mask of the other
      // elements doesn't match our expected order.
9477       if (MaskOneElt == VINSERTHSrcElem &&
9478           (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9479         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9480         FoundCandidate = true;
9481         break;
9482       }
9483     } else { // If both operands are defined.
9484       // Target order is [8,15] if the current mask is between [0,7].
9485       TargetOrder =
9486           (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
      // Skip if the mask of the other elements doesn't match our expected
      // order.
9488       if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9489         // We only need the last 3 bits for the number of shifts.
9490         ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
9491                          : BigEndianShifts[MaskOneElt & 0x7];
9492         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9493         Swap = MaskOneElt < NumHalfWords;
9494         FoundCandidate = true;
9495         break;
9496       }
9497     }
9498   }
9499 
9500   if (!FoundCandidate)
9501     return SDValue();
9502 
9503   // Candidate found, construct the proper SDAG sequence with VINSERTH,
9504   // optionally with VECSHL if shift is required.
9505   if (Swap)
9506     std::swap(V1, V2);
9507   if (V2.isUndef())
9508     V2 = V1;
9509   SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
9510   if (ShiftElts) {
9511     // Double ShiftElts because we're left shifting on v16i8 type.
9512     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9513                               DAG.getConstant(2 * ShiftElts, dl, MVT::i32));
9514     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl);
9515     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9516                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
9517     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9518   }
9519   SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
9520   SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9521                             DAG.getConstant(InsertAtByte, dl, MVT::i32));
9522   return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9523 }
9524 
9525 /// lowerToXXSPLTI32DX - Return the SDValue if this VECTOR_SHUFFLE can be
9526 /// handled by the XXSPLTI32DX instruction introduced in ISA 3.1, otherwise
9527 /// return the default SDValue.
9528 SDValue PPCTargetLowering::lowerToXXSPLTI32DX(ShuffleVectorSDNode *SVN,
9529                                               SelectionDAG &DAG) const {
9530   // The LHS and RHS may be bitcasts to v16i8 as we canonicalize shuffles
9531   // to v16i8. Peek through the bitcasts to get the actual operands.
9532   SDValue LHS = peekThroughBitcasts(SVN->getOperand(0));
9533   SDValue RHS = peekThroughBitcasts(SVN->getOperand(1));
9534 
9535   auto ShuffleMask = SVN->getMask();
9536   SDValue VecShuffle(SVN, 0);
9537   SDLoc DL(SVN);
9538 
9539   // Check that we have a four byte shuffle.
9540   if (!isNByteElemShuffleMask(SVN, 4, 1))
9541     return SDValue();
9542 
9543   // Canonicalize the RHS being a BUILD_VECTOR when lowering to xxsplti32dx.
9544   if (RHS->getOpcode() != ISD::BUILD_VECTOR) {
9545     std::swap(LHS, RHS);
9546     VecShuffle = DAG.getCommutedVectorShuffle(*SVN);
9547     ShuffleMask = cast<ShuffleVectorSDNode>(VecShuffle)->getMask();
9548   }
9549 
9550   // Ensure that the RHS is a vector of constants.
9551   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
9552   if (!BVN)
9553     return SDValue();
9554 
9555   // Check if RHS is a splat of 4-bytes (or smaller).
9556   APInt APSplatValue, APSplatUndef;
9557   unsigned SplatBitSize;
9558   bool HasAnyUndefs;
9559   if (!BVN->isConstantSplat(APSplatValue, APSplatUndef, SplatBitSize,
9560                             HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
9561       SplatBitSize > 32)
9562     return SDValue();
9563 
9564   // Check that the shuffle mask matches the semantics of XXSPLTI32DX.
9565   // The instruction splats a constant C into two words of the source vector
9566   // producing { C, Unchanged, C, Unchanged } or { Unchanged, C, Unchanged, C }.
  // Thus we check that the shuffle mask is the equivalent of
9568   // <0, [4-7], 2, [4-7]> or <[4-7], 1, [4-7], 3> respectively.
9569   // Note: the check above of isNByteElemShuffleMask() ensures that the bytes
9570   // within each word are consecutive, so we only need to check the first byte.
9571   SDValue Index;
9572   bool IsLE = Subtarget.isLittleEndian();
9573   if ((ShuffleMask[0] == 0 && ShuffleMask[8] == 8) &&
9574       (ShuffleMask[4] % 4 == 0 && ShuffleMask[12] % 4 == 0 &&
9575        ShuffleMask[4] > 15 && ShuffleMask[12] > 15))
9576     Index = DAG.getTargetConstant(IsLE ? 0 : 1, DL, MVT::i32);
9577   else if ((ShuffleMask[4] == 4 && ShuffleMask[12] == 12) &&
9578            (ShuffleMask[0] % 4 == 0 && ShuffleMask[8] % 4 == 0 &&
9579             ShuffleMask[0] > 15 && ShuffleMask[8] > 15))
9580     Index = DAG.getTargetConstant(IsLE ? 1 : 0, DL, MVT::i32);
9581   else
9582     return SDValue();
9583 
  // If the splat is narrower than 32 bits, we need to get the 32-bit value
9585   // for XXSPLTI32DX.
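  // E.g. an 8-bit splat of 0xAB widens to 0xABAB and then to 0xABABABAB.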
9586   unsigned SplatVal = APSplatValue.getZExtValue();
9587   for (; SplatBitSize < 32; SplatBitSize <<= 1)
9588     SplatVal |= (SplatVal << SplatBitSize);
9589 
9590   SDValue SplatNode = DAG.getNode(
9591       PPCISD::XXSPLTI32DX, DL, MVT::v2i64, DAG.getBitcast(MVT::v2i64, LHS),
9592       Index, DAG.getTargetConstant(SplatVal, DL, MVT::i32));
9593   return DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, SplatNode);
9594 }
9595 
9596 /// LowerROTL - Custom lowering for ROTL(v1i128) to vector_shuffle(v16i8).
9597 /// We lower ROTL(v1i128) to vector_shuffle(v16i8) only if shift amount is
/// a multiple of 8. Otherwise convert it to a scalar rotation (i128),
/// i.e. (or (shl x, C1), (srl x, 128-C1)).
9600 SDValue PPCTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const {
9601   assert(Op.getOpcode() == ISD::ROTL && "Should only be called for ISD::ROTL");
9602   assert(Op.getValueType() == MVT::v1i128 &&
9603          "Only set v1i128 as custom, other type shouldn't reach here!");
9604   SDLoc dl(Op);
9605   SDValue N0 = peekThroughBitcasts(Op.getOperand(0));
9606   SDValue N1 = peekThroughBitcasts(Op.getOperand(1));
9607   unsigned SHLAmt = N1.getConstantOperandVal(0);
9608   if (SHLAmt % 8 == 0) {
9609     SmallVector<int, 16> Mask(16, 0);
9610     std::iota(Mask.begin(), Mask.end(), 0);
9611     std::rotate(Mask.begin(), Mask.begin() + SHLAmt / 8, Mask.end());
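    // E.g. SHLAmt == 24 rotates the identity mask left by three, giving
    // <3,4,...,15,0,1,2>, i.e. a three-byte rotation of the v16i8 value.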
9612     if (SDValue Shuffle =
9613             DAG.getVectorShuffle(MVT::v16i8, dl, DAG.getBitcast(MVT::v16i8, N0),
9614                                  DAG.getUNDEF(MVT::v16i8), Mask))
9615       return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, Shuffle);
9616   }
9617   SDValue ArgVal = DAG.getBitcast(MVT::i128, N0);
9618   SDValue SHLOp = DAG.getNode(ISD::SHL, dl, MVT::i128, ArgVal,
9619                               DAG.getConstant(SHLAmt, dl, MVT::i32));
9620   SDValue SRLOp = DAG.getNode(ISD::SRL, dl, MVT::i128, ArgVal,
9621                               DAG.getConstant(128 - SHLAmt, dl, MVT::i32));
9622   SDValue OROp = DAG.getNode(ISD::OR, dl, MVT::i128, SHLOp, SRLOp);
9623   return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, OROp);
9624 }
9625 
9626 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
9627 /// is a shuffle we can handle in a single instruction, return it.  Otherwise,
9628 /// return the code it can be lowered into.  Worst case, it can always be
9629 /// lowered into a vperm.
9630 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
9631                                                SelectionDAG &DAG) const {
9632   SDLoc dl(Op);
9633   SDValue V1 = Op.getOperand(0);
9634   SDValue V2 = Op.getOperand(1);
9635   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9636 
9637   // Any nodes that were combined in the target-independent combiner prior
9638   // to vector legalization will not be sent to the target combine. Try to
9639   // combine it here.
9640   if (SDValue NewShuffle = combineVectorShuffle(SVOp, DAG)) {
9641     if (!isa<ShuffleVectorSDNode>(NewShuffle))
9642       return NewShuffle;
9643     Op = NewShuffle;
9644     SVOp = cast<ShuffleVectorSDNode>(Op);
9645     V1 = Op.getOperand(0);
9646     V2 = Op.getOperand(1);
9647   }
9648   EVT VT = Op.getValueType();
9649   bool isLittleEndian = Subtarget.isLittleEndian();
9650 
9651   unsigned ShiftElts, InsertAtByte;
9652   bool Swap = false;
9653 
9654   // If this is a load-and-splat, we can do that with a single instruction
9655   // in some cases. However if the load has multiple uses, we don't want to
9656   // combine it because that will just produce multiple loads.
9657   bool IsPermutedLoad = false;
9658   const SDValue *InputLoad = getNormalLoadInput(V1, IsPermutedLoad);
9659   if (InputLoad && Subtarget.hasVSX() && V2.isUndef() &&
9660       (PPC::isSplatShuffleMask(SVOp, 4) || PPC::isSplatShuffleMask(SVOp, 8)) &&
9661       InputLoad->hasOneUse()) {
9662     bool IsFourByte = PPC::isSplatShuffleMask(SVOp, 4);
9663     int SplatIdx =
9664       PPC::getSplatIdxForPPCMnemonics(SVOp, IsFourByte ? 4 : 8, DAG);
9665 
9666     // The splat index for permuted loads will be in the left half of the vector
9667     // which is strictly wider than the loaded value by 8 bytes. So we need to
9668     // adjust the splat index to point to the correct address in memory.
9669     if (IsPermutedLoad) {
9670       assert((isLittleEndian || IsFourByte) &&
9671              "Unexpected size for permuted load on big endian target");
9672       SplatIdx += IsFourByte ? 2 : 1;
9673       assert((SplatIdx < (IsFourByte ? 4 : 2)) &&
9674              "Splat of a value outside of the loaded memory");
9675     }
9676 
9677     LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9678     // For 4-byte load-and-splat, we need Power9.
9679     if ((IsFourByte && Subtarget.hasP9Vector()) || !IsFourByte) {
9680       uint64_t Offset = 0;
9681       if (IsFourByte)
9682         Offset = isLittleEndian ? (3 - SplatIdx) * 4 : SplatIdx * 4;
9683       else
9684         Offset = isLittleEndian ? (1 - SplatIdx) * 8 : SplatIdx * 8;
9685 
9686       // If the width of the load is the same as the width of the splat,
9687       // loading with an offset would load the wrong memory.
9688       if (LD->getValueType(0).getSizeInBits() == (IsFourByte ? 32 : 64))
9689         Offset = 0;
9690 
9691       SDValue BasePtr = LD->getBasePtr();
9692       if (Offset != 0)
9693         BasePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
9694                               BasePtr, DAG.getIntPtrConstant(Offset, dl));
9695       SDValue Ops[] = {
9696         LD->getChain(),    // Chain
9697         BasePtr,           // BasePtr
9698         DAG.getValueType(Op.getValueType()) // VT
9699       };
9700       SDVTList VTL =
9701         DAG.getVTList(IsFourByte ? MVT::v4i32 : MVT::v2i64, MVT::Other);
9702       SDValue LdSplt =
9703         DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl, VTL,
9704                                 Ops, LD->getMemoryVT(), LD->getMemOperand());
9705       DAG.ReplaceAllUsesOfValueWith(InputLoad->getValue(1), LdSplt.getValue(1));
9706       if (LdSplt.getValueType() != SVOp->getValueType(0))
9707         LdSplt = DAG.getBitcast(SVOp->getValueType(0), LdSplt);
9708       return LdSplt;
9709     }
9710   }
9711   if (Subtarget.hasP9Vector() &&
9712       PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
9713                            isLittleEndian)) {
9714     if (Swap)
9715       std::swap(V1, V2);
9716     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9717     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2);
9718     if (ShiftElts) {
9719       SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2,
9720                                 DAG.getConstant(ShiftElts, dl, MVT::i32));
9721       SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl,
9722                                 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9723       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9724     }
9725     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2,
9726                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
9727     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9728   }
9729 
9730   if (Subtarget.hasPrefixInstrs()) {
9731     SDValue SplatInsertNode;
9732     if ((SplatInsertNode = lowerToXXSPLTI32DX(SVOp, DAG)))
9733       return SplatInsertNode;
9734   }
9735 
9736   if (Subtarget.hasP9Altivec()) {
9737     SDValue NewISDNode;
9738     if ((NewISDNode = lowerToVINSERTH(SVOp, DAG)))
9739       return NewISDNode;
9740 
9741     if ((NewISDNode = lowerToVINSERTB(SVOp, DAG)))
9742       return NewISDNode;
9743   }
9744 
9745   if (Subtarget.hasVSX() &&
9746       PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
9747     if (Swap)
9748       std::swap(V1, V2);
9749     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9750     SDValue Conv2 =
9751         DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2);
9752 
9753     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2,
9754                               DAG.getConstant(ShiftElts, dl, MVT::i32));
9755     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl);
9756   }
9757 
  if (Subtarget.hasVSX() &&
      PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
9760     if (Swap)
9761       std::swap(V1, V2);
9762     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
9763     SDValue Conv2 =
9764         DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2);
9765 
    SDValue PermDI =
        DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2,
                    DAG.getConstant(ShiftElts, dl, MVT::i32));
9768     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI);
9769   }
9770 
9771   if (Subtarget.hasP9Vector()) {
    if (PPC::isXXBRHShuffleMask(SVOp)) {
9773       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
9774       SDValue ReveHWord = DAG.getNode(ISD::BSWAP, dl, MVT::v8i16, Conv);
9775       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord);
9776     } else if (PPC::isXXBRWShuffleMask(SVOp)) {
9777       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9778       SDValue ReveWord = DAG.getNode(ISD::BSWAP, dl, MVT::v4i32, Conv);
9779       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord);
9780     } else if (PPC::isXXBRDShuffleMask(SVOp)) {
9781       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
9782       SDValue ReveDWord = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Conv);
9783       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord);
9784     } else if (PPC::isXXBRQShuffleMask(SVOp)) {
9785       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1);
9786       SDValue ReveQWord = DAG.getNode(ISD::BSWAP, dl, MVT::v1i128, Conv);
9787       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord);
9788     }
9789   }
9790 
9791   if (Subtarget.hasVSX()) {
9792     if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
9793       int SplatIdx = PPC::getSplatIdxForPPCMnemonics(SVOp, 4, DAG);
9794 
9795       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9796       SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
9797                                   DAG.getConstant(SplatIdx, dl, MVT::i32));
9798       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
9799     }
9800 
9801     // Left shifts of 8 bytes are actually swaps. Convert accordingly.
9802     if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
9803       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
9804       SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
9805       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
9806     }
9807   }
9808 
9809   // Cases that are handled by instructions that take permute immediates
9810   // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
9811   // selected by the instruction selector.
9812   if (V2.isUndef()) {
9813     if (PPC::isSplatShuffleMask(SVOp, 1) ||
9814         PPC::isSplatShuffleMask(SVOp, 2) ||
9815         PPC::isSplatShuffleMask(SVOp, 4) ||
9816         PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
9817         PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
9818         PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
9819         PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
9820         PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
9821         PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
9822         PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
9823         PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
9824         PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) ||
9825         (Subtarget.hasP8Altivec() && (
9826          PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) ||
9827          PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) ||
9828          PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) {
9829       return Op;
9830     }
9831   }
9832 
9833   // Altivec has a variety of "shuffle immediates" that take two vector inputs
9834   // and produce a fixed permutation.  If any of these match, do not lower to
9835   // VPERM.
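  // ShuffleKind 0 is a big-endian shuffle of two distinct inputs; 2 is the
  // little-endian equivalent.  The unary, single-input cases were handled
  // above with ShuffleKind 1.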
  unsigned ShuffleKind = isLittleEndian ? 2 : 0;
9837   if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9838       PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9839       PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
9840       PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
9841       PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
9842       PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
9843       PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
9844       PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
9845       PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
9846       (Subtarget.hasP8Altivec() && (
9847        PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9848        PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) ||
9849        PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG))))
9850     return Op;
9851 
9852   // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
9853   // perfect shuffle table to emit an optimal matching sequence.
9854   ArrayRef<int> PermMask = SVOp->getMask();
9855 
9856   unsigned PFIndexes[4];
9857   bool isFourElementShuffle = true;
9858   for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
9859     unsigned EltNo = 8;   // Start out undef.
9860     for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
9861       if (PermMask[i*4+j] < 0)
9862         continue;   // Undef, ignore it.
9863 
9864       unsigned ByteSource = PermMask[i*4+j];
9865       if ((ByteSource & 3) != j) {
9866         isFourElementShuffle = false;
9867         break;
9868       }
9869 
9870       if (EltNo == 8) {
9871         EltNo = ByteSource/4;
9872       } else if (EltNo != ByteSource/4) {
9873         isFourElementShuffle = false;
9874         break;
9875       }
9876     }
9877     PFIndexes[i] = EltNo;
9878   }
9879 
9880   // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
9881   // perfect shuffle vector to determine if it is cost effective to do this as
9882   // discrete instructions, or whether we should use a vperm.
9883   // For now, we skip this for little endian until such time as we have a
9884   // little-endian perfect shuffle table.
9885   if (isFourElementShuffle && !isLittleEndian) {
9886     // Compute the index in the perfect shuffle table.
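    // Each PFIndex is in [0, 8], with 8 meaning an undef element, so the
    // table is indexed in base 9.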
9887     unsigned PFTableIndex =
9888       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
9889 
9890     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
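    // The top two bits of each entry encode the cost of the expansion; the
    // remaining bits encode the operation and its operand shuffles.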
9891     unsigned Cost  = (PFEntry >> 30);
9892 
9893     // Determining when to avoid vperm is tricky.  Many things affect the cost
9894     // of vperm, particularly how many times the perm mask needs to be computed.
9895     // For example, if the perm mask can be hoisted out of a loop or is already
9896     // used (perhaps because there are multiple permutes with the same shuffle
9897     // mask?) the vperm has a cost of 1.  OTOH, hoisting the permute mask out of
9898     // the loop requires an extra register.
9899     //
9900     // As a compromise, we only emit discrete instructions if the shuffle can be
9901     // generated in 3 or fewer operations.  When we have loop information
9902     // available, if this block is within a loop, we should avoid using vperm
9903     // for 3-operation perms and use a constant pool load instead.
9904     if (Cost < 3)
9905       return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
9906   }
9907 
9908   // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
9909   // vector that will get spilled to the constant pool.
9910   if (V2.isUndef()) V2 = V1;
9911 
9912   // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
9913   // that it is in input element units, not in bytes.  Convert now.
9914 
9915   // For little endian, the order of the input vectors is reversed, and
9916   // the permutation mask is complemented with respect to 31.  This is
9917   // necessary to produce proper semantics with the big-endian-biased vperm
9918   // instruction.
9919   EVT EltVT = V1.getValueType().getVectorElementType();
9920   unsigned BytesPerElement = EltVT.getSizeInBits()/8;
9921 
9922   SmallVector<SDValue, 16> ResultMask;
9923   for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
9924     unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
9925 
9926     for (unsigned j = 0; j != BytesPerElement; ++j)
9927       if (isLittleEndian)
9928         ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
9929                                              dl, MVT::i32));
9930       else
9931         ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
9932                                              MVT::i32));
9933   }
9934 
9935   ShufflesHandledWithVPERM++;
9936   SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
9937   LLVM_DEBUG(dbgs() << "Emitting a VPERM for the following shuffle:\n");
9938   LLVM_DEBUG(SVOp->dump());
9939   LLVM_DEBUG(dbgs() << "With the following permute control vector:\n");
9940   LLVM_DEBUG(VPermMask.dump());
9941 
9942   if (isLittleEndian)
9943     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
9944                        V2, V1, VPermMask);
9945   else
9946     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
9947                        V1, V2, VPermMask);
9948 }
9949 
/// getVectorCompareInfo - Given an intrinsic, return false if it is not a
/// vector comparison.  If it is, return true and fill in CompareOpc/isDot
/// with information about the intrinsic.
9953 static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
9954                                  bool &isDot, const PPCSubtarget &Subtarget) {
9955   unsigned IntrinsicID =
9956       cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
9957   CompareOpc = -1;
9958   isDot = false;
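  // The CompareOpc values below are the extended-opcode (XO) fields of the
  // corresponding vcmp*/xvcmp* instructions.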
9959   switch (IntrinsicID) {
9960   default:
9961     return false;
9962   // Comparison predicates.
9963   case Intrinsic::ppc_altivec_vcmpbfp_p:
9964     CompareOpc = 966;
9965     isDot = true;
9966     break;
9967   case Intrinsic::ppc_altivec_vcmpeqfp_p:
9968     CompareOpc = 198;
9969     isDot = true;
9970     break;
9971   case Intrinsic::ppc_altivec_vcmpequb_p:
9972     CompareOpc = 6;
9973     isDot = true;
9974     break;
9975   case Intrinsic::ppc_altivec_vcmpequh_p:
9976     CompareOpc = 70;
9977     isDot = true;
9978     break;
9979   case Intrinsic::ppc_altivec_vcmpequw_p:
9980     CompareOpc = 134;
9981     isDot = true;
9982     break;
9983   case Intrinsic::ppc_altivec_vcmpequd_p:
9984     if (Subtarget.hasP8Altivec()) {
9985       CompareOpc = 199;
9986       isDot = true;
9987     } else
9988       return false;
9989     break;
9990   case Intrinsic::ppc_altivec_vcmpneb_p:
9991   case Intrinsic::ppc_altivec_vcmpneh_p:
9992   case Intrinsic::ppc_altivec_vcmpnew_p:
9993   case Intrinsic::ppc_altivec_vcmpnezb_p:
9994   case Intrinsic::ppc_altivec_vcmpnezh_p:
9995   case Intrinsic::ppc_altivec_vcmpnezw_p:
9996     if (Subtarget.hasP9Altivec()) {
9997       switch (IntrinsicID) {
9998       default:
9999         llvm_unreachable("Unknown comparison intrinsic.");
10000       case Intrinsic::ppc_altivec_vcmpneb_p:
10001         CompareOpc = 7;
10002         break;
10003       case Intrinsic::ppc_altivec_vcmpneh_p:
10004         CompareOpc = 71;
10005         break;
10006       case Intrinsic::ppc_altivec_vcmpnew_p:
10007         CompareOpc = 135;
10008         break;
10009       case Intrinsic::ppc_altivec_vcmpnezb_p:
10010         CompareOpc = 263;
10011         break;
10012       case Intrinsic::ppc_altivec_vcmpnezh_p:
10013         CompareOpc = 327;
10014         break;
10015       case Intrinsic::ppc_altivec_vcmpnezw_p:
10016         CompareOpc = 391;
10017         break;
10018       }
10019       isDot = true;
10020     } else
10021       return false;
10022     break;
10023   case Intrinsic::ppc_altivec_vcmpgefp_p:
10024     CompareOpc = 454;
10025     isDot = true;
10026     break;
10027   case Intrinsic::ppc_altivec_vcmpgtfp_p:
10028     CompareOpc = 710;
10029     isDot = true;
10030     break;
10031   case Intrinsic::ppc_altivec_vcmpgtsb_p:
10032     CompareOpc = 774;
10033     isDot = true;
10034     break;
10035   case Intrinsic::ppc_altivec_vcmpgtsh_p:
10036     CompareOpc = 838;
10037     isDot = true;
10038     break;
10039   case Intrinsic::ppc_altivec_vcmpgtsw_p:
10040     CompareOpc = 902;
10041     isDot = true;
10042     break;
10043   case Intrinsic::ppc_altivec_vcmpgtsd_p:
10044     if (Subtarget.hasP8Altivec()) {
10045       CompareOpc = 967;
10046       isDot = true;
10047     } else
10048       return false;
10049     break;
10050   case Intrinsic::ppc_altivec_vcmpgtub_p:
10051     CompareOpc = 518;
10052     isDot = true;
10053     break;
10054   case Intrinsic::ppc_altivec_vcmpgtuh_p:
10055     CompareOpc = 582;
10056     isDot = true;
10057     break;
10058   case Intrinsic::ppc_altivec_vcmpgtuw_p:
10059     CompareOpc = 646;
10060     isDot = true;
10061     break;
10062   case Intrinsic::ppc_altivec_vcmpgtud_p:
10063     if (Subtarget.hasP8Altivec()) {
10064       CompareOpc = 711;
10065       isDot = true;
10066     } else
10067       return false;
10068     break;
10069 
10070   case Intrinsic::ppc_altivec_vcmpequq:
10071   case Intrinsic::ppc_altivec_vcmpgtsq:
10072   case Intrinsic::ppc_altivec_vcmpgtuq:
10073     if (!Subtarget.isISA3_1())
10074       return false;
10075     switch (IntrinsicID) {
10076     default:
10077       llvm_unreachable("Unknown comparison intrinsic.");
10078     case Intrinsic::ppc_altivec_vcmpequq:
10079       CompareOpc = 455;
10080       break;
10081     case Intrinsic::ppc_altivec_vcmpgtsq:
10082       CompareOpc = 903;
10083       break;
10084     case Intrinsic::ppc_altivec_vcmpgtuq:
10085       CompareOpc = 647;
10086       break;
10087     }
10088     break;
10089 
  // VSX predicate comparisons use the same infrastructure.
10091   case Intrinsic::ppc_vsx_xvcmpeqdp_p:
10092   case Intrinsic::ppc_vsx_xvcmpgedp_p:
10093   case Intrinsic::ppc_vsx_xvcmpgtdp_p:
10094   case Intrinsic::ppc_vsx_xvcmpeqsp_p:
10095   case Intrinsic::ppc_vsx_xvcmpgesp_p:
10096   case Intrinsic::ppc_vsx_xvcmpgtsp_p:
10097     if (Subtarget.hasVSX()) {
10098       switch (IntrinsicID) {
10099       case Intrinsic::ppc_vsx_xvcmpeqdp_p:
10100         CompareOpc = 99;
10101         break;
10102       case Intrinsic::ppc_vsx_xvcmpgedp_p:
10103         CompareOpc = 115;
10104         break;
10105       case Intrinsic::ppc_vsx_xvcmpgtdp_p:
10106         CompareOpc = 107;
10107         break;
10108       case Intrinsic::ppc_vsx_xvcmpeqsp_p:
10109         CompareOpc = 67;
10110         break;
10111       case Intrinsic::ppc_vsx_xvcmpgesp_p:
10112         CompareOpc = 83;
10113         break;
10114       case Intrinsic::ppc_vsx_xvcmpgtsp_p:
10115         CompareOpc = 75;
10116         break;
10117       }
10118       isDot = true;
10119     } else
10120       return false;
10121     break;
10122 
  // Normal comparisons.
10124   case Intrinsic::ppc_altivec_vcmpbfp:
10125     CompareOpc = 966;
10126     break;
10127   case Intrinsic::ppc_altivec_vcmpeqfp:
10128     CompareOpc = 198;
10129     break;
10130   case Intrinsic::ppc_altivec_vcmpequb:
10131     CompareOpc = 6;
10132     break;
10133   case Intrinsic::ppc_altivec_vcmpequh:
10134     CompareOpc = 70;
10135     break;
10136   case Intrinsic::ppc_altivec_vcmpequw:
10137     CompareOpc = 134;
10138     break;
10139   case Intrinsic::ppc_altivec_vcmpequd:
10140     if (Subtarget.hasP8Altivec())
10141       CompareOpc = 199;
10142     else
10143       return false;
10144     break;
10145   case Intrinsic::ppc_altivec_vcmpneb:
10146   case Intrinsic::ppc_altivec_vcmpneh:
10147   case Intrinsic::ppc_altivec_vcmpnew:
10148   case Intrinsic::ppc_altivec_vcmpnezb:
10149   case Intrinsic::ppc_altivec_vcmpnezh:
10150   case Intrinsic::ppc_altivec_vcmpnezw:
10151     if (Subtarget.hasP9Altivec())
10152       switch (IntrinsicID) {
10153       default:
10154         llvm_unreachable("Unknown comparison intrinsic.");
10155       case Intrinsic::ppc_altivec_vcmpneb:
10156         CompareOpc = 7;
10157         break;
10158       case Intrinsic::ppc_altivec_vcmpneh:
10159         CompareOpc = 71;
10160         break;
10161       case Intrinsic::ppc_altivec_vcmpnew:
10162         CompareOpc = 135;
10163         break;
10164       case Intrinsic::ppc_altivec_vcmpnezb:
10165         CompareOpc = 263;
10166         break;
10167       case Intrinsic::ppc_altivec_vcmpnezh:
10168         CompareOpc = 327;
10169         break;
10170       case Intrinsic::ppc_altivec_vcmpnezw:
10171         CompareOpc = 391;
10172         break;
10173       }
10174     else
10175       return false;
10176     break;
10177   case Intrinsic::ppc_altivec_vcmpgefp:
10178     CompareOpc = 454;
10179     break;
10180   case Intrinsic::ppc_altivec_vcmpgtfp:
10181     CompareOpc = 710;
10182     break;
10183   case Intrinsic::ppc_altivec_vcmpgtsb:
10184     CompareOpc = 774;
10185     break;
10186   case Intrinsic::ppc_altivec_vcmpgtsh:
10187     CompareOpc = 838;
10188     break;
10189   case Intrinsic::ppc_altivec_vcmpgtsw:
10190     CompareOpc = 902;
10191     break;
10192   case Intrinsic::ppc_altivec_vcmpgtsd:
10193     if (Subtarget.hasP8Altivec())
10194       CompareOpc = 967;
10195     else
10196       return false;
10197     break;
10198   case Intrinsic::ppc_altivec_vcmpgtub:
10199     CompareOpc = 518;
10200     break;
10201   case Intrinsic::ppc_altivec_vcmpgtuh:
10202     CompareOpc = 582;
10203     break;
10204   case Intrinsic::ppc_altivec_vcmpgtuw:
10205     CompareOpc = 646;
10206     break;
10207   case Intrinsic::ppc_altivec_vcmpgtud:
10208     if (Subtarget.hasP8Altivec())
10209       CompareOpc = 711;
10210     else
10211       return false;
10212     break;
10213   case Intrinsic::ppc_altivec_vcmpequq_p:
10214   case Intrinsic::ppc_altivec_vcmpgtsq_p:
10215   case Intrinsic::ppc_altivec_vcmpgtuq_p:
10216     if (!Subtarget.isISA3_1())
10217       return false;
10218     switch (IntrinsicID) {
10219     default:
10220       llvm_unreachable("Unknown comparison intrinsic.");
10221     case Intrinsic::ppc_altivec_vcmpequq_p:
10222       CompareOpc = 455;
10223       break;
10224     case Intrinsic::ppc_altivec_vcmpgtsq_p:
10225       CompareOpc = 903;
10226       break;
10227     case Intrinsic::ppc_altivec_vcmpgtuq_p:
10228       CompareOpc = 647;
10229       break;
10230     }
10231     isDot = true;
10232     break;
10233   }
10234   return true;
10235 }
10236 
10237 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
10238 /// lower, do it, otherwise return null.
10239 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
10240                                                    SelectionDAG &DAG) const {
10241   unsigned IntrinsicID =
10242     cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
10243 
10244   SDLoc dl(Op);
10245 
10246   switch (IntrinsicID) {
10247   case Intrinsic::thread_pointer:
10248     // Reads the thread pointer register, used for __builtin_thread_pointer.
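    // The thread pointer is kept in r13 on 64-bit targets and in r2 on
    // 32-bit targets.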
10249     if (Subtarget.isPPC64())
10250       return DAG.getRegister(PPC::X13, MVT::i64);
10251     return DAG.getRegister(PPC::R2, MVT::i32);
10252 
10253   case Intrinsic::ppc_mma_disassemble_acc:
10254   case Intrinsic::ppc_vsx_disassemble_pair: {
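    // An MMA accumulator (v512i1) occupies four adjacent VSX registers and a
    // vector pair (v256i1) occupies two; XXMFACC transfers the accumulator
    // into its constituent registers so they can be extracted individually.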
10255     int NumVecs = 2;
10256     SDValue WideVec = Op.getOperand(1);
10257     if (IntrinsicID == Intrinsic::ppc_mma_disassemble_acc) {
10258       NumVecs = 4;
10259       WideVec = DAG.getNode(PPCISD::XXMFACC, dl, MVT::v512i1, WideVec);
10260     }
10261     SmallVector<SDValue, 4> RetOps;
10262     for (int VecNo = 0; VecNo < NumVecs; VecNo++) {
10263       SDValue Extract = DAG.getNode(
10264           PPCISD::EXTRACT_VSX_REG, dl, MVT::v16i8, WideVec,
10265           DAG.getConstant(Subtarget.isLittleEndian() ? NumVecs - 1 - VecNo
10266                                                      : VecNo,
10267                           dl, getPointerTy(DAG.getDataLayout())));
10268       RetOps.push_back(Extract);
10269     }
10270     return DAG.getMergeValues(RetOps, dl);
10271   }
10272   }
10273 
10274   // If this is a lowered altivec predicate compare, CompareOpc is set to the
10275   // opcode number of the comparison.
10276   int CompareOpc;
10277   bool isDot;
10278   if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget))
10279     return SDValue();    // Don't custom lower most intrinsics.
10280 
10281   // If this is a non-dot comparison, make the VCMP node and we are done.
10282   if (!isDot) {
10283     SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
10284                               Op.getOperand(1), Op.getOperand(2),
10285                               DAG.getConstant(CompareOpc, dl, MVT::i32));
10286     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
10287   }
10288 
10289   // Create the PPCISD altivec 'dot' comparison node.
10290   SDValue Ops[] = {
10291     Op.getOperand(2),  // LHS
10292     Op.getOperand(3),  // RHS
10293     DAG.getConstant(CompareOpc, dl, MVT::i32)
10294   };
10295   EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
10296   SDValue CompNode = DAG.getNode(PPCISD::VCMP_rec, dl, VTs, Ops);
10297 
10298   // Now that we have the comparison, emit a copy from the CR to a GPR.
10299   // This is flagged to the above dot comparison.
10300   SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
10301                                 DAG.getRegister(PPC::CR6, MVT::i32),
10302                                 CompNode.getValue(1));
10303 
10304   // Unpack the result based on how the target uses it.
10305   unsigned BitNo;   // Bit # of CR6.
10306   bool InvertBit;   // Invert result?
10307   switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
10308   default:  // Can't happen, don't crash on invalid number though.
10309   case 0:   // Return the value of the EQ bit of CR6.
10310     BitNo = 0; InvertBit = false;
10311     break;
10312   case 1:   // Return the inverted value of the EQ bit of CR6.
10313     BitNo = 0; InvertBit = true;
10314     break;
10315   case 2:   // Return the value of the LT bit of CR6.
10316     BitNo = 2; InvertBit = false;
10317     break;
10318   case 3:   // Return the inverted value of the LT bit of CR6.
10319     BitNo = 2; InvertBit = true;
10320     break;
10321   }
10322 
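  // In the MFOCRF result, CR6 occupies bits 7..4 counting from the LSB;
  // within the field, LT is bit 7 and EQ is bit 5, hence the shift amount
  // 8 - (3 - BitNo) below.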
10323   // Shift the bit into the low position.
10324   Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
10325                       DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
10326   // Isolate the bit.
10327   Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
10328                       DAG.getConstant(1, dl, MVT::i32));
10329 
10330   // If we are supposed to, toggle the bit.
10331   if (InvertBit)
10332     Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
10333                         DAG.getConstant(1, dl, MVT::i32));
10334   return Flags;
10335 }
10336 
10337 SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
10338                                                SelectionDAG &DAG) const {
10339   // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to
10340   // the beginning of the argument list.
10341   int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
10342   SDLoc DL(Op);
10343   switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) {
10344   case Intrinsic::ppc_cfence: {
10345     assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
10346     assert(Subtarget.isPPC64() && "Only 64-bit is supported for now.");
10347     return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other,
10348                                       DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
10349                                                   Op.getOperand(ArgStart + 1)),
10350                                       Op.getOperand(0)),
10351                    0);
10352   }
10353   default:
10354     break;
10355   }
10356   return SDValue();
10357 }
10358 
10359 // Lower scalar BSWAP64 to xxbrd.
10360 SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const {
10361   SDLoc dl(Op);
10362   if (!Subtarget.isPPC64())
10363     return Op;
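  // Splat the scalar into both doublewords of a vector register, byte-swap
  // the whole vector, then extract the appropriate doubleword: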
10364   // MTVSRDD
10365   Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0),
10366                    Op.getOperand(0));
10367   // XXBRD
10368   Op = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Op);
10369   // MFVSRD
10370   int VectorIndex = 0;
10371   if (Subtarget.isLittleEndian())
10372     VectorIndex = 1;
10373   Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op,
10374                    DAG.getTargetConstant(VectorIndex, dl, MVT::i32));
10375   return Op;
10376 }
10377 
10378 // ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be
10379 // compared to a value that is atomically loaded (atomic loads zero-extend).
10380 SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op,
10381                                                 SelectionDAG &DAG) const {
10382   assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP &&
10383          "Expecting an atomic compare-and-swap here.");
10384   SDLoc dl(Op);
10385   auto *AtomicNode = cast<AtomicSDNode>(Op.getNode());
10386   EVT MemVT = AtomicNode->getMemoryVT();
10387   if (MemVT.getSizeInBits() >= 32)
10388     return Op;
10389 
10390   SDValue CmpOp = Op.getOperand(2);
10391   // If this is already correctly zero-extended, leave it alone.
10392   auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits());
10393   if (DAG.MaskedValueIsZero(CmpOp, HighBits))
10394     return Op;
10395 
10396   // Clear the high bits of the compare operand.
10397   unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1;
10398   SDValue NewCmpOp =
10399     DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp,
10400                 DAG.getConstant(MaskVal, dl, MVT::i32));
10401 
10402   // Replace the existing compare operand with the properly zero-extended one.
10403   SmallVector<SDValue, 4> Ops;
10404   for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
10405     Ops.push_back(AtomicNode->getOperand(i));
10406   Ops[2] = NewCmpOp;
10407   MachineMemOperand *MMO = AtomicNode->getMemOperand();
10408   SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other);
10409   auto NodeTy =
10410     (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16;
10411   return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO);
10412 }
10413 
10414 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
10415                                                  SelectionDAG &DAG) const {
10416   SDLoc dl(Op);
10417   // Create a stack slot that is 16-byte aligned.
10418   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
10419   int FrameIdx = MFI.CreateStackObject(16, Align(16), false);
10420   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10421   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
10422 
10423   // Store the input value into Value#0 of the stack slot.
10424   SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
10425                                MachinePointerInfo());
10426   // Load it out.
10427   return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
10428 }
10429 
10430 SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
10431                                                   SelectionDAG &DAG) const {
10432   assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
10433          "Should only be called for ISD::INSERT_VECTOR_ELT");
10434 
10435   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));
10436 
10437   EVT VT = Op.getValueType();
10438   SDLoc dl(Op);
10439   SDValue V1 = Op.getOperand(0);
10440   SDValue V2 = Op.getOperand(1);
10441   SDValue V3 = Op.getOperand(2);
10442 
10443   if (VT == MVT::v2f64 && C)
10444     return Op;
10445 
10446   if (Subtarget.isISA3_1()) {
10447     if ((VT == MVT::v2i64 || VT == MVT::v2f64) && !Subtarget.isPPC64())
10448       return SDValue();
10449     // On P10, we have legal lowering for constant and variable indices for
10450     // integer vectors.
10451     if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
10452         VT == MVT::v2i64)
10453       return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, V2, V3);
10454     // For f32 and f64 vectors, we have legal lowering for variable indices.
10455     // For f32 we also have legal lowering when the element is loaded from
10456     // memory.
10457     if (VT == MVT::v4f32 || VT == MVT::v2f64) {
      if (!C || (VT == MVT::v4f32 && isa<LoadSDNode>(V2)))
10459         return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, V2, V3);
10460       return Op;
10461     }
10462   }
10463 
10464   // Before P10, we have legal lowering for constant indices but not for
10465   // variable ones.
10466   if (!C)
10467     return SDValue();
10468 
10469   // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types.
10470   if (VT == MVT::v8i16 || VT == MVT::v16i8) {
10471     SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2);
10472     unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8;
10473     unsigned InsertAtElement = C->getZExtValue();
10474     unsigned InsertAtByte = InsertAtElement * BytesInEachElement;
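    // VECINSERT byte offsets are big-endian; mirror the offset within the
    // 16-byte vector on little-endian targets.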
10475     if (Subtarget.isLittleEndian()) {
10476       InsertAtByte = (16 - BytesInEachElement) - InsertAtByte;
10477     }
10478     return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz,
10479                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
10480   }
10481   return Op;
10482 }
10483 
10484 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
10485                                            SelectionDAG &DAG) const {
10486   SDLoc dl(Op);
10487   LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
10488   SDValue LoadChain = LN->getChain();
10489   SDValue BasePtr = LN->getBasePtr();
10490   EVT VT = Op.getValueType();
10491 
10492   if (VT != MVT::v256i1 && VT != MVT::v512i1)
10493     return Op;
10494 
10495   // Type v256i1 is used for pairs and v512i1 is used for accumulators.
10496   // Here we create 2 or 4 v16i8 loads to load the pair or accumulator value in
10497   // 2 or 4 vsx registers.
10498   assert((VT != MVT::v512i1 || Subtarget.hasMMA()) &&
10499          "Type unsupported without MMA");
10500   assert((VT != MVT::v256i1 || Subtarget.pairedVectorMemops()) &&
10501          "Type unsupported without paired vector support");
10502   Align Alignment = LN->getAlign();
10503   SmallVector<SDValue, 4> Loads;
10504   SmallVector<SDValue, 4> LoadChains;
10505   unsigned NumVecs = VT.getSizeInBits() / 128;
10506   for (unsigned Idx = 0; Idx < NumVecs; ++Idx) {
10507     SDValue Load =
10508         DAG.getLoad(MVT::v16i8, dl, LoadChain, BasePtr,
10509                     LN->getPointerInfo().getWithOffset(Idx * 16),
10510                     commonAlignment(Alignment, Idx * 16),
10511                     LN->getMemOperand()->getFlags(), LN->getAAInfo());
10512     BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
10513                           DAG.getConstant(16, dl, BasePtr.getValueType()));
10514     Loads.push_back(Load);
10515     LoadChains.push_back(Load.getValue(1));
10516   }
10517   if (Subtarget.isLittleEndian()) {
10518     std::reverse(Loads.begin(), Loads.end());
10519     std::reverse(LoadChains.begin(), LoadChains.end());
10520   }
10521   SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
10522   SDValue Value =
10523       DAG.getNode(VT == MVT::v512i1 ? PPCISD::ACC_BUILD : PPCISD::PAIR_BUILD,
10524                   dl, VT, Loads);
10525   SDValue RetOps[] = {Value, TF};
10526   return DAG.getMergeValues(RetOps, dl);
10527 }
10528 
10529 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
10530                                             SelectionDAG &DAG) const {
10531   SDLoc dl(Op);
10532   StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
10533   SDValue StoreChain = SN->getChain();
10534   SDValue BasePtr = SN->getBasePtr();
10535   SDValue Value = SN->getValue();
10536   EVT StoreVT = Value.getValueType();
10537 
10538   if (StoreVT != MVT::v256i1 && StoreVT != MVT::v512i1)
10539     return Op;
10540 
10541   // Type v256i1 is used for pairs and v512i1 is used for accumulators.
10542   // Here we create 2 or 4 v16i8 stores to store the pair or accumulator
10543   // underlying registers individually.
10544   assert((StoreVT != MVT::v512i1 || Subtarget.hasMMA()) &&
10545          "Type unsupported without MMA");
10546   assert((StoreVT != MVT::v256i1 || Subtarget.pairedVectorMemops()) &&
10547          "Type unsupported without paired vector support");
10548   Align Alignment = SN->getAlign();
10549   SmallVector<SDValue, 4> Stores;
10550   unsigned NumVecs = 2;
10551   if (StoreVT == MVT::v512i1) {
10552     Value = DAG.getNode(PPCISD::XXMFACC, dl, MVT::v512i1, Value);
10553     NumVecs = 4;
10554   }
10555   for (unsigned Idx = 0; Idx < NumVecs; ++Idx) {
10556     unsigned VecNum = Subtarget.isLittleEndian() ? NumVecs - 1 - Idx : Idx;
    SDValue Elt = DAG.getNode(
        PPCISD::EXTRACT_VSX_REG, dl, MVT::v16i8, Value,
        DAG.getConstant(VecNum, dl, getPointerTy(DAG.getDataLayout())));
10559     SDValue Store =
10560         DAG.getStore(StoreChain, dl, Elt, BasePtr,
10561                      SN->getPointerInfo().getWithOffset(Idx * 16),
10562                      commonAlignment(Alignment, Idx * 16),
10563                      SN->getMemOperand()->getFlags(), SN->getAAInfo());
10564     BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
10565                           DAG.getConstant(16, dl, BasePtr.getValueType()));
10566     Stores.push_back(Store);
10567   }
10568   SDValue TF = DAG.getTokenFactor(dl, Stores);
10569   return TF;
10570 }
10571 
10572 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
10573   SDLoc dl(Op);
10574   if (Op.getValueType() == MVT::v4i32) {
10575     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
10576 
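    // Each 32-bit product decomposes as
    //   lo(L)*lo(R) + ((hi(L)*lo(R) + lo(L)*hi(R)) << 16)  (mod 2^32):
    // vmulouh forms the low products, and vmsumuhm applied to a
    // halfword-rotated RHS forms the sum of the cross products.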
10577     SDValue Zero = getCanonicalConstSplat(0, 1, MVT::v4i32, DAG, dl);
10578     // +16 as shift amt.
10579     SDValue Neg16 = getCanonicalConstSplat(-16, 4, MVT::v4i32, DAG, dl);
10580     SDValue RHSSwap =   // = vrlw RHS, 16
10581       BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
10582 
10583     // Shrinkify inputs to v8i16.
10584     LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
10585     RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
10586     RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);
10587 
10588     // Low parts multiplied together, generating 32-bit results (we ignore the
10589     // top parts).
10590     SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
10591                                         LHS, RHS, DAG, dl, MVT::v4i32);
10592 
10593     SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
10594                                       LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
10595     // Shift the high parts up 16 bits.
10596     HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
10597                               Neg16, DAG, dl);
10598     return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
10599   } else if (Op.getValueType() == MVT::v16i8) {
10600     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
10601     bool isLittleEndian = Subtarget.isLittleEndian();
10602 
10603     // Multiply the even 8-bit parts, producing 16-bit sums.
10604     SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
10605                                            LHS, RHS, DAG, dl, MVT::v8i16);
10606     EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);
10607 
10608     // Multiply the odd 8-bit parts, producing 16-bit sums.
10609     SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
10610                                           LHS, RHS, DAG, dl, MVT::v8i16);
10611     OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);
10612 
10613     // Merge the results together.  Because vmuleub and vmuloub are
10614     // instructions with a big-endian bias, we must reverse the
10615     // element numbering and reverse the meaning of "odd" and "even"
10616     // when generating little endian code.
10617     int Ops[16];
10618     for (unsigned i = 0; i != 8; ++i) {
10619       if (isLittleEndian) {
10620         Ops[i*2  ] = 2*i;
10621         Ops[i*2+1] = 2*i+16;
10622       } else {
10623         Ops[i*2  ] = 2*i+1;
10624         Ops[i*2+1] = 2*i+1+16;
10625       }
10626     }
10627     if (isLittleEndian)
10628       return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
10629     else
10630       return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
10631   } else {
10632     llvm_unreachable("Unknown mul to lower!");
10633   }
10634 }
10635 
10636 SDValue PPCTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
10637   bool IsStrict = Op->isStrictFPOpcode();
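  // Without Power9 vector support there is no direct f128 rounding
  // instruction; returning SDValue() defers to the default expansion.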
10638   if (Op.getOperand(IsStrict ? 1 : 0).getValueType() == MVT::f128 &&
10639       !Subtarget.hasP9Vector())
10640     return SDValue();
10641 
10642   return Op;
10643 }
10644 
// Custom lowering for fpext v2f32 to v2f64
SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
10648   assert(Op.getOpcode() == ISD::FP_EXTEND &&
10649          "Should only be called for ISD::FP_EXTEND");
10650 
10651   // FIXME: handle extends from half precision float vectors on P9.
10652   // We only want to custom lower an extend from v2f32 to v2f64.
10653   if (Op.getValueType() != MVT::v2f64 ||
10654       Op.getOperand(0).getValueType() != MVT::v2f32)
10655     return SDValue();
10656 
10657   SDLoc dl(Op);
10658   SDValue Op0 = Op.getOperand(0);
10659 
10660   switch (Op0.getOpcode()) {
10661   default:
10662     return SDValue();
10663   case ISD::EXTRACT_SUBVECTOR: {
10664     assert(Op0.getNumOperands() == 2 &&
10665            isa<ConstantSDNode>(Op0->getOperand(1)) &&
10666            "Node should have 2 operands with second one being a constant!");
10667 
10668     if (Op0.getOperand(0).getValueType() != MVT::v4f32)
10669       return SDValue();
10670 
10671     // Custom lower is only done for high or low doubleword.
10672     int Idx = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
10673     if (Idx % 2 != 0)
10674       return SDValue();
10675 
10676     // Since input is v4f32, at this point Idx is either 0 or 2.
10677     // Shift to get the doubleword position we want.
10678     int DWord = Idx >> 1;
10679 
10680     // High and low word positions are different on little endian.
10681     if (Subtarget.isLittleEndian())
10682       DWord ^= 0x1;
10683 
10684     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64,
10685                        Op0.getOperand(0), DAG.getConstant(DWord, dl, MVT::i32));
10686   }
10687   case ISD::FADD:
10688   case ISD::FMUL:
10689   case ISD::FSUB: {
10690     SDValue NewLoad[2];
10691     for (unsigned i = 0, ie = Op0.getNumOperands(); i != ie; ++i) {
10692       // Ensure both input are loads.
10693       SDValue LdOp = Op0.getOperand(i);
10694       if (LdOp.getOpcode() != ISD::LOAD)
10695         return SDValue();
10696       // Generate new load node.
10697       LoadSDNode *LD = cast<LoadSDNode>(LdOp);
10698       SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
10699       NewLoad[i] = DAG.getMemIntrinsicNode(
10700           PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
10701           LD->getMemoryVT(), LD->getMemOperand());
10702     }
10703     SDValue NewOp =
10704         DAG.getNode(Op0.getOpcode(), SDLoc(Op0), MVT::v4f32, NewLoad[0],
10705                     NewLoad[1], Op0.getNode()->getFlags());
10706     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewOp,
10707                        DAG.getConstant(0, dl, MVT::i32));
10708   }
10709   case ISD::LOAD: {
10710     LoadSDNode *LD = cast<LoadSDNode>(Op0);
10711     SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
10712     SDValue NewLd = DAG.getMemIntrinsicNode(
10713         PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
10714         LD->getMemoryVT(), LD->getMemOperand());
10715     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewLd,
10716                        DAG.getConstant(0, dl, MVT::i32));
10717   }
10718   }
  llvm_unreachable("Should return for all cases within switch.");
10720 }
10721 
10722 /// LowerOperation - Provide custom lowering hooks for some operations.
10723 ///
10724 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
10725   switch (Op.getOpcode()) {
10726   default: llvm_unreachable("Wasn't expecting to be able to lower this!");
10727   case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
10728   case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
10729   case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
10730   case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
10731   case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
10732   case ISD::STRICT_FSETCC:
10733   case ISD::STRICT_FSETCCS:
10734   case ISD::SETCC:              return LowerSETCC(Op, DAG);
10735   case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
10736   case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
10737 
10738   // Variable argument lowering.
10739   case ISD::VASTART:            return LowerVASTART(Op, DAG);
10740   case ISD::VAARG:              return LowerVAARG(Op, DAG);
10741   case ISD::VACOPY:             return LowerVACOPY(Op, DAG);
10742 
10743   case ISD::STACKRESTORE:       return LowerSTACKRESTORE(Op, DAG);
10744   case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
10745   case ISD::GET_DYNAMIC_AREA_OFFSET:
10746     return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
10747 
10748   // Exception handling lowering.
10749   case ISD::EH_DWARF_CFA:       return LowerEH_DWARF_CFA(Op, DAG);
10750   case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
10751   case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);
10752 
10753   case ISD::LOAD:               return LowerLOAD(Op, DAG);
10754   case ISD::STORE:              return LowerSTORE(Op, DAG);
10755   case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
10756   case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
10757   case ISD::STRICT_FP_TO_UINT:
10758   case ISD::STRICT_FP_TO_SINT:
10759   case ISD::FP_TO_UINT:
10760   case ISD::FP_TO_SINT:         return LowerFP_TO_INT(Op, DAG, SDLoc(Op));
10761   case ISD::STRICT_UINT_TO_FP:
10762   case ISD::STRICT_SINT_TO_FP:
10763   case ISD::UINT_TO_FP:
10764   case ISD::SINT_TO_FP:         return LowerINT_TO_FP(Op, DAG);
10765   case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);
10766 
10767   // Lower 64-bit shifts.
10768   case ISD::SHL_PARTS:          return LowerSHL_PARTS(Op, DAG);
10769   case ISD::SRL_PARTS:          return LowerSRL_PARTS(Op, DAG);
10770   case ISD::SRA_PARTS:          return LowerSRA_PARTS(Op, DAG);
10771 
10772   case ISD::FSHL:               return LowerFunnelShift(Op, DAG);
10773   case ISD::FSHR:               return LowerFunnelShift(Op, DAG);
10774 
10775   // Vector-related lowering.
10776   case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
10777   case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
10778   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
10779   case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
10780   case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
10781   case ISD::MUL:                return LowerMUL(Op, DAG);
10782   case ISD::FP_EXTEND:          return LowerFP_EXTEND(Op, DAG);
10783   case ISD::STRICT_FP_ROUND:
10784   case ISD::FP_ROUND:
10785     return LowerFP_ROUND(Op, DAG);
10786   case ISD::ROTL:               return LowerROTL(Op, DAG);
10787 
10788   // For counter-based loop handling.
10789   case ISD::INTRINSIC_W_CHAIN:  return SDValue();
10790 
10791   case ISD::BITCAST:            return LowerBITCAST(Op, DAG);
10792 
10793   // Frame & Return address.
10794   case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
10795   case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
10796 
10797   case ISD::INTRINSIC_VOID:
10798     return LowerINTRINSIC_VOID(Op, DAG);
10799   case ISD::BSWAP:
10800     return LowerBSWAP(Op, DAG);
10801   case ISD::ATOMIC_CMP_SWAP:
10802     return LowerATOMIC_CMP_SWAP(Op, DAG);
10803   }
10804 }
10805 
10806 void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
10808                                            SelectionDAG &DAG) const {
10809   SDLoc dl(N);
10810   switch (N->getOpcode()) {
10811   default:
10812     llvm_unreachable("Do not know how to custom type legalize this operation!");
10813   case ISD::READCYCLECOUNTER: {
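    // READ_TIME_BASE produces the 64-bit time base as two i32 halves plus a
    // chain; pair the halves back into the expected i64 result.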
10814     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
    SDValue RTB =
        DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0));
10816 
10817     Results.push_back(
10818         DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, RTB, RTB.getValue(1)));
10819     Results.push_back(RTB.getValue(2));
10820     break;
10821   }
10822   case ISD::INTRINSIC_W_CHAIN: {
10823     if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
10824         Intrinsic::loop_decrement)
10825       break;
10826 
10827     assert(N->getValueType(0) == MVT::i1 &&
10828            "Unexpected result type for CTR decrement intrinsic");
10829     EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
10830                                  N->getValueType(0));
10831     SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
10832     SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
10833                                  N->getOperand(1));
10834 
10835     Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt));
10836     Results.push_back(NewInt.getValue(1));
10837     break;
10838   }
10839   case ISD::VAARG: {
10840     if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
10841       return;
10842 
10843     EVT VT = N->getValueType(0);
10844 
10845     if (VT == MVT::i64) {
10846       SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);
10847 
10848       Results.push_back(NewNode);
10849       Results.push_back(NewNode.getValue(1));
10850     }
10851     return;
10852   }
10853   case ISD::STRICT_FP_TO_SINT:
10854   case ISD::STRICT_FP_TO_UINT:
10855   case ISD::FP_TO_SINT:
10856   case ISD::FP_TO_UINT:
10857     // LowerFP_TO_INT() can only handle f32 and f64.
10858     if (N->getOperand(N->isStrictFPOpcode() ? 1 : 0).getValueType() ==
10859         MVT::ppcf128)
10860       return;
10861     Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
10862     return;
10863   case ISD::TRUNCATE: {
10864     if (!N->getValueType(0).isVector())
10865       return;
10866     SDValue Lowered = LowerTRUNCATEVector(SDValue(N, 0), DAG);
10867     if (Lowered)
10868       Results.push_back(Lowered);
10869     return;
10870   }
10871   case ISD::FSHL:
10872   case ISD::FSHR:
10873     // Don't handle funnel shifts here.
10874     return;
10875   case ISD::BITCAST:
10876     // Don't handle bitcast here.
10877     return;
  case ISD::FP_EXTEND: {
    SDValue Lowered = LowerFP_EXTEND(SDValue(N, 0), DAG);
    if (Lowered)
      Results.push_back(Lowered);
    return;
  }
  }
10884 }
10885 
10886 //===----------------------------------------------------------------------===//
10887 //  Other Lowering Code
10888 //===----------------------------------------------------------------------===//
10889 
10890 static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
10891   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
10892   Function *Func = Intrinsic::getDeclaration(M, Id);
10893   return Builder.CreateCall(Func, {});
10894 }
10895 
// The mappings for emitLeading/TrailingFence are taken from
10897 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
10898 Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
10899                                                  Instruction *Inst,
10900                                                  AtomicOrdering Ord) const {
10901   if (Ord == AtomicOrdering::SequentiallyConsistent)
10902     return callIntrinsic(Builder, Intrinsic::ppc_sync);
10903   if (isReleaseOrStronger(Ord))
10904     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
10905   return nullptr;
10906 }
10907 
10908 Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
10909                                                   Instruction *Inst,
10910                                                   AtomicOrdering Ord) const {
10911   if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
10912     // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
10913     // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
10914     // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
10915     if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
10916       return Builder.CreateCall(
10917           Intrinsic::getDeclaration(
10918               Builder.GetInsertBlock()->getParent()->getParent(),
10919               Intrinsic::ppc_cfence, {Inst->getType()}),
10920           {Inst});
10921     // FIXME: Can use isync for rmw operation.
10922     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
10923   }
10924   return nullptr;
10925 }
10926 
10927 MachineBasicBlock *
10928 PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
10929                                     unsigned AtomicSize,
10930                                     unsigned BinOpcode,
10931                                     unsigned CmpOpcode,
10932                                     unsigned CmpPred) const {
10933   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
10934   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10935 
10936   auto LoadMnemonic = PPC::LDARX;
10937   auto StoreMnemonic = PPC::STDCX;
10938   switch (AtomicSize) {
10939   default:
10940     llvm_unreachable("Unexpected size of atomic entity");
10941   case 1:
10942     LoadMnemonic = PPC::LBARX;
10943     StoreMnemonic = PPC::STBCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "1-byte atomics require partword atomic support");
10945     break;
10946   case 2:
10947     LoadMnemonic = PPC::LHARX;
10948     StoreMnemonic = PPC::STHCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "2-byte atomics require partword atomic support");
10950     break;
10951   case 4:
10952     LoadMnemonic = PPC::LWARX;
10953     StoreMnemonic = PPC::STWCX;
10954     break;
10955   case 8:
10956     LoadMnemonic = PPC::LDARX;
10957     StoreMnemonic = PPC::STDCX;
10958     break;
10959   }
10960 
10961   const BasicBlock *LLVM_BB = BB->getBasicBlock();
10962   MachineFunction *F = BB->getParent();
10963   MachineFunction::iterator It = ++BB->getIterator();
10964 
10965   Register dest = MI.getOperand(0).getReg();
10966   Register ptrA = MI.getOperand(1).getReg();
10967   Register ptrB = MI.getOperand(2).getReg();
10968   Register incr = MI.getOperand(3).getReg();
10969   DebugLoc dl = MI.getDebugLoc();
10970 
10971   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
10972   MachineBasicBlock *loop2MBB =
10973     CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
10974   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
10975   F->insert(It, loopMBB);
10976   if (CmpOpcode)
10977     F->insert(It, loop2MBB);
10978   F->insert(It, exitMBB);
10979   exitMBB->splice(exitMBB->begin(), BB,
10980                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
10981   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
10982 
10983   MachineRegisterInfo &RegInfo = F->getRegInfo();
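  // For ATOMIC_SWAP (BinOpcode == 0) the incoming value is stored back
  // unchanged, so it needs no temporary register.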
  Register TmpReg =
      (!BinOpcode) ? incr
                   : RegInfo.createVirtualRegister(AtomicSize == 8
                                                       ? &PPC::G8RCRegClass
                                                       : &PPC::GPRCRegClass);
10987 
10988   //  thisMBB:
10989   //   ...
10990   //   fallthrough --> loopMBB
10991   BB->addSuccessor(loopMBB);
10992 
10993   //  loopMBB:
10994   //   l[wd]arx dest, ptr
10995   //   add r0, dest, incr
10996   //   st[wd]cx. r0, ptr
10997   //   bne- loopMBB
10998   //   fallthrough --> exitMBB
10999 
11000   // For max/min...
11001   //  loopMBB:
11002   //   l[wd]arx dest, ptr
11003   //   cmpl?[wd] incr, dest
11004   //   bgt exitMBB
11005   //  loop2MBB:
11006   //   st[wd]cx. dest, ptr
11007   //   bne- loopMBB
11008   //   fallthrough --> exitMBB
11009 
11010   BB = loopMBB;
11011   BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
11012     .addReg(ptrA).addReg(ptrB);
11013   if (BinOpcode)
11014     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
11015   if (CmpOpcode) {
11016     // Signed comparisons of byte or halfword values must be sign-extended.
11017     if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
11018       Register ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
11019       BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
11020               ExtReg).addReg(dest);
11021       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
11022         .addReg(incr).addReg(ExtReg);
11023     } else
11024       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
11025         .addReg(incr).addReg(dest);
11026 
11027     BuildMI(BB, dl, TII->get(PPC::BCC))
11028       .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
11029     BB->addSuccessor(loop2MBB);
11030     BB->addSuccessor(exitMBB);
11031     BB = loop2MBB;
11032   }
11033   BuildMI(BB, dl, TII->get(StoreMnemonic))
11034     .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
11035   BuildMI(BB, dl, TII->get(PPC::BCC))
11036     .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
11037   BB->addSuccessor(loopMBB);
11038   BB->addSuccessor(exitMBB);
11039 
11040   //  exitMBB:
11041   //   ...
11042   BB = exitMBB;
11043   return BB;
11044 }
11045 
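// Returns true if MI is known to produce a sign-extended result: algebraic
// loads, the sign-extension ops, algebraic right shifts, or a COPY that the
// instruction info can prove is sign-extended.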
11046 static bool isSignExtended(MachineInstr &MI, const PPCInstrInfo *TII) {
  switch (MI.getOpcode()) {
11048   default:
11049     return false;
11050   case PPC::COPY:
11051     return TII->isSignExtended(MI);
11052   case PPC::LHA:
11053   case PPC::LHA8:
11054   case PPC::LHAU:
11055   case PPC::LHAU8:
11056   case PPC::LHAUX:
11057   case PPC::LHAUX8:
11058   case PPC::LHAX:
11059   case PPC::LHAX8:
11060   case PPC::LWA:
11061   case PPC::LWAUX:
11062   case PPC::LWAX:
11063   case PPC::LWAX_32:
11064   case PPC::LWA_32:
11065   case PPC::PLHA:
11066   case PPC::PLHA8:
11067   case PPC::PLHA8pc:
11068   case PPC::PLHApc:
11069   case PPC::PLWA:
11070   case PPC::PLWA8:
11071   case PPC::PLWA8pc:
11072   case PPC::PLWApc:
11073   case PPC::EXTSB:
11074   case PPC::EXTSB8:
11075   case PPC::EXTSB8_32_64:
11076   case PPC::EXTSB8_rec:
11077   case PPC::EXTSB_rec:
11078   case PPC::EXTSH:
11079   case PPC::EXTSH8:
11080   case PPC::EXTSH8_32_64:
11081   case PPC::EXTSH8_rec:
11082   case PPC::EXTSH_rec:
11083   case PPC::EXTSW:
11084   case PPC::EXTSWSLI:
11085   case PPC::EXTSWSLI_32_64:
11086   case PPC::EXTSWSLI_32_64_rec:
11087   case PPC::EXTSWSLI_rec:
11088   case PPC::EXTSW_32:
11089   case PPC::EXTSW_32_64:
11090   case PPC::EXTSW_32_64_rec:
11091   case PPC::EXTSW_rec:
11092   case PPC::SRAW:
11093   case PPC::SRAWI:
11094   case PPC::SRAWI_rec:
11095   case PPC::SRAW_rec:
11096     return true;
11097   }
11098   return false;
11099 }
11100 
11101 MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary(
11102     MachineInstr &MI, MachineBasicBlock *BB,
    bool is8bit, // true for an 8-bit operation, false for 16-bit
11104     unsigned BinOpcode, unsigned CmpOpcode, unsigned CmpPred) const {
11105   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
11106   const PPCInstrInfo *TII = Subtarget.getInstrInfo();
11107 
11108   // If this is a signed comparison and the value being compared is not known
11109   // to be sign extended, sign extend it here.
11110   DebugLoc dl = MI.getDebugLoc();
11111   MachineFunction *F = BB->getParent();
11112   MachineRegisterInfo &RegInfo = F->getRegInfo();
11113   Register incr = MI.getOperand(3).getReg();
  bool IsSignExtended = Register::isVirtualRegister(incr) &&
                        isSignExtended(*RegInfo.getVRegDef(incr), TII);
11116 
11117   if (CmpOpcode == PPC::CMPW && !IsSignExtended) {
11118     Register ValueReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
11119     BuildMI(*BB, MI, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueReg)
11120         .addReg(MI.getOperand(3).getReg());
11121     MI.getOperand(3).setReg(ValueReg);
11122   }
  // If we support part-word atomic mnemonics, just use them.
11124   if (Subtarget.hasPartwordAtomics())
11125     return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, CmpOpcode,
11126                             CmpPred);
11127 
  // In 64-bit mode we have to use 64 bits for addresses, even though the
  // lwarx/stwcx. instructions are 32-bit.  With the 32-bit atomics we can use
  // address registers without caring whether they're 32 or 64 bits wide, but
  // here we're doing actual arithmetic on the addresses.
11132   bool is64bit = Subtarget.isPPC64();
11133   bool isLittleEndian = Subtarget.isLittleEndian();
11134   unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
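  // ZERO/ZERO8 stands for the fixed zero register: using it as the RA operand
  // of lwarx/stwcx. selects the (0 + RB) addressing form.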
11135 
11136   const BasicBlock *LLVM_BB = BB->getBasicBlock();
11137   MachineFunction::iterator It = ++BB->getIterator();
11138 
11139   Register dest = MI.getOperand(0).getReg();
11140   Register ptrA = MI.getOperand(1).getReg();
11141   Register ptrB = MI.getOperand(2).getReg();
11142 
11143   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
11144   MachineBasicBlock *loop2MBB =
11145       CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
11146   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11147   F->insert(It, loopMBB);
11148   if (CmpOpcode)
11149     F->insert(It, loop2MBB);
11150   F->insert(It, exitMBB);
11151   exitMBB->splice(exitMBB->begin(), BB,
11152                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
11153   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11154 
11155   const TargetRegisterClass *RC =
11156       is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11157   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
11158 
11159   Register PtrReg = RegInfo.createVirtualRegister(RC);
11160   Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
11161   Register ShiftReg =
11162       isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
11163   Register Incr2Reg = RegInfo.createVirtualRegister(GPRC);
11164   Register MaskReg = RegInfo.createVirtualRegister(GPRC);
11165   Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
11166   Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
11167   Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
11168   Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
11169   Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
11170   Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
11171   Register Ptr1Reg;
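  // For ATOMIC_SWAP (BinOpcode == 0) the shifted incoming value is stored
  // back unmodified, so Incr2Reg can double as TmpReg.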
11172   Register TmpReg =
11173       (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC);
11174 
11175   //  thisMBB:
11176   //   ...
11177   //   fallthrough --> loopMBB
11178   BB->addSuccessor(loopMBB);
11179 
11180   // The 4-byte load must be aligned, while a char or short may be
11181   // anywhere in the word.  Hence all this nasty bookkeeping code.
11182   //   add ptr1, ptrA, ptrB [copy if ptrA==0]
11183   //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
11184   //   xori shift, shift1, 24 [16]
11185   //   rlwinm ptr, ptr1, 0, 0, 29
11186   //   slw incr2, incr, shift
11187   //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
11188   //   slw mask, mask2, shift
11189   //  loopMBB:
11190   //   lwarx tmpDest, ptr
11191   //   add tmp, tmpDest, incr2
11192   //   andc tmp2, tmpDest, mask
11193   //   and tmp3, tmp, mask
11194   //   or tmp4, tmp3, tmp2
11195   //   stwcx. tmp4, ptr
11196   //   bne- loopMBB
11197   //   fallthrough --> exitMBB
11198   //   srw dest, tmpDest, shift
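  // When CmpOpcode is nonzero (the min/max operations), a compare of incr
  // against the masked (and, for signed ops, shifted-down and sign-extended)
  // current value and a conditional branch to exitMBB are emitted before the
  // stwcx., splitting the loop into loopMBB and loop2MBB.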
11199   if (ptrA != ZeroReg) {
11200     Ptr1Reg = RegInfo.createVirtualRegister(RC);
11201     BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
11202         .addReg(ptrA)
11203         .addReg(ptrB);
11204   } else {
11205     Ptr1Reg = ptrB;
11206   }
  // We need to use the 32-bit subregister to avoid a register class mismatch
  // in 64-bit mode.
11209   BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
11210       .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
11211       .addImm(3)
11212       .addImm(27)
11213       .addImm(is8bit ? 28 : 27);
11214   if (!isLittleEndian)
11215     BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
11216         .addReg(Shift1Reg)
11217         .addImm(is8bit ? 24 : 16);
11218   if (is64bit)
11219     BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
11220         .addReg(Ptr1Reg)
11221         .addImm(0)
11222         .addImm(61);
11223   else
11224     BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
11225         .addReg(Ptr1Reg)
11226         .addImm(0)
11227         .addImm(0)
11228         .addImm(29);
11229   BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg).addReg(incr).addReg(ShiftReg);
11230   if (is8bit)
11231     BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
11232   else {
11233     BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
11234     BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
11235         .addReg(Mask3Reg)
11236         .addImm(65535);
11237   }
11238   BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
11239       .addReg(Mask2Reg)
11240       .addReg(ShiftReg);
11241 
11242   BB = loopMBB;
11243   BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
11244       .addReg(ZeroReg)
11245       .addReg(PtrReg);
11246   if (BinOpcode)
11247     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
11248         .addReg(Incr2Reg)
11249         .addReg(TmpDestReg);
11250   BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
11251       .addReg(TmpDestReg)
11252       .addReg(MaskReg);
11253   BuildMI(BB, dl, TII->get(PPC::AND), Tmp3Reg).addReg(TmpReg).addReg(MaskReg);
11254   if (CmpOpcode) {
11255     // For unsigned comparisons, we can directly compare the shifted values.
11256     // For signed comparisons we shift and sign extend.
11257     Register SReg = RegInfo.createVirtualRegister(GPRC);
11258     BuildMI(BB, dl, TII->get(PPC::AND), SReg)
11259         .addReg(TmpDestReg)
11260         .addReg(MaskReg);
11261     unsigned ValueReg = SReg;
11262     unsigned CmpReg = Incr2Reg;
11263     if (CmpOpcode == PPC::CMPW) {
11264       ValueReg = RegInfo.createVirtualRegister(GPRC);
11265       BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
11266           .addReg(SReg)
11267           .addReg(ShiftReg);
11268       Register ValueSReg = RegInfo.createVirtualRegister(GPRC);
11269       BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
11270           .addReg(ValueReg);
11271       ValueReg = ValueSReg;
11272       CmpReg = incr;
11273     }
11274     BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
11275         .addReg(CmpReg)
11276         .addReg(ValueReg);
11277     BuildMI(BB, dl, TII->get(PPC::BCC))
11278         .addImm(CmpPred)
11279         .addReg(PPC::CR0)
11280         .addMBB(exitMBB);
11281     BB->addSuccessor(loop2MBB);
11282     BB->addSuccessor(exitMBB);
11283     BB = loop2MBB;
11284   }
11285   BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg).addReg(Tmp3Reg).addReg(Tmp2Reg);
11286   BuildMI(BB, dl, TII->get(PPC::STWCX))
11287       .addReg(Tmp4Reg)
11288       .addReg(ZeroReg)
11289       .addReg(PtrReg);
11290   BuildMI(BB, dl, TII->get(PPC::BCC))
11291       .addImm(PPC::PRED_NE)
11292       .addReg(PPC::CR0)
11293       .addMBB(loopMBB);
11294   BB->addSuccessor(loopMBB);
11295   BB->addSuccessor(exitMBB);
11296 
11297   //  exitMBB:
11298   //   ...
11299   BB = exitMBB;
11300   BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
11301       .addReg(TmpDestReg)
11302       .addReg(ShiftReg);
11303   return BB;
11304 }
11305 
11306 llvm::MachineBasicBlock *
11307 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
11308                                     MachineBasicBlock *MBB) const {
11309   DebugLoc DL = MI.getDebugLoc();
11310   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11311   const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
11312 
11313   MachineFunction *MF = MBB->getParent();
11314   MachineRegisterInfo &MRI = MF->getRegInfo();
11315 
11316   const BasicBlock *BB = MBB->getBasicBlock();
11317   MachineFunction::iterator I = ++MBB->getIterator();
11318 
11319   Register DstReg = MI.getOperand(0).getReg();
11320   const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
11321   assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
11322   Register mainDstReg = MRI.createVirtualRegister(RC);
11323   Register restoreDstReg = MRI.createVirtualRegister(RC);
11324 
11325   MVT PVT = getPointerTy(MF->getDataLayout());
11326   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
11327          "Invalid Pointer Size!");
11328   // For v = setjmp(buf), we generate
11329   //
11330   // thisMBB:
11331   //  SjLjSetup mainMBB
11332   //  bl mainMBB
11333   //  v_restore = 1
11334   //  b sinkMBB
11335   //
11336   // mainMBB:
11337   //  buf[LabelOffset] = LR
11338   //  v_main = 0
11339   //
11340   // sinkMBB:
11341   //  v = phi(main, restore)
11342   //
11343 
11344   MachineBasicBlock *thisMBB = MBB;
11345   MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
11346   MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
11347   MF->insert(I, mainMBB);
11348   MF->insert(I, sinkMBB);
11349 
11350   MachineInstrBuilder MIB;
11351 
11352   // Transfer the remainder of BB and its successor edges to sinkMBB.
11353   sinkMBB->splice(sinkMBB->begin(), MBB,
11354                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
11355   sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
11356 
11357   // Note that the structure of the jmp_buf used here is not compatible
11358   // with that used by libc, and is not designed to be. Specifically, it
11359   // stores only those 'reserved' registers that LLVM does not otherwise
11360   // understand how to spill. Also, by convention, by the time this
11361   // intrinsic is called, Clang has already stored the frame address in the
11362   // first slot of the buffer and stack address in the third. Following the
11363   // X86 target code, we'll store the jump address in the second slot. We also
11364   // need to save the TOC pointer (R2) to handle jumps between shared
11365   // libraries, and that will be stored in the fourth slot. The thread
11366   // identifier (R13) is not affected.
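  //
  // The resulting jmp_buf slot layout, in units of the pointer size, is:
  //   0: frame address (stored by Clang)
  //   1: jump address  (stored below)
  //   2: stack address (stored by Clang)
  //   3: TOC pointer (R2)
  //   4: base pointer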
11367 
11368   // thisMBB:
11369   const int64_t LabelOffset = 1 * PVT.getStoreSize();
11370   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
11371   const int64_t BPOffset    = 4 * PVT.getStoreSize();
11372 
  // Prepare the IP (jump address) in a register.
11374   const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
11375   Register LabelReg = MRI.createVirtualRegister(PtrRC);
11376   Register BufReg = MI.getOperand(1).getReg();
11377 
11378   if (Subtarget.is64BitELFABI()) {
11379     setUsesTOCBasePtr(*MBB->getParent());
11380     MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
11381               .addReg(PPC::X2)
11382               .addImm(TOCOffset)
11383               .addReg(BufReg)
11384               .cloneMemRefs(MI);
11385   }
11386 
  // Naked functions never have a base pointer, and so we use r1. For all
  // other functions, this decision must be deferred until PEI.
11389   unsigned BaseReg;
11390   if (MF->getFunction().hasFnAttribute(Attribute::Naked))
11391     BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
11392   else
11393     BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;
11394 
11395   MIB = BuildMI(*thisMBB, MI, DL,
11396                 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
11397             .addReg(BaseReg)
11398             .addImm(BPOffset)
11399             .addReg(BufReg)
11400             .cloneMemRefs(MI);
11401 
  // Setup: the branch-and-link below carries a no-preserved-registers mask,
  // since control may return here via a longjmp with arbitrary register state.
11403   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
11404   MIB.addRegMask(TRI->getNoPreservedMask());
11405 
11406   BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);
11407 
11408   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
11409           .addMBB(mainMBB);
11410   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);
11411 
11412   thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
11413   thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());
11414 
11415   // mainMBB:
11416   //  mainDstReg = 0
11417   MIB =
11418       BuildMI(mainMBB, DL,
11419               TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
11420 
11421   // Store IP
11422   if (Subtarget.isPPC64()) {
11423     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
11424             .addReg(LabelReg)
11425             .addImm(LabelOffset)
11426             .addReg(BufReg);
11427   } else {
11428     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
11429             .addReg(LabelReg)
11430             .addImm(LabelOffset)
11431             .addReg(BufReg);
11432   }
11433   MIB.cloneMemRefs(MI);
11434 
11435   BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
11436   mainMBB->addSuccessor(sinkMBB);
11437 
11438   // sinkMBB:
11439   BuildMI(*sinkMBB, sinkMBB->begin(), DL,
11440           TII->get(PPC::PHI), DstReg)
11441     .addReg(mainDstReg).addMBB(mainMBB)
11442     .addReg(restoreDstReg).addMBB(thisMBB);
11443 
11444   MI.eraseFromParent();
11445   return sinkMBB;
11446 }
11447 
11448 MachineBasicBlock *
11449 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
11450                                      MachineBasicBlock *MBB) const {
11451   DebugLoc DL = MI.getDebugLoc();
11452   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11453 
11454   MachineFunction *MF = MBB->getParent();
11455   MachineRegisterInfo &MRI = MF->getRegInfo();
11456 
11457   MVT PVT = getPointerTy(MF->getDataLayout());
11458   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
11459          "Invalid Pointer Size!");
11460 
11461   const TargetRegisterClass *RC =
11462     (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11463   Register Tmp = MRI.createVirtualRegister(RC);
  // Since FP is only updated here but NOT referenced, it's treated as a GPR.
11465   unsigned FP  = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
11466   unsigned SP  = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
11467   unsigned BP =
11468       (PVT == MVT::i64)
11469           ? PPC::X30
11470           : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
11471                                                               : PPC::R30);
11472 
11473   MachineInstrBuilder MIB;
11474 
11475   const int64_t LabelOffset = 1 * PVT.getStoreSize();
11476   const int64_t SPOffset    = 2 * PVT.getStoreSize();
11477   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
11478   const int64_t BPOffset    = 4 * PVT.getStoreSize();
11479 
11480   Register BufReg = MI.getOperand(0).getReg();
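
  // For a 64-bit target, the sequence emitted below is roughly:
  //   ld fp,  0(buf)
  //   ld tmp, LabelOffset(buf)
  //   ld sp,  SPOffset(buf)
  //   ld bp,  BPOffset(buf)
  //   ld r2,  TOCOffset(buf)  (64-bit SVR4 only)
  //   mtctr tmp
  //   bctr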
11481 
11482   // Reload FP (the jumped-to function may not have had a
11483   // frame pointer, and if so, then its r31 will be restored
11484   // as necessary).
11485   if (PVT == MVT::i64) {
11486     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
11487             .addImm(0)
11488             .addReg(BufReg);
11489   } else {
11490     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
11491             .addImm(0)
11492             .addReg(BufReg);
11493   }
11494   MIB.cloneMemRefs(MI);
11495 
11496   // Reload IP
11497   if (PVT == MVT::i64) {
11498     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
11499             .addImm(LabelOffset)
11500             .addReg(BufReg);
11501   } else {
11502     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
11503             .addImm(LabelOffset)
11504             .addReg(BufReg);
11505   }
11506   MIB.cloneMemRefs(MI);
11507 
11508   // Reload SP
11509   if (PVT == MVT::i64) {
11510     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
11511             .addImm(SPOffset)
11512             .addReg(BufReg);
11513   } else {
11514     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
11515             .addImm(SPOffset)
11516             .addReg(BufReg);
11517   }
11518   MIB.cloneMemRefs(MI);
11519 
11520   // Reload BP
11521   if (PVT == MVT::i64) {
11522     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
11523             .addImm(BPOffset)
11524             .addReg(BufReg);
11525   } else {
11526     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
11527             .addImm(BPOffset)
11528             .addReg(BufReg);
11529   }
11530   MIB.cloneMemRefs(MI);
11531 
11532   // Reload TOC
11533   if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
11534     setUsesTOCBasePtr(*MBB->getParent());
11535     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
11536               .addImm(TOCOffset)
11537               .addReg(BufReg)
11538               .cloneMemRefs(MI);
11539   }
11540 
11541   // Jump
11542   BuildMI(*MBB, MI, DL,
11543           TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
11544   BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));
11545 
11546   MI.eraseFromParent();
11547   return MBB;
11548 }
11549 
11550 bool PPCTargetLowering::hasInlineStackProbe(MachineFunction &MF) const {
11551   // If the function specifically requests inline stack probes, emit them.
11552   if (MF.getFunction().hasFnAttribute("probe-stack"))
11553     return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
11554            "inline-asm";
11555   return false;
11556 }
11557 
11558 unsigned PPCTargetLowering::getStackProbeSize(MachineFunction &MF) const {
11559   const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
11560   unsigned StackAlign = TFI->getStackAlignment();
11561   assert(StackAlign >= 1 && isPowerOf2_32(StackAlign) &&
11562          "Unexpected stack alignment");
11563   // The default stack probe size is 4096 if the function has no
11564   // stack-probe-size attribute.
11565   unsigned StackProbeSize = 4096;
11566   const Function &Fn = MF.getFunction();
11567   if (Fn.hasFnAttribute("stack-probe-size"))
11568     Fn.getFnAttribute("stack-probe-size")
11569         .getValueAsString()
11570         .getAsInteger(0, StackProbeSize);
11571   // Round down to the stack alignment.
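  // For example, a requested size of 5000 with a 16-byte stack alignment is
  // rounded down to 4992; a requested size smaller than the alignment falls
  // back to StackAlign itself.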
11572   StackProbeSize &= ~(StackAlign - 1);
11573   return StackProbeSize ? StackProbeSize : StackAlign;
11574 }
11575 
// Lower dynamic stack allocation with probing. `emitProbedAlloca` is split
// into three phases. In the first phase, it uses the pseudo instruction
// PREPARE_PROBED_ALLOCA to get the future results of the actual FramePointer
// and FinalStackPtr. In the second phase, it generates a loop that probes
// blocks. Finally, it uses the pseudo instruction DYNAREAOFFSET to get the
// future result of MaxCallFrameSize so that it can calculate the correct data
// area pointer.
11582 MachineBasicBlock *
11583 PPCTargetLowering::emitProbedAlloca(MachineInstr &MI,
11584                                     MachineBasicBlock *MBB) const {
11585   const bool isPPC64 = Subtarget.isPPC64();
11586   MachineFunction *MF = MBB->getParent();
11587   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11588   DebugLoc DL = MI.getDebugLoc();
11589   const unsigned ProbeSize = getStackProbeSize(*MF);
11590   const BasicBlock *ProbedBB = MBB->getBasicBlock();
11591   MachineRegisterInfo &MRI = MF->getRegInfo();
  // The CFG for probing the stack looks like:
11593   //         +-----+
11594   //         | MBB |
11595   //         +--+--+
11596   //            |
11597   //       +----v----+
11598   //  +--->+ TestMBB +---+
11599   //  |    +----+----+   |
11600   //  |         |        |
11601   //  |   +-----v----+   |
11602   //  +---+ BlockMBB |   |
11603   //      +----------+   |
11604   //                     |
11605   //       +---------+   |
11606   //       | TailMBB +<--+
11607   //       +---------+
11608   // In MBB, calculate previous frame pointer and final stack pointer.
  // In TestMBB, test whether sp equals the final stack pointer; if so, jump
  // to TailMBB. In BlockMBB, update sp atomically and jump back to TestMBB.
11611   // TailMBB is spliced via \p MI.
11612   MachineBasicBlock *TestMBB = MF->CreateMachineBasicBlock(ProbedBB);
11613   MachineBasicBlock *TailMBB = MF->CreateMachineBasicBlock(ProbedBB);
11614   MachineBasicBlock *BlockMBB = MF->CreateMachineBasicBlock(ProbedBB);
11615 
11616   MachineFunction::iterator MBBIter = ++MBB->getIterator();
11617   MF->insert(MBBIter, TestMBB);
11618   MF->insert(MBBIter, BlockMBB);
11619   MF->insert(MBBIter, TailMBB);
11620 
11621   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
11622   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
11623 
11624   Register DstReg = MI.getOperand(0).getReg();
11625   Register NegSizeReg = MI.getOperand(1).getReg();
11626   Register SPReg = isPPC64 ? PPC::X1 : PPC::R1;
11627   Register FinalStackPtr = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11628   Register FramePointer = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11629   Register ActualNegSizeReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11630 
  // Since the value of NegSizeReg might be realigned during prologue/epilogue
  // insertion, insert a PREPARE_PROBED_ALLOCA pseudo instruction to get the
  // actual FramePointer and NegSize.
11634   unsigned ProbeOpc;
11635   if (!MRI.hasOneNonDBGUse(NegSizeReg))
11636     ProbeOpc =
11637         isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_64 : PPC::PREPARE_PROBED_ALLOCA_32;
11638   else
    // When NegSizeReg has only one use (the current MI, which will be
    // replaced by PREPARE_PROBED_ALLOCA), use the NEGSIZE_SAME_REG variants
    // below so that ActualNegSizeReg and NegSizeReg are allocated to the same
    // physical register, avoiding a redundant copy.
11643     ProbeOpc = isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_64
11644                        : PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_32;
11645   BuildMI(*MBB, {MI}, DL, TII->get(ProbeOpc), FramePointer)
11646       .addDef(ActualNegSizeReg)
11647       .addReg(NegSizeReg)
11648       .add(MI.getOperand(2))
11649       .add(MI.getOperand(3));
11650 
  // Calculate the final stack pointer, which equals SP + ActualNegSize.
11652   BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4),
11653           FinalStackPtr)
11654       .addReg(SPReg)
11655       .addReg(ActualNegSizeReg);
11656 
11657   // Materialize a scratch register for update.
11658   int64_t NegProbeSize = -(int64_t)ProbeSize;
11659   assert(isInt<32>(NegProbeSize) && "Unhandled probe size!");
11660   Register ScratchReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
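  // If NegProbeSize does not fit in a signed 16-bit immediate, materialize it
  // with LIS (sign-extended high half) followed by ORI (low half).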
11661   if (!isInt<16>(NegProbeSize)) {
11662     Register TempReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11663     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LIS8 : PPC::LIS), TempReg)
11664         .addImm(NegProbeSize >> 16);
11665     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ORI8 : PPC::ORI),
11666             ScratchReg)
11667         .addReg(TempReg)
11668         .addImm(NegProbeSize & 0xFFFF);
11669   } else
11670     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LI8 : PPC::LI), ScratchReg)
11671         .addImm(NegProbeSize);
11672 
11673   {
    // Probe the leading residual part.
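    // The residual is the remainder of ActualNegSize modulo NegProbeSize,
    // computed as NegMod = ActualNegSize - (ActualNegSize / NegProbeSize) *
    // NegProbeSize, and is probed with a single store-with-update of SP.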
11675     Register Div = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11676     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::DIVD : PPC::DIVW), Div)
11677         .addReg(ActualNegSizeReg)
11678         .addReg(ScratchReg);
11679     Register Mul = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11680     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::MULLD : PPC::MULLW), Mul)
11681         .addReg(Div)
11682         .addReg(ScratchReg);
11683     Register NegMod = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11684     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::SUBF8 : PPC::SUBF), NegMod)
11685         .addReg(Mul)
11686         .addReg(ActualNegSizeReg);
11687     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
11688         .addReg(FramePointer)
11689         .addReg(SPReg)
11690         .addReg(NegMod);
11691   }
11692 
11693   {
    // The remaining part should be a multiple of ProbeSize.
11695     Register CmpResult = MRI.createVirtualRegister(&PPC::CRRCRegClass);
11696     BuildMI(TestMBB, DL, TII->get(isPPC64 ? PPC::CMPD : PPC::CMPW), CmpResult)
11697         .addReg(SPReg)
11698         .addReg(FinalStackPtr);
11699     BuildMI(TestMBB, DL, TII->get(PPC::BCC))
11700         .addImm(PPC::PRED_EQ)
11701         .addReg(CmpResult)
11702         .addMBB(TailMBB);
11703     TestMBB->addSuccessor(BlockMBB);
11704     TestMBB->addSuccessor(TailMBB);
11705   }
11706 
11707   {
11708     // Touch the block.
11709     // |P...|P...|P...
11710     BuildMI(BlockMBB, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
11711         .addReg(FramePointer)
11712         .addReg(SPReg)
11713         .addReg(ScratchReg);
11714     BuildMI(BlockMBB, DL, TII->get(PPC::B)).addMBB(TestMBB);
11715     BlockMBB->addSuccessor(TestMBB);
11716   }
11717 
  // The calculation of MaxCallFrameSize is deferred to prologue/epilogue
  // insertion; use the DYNAREAOFFSET pseudo instruction to get its future
  // result.
11720   Register MaxCallFrameSizeReg =
11721       MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11722   BuildMI(TailMBB, DL,
11723           TII->get(isPPC64 ? PPC::DYNAREAOFFSET8 : PPC::DYNAREAOFFSET),
11724           MaxCallFrameSizeReg)
11725       .add(MI.getOperand(2))
11726       .add(MI.getOperand(3));
11727   BuildMI(TailMBB, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4), DstReg)
11728       .addReg(SPReg)
11729       .addReg(MaxCallFrameSizeReg);
11730 
11731   // Splice instructions after MI to TailMBB.
11732   TailMBB->splice(TailMBB->end(), MBB,
11733                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
11734   TailMBB->transferSuccessorsAndUpdatePHIs(MBB);
11735   MBB->addSuccessor(TestMBB);
11736 
11737   // Delete the pseudo instruction.
11738   MI.eraseFromParent();
11739 
11740   ++NumDynamicAllocaProbed;
11741   return TailMBB;
11742 }
11743 
11744 MachineBasicBlock *
11745 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
11746                                                MachineBasicBlock *BB) const {
11747   if (MI.getOpcode() == TargetOpcode::STACKMAP ||
11748       MI.getOpcode() == TargetOpcode::PATCHPOINT) {
11749     if (Subtarget.is64BitELFABI() &&
11750         MI.getOpcode() == TargetOpcode::PATCHPOINT &&
11751         !Subtarget.isUsingPCRelativeCalls()) {
11752       // Call lowering should have added an r2 operand to indicate a dependence
      // on the TOC base pointer value. It can't, however, because there is no
11754       // way to mark the dependence as implicit there, and so the stackmap code
11755       // will confuse it with a regular operand. Instead, add the dependence
11756       // here.
11757       MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
11758     }
11759 
11760     return emitPatchPoint(MI, BB);
11761   }
11762 
11763   if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
11764       MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
11765     return emitEHSjLjSetJmp(MI, BB);
11766   } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
11767              MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
11768     return emitEHSjLjLongJmp(MI, BB);
11769   }
11770 
11771   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11772 
11773   // To "insert" these instructions we actually have to insert their
11774   // control-flow patterns.
11775   const BasicBlock *LLVM_BB = BB->getBasicBlock();
11776   MachineFunction::iterator It = ++BB->getIterator();
11777 
11778   MachineFunction *F = BB->getParent();
11779 
11780   if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
11781       MI.getOpcode() == PPC::SELECT_CC_I8 || MI.getOpcode() == PPC::SELECT_I4 ||
11782       MI.getOpcode() == PPC::SELECT_I8) {
11783     SmallVector<MachineOperand, 2> Cond;
11784     if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
11785         MI.getOpcode() == PPC::SELECT_CC_I8)
11786       Cond.push_back(MI.getOperand(4));
11787     else
11788       Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
11789     Cond.push_back(MI.getOperand(1));
11790 
11791     DebugLoc dl = MI.getDebugLoc();
11792     TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
11793                       MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
11794   } else if (MI.getOpcode() == PPC::SELECT_CC_F4 ||
11795              MI.getOpcode() == PPC::SELECT_CC_F8 ||
11796              MI.getOpcode() == PPC::SELECT_CC_F16 ||
11797              MI.getOpcode() == PPC::SELECT_CC_VRRC ||
11798              MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
11799              MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
11800              MI.getOpcode() == PPC::SELECT_CC_VSRC ||
11801              MI.getOpcode() == PPC::SELECT_CC_SPE4 ||
11802              MI.getOpcode() == PPC::SELECT_CC_SPE ||
11803              MI.getOpcode() == PPC::SELECT_F4 ||
11804              MI.getOpcode() == PPC::SELECT_F8 ||
11805              MI.getOpcode() == PPC::SELECT_F16 ||
11806              MI.getOpcode() == PPC::SELECT_SPE ||
11807              MI.getOpcode() == PPC::SELECT_SPE4 ||
11808              MI.getOpcode() == PPC::SELECT_VRRC ||
11809              MI.getOpcode() == PPC::SELECT_VSFRC ||
11810              MI.getOpcode() == PPC::SELECT_VSSRC ||
11811              MI.getOpcode() == PPC::SELECT_VSRC) {
11812     // The incoming instruction knows the destination vreg to set, the
11813     // condition code register to branch on, the true/false values to
11814     // select between, and a branch opcode to use.
11815 
11816     //  thisMBB:
11817     //  ...
11818     //   TrueVal = ...
11819     //   cmpTY ccX, r1, r2
11820     //   bCC copy1MBB
11821     //   fallthrough --> copy0MBB
11822     MachineBasicBlock *thisMBB = BB;
11823     MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
11824     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
11825     DebugLoc dl = MI.getDebugLoc();
11826     F->insert(It, copy0MBB);
11827     F->insert(It, sinkMBB);
11828 
11829     // Transfer the remainder of BB and its successor edges to sinkMBB.
11830     sinkMBB->splice(sinkMBB->begin(), BB,
11831                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11832     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
11833 
11834     // Next, add the true and fallthrough blocks as its successors.
11835     BB->addSuccessor(copy0MBB);
11836     BB->addSuccessor(sinkMBB);
11837 
11838     if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 ||
11839         MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 ||
11840         MI.getOpcode() == PPC::SELECT_F16 ||
11841         MI.getOpcode() == PPC::SELECT_SPE4 ||
11842         MI.getOpcode() == PPC::SELECT_SPE ||
11843         MI.getOpcode() == PPC::SELECT_VRRC ||
11844         MI.getOpcode() == PPC::SELECT_VSFRC ||
11845         MI.getOpcode() == PPC::SELECT_VSSRC ||
11846         MI.getOpcode() == PPC::SELECT_VSRC) {
11847       BuildMI(BB, dl, TII->get(PPC::BC))
11848           .addReg(MI.getOperand(1).getReg())
11849           .addMBB(sinkMBB);
11850     } else {
11851       unsigned SelectPred = MI.getOperand(4).getImm();
11852       BuildMI(BB, dl, TII->get(PPC::BCC))
11853           .addImm(SelectPred)
11854           .addReg(MI.getOperand(1).getReg())
11855           .addMBB(sinkMBB);
11856     }
11857 
11858     //  copy0MBB:
11859     //   %FalseValue = ...
11860     //   # fallthrough to sinkMBB
11861     BB = copy0MBB;
11862 
11863     // Update machine-CFG edges
11864     BB->addSuccessor(sinkMBB);
11865 
11866     //  sinkMBB:
11867     //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
11868     //  ...
11869     BB = sinkMBB;
11870     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg())
11871         .addReg(MI.getOperand(3).getReg())
11872         .addMBB(copy0MBB)
11873         .addReg(MI.getOperand(2).getReg())
11874         .addMBB(thisMBB);
11875   } else if (MI.getOpcode() == PPC::ReadTB) {
11876     // To read the 64-bit time-base register on a 32-bit target, we read the
11877     // two halves. Should the counter have wrapped while it was being read, we
11878     // need to try again.
11879     // ...
11880     // readLoop:
    // mfspr Rx,TBU # load from TBU (SPR 269)
    // mfspr Ry,TB  # load from TB  (SPR 268)
    // mfspr Rz,TBU # load from TBU again
11884     // cmpw crX,Rx,Rz # check if 'old'='new'
11885     // bne readLoop   # branch if they're not equal
11886     // ...
11887 
11888     MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
11889     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
11890     DebugLoc dl = MI.getDebugLoc();
11891     F->insert(It, readMBB);
11892     F->insert(It, sinkMBB);
11893 
11894     // Transfer the remainder of BB and its successor edges to sinkMBB.
11895     sinkMBB->splice(sinkMBB->begin(), BB,
11896                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11897     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
11898 
11899     BB->addSuccessor(readMBB);
11900     BB = readMBB;
11901 
11902     MachineRegisterInfo &RegInfo = F->getRegInfo();
11903     Register ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
11904     Register LoReg = MI.getOperand(0).getReg();
11905     Register HiReg = MI.getOperand(1).getReg();
11906 
11907     BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
11908     BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
11909     BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269);
11910 
11911     Register CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
11912 
11913     BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
11914         .addReg(HiReg)
11915         .addReg(ReadAgainReg);
11916     BuildMI(BB, dl, TII->get(PPC::BCC))
11917         .addImm(PPC::PRED_NE)
11918         .addReg(CmpReg)
11919         .addMBB(readMBB);
11920 
11921     BB->addSuccessor(readMBB);
11922     BB->addSuccessor(sinkMBB);
11923   } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
11924     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
11925   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
11926     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
11927   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
11928     BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4);
11929   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
11930     BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8);
11931 
11932   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
11933     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
11934   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
11935     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
11936   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
11937     BB = EmitAtomicBinary(MI, BB, 4, PPC::AND);
11938   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
11939     BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8);
11940 
11941   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
11942     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
11943   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
11944     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
11945   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
11946     BB = EmitAtomicBinary(MI, BB, 4, PPC::OR);
11947   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
11948     BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8);
11949 
11950   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
11951     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
11952   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
11953     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
11954   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
11955     BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR);
11956   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
11957     BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8);
11958 
11959   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
11960     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
11961   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
11962     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
11963   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
11964     BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND);
11965   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
11966     BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8);
11967 
11968   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
11969     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
11970   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
11971     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
11972   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
11973     BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
11974   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
11975     BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);
11976 
11977   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
11978     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE);
11979   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
11980     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE);
11981   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
11982     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE);
11983   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
11984     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE);
11985 
11986   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
11987     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE);
11988   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
11989     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE);
11990   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
11991     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE);
11992   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
11993     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE);
11994 
11995   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
11996     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE);
11997   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
11998     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE);
11999   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
12000     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE);
12001   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
12002     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE);
12003 
12004   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
12005     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE);
12006   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
12007     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE);
12008   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
12009     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE);
12010   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
12011     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE);
12012 
12013   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
12014     BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
12015   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
12016     BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
12017   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
12018     BB = EmitAtomicBinary(MI, BB, 4, 0);
12019   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
12020     BB = EmitAtomicBinary(MI, BB, 8, 0);
12021   else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
12022            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
12023            (Subtarget.hasPartwordAtomics() &&
12024             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
12025            (Subtarget.hasPartwordAtomics() &&
12026             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
12027     bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
12028 
12029     auto LoadMnemonic = PPC::LDARX;
12030     auto StoreMnemonic = PPC::STDCX;
12031     switch (MI.getOpcode()) {
12032     default:
12033       llvm_unreachable("Compare and swap of unknown size");
12034     case PPC::ATOMIC_CMP_SWAP_I8:
12035       LoadMnemonic = PPC::LBARX;
12036       StoreMnemonic = PPC::STBCX;
      assert(Subtarget.hasPartwordAtomics() &&
             "Target must support partword atomics!");
12038       break;
12039     case PPC::ATOMIC_CMP_SWAP_I16:
12040       LoadMnemonic = PPC::LHARX;
12041       StoreMnemonic = PPC::STHCX;
      assert(Subtarget.hasPartwordAtomics() &&
             "Target must support partword atomics!");
12043       break;
12044     case PPC::ATOMIC_CMP_SWAP_I32:
12045       LoadMnemonic = PPC::LWARX;
12046       StoreMnemonic = PPC::STWCX;
12047       break;
12048     case PPC::ATOMIC_CMP_SWAP_I64:
12049       LoadMnemonic = PPC::LDARX;
12050       StoreMnemonic = PPC::STDCX;
12051       break;
12052     }
12053     Register dest = MI.getOperand(0).getReg();
12054     Register ptrA = MI.getOperand(1).getReg();
12055     Register ptrB = MI.getOperand(2).getReg();
12056     Register oldval = MI.getOperand(3).getReg();
12057     Register newval = MI.getOperand(4).getReg();
12058     DebugLoc dl = MI.getDebugLoc();
12059 
12060     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
12061     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
12062     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
12063     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
12064     F->insert(It, loop1MBB);
12065     F->insert(It, loop2MBB);
12066     F->insert(It, midMBB);
12067     F->insert(It, exitMBB);
12068     exitMBB->splice(exitMBB->begin(), BB,
12069                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
12070     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
12071 
12072     //  thisMBB:
12073     //   ...
12074     //   fallthrough --> loopMBB
12075     BB->addSuccessor(loop1MBB);
12076 
12077     // loop1MBB:
12078     //   l[bhwd]arx dest, ptr
12079     //   cmp[wd] dest, oldval
12080     //   bne- midMBB
12081     // loop2MBB:
12082     //   st[bhwd]cx. newval, ptr
    //   bne- loop1MBB
    //   b exitMBB
    // midMBB:
    //   st[bhwd]cx. dest, ptr
    // exitMBB:
12088     BB = loop1MBB;
12089     BuildMI(BB, dl, TII->get(LoadMnemonic), dest).addReg(ptrA).addReg(ptrB);
12090     BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
12091         .addReg(oldval)
12092         .addReg(dest);
12093     BuildMI(BB, dl, TII->get(PPC::BCC))
12094         .addImm(PPC::PRED_NE)
12095         .addReg(PPC::CR0)
12096         .addMBB(midMBB);
12097     BB->addSuccessor(loop2MBB);
12098     BB->addSuccessor(midMBB);
12099 
12100     BB = loop2MBB;
12101     BuildMI(BB, dl, TII->get(StoreMnemonic))
12102         .addReg(newval)
12103         .addReg(ptrA)
12104         .addReg(ptrB);
12105     BuildMI(BB, dl, TII->get(PPC::BCC))
12106         .addImm(PPC::PRED_NE)
12107         .addReg(PPC::CR0)
12108         .addMBB(loop1MBB);
12109     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
12110     BB->addSuccessor(loop1MBB);
12111     BB->addSuccessor(exitMBB);
12112 
12113     BB = midMBB;
12114     BuildMI(BB, dl, TII->get(StoreMnemonic))
12115         .addReg(dest)
12116         .addReg(ptrA)
12117         .addReg(ptrB);
12118     BB->addSuccessor(exitMBB);
12119 
12120     //  exitMBB:
12121     //   ...
12122     BB = exitMBB;
12123   } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
12124              MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
12125     // We must use 64-bit registers for addresses when targeting 64-bit,
12126     // since we're actually doing arithmetic on them.  Other registers
12127     // can be 32-bit.
12128     bool is64bit = Subtarget.isPPC64();
12129     bool isLittleEndian = Subtarget.isLittleEndian();
12130     bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
12131 
12132     Register dest = MI.getOperand(0).getReg();
12133     Register ptrA = MI.getOperand(1).getReg();
12134     Register ptrB = MI.getOperand(2).getReg();
12135     Register oldval = MI.getOperand(3).getReg();
12136     Register newval = MI.getOperand(4).getReg();
12137     DebugLoc dl = MI.getDebugLoc();
12138 
12139     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
12140     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
12141     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
12142     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
12143     F->insert(It, loop1MBB);
12144     F->insert(It, loop2MBB);
12145     F->insert(It, midMBB);
12146     F->insert(It, exitMBB);
12147     exitMBB->splice(exitMBB->begin(), BB,
12148                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
12149     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
12150 
12151     MachineRegisterInfo &RegInfo = F->getRegInfo();
12152     const TargetRegisterClass *RC =
12153         is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
12154     const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
12155 
12156     Register PtrReg = RegInfo.createVirtualRegister(RC);
12157     Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
12158     Register ShiftReg =
12159         isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
12160     Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
12161     Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
12162     Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
12163     Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
12164     Register MaskReg = RegInfo.createVirtualRegister(GPRC);
12165     Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
12166     Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
12167     Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
12168     Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
12169     Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
12170     Register Ptr1Reg;
12171     Register TmpReg = RegInfo.createVirtualRegister(GPRC);
12172     Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
12173     //  thisMBB:
12174     //   ...
12175     //   fallthrough --> loopMBB
12176     BB->addSuccessor(loop1MBB);
12177 
12178     // The 4-byte load must be aligned, while a char or short may be
12179     // anywhere in the word.  Hence all this nasty bookkeeping code.
12180     //   add ptr1, ptrA, ptrB [copy if ptrA==0]
12181     //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
12182     //   xori shift, shift1, 24 [16]
12183     //   rlwinm ptr, ptr1, 0, 0, 29
12184     //   slw newval2, newval, shift
    //   slw oldval2, oldval, shift
12186     //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
12187     //   slw mask, mask2, shift
12188     //   and newval3, newval2, mask
12189     //   and oldval3, oldval2, mask
12190     // loop1MBB:
12191     //   lwarx tmpDest, ptr
12192     //   and tmp, tmpDest, mask
12193     //   cmpw tmp, oldval3
12194     //   bne- midMBB
12195     // loop2MBB:
12196     //   andc tmp2, tmpDest, mask
12197     //   or tmp4, tmp2, newval3
12198     //   stwcx. tmp4, ptr
12199     //   bne- loop1MBB
    //   b exitMBB
    // midMBB:
    //   stwcx. tmpDest, ptr
    // exitMBB:
12204     //   srw dest, tmpDest, shift
12205     if (ptrA != ZeroReg) {
12206       Ptr1Reg = RegInfo.createVirtualRegister(RC);
12207       BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
12208           .addReg(ptrA)
12209           .addReg(ptrB);
12210     } else {
12211       Ptr1Reg = ptrB;
12212     }
12213 
    // We need to use the 32-bit subregister to avoid a register class
    // mismatch in 64-bit mode.
12216     BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
12217         .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
12218         .addImm(3)
12219         .addImm(27)
12220         .addImm(is8bit ? 28 : 27);
12221     if (!isLittleEndian)
12222       BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
12223           .addReg(Shift1Reg)
12224           .addImm(is8bit ? 24 : 16);
12225     if (is64bit)
12226       BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
12227           .addReg(Ptr1Reg)
12228           .addImm(0)
12229           .addImm(61);
12230     else
12231       BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
12232           .addReg(Ptr1Reg)
12233           .addImm(0)
12234           .addImm(0)
12235           .addImm(29);
12236     BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
12237         .addReg(newval)
12238         .addReg(ShiftReg);
12239     BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
12240         .addReg(oldval)
12241         .addReg(ShiftReg);
12242     if (is8bit)
12243       BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
12244     else {
12245       BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
12246       BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
12247           .addReg(Mask3Reg)
12248           .addImm(65535);
12249     }
12250     BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
12251         .addReg(Mask2Reg)
12252         .addReg(ShiftReg);
12253     BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
12254         .addReg(NewVal2Reg)
12255         .addReg(MaskReg);
12256     BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
12257         .addReg(OldVal2Reg)
12258         .addReg(MaskReg);
12259 
12260     BB = loop1MBB;
12261     BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
12262         .addReg(ZeroReg)
12263         .addReg(PtrReg);
12264     BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
12265         .addReg(TmpDestReg)
12266         .addReg(MaskReg);
12267     BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
12268         .addReg(TmpReg)
12269         .addReg(OldVal3Reg);
12270     BuildMI(BB, dl, TII->get(PPC::BCC))
12271         .addImm(PPC::PRED_NE)
12272         .addReg(PPC::CR0)
12273         .addMBB(midMBB);
12274     BB->addSuccessor(loop2MBB);
12275     BB->addSuccessor(midMBB);
12276 
12277     BB = loop2MBB;
12278     BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
12279         .addReg(TmpDestReg)
12280         .addReg(MaskReg);
12281     BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
12282         .addReg(Tmp2Reg)
12283         .addReg(NewVal3Reg);
12284     BuildMI(BB, dl, TII->get(PPC::STWCX))
12285         .addReg(Tmp4Reg)
12286         .addReg(ZeroReg)
12287         .addReg(PtrReg);
12288     BuildMI(BB, dl, TII->get(PPC::BCC))
12289         .addImm(PPC::PRED_NE)
12290         .addReg(PPC::CR0)
12291         .addMBB(loop1MBB);
12292     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
12293     BB->addSuccessor(loop1MBB);
12294     BB->addSuccessor(exitMBB);
12295 
12296     BB = midMBB;
12297     BuildMI(BB, dl, TII->get(PPC::STWCX))
12298         .addReg(TmpDestReg)
12299         .addReg(ZeroReg)
12300         .addReg(PtrReg);
12301     BB->addSuccessor(exitMBB);
12302 
12303     //  exitMBB:
12304     //   ...
12305     BB = exitMBB;
12306     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
12307         .addReg(TmpReg)
12308         .addReg(ShiftReg);
12309   } else if (MI.getOpcode() == PPC::FADDrtz) {
12310     // This pseudo performs an FADD with rounding mode temporarily forced
12311     // to round-to-zero.  We emit this via custom inserter since the FPSCR
12312     // is not modeled at the SelectionDAG level.
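    //
    // A sketch of the emitted sequence:
    //   mffs   MFFSReg           ; save the FPSCR
    //   mtfsb1 31                ; set the low RN bit
    //   mtfsb0 30                ; clear the high RN bit => RN = 0b01 (RTZ)
    //   fadd   Dest, Src1, Src2
    //   mtfsf  1, MFFSReg        ; restore the RN field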
12313     Register Dest = MI.getOperand(0).getReg();
12314     Register Src1 = MI.getOperand(1).getReg();
12315     Register Src2 = MI.getOperand(2).getReg();
12316     DebugLoc dl = MI.getDebugLoc();
12317 
12318     MachineRegisterInfo &RegInfo = F->getRegInfo();
12319     Register MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
12320 
12321     // Save FPSCR value.
12322     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);
12323 
12324     // Set rounding mode to round-to-zero.
12325     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1))
12326         .addImm(31)
12327         .addReg(PPC::RM, RegState::ImplicitDefine);
12328 
12329     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0))
12330         .addImm(30)
12331         .addReg(PPC::RM, RegState::ImplicitDefine);
12332 
12333     // Perform addition.
12334     auto MIB = BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest)
12335                    .addReg(Src1)
12336                    .addReg(Src2);
12337     if (MI.getFlag(MachineInstr::NoFPExcept))
12338       MIB.setMIFlag(MachineInstr::NoFPExcept);
12339 
12340     // Restore FPSCR value.
12341     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
12342   } else if (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
12343              MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT ||
12344              MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
12345              MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) {
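    // These pseudos AND the input with 1 and then copy the EQ or GT bit of
    // the resulting CR0 field into the destination register.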
12346     unsigned Opcode = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
12347                        MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8)
12348                           ? PPC::ANDI8_rec
12349                           : PPC::ANDI_rec;
12350     bool IsEQ = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
12351                  MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8);
12352 
12353     MachineRegisterInfo &RegInfo = F->getRegInfo();
12354     Register Dest = RegInfo.createVirtualRegister(
12355         Opcode == PPC::ANDI_rec ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);
12356 
12357     DebugLoc Dl = MI.getDebugLoc();
12358     BuildMI(*BB, MI, Dl, TII->get(Opcode), Dest)
12359         .addReg(MI.getOperand(1).getReg())
12360         .addImm(1);
12361     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12362             MI.getOperand(0).getReg())
12363         .addReg(IsEQ ? PPC::CR0EQ : PPC::CR0GT);
12364   } else if (MI.getOpcode() == PPC::TCHECK_RET) {
12365     DebugLoc Dl = MI.getDebugLoc();
12366     MachineRegisterInfo &RegInfo = F->getRegInfo();
12367     Register CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
12368     BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
12369     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12370             MI.getOperand(0).getReg())
12371         .addReg(CRReg);
12372   } else if (MI.getOpcode() == PPC::TBEGIN_RET) {
12373     DebugLoc Dl = MI.getDebugLoc();
12374     unsigned Imm = MI.getOperand(1).getImm();
12375     BuildMI(*BB, MI, Dl, TII->get(PPC::TBEGIN)).addImm(Imm);
12376     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12377             MI.getOperand(0).getReg())
12378         .addReg(PPC::CR0EQ);
12379   } else if (MI.getOpcode() == PPC::SETRNDi) {
12380     DebugLoc dl = MI.getDebugLoc();
12381     Register OldFPSCRReg = MI.getOperand(0).getReg();
12382 
12383     // Save FPSCR value.
12384     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
12385 
    // The floating-point rounding mode is in bits 62:63 of the FPSCR and has
12387     // the following settings:
12388     //   00 Round to nearest
12389     //   01 Round to 0
12390     //   10 Round to +inf
12391     //   11 Round to -inf
12392 
    // When the operand is an immediate, use its two least significant bits to
    // set bits 62:63 of the FPSCR.
12395     unsigned Mode = MI.getOperand(1).getImm();
12396     BuildMI(*BB, MI, dl, TII->get((Mode & 1) ? PPC::MTFSB1 : PPC::MTFSB0))
12397         .addImm(31)
12398         .addReg(PPC::RM, RegState::ImplicitDefine);
12399 
12400     BuildMI(*BB, MI, dl, TII->get((Mode & 2) ? PPC::MTFSB1 : PPC::MTFSB0))
12401         .addImm(30)
12402         .addReg(PPC::RM, RegState::ImplicitDefine);
12403   } else if (MI.getOpcode() == PPC::SETRND) {
12404     DebugLoc dl = MI.getDebugLoc();
12405 
    // Copy a register from F8RCRegClass (SrcReg) to G8RCRegClass (DestReg),
    // or vice versa. If the target doesn't have DirectMove, it lacks
    // instructions such as mtvsrd and mfvsrd that move between the register
    // files directly, so the copy has to go through the stack instead.
12411     auto copyRegFromG8RCOrF8RC = [&] (unsigned DestReg, unsigned SrcReg) {
12412       if (Subtarget.hasDirectMove()) {
12413         BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), DestReg)
12414           .addReg(SrcReg);
12415       } else {
12416         // Use stack to do the register copy.
12417         unsigned StoreOp = PPC::STD, LoadOp = PPC::LFD;
12418         MachineRegisterInfo &RegInfo = F->getRegInfo();
12419         const TargetRegisterClass *RC = RegInfo.getRegClass(SrcReg);
12420         if (RC == &PPC::F8RCRegClass) {
          // Copy a register from F8RCRegClass to G8RCRegClass.
12422           assert((RegInfo.getRegClass(DestReg) == &PPC::G8RCRegClass) &&
12423                  "Unsupported RegClass.");
12424 
12425           StoreOp = PPC::STFD;
12426           LoadOp = PPC::LD;
12427         } else {
          // Copy a register from G8RCRegClass to F8RCRegClass.
12429           assert((RegInfo.getRegClass(SrcReg) == &PPC::G8RCRegClass) &&
12430                  (RegInfo.getRegClass(DestReg) == &PPC::F8RCRegClass) &&
12431                  "Unsupported RegClass.");
12432         }
12433 
12434         MachineFrameInfo &MFI = F->getFrameInfo();
12435         int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
12436 
12437         MachineMemOperand *MMOStore = F->getMachineMemOperand(
12438             MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
12439             MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx),
12440             MFI.getObjectAlign(FrameIdx));
12441 
12442         // Store the SrcReg into the stack.
12443         BuildMI(*BB, MI, dl, TII->get(StoreOp))
12444           .addReg(SrcReg)
12445           .addImm(0)
12446           .addFrameIndex(FrameIdx)
12447           .addMemOperand(MMOStore);
12448 
12449         MachineMemOperand *MMOLoad = F->getMachineMemOperand(
12450             MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
12451             MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
12452             MFI.getObjectAlign(FrameIdx));
12453 
        // Load DestReg from the stack slot where SrcReg was stored,
        // completing the register-class conversion from SrcReg's class to
        // DestReg's class.
12457         BuildMI(*BB, MI, dl, TII->get(LoadOp), DestReg)
12458           .addImm(0)
12459           .addFrameIndex(FrameIdx)
12460           .addMemOperand(MMOLoad);
12461       }
12462     };
12463 
12464     Register OldFPSCRReg = MI.getOperand(0).getReg();
12465 
12466     // Save FPSCR value.
12467     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
12468 
    // When the operand is a GPRC register, use its two least significant
    // bits together with the mtfsf instruction to set bits 62:63 of FPSCR.
12471     //
12472     // copy OldFPSCRTmpReg, OldFPSCRReg
12473     // (INSERT_SUBREG ExtSrcReg, (IMPLICIT_DEF ImDefReg), SrcOp, 1)
12474     // rldimi NewFPSCRTmpReg, ExtSrcReg, OldFPSCRReg, 0, 62
12475     // copy NewFPSCRReg, NewFPSCRTmpReg
12476     // mtfsf 255, NewFPSCRReg
12477     MachineOperand SrcOp = MI.getOperand(1);
12478     MachineRegisterInfo &RegInfo = F->getRegInfo();
12479     Register OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12480 
12481     copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg);
12482 
12483     Register ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12484     Register ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12485 
    // The first operand of INSERT_SUBREG should be a register that has
    // subregisters. Since we only care about its register class, an
    // IMPLICIT_DEF register suffices.
12489     BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), ImDefReg);
12490     BuildMI(*BB, MI, dl, TII->get(PPC::INSERT_SUBREG), ExtSrcReg)
12491       .addReg(ImDefReg)
12492       .add(SrcOp)
12493       .addImm(1);
12494 
12495     Register NewFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12496     BuildMI(*BB, MI, dl, TII->get(PPC::RLDIMI), NewFPSCRTmpReg)
12497       .addReg(OldFPSCRTmpReg)
12498       .addReg(ExtSrcReg)
12499       .addImm(0)
12500       .addImm(62);
12501 
12502     Register NewFPSCRReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
12503     copyRegFromG8RCOrF8RC(NewFPSCRReg, NewFPSCRTmpReg);
12504 
    // The mask 255 means that bits 32:63 of NewFPSCRReg are placed into bits
    // 32:63 of FPSCR.
12507     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF))
12508       .addImm(255)
12509       .addReg(NewFPSCRReg)
12510       .addImm(0)
12511       .addImm(0);
12512   } else if (MI.getOpcode() == PPC::SETFLM) {
12513     DebugLoc Dl = MI.getDebugLoc();
12514 
    // The result of setflm is the previous FPSCR content, so save it first.
12516     Register OldFPSCRReg = MI.getOperand(0).getReg();
12517     BuildMI(*BB, MI, Dl, TII->get(PPC::MFFS), OldFPSCRReg);
12518 
    // Put bits 32:63 into FPSCR.
12520     Register NewFPSCRReg = MI.getOperand(1).getReg();
12521     BuildMI(*BB, MI, Dl, TII->get(PPC::MTFSF))
12522         .addImm(255)
12523         .addReg(NewFPSCRReg)
12524         .addImm(0)
12525         .addImm(0);
12526   } else if (MI.getOpcode() == PPC::PROBED_ALLOCA_32 ||
12527              MI.getOpcode() == PPC::PROBED_ALLOCA_64) {
12528     return emitProbedAlloca(MI, BB);
12529   } else {
12530     llvm_unreachable("Unexpected instr type to insert");
12531   }
12532 
12533   MI.eraseFromParent(); // The pseudo instruction is gone now.
12534   return BB;
12535 }
12536 
12537 //===----------------------------------------------------------------------===//
12538 // Target Optimization Hooks
12539 //===----------------------------------------------------------------------===//
12540 
12541 static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {
  // For the estimates, convergence is quadratic, so we essentially double
  // the number of correct bits after every iteration. For both FRE and
  // FRSQRTE, the minimum architected relative accuracy is 2^-5; when
  // hasRecipPrec(), it is 2^-14. IEEE float has a 23-bit fraction and double
  // a 52-bit fraction.
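  // For example, with hasRecipPrec() the initial error is at most 2^-14; one
  // iteration squares that to roughly 2^-28, enough for f32's 23 fraction
  // bits, and the extra step added below for f64 reaches about 2^-56,
  // covering its 52 fraction bits.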
12546   int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
12547   if (VT.getScalarType() == MVT::f64)
12548     RefinementSteps++;
12549   return RefinementSteps;
12550 }
12551 
12552 SDValue PPCTargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG,
12553                                             const DenormalMode &Mode) const {
12554   // We only have VSX Vector Test for software Square Root.
12555   EVT VT = Op.getValueType();
12556   if (!isTypeLegal(MVT::i1) ||
12557       (VT != MVT::f64 &&
12558        ((VT != MVT::v2f64 && VT != MVT::v4f32) || !Subtarget.hasVSX())))
12559     return TargetLowering::getSqrtInputTest(Op, DAG, Mode);
12560 
12561   SDLoc DL(Op);
  // The output register of FTSQRT is a CR field.
12563   SDValue FTSQRT = DAG.getNode(PPCISD::FTSQRT, DL, MVT::i32, Op);
12564   // ftsqrt BF,FRB
12565   // Let e_b be the unbiased exponent of the double-precision
12566   // floating-point operand in register FRB.
  // fe_flag is set to 1 if either of the following conditions occurs:
  //   - The double-precision floating-point operand in register FRB is a
  //     zero, a NaN, an infinity, or a negative value.
12570   //   - e_b is less than or equal to -970.
12571   // Otherwise fe_flag is set to 0.
  // Both VSX and non-VSX versions would set the EQ bit in the CR if the
  // number is not eligible for iteration (zero/negative/infinity/NaN or the
  // unbiased exponent is less than -970).
12575   SDValue SRIdxVal = DAG.getTargetConstant(PPC::sub_eq, DL, MVT::i32);
12576   return SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, MVT::i1,
12577                                     FTSQRT, SRIdxVal),
12578                  0);
12579 }
12580 
12581 SDValue
12582 PPCTargetLowering::getSqrtResultForDenormInput(SDValue Op,
12583                                                SelectionDAG &DAG) const {
12584   // We only have VSX Vector Square Root.
12585   EVT VT = Op.getValueType();
12586   if (VT != MVT::f64 &&
12587       ((VT != MVT::v2f64 && VT != MVT::v4f32) || !Subtarget.hasVSX()))
12588     return TargetLowering::getSqrtResultForDenormInput(Op, DAG);
12589 
12590   return DAG.getNode(PPCISD::FSQRT, SDLoc(Op), VT, Op);
12591 }
12592 
12593 SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
12594                                            int Enabled, int &RefinementSteps,
12595                                            bool &UseOneConstNR,
12596                                            bool Reciprocal) const {
12597   EVT VT = Operand.getValueType();
12598   if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
12599       (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
12600       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
12601       (VT == MVT::v2f64 && Subtarget.hasVSX())) {
12602     if (RefinementSteps == ReciprocalEstimate::Unspecified)
12603       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
12604 
12605     // The Newton-Raphson computation with a single constant does not provide
12606     // enough accuracy on some CPUs.
12607     UseOneConstNR = !Subtarget.needsTwoConstNR();
12608     return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
12609   }
12610   return SDValue();
12611 }
12612 
12613 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
12614                                             int Enabled,
12615                                             int &RefinementSteps) const {
12616   EVT VT = Operand.getValueType();
12617   if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
12618       (VT == MVT::f64 && Subtarget.hasFRE()) ||
12619       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
12620       (VT == MVT::v2f64 && Subtarget.hasVSX())) {
12621     if (RefinementSteps == ReciprocalEstimate::Unspecified)
12622       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
12623     return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
12624   }
12625   return SDValue();
12626 }
12627 
12628 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
  // Note: This functionality is used only when unsafe-fp-math is enabled.
  // On cores with reciprocal estimates (which are used when unsafe-fp-math
  // is enabled for division), this functionality is redundant with the
  // default combiner logic (once the division -> reciprocal/multiply
  // transformation has taken place). As a result, this matters more for
  // older cores than for newer ones.
12635 
  // Combine multiple FDIVs with the same divisor into multiple FMULs by the
  // reciprocal if there are two or more FDIVs (for embedded cores with only
  // one FP pipeline) or three or more FDIVs (for generic OOO cores).
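  // For example, with a threshold of two, the DAG combiner may rewrite
  //   a/d; b/d  ==>  t = 1.0/d; a*t; b*t
  // trading the second division for a reciprocal and two multiplies.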
12639   switch (Subtarget.getCPUDirective()) {
12640   default:
12641     return 3;
12642   case PPC::DIR_440:
12643   case PPC::DIR_A2:
12644   case PPC::DIR_E500:
12645   case PPC::DIR_E500mc:
12646   case PPC::DIR_E5500:
12647     return 2;
12648   }
12649 }
12650 
12651 // isConsecutiveLSLoc needs to work even if all adds have not yet been
12652 // collapsed, and so we need to look through chains of them.
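// For example, (add (add X, 8), 16) accumulates to Base = X and Offset = 24.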
12653 static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
12654                                      int64_t& Offset, SelectionDAG &DAG) {
12655   if (DAG.isBaseWithConstantOffset(Loc)) {
12656     Base = Loc.getOperand(0);
12657     Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
12658 
12659     // The base might itself be a base plus an offset, and if so, accumulate
12660     // that as well.
12661     getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG);
12662   }
12663 }
12664 
12665 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
12666                             unsigned Bytes, int Dist,
12667                             SelectionDAG &DAG) {
12668   if (VT.getSizeInBits() / 8 != Bytes)
12669     return false;
12670 
12671   SDValue BaseLoc = Base->getBasePtr();
12672   if (Loc.getOpcode() == ISD::FrameIndex) {
12673     if (BaseLoc.getOpcode() != ISD::FrameIndex)
12674       return false;
12675     const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
12676     int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
12677     int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
12678     int FS  = MFI.getObjectSize(FI);
12679     int BFS = MFI.getObjectSize(BFI);
12680     if (FS != BFS || FS != (int)Bytes) return false;
12681     return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
12682   }
12683 
12684   SDValue Base1 = Loc, Base2 = BaseLoc;
12685   int64_t Offset1 = 0, Offset2 = 0;
12686   getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);
12687   getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);
12688   if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
12689     return true;
12690 
12691   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12692   const GlobalValue *GV1 = nullptr;
12693   const GlobalValue *GV2 = nullptr;
12694   Offset1 = 0;
12695   Offset2 = 0;
12696   bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
12697   bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
12698   if (isGA1 && isGA2 && GV1 == GV2)
12699     return Offset1 == (Offset2 + Dist*Bytes);
12700   return false;
12701 }
12702 
12703 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
12704 // not enforce equality of the chain operands.
12705 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
12706                             unsigned Bytes, int Dist,
12707                             SelectionDAG &DAG) {
12708   if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
12709     EVT VT = LS->getMemoryVT();
12710     SDValue Loc = LS->getBasePtr();
12711     return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
12712   }
12713 
12714   if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
12715     EVT VT;
12716     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12717     default: return false;
12718     case Intrinsic::ppc_altivec_lvx:
12719     case Intrinsic::ppc_altivec_lvxl:
12720     case Intrinsic::ppc_vsx_lxvw4x:
12721     case Intrinsic::ppc_vsx_lxvw4x_be:
12722       VT = MVT::v4i32;
12723       break;
12724     case Intrinsic::ppc_vsx_lxvd2x:
12725     case Intrinsic::ppc_vsx_lxvd2x_be:
12726       VT = MVT::v2f64;
12727       break;
12728     case Intrinsic::ppc_altivec_lvebx:
12729       VT = MVT::i8;
12730       break;
12731     case Intrinsic::ppc_altivec_lvehx:
12732       VT = MVT::i16;
12733       break;
12734     case Intrinsic::ppc_altivec_lvewx:
12735       VT = MVT::i32;
12736       break;
12737     }
12738 
12739     return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
12740   }
12741 
12742   if (N->getOpcode() == ISD::INTRINSIC_VOID) {
12743     EVT VT;
12744     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12745     default: return false;
12746     case Intrinsic::ppc_altivec_stvx:
12747     case Intrinsic::ppc_altivec_stvxl:
12748     case Intrinsic::ppc_vsx_stxvw4x:
12749       VT = MVT::v4i32;
12750       break;
12751     case Intrinsic::ppc_vsx_stxvd2x:
12752       VT = MVT::v2f64;
12753       break;
12754     case Intrinsic::ppc_vsx_stxvw4x_be:
12755       VT = MVT::v4i32;
12756       break;
12757     case Intrinsic::ppc_vsx_stxvd2x_be:
12758       VT = MVT::v2f64;
12759       break;
12760     case Intrinsic::ppc_altivec_stvebx:
12761       VT = MVT::i8;
12762       break;
12763     case Intrinsic::ppc_altivec_stvehx:
12764       VT = MVT::i16;
12765       break;
12766     case Intrinsic::ppc_altivec_stvewx:
12767       VT = MVT::i32;
12768       break;
12769     }
12770 
12771     return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
12772   }
12773 
12774   return false;
12775 }
12776 
// Return true if there is a nearby consecutive load to the one provided
// (regardless of alignment). We search up and down the chain, looking
// through token factors and other loads (but nothing else). As a result, a
// true result indicates that it is safe to create a new consecutive load
// adjacent to the load provided.
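// For illustration (a sketch): if LD loads from P and a token factor up its
// chain joins another load from P + StoreSize, the upward search finds that
// load and returns true without ever needing the downward pass.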
12782 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
12783   SDValue Chain = LD->getChain();
12784   EVT VT = LD->getMemoryVT();
12785 
12786   SmallSet<SDNode *, 16> LoadRoots;
12787   SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
12788   SmallSet<SDNode *, 16> Visited;
12789 
12790   // First, search up the chain, branching to follow all token-factor operands.
12791   // If we find a consecutive load, then we're done, otherwise, record all
12792   // nodes just above the top-level loads and token factors.
12793   while (!Queue.empty()) {
12794     SDNode *ChainNext = Queue.pop_back_val();
12795     if (!Visited.insert(ChainNext).second)
12796       continue;
12797 
12798     if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
12799       if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
12800         return true;
12801 
12802       if (!Visited.count(ChainLD->getChain().getNode()))
12803         Queue.push_back(ChainLD->getChain().getNode());
12804     } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
12805       for (const SDUse &O : ChainNext->ops())
12806         if (!Visited.count(O.getNode()))
12807           Queue.push_back(O.getNode());
12808     } else
12809       LoadRoots.insert(ChainNext);
12810   }
12811 
12812   // Second, search down the chain, starting from the top-level nodes recorded
12813   // in the first phase. These top-level nodes are the nodes just above all
  // loads and token factors. Starting with their uses, recursively look through
12815   // all loads (just the chain uses) and token factors to find a consecutive
12816   // load.
12817   Visited.clear();
12818   Queue.clear();
12819 
12820   for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
12821        IE = LoadRoots.end(); I != IE; ++I) {
12822     Queue.push_back(*I);
12823 
12824     while (!Queue.empty()) {
12825       SDNode *LoadRoot = Queue.pop_back_val();
12826       if (!Visited.insert(LoadRoot).second)
12827         continue;
12828 
12829       if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
12830         if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
12831           return true;
12832 
12833       for (SDNode::use_iterator UI = LoadRoot->use_begin(),
12834            UE = LoadRoot->use_end(); UI != UE; ++UI)
12835         if (((isa<MemSDNode>(*UI) &&
12836             cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
12837             UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
12838           Queue.push_back(*UI);
12839     }
12840   }
12841 
12842   return false;
12843 }
12844 
/// This function is called when we have proved that a SETCC node can be
/// replaced by subtraction (and other supporting instructions) so that the
/// result of the comparison is kept in a GPR instead of a CR. This function
/// is purely for codegen purposes and has some flags to guide the codegen
/// process.
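/// For example (a sketch), for i32 operands compared with SETULT and
/// Size == 64, the pattern generated below is
///   (trunc (srl (sub (zext x), (zext y)), 63))
/// where the borrow from the subtraction lands in bit 63, so the shifted
/// value is 1 exactly when x <u y.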
12849 static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement,
12850                                      bool Swap, SDLoc &DL, SelectionDAG &DAG) {
12851   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
12852 
12853   // Zero extend the operands to the largest legal integer. Originally, they
12854   // must be of a strictly smaller size.
12855   auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0),
12856                          DAG.getConstant(Size, DL, MVT::i32));
12857   auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1),
12858                          DAG.getConstant(Size, DL, MVT::i32));
12859 
  // Swap if needed, depending on the condition code.
12861   if (Swap)
12862     std::swap(Op0, Op1);
12863 
12864   // Subtract extended integers.
12865   auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1);
12866 
12867   // Move the sign bit to the least significant position and zero out the rest.
12868   // Now the least significant bit carries the result of original comparison.
12869   auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode,
12870                              DAG.getConstant(Size - 1, DL, MVT::i32));
12871   auto Final = Shifted;
12872 
  // Complement the result if needed, based on the condition code.
12874   if (Complement)
12875     Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted,
12876                         DAG.getConstant(1, DL, MVT::i64));
12877 
12878   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final);
12879 }
12880 
12881 SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
12882                                                   DAGCombinerInfo &DCI) const {
12883   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
12884 
12885   SelectionDAG &DAG = DCI.DAG;
12886   SDLoc DL(N);
12887 
  // The size of the integers being compared plays a critical role in the
  // following analysis, so we prefer to do this when all types are legal.
12890   if (!DCI.isAfterLegalizeDAG())
12891     return SDValue();
12892 
  // If all users of the SETCC extend its value to a legal integer type,
  // then we replace the SETCC with a subtraction.
12895   for (SDNode::use_iterator UI = N->use_begin(),
12896        UE = N->use_end(); UI != UE; ++UI) {
12897     if (UI->getOpcode() != ISD::ZERO_EXTEND)
12898       return SDValue();
12899   }
12900 
12901   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
12902   auto OpSize = N->getOperand(0).getValueSizeInBits();
12903 
12904   unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();
12905 
12906   if (OpSize < Size) {
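    // SETULT maps to the plain subtract pattern; SETUGT swaps the operands;
    // SETULE and SETUGE are the logical complements of SETUGT and SETULT
    // respectively, so they additionally complement the result bit.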
12907     switch (CC) {
12908     default: break;
12909     case ISD::SETULT:
12910       return generateEquivalentSub(N, Size, false, false, DL, DAG);
12911     case ISD::SETULE:
12912       return generateEquivalentSub(N, Size, true, true, DL, DAG);
12913     case ISD::SETUGT:
12914       return generateEquivalentSub(N, Size, false, true, DL, DAG);
12915     case ISD::SETUGE:
12916       return generateEquivalentSub(N, Size, true, false, DL, DAG);
12917     }
12918   }
12919 
12920   return SDValue();
12921 }
12922 
12923 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
12924                                                   DAGCombinerInfo &DCI) const {
12925   SelectionDAG &DAG = DCI.DAG;
12926   SDLoc dl(N);
12927 
12928   assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
12929   // If we're tracking CR bits, we need to be careful that we don't have:
12930   //   trunc(binary-ops(zext(x), zext(y)))
12931   // or
  //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...))
12933   // such that we're unnecessarily moving things into GPRs when it would be
12934   // better to keep them in CR bits.
12935 
12936   // Note that trunc here can be an actual i1 trunc, or can be the effective
12937   // truncation that comes from a setcc or select_cc.
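  // For illustration (a sketch of the intended rewrite):
  //   (trunc (and (zext i1:a), (zext i1:b)))  ==>  (and i1:a, i1:b)
  // which keeps the whole computation in CR bits instead of round-tripping
  // through a GPR.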
12938   if (N->getOpcode() == ISD::TRUNCATE &&
12939       N->getValueType(0) != MVT::i1)
12940     return SDValue();
12941 
12942   if (N->getOperand(0).getValueType() != MVT::i32 &&
12943       N->getOperand(0).getValueType() != MVT::i64)
12944     return SDValue();
12945 
12946   if (N->getOpcode() == ISD::SETCC ||
12947       N->getOpcode() == ISD::SELECT_CC) {
    // If we're looking at a comparison, then we need to make sure that the
    // high bits (all except for the first) don't affect the result.
12950     ISD::CondCode CC =
12951       cast<CondCodeSDNode>(N->getOperand(
12952         N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
12953     unsigned OpBits = N->getOperand(0).getValueSizeInBits();
12954 
12955     if (ISD::isSignedIntSetCC(CC)) {
12956       if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
12957           DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
12958         return SDValue();
12959     } else if (ISD::isUnsignedIntSetCC(CC)) {
12960       if (!DAG.MaskedValueIsZero(N->getOperand(0),
12961                                  APInt::getHighBitsSet(OpBits, OpBits-1)) ||
12962           !DAG.MaskedValueIsZero(N->getOperand(1),
12963                                  APInt::getHighBitsSet(OpBits, OpBits-1)))
12964         return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
12965                                              : SDValue());
12966     } else {
12967       // This is neither a signed nor an unsigned comparison, just make sure
12968       // that the high bits are equal.
12969       KnownBits Op1Known = DAG.computeKnownBits(N->getOperand(0));
12970       KnownBits Op2Known = DAG.computeKnownBits(N->getOperand(1));
12971 
12972       // We don't really care about what is known about the first bit (if
12973       // anything), so pretend that it is known zero for both to ensure they can
12974       // be compared as constants.
12975       Op1Known.Zero.setBit(0); Op1Known.One.clearBit(0);
12976       Op2Known.Zero.setBit(0); Op2Known.One.clearBit(0);
12977 
12978       if (!Op1Known.isConstant() || !Op2Known.isConstant() ||
12979           Op1Known.getConstant() != Op2Known.getConstant())
12980         return SDValue();
12981     }
12982   }
12983 
  // We now know that the higher-order bits are irrelevant; we just need to
  // make sure that all of the intermediate operations are bit operations,
  // and that all inputs are extensions.
12987   if (N->getOperand(0).getOpcode() != ISD::AND &&
12988       N->getOperand(0).getOpcode() != ISD::OR  &&
12989       N->getOperand(0).getOpcode() != ISD::XOR &&
12990       N->getOperand(0).getOpcode() != ISD::SELECT &&
12991       N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
12992       N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
12993       N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
12994       N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
12995       N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
12996     return SDValue();
12997 
12998   if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
12999       N->getOperand(1).getOpcode() != ISD::AND &&
13000       N->getOperand(1).getOpcode() != ISD::OR  &&
13001       N->getOperand(1).getOpcode() != ISD::XOR &&
13002       N->getOperand(1).getOpcode() != ISD::SELECT &&
13003       N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
13004       N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
13005       N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
13006       N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
13007       N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
13008     return SDValue();
13009 
13010   SmallVector<SDValue, 4> Inputs;
13011   SmallVector<SDValue, 8> BinOps, PromOps;
13012   SmallPtrSet<SDNode *, 16> Visited;
13013 
13014   for (unsigned i = 0; i < 2; ++i) {
13015     if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
13016           N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
13017           N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
13018           N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
13019         isa<ConstantSDNode>(N->getOperand(i)))
13020       Inputs.push_back(N->getOperand(i));
13021     else
13022       BinOps.push_back(N->getOperand(i));
13023 
13024     if (N->getOpcode() == ISD::TRUNCATE)
13025       break;
13026   }
13027 
13028   // Visit all inputs, collect all binary operations (and, or, xor and
13029   // select) that are all fed by extensions.
13030   while (!BinOps.empty()) {
13031     SDValue BinOp = BinOps.pop_back_val();
13032 
13033     if (!Visited.insert(BinOp.getNode()).second)
13034       continue;
13035 
13036     PromOps.push_back(BinOp);
13037 
13038     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
13039       // The condition of the select is not promoted.
13040       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
13041         continue;
13042       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
13043         continue;
13044 
13045       if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
13046             BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
13047             BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
13048            BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
13049           isa<ConstantSDNode>(BinOp.getOperand(i))) {
13050         Inputs.push_back(BinOp.getOperand(i));
13051       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
13052                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
13053                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
13054                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
13055                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
13056                  BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
13057                  BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
13058                  BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
13059                  BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
13060         BinOps.push_back(BinOp.getOperand(i));
13061       } else {
13062         // We have an input that is not an extension or another binary
13063         // operation; we'll abort this transformation.
13064         return SDValue();
13065       }
13066     }
13067   }
13068 
13069   // Make sure that this is a self-contained cluster of operations (which
13070   // is not quite the same thing as saying that everything has only one
13071   // use).
13072   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13073     if (isa<ConstantSDNode>(Inputs[i]))
13074       continue;
13075 
13076     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
13077                               UE = Inputs[i].getNode()->use_end();
13078          UI != UE; ++UI) {
13079       SDNode *User = *UI;
13080       if (User != N && !Visited.count(User))
13081         return SDValue();
13082 
13083       // Make sure that we're not going to promote the non-output-value
13084       // operand(s) or SELECT or SELECT_CC.
13085       // FIXME: Although we could sometimes handle this, and it does occur in
13086       // practice that one of the condition inputs to the select is also one of
13087       // the outputs, we currently can't deal with this.
13088       if (User->getOpcode() == ISD::SELECT) {
13089         if (User->getOperand(0) == Inputs[i])
13090           return SDValue();
13091       } else if (User->getOpcode() == ISD::SELECT_CC) {
13092         if (User->getOperand(0) == Inputs[i] ||
13093             User->getOperand(1) == Inputs[i])
13094           return SDValue();
13095       }
13096     }
13097   }
13098 
13099   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
13100     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
13101                               UE = PromOps[i].getNode()->use_end();
13102          UI != UE; ++UI) {
13103       SDNode *User = *UI;
13104       if (User != N && !Visited.count(User))
13105         return SDValue();
13106 
13107       // Make sure that we're not going to promote the non-output-value
13108       // operand(s) or SELECT or SELECT_CC.
13109       // FIXME: Although we could sometimes handle this, and it does occur in
13110       // practice that one of the condition inputs to the select is also one of
13111       // the outputs, we currently can't deal with this.
13112       if (User->getOpcode() == ISD::SELECT) {
13113         if (User->getOperand(0) == PromOps[i])
13114           return SDValue();
13115       } else if (User->getOpcode() == ISD::SELECT_CC) {
13116         if (User->getOperand(0) == PromOps[i] ||
13117             User->getOperand(1) == PromOps[i])
13118           return SDValue();
13119       }
13120     }
13121   }
13122 
13123   // Replace all inputs with the extension operand.
13124   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13125     // Constants may have users outside the cluster of to-be-promoted nodes,
13126     // and so we need to replace those as we do the promotions.
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
13131   }
13132 
13133   std::list<HandleSDNode> PromOpHandles;
13134   for (auto &PromOp : PromOps)
13135     PromOpHandles.emplace_back(PromOp);
13136 
13137   // Replace all operations (these are all the same, but have a different
13138   // (i1) return type). DAG.getNode will validate that the types of
13139   // a binary operator match, so go through the list in reverse so that
13140   // we've likely promoted both operands first. Any intermediate truncations or
13141   // extensions disappear.
13142   while (!PromOpHandles.empty()) {
13143     SDValue PromOp = PromOpHandles.back().getValue();
13144     PromOpHandles.pop_back();
13145 
13146     if (PromOp.getOpcode() == ISD::TRUNCATE ||
13147         PromOp.getOpcode() == ISD::SIGN_EXTEND ||
13148         PromOp.getOpcode() == ISD::ZERO_EXTEND ||
13149         PromOp.getOpcode() == ISD::ANY_EXTEND) {
13150       if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
13151           PromOp.getOperand(0).getValueType() != MVT::i1) {
13152         // The operand is not yet ready (see comment below).
13153         PromOpHandles.emplace_front(PromOp);
13154         continue;
13155       }
13156 
13157       SDValue RepValue = PromOp.getOperand(0);
13158       if (isa<ConstantSDNode>(RepValue))
13159         RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);
13160 
13161       DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
13162       continue;
13163     }
13164 
13165     unsigned C;
13166     switch (PromOp.getOpcode()) {
13167     default:             C = 0; break;
13168     case ISD::SELECT:    C = 1; break;
13169     case ISD::SELECT_CC: C = 2; break;
13170     }
13171 
13172     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
13173          PromOp.getOperand(C).getValueType() != MVT::i1) ||
13174         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
13175          PromOp.getOperand(C+1).getValueType() != MVT::i1)) {
13176       // The to-be-promoted operands of this node have not yet been
13177       // promoted (this should be rare because we're going through the
13178       // list backward, but if one of the operands has several users in
13179       // this cluster of to-be-promoted nodes, it is possible).
13180       PromOpHandles.emplace_front(PromOp);
13181       continue;
13182     }
13183 
13184     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
13185                                 PromOp.getNode()->op_end());
13186 
13187     // If there are any constant inputs, make sure they're replaced now.
13188     for (unsigned i = 0; i < 2; ++i)
13189       if (isa<ConstantSDNode>(Ops[C+i]))
13190         Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]);
13191 
13192     DAG.ReplaceAllUsesOfValueWith(PromOp,
13193       DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
13194   }
13195 
13196   // Now we're left with the initial truncation itself.
13197   if (N->getOpcode() == ISD::TRUNCATE)
13198     return N->getOperand(0);
13199 
13200   // Otherwise, this is a comparison. The operands to be compared have just
13201   // changed type (to i1), but everything else is the same.
13202   return SDValue(N, 0);
13203 }
13204 
13205 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
13206                                                   DAGCombinerInfo &DCI) const {
13207   SelectionDAG &DAG = DCI.DAG;
13208   SDLoc dl(N);
13209 
13210   // If we're tracking CR bits, we need to be careful that we don't have:
13211   //   zext(binary-ops(trunc(x), trunc(y)))
13212   // or
  //   zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...))
13214   // such that we're unnecessarily moving things into CR bits that can more
13215   // efficiently stay in GPRs. Note that if we're not certain that the high
13216   // bits are set as required by the final extension, we still may need to do
13217   // some masking to get the proper behavior.
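  // For illustration (a sketch of the intended rewrite):
  //   (zext (and (trunc i32:x), (trunc i32:y)))  ==>  (and x, y)
  // possibly followed by a final mask (or shift pair, for sign extension)
  // if the high bits are not already known to hold the required values.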
13218 
13219   // This same functionality is important on PPC64 when dealing with
13220   // 32-to-64-bit extensions; these occur often when 32-bit values are used as
13221   // the return values of functions. Because it is so similar, it is handled
13222   // here as well.
13223 
13224   if (N->getValueType(0) != MVT::i32 &&
13225       N->getValueType(0) != MVT::i64)
13226     return SDValue();
13227 
13228   if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
13229         (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
13230     return SDValue();
13231 
13232   if (N->getOperand(0).getOpcode() != ISD::AND &&
13233       N->getOperand(0).getOpcode() != ISD::OR  &&
13234       N->getOperand(0).getOpcode() != ISD::XOR &&
13235       N->getOperand(0).getOpcode() != ISD::SELECT &&
13236       N->getOperand(0).getOpcode() != ISD::SELECT_CC)
13237     return SDValue();
13238 
13239   SmallVector<SDValue, 4> Inputs;
13240   SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
13241   SmallPtrSet<SDNode *, 16> Visited;
13242 
13243   // Visit all inputs, collect all binary operations (and, or, xor and
13244   // select) that are all fed by truncations.
13245   while (!BinOps.empty()) {
13246     SDValue BinOp = BinOps.pop_back_val();
13247 
13248     if (!Visited.insert(BinOp.getNode()).second)
13249       continue;
13250 
13251     PromOps.push_back(BinOp);
13252 
13253     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
13254       // The condition of the select is not promoted.
13255       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
13256         continue;
13257       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
13258         continue;
13259 
13260       if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
13261           isa<ConstantSDNode>(BinOp.getOperand(i))) {
13262         Inputs.push_back(BinOp.getOperand(i));
13263       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
13264                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
13265                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
13266                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
13267                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
13268         BinOps.push_back(BinOp.getOperand(i));
13269       } else {
13270         // We have an input that is not a truncation or another binary
13271         // operation; we'll abort this transformation.
13272         return SDValue();
13273       }
13274     }
13275   }
13276 
13277   // The operands of a select that must be truncated when the select is
13278   // promoted because the operand is actually part of the to-be-promoted set.
13279   DenseMap<SDNode *, EVT> SelectTruncOp[2];
13280 
13281   // Make sure that this is a self-contained cluster of operations (which
13282   // is not quite the same thing as saying that everything has only one
13283   // use).
13284   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13285     if (isa<ConstantSDNode>(Inputs[i]))
13286       continue;
13287 
13288     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
13289                               UE = Inputs[i].getNode()->use_end();
13290          UI != UE; ++UI) {
13291       SDNode *User = *UI;
13292       if (User != N && !Visited.count(User))
13293         return SDValue();
13294 
13295       // If we're going to promote the non-output-value operand(s) or SELECT or
13296       // SELECT_CC, record them for truncation.
13297       if (User->getOpcode() == ISD::SELECT) {
13298         if (User->getOperand(0) == Inputs[i])
13299           SelectTruncOp[0].insert(std::make_pair(User,
13300                                     User->getOperand(0).getValueType()));
13301       } else if (User->getOpcode() == ISD::SELECT_CC) {
13302         if (User->getOperand(0) == Inputs[i])
13303           SelectTruncOp[0].insert(std::make_pair(User,
13304                                     User->getOperand(0).getValueType()));
13305         if (User->getOperand(1) == Inputs[i])
13306           SelectTruncOp[1].insert(std::make_pair(User,
13307                                     User->getOperand(1).getValueType()));
13308       }
13309     }
13310   }
13311 
13312   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
13313     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
13314                               UE = PromOps[i].getNode()->use_end();
13315          UI != UE; ++UI) {
13316       SDNode *User = *UI;
13317       if (User != N && !Visited.count(User))
13318         return SDValue();
13319 
13320       // If we're going to promote the non-output-value operand(s) or SELECT or
13321       // SELECT_CC, record them for truncation.
13322       if (User->getOpcode() == ISD::SELECT) {
13323         if (User->getOperand(0) == PromOps[i])
13324           SelectTruncOp[0].insert(std::make_pair(User,
13325                                     User->getOperand(0).getValueType()));
13326       } else if (User->getOpcode() == ISD::SELECT_CC) {
13327         if (User->getOperand(0) == PromOps[i])
13328           SelectTruncOp[0].insert(std::make_pair(User,
13329                                     User->getOperand(0).getValueType()));
13330         if (User->getOperand(1) == PromOps[i])
13331           SelectTruncOp[1].insert(std::make_pair(User,
13332                                     User->getOperand(1).getValueType()));
13333       }
13334     }
13335   }
13336 
13337   unsigned PromBits = N->getOperand(0).getValueSizeInBits();
13338   bool ReallyNeedsExt = false;
13339   if (N->getOpcode() != ISD::ANY_EXTEND) {
    // If not all of the inputs already have the required sign/zero
    // extension, then we'll still need to apply it at the end.
13342     for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13343       if (isa<ConstantSDNode>(Inputs[i]))
13344         continue;
13345 
13346       unsigned OpBits =
13347         Inputs[i].getOperand(0).getValueSizeInBits();
13348       assert(PromBits < OpBits && "Truncation not to a smaller bit count?");
13349 
13350       if ((N->getOpcode() == ISD::ZERO_EXTEND &&
13351            !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
13352                                   APInt::getHighBitsSet(OpBits,
13353                                                         OpBits-PromBits))) ||
13354           (N->getOpcode() == ISD::SIGN_EXTEND &&
13355            DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
13356              (OpBits-(PromBits-1)))) {
13357         ReallyNeedsExt = true;
13358         break;
13359       }
13360     }
13361   }
13362 
13363   // Replace all inputs, either with the truncation operand, or a
13364   // truncation or extension to the final output type.
13365   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13366     // Constant inputs need to be replaced with the to-be-promoted nodes that
13367     // use them because they might have users outside of the cluster of
13368     // promoted nodes.
13369     if (isa<ConstantSDNode>(Inputs[i]))
13370       continue;
13371 
13372     SDValue InSrc = Inputs[i].getOperand(0);
13373     if (Inputs[i].getValueType() == N->getValueType(0))
13374       DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
13375     else if (N->getOpcode() == ISD::SIGN_EXTEND)
13376       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13377         DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
13378     else if (N->getOpcode() == ISD::ZERO_EXTEND)
13379       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13380         DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
13381     else
13382       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13383         DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
13384   }
13385 
13386   std::list<HandleSDNode> PromOpHandles;
13387   for (auto &PromOp : PromOps)
13388     PromOpHandles.emplace_back(PromOp);
13389 
13390   // Replace all operations (these are all the same, but have a different
13391   // (promoted) return type). DAG.getNode will validate that the types of
13392   // a binary operator match, so go through the list in reverse so that
13393   // we've likely promoted both operands first.
13394   while (!PromOpHandles.empty()) {
13395     SDValue PromOp = PromOpHandles.back().getValue();
13396     PromOpHandles.pop_back();
13397 
13398     unsigned C;
13399     switch (PromOp.getOpcode()) {
13400     default:             C = 0; break;
13401     case ISD::SELECT:    C = 1; break;
13402     case ISD::SELECT_CC: C = 2; break;
13403     }
13404 
13405     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
13406          PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
13407         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
13408          PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) {
13409       // The to-be-promoted operands of this node have not yet been
13410       // promoted (this should be rare because we're going through the
13411       // list backward, but if one of the operands has several users in
13412       // this cluster of to-be-promoted nodes, it is possible).
13413       PromOpHandles.emplace_front(PromOp);
13414       continue;
13415     }
13416 
13417     // For SELECT and SELECT_CC nodes, we do a similar check for any
13418     // to-be-promoted comparison inputs.
13419     if (PromOp.getOpcode() == ISD::SELECT ||
13420         PromOp.getOpcode() == ISD::SELECT_CC) {
13421       if ((SelectTruncOp[0].count(PromOp.getNode()) &&
13422            PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
13423           (SelectTruncOp[1].count(PromOp.getNode()) &&
13424            PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
13425         PromOpHandles.emplace_front(PromOp);
13426         continue;
13427       }
13428     }
13429 
13430     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
13431                                 PromOp.getNode()->op_end());
13432 
13433     // If this node has constant inputs, then they'll need to be promoted here.
13434     for (unsigned i = 0; i < 2; ++i) {
13435       if (!isa<ConstantSDNode>(Ops[C+i]))
13436         continue;
13437       if (Ops[C+i].getValueType() == N->getValueType(0))
13438         continue;
13439 
13440       if (N->getOpcode() == ISD::SIGN_EXTEND)
13441         Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13442       else if (N->getOpcode() == ISD::ZERO_EXTEND)
13443         Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13444       else
13445         Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13446     }
13447 
13448     // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
13449     // truncate them again to the original value type.
13450     if (PromOp.getOpcode() == ISD::SELECT ||
13451         PromOp.getOpcode() == ISD::SELECT_CC) {
13452       auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
13453       if (SI0 != SelectTruncOp[0].end())
13454         Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
13455       auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
13456       if (SI1 != SelectTruncOp[1].end())
13457         Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
13458     }
13459 
13460     DAG.ReplaceAllUsesOfValueWith(PromOp,
13461       DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
13462   }
13463 
13464   // Now we're left with the initial extension itself.
13465   if (!ReallyNeedsExt)
13466     return N->getOperand(0);
13467 
13468   // To zero extend, just mask off everything except for the first bit (in the
13469   // i1 case).
13470   if (N->getOpcode() == ISD::ZERO_EXTEND)
13471     return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
13472                        DAG.getConstant(APInt::getLowBitsSet(
13473                                          N->getValueSizeInBits(0), PromBits),
13474                                        dl, N->getValueType(0)));
13475 
13476   assert(N->getOpcode() == ISD::SIGN_EXTEND &&
13477          "Invalid extension type");
13478   EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
13479   SDValue ShiftCst =
13480       DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
13481   return DAG.getNode(
13482       ISD::SRA, dl, N->getValueType(0),
13483       DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
13484       ShiftCst);
13485 }
13486 
13487 SDValue PPCTargetLowering::combineSetCC(SDNode *N,
13488                                         DAGCombinerInfo &DCI) const {
13489   assert(N->getOpcode() == ISD::SETCC &&
13490          "Should be called with a SETCC node");
13491 
13492   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
13493   if (CC == ISD::SETNE || CC == ISD::SETEQ) {
13494     SDValue LHS = N->getOperand(0);
13495     SDValue RHS = N->getOperand(1);
13496 
13497     // If there is a '0 - y' pattern, canonicalize the pattern to the RHS.
13498     if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
13499         LHS.hasOneUse())
13500       std::swap(LHS, RHS);
13501 
13502     // x == 0-y --> x+y == 0
13503     // x != 0-y --> x+y != 0
13504     if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
13505         RHS.hasOneUse()) {
13506       SDLoc DL(N);
13507       SelectionDAG &DAG = DCI.DAG;
13508       EVT VT = N->getValueType(0);
13509       EVT OpVT = LHS.getValueType();
13510       SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
13511       return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
13512     }
13513   }
13514 
13515   return DAGCombineTruncBoolExt(N, DCI);
13516 }
13517 
13518 // Is this an extending load from an f32 to an f64?
13519 static bool isFPExtLoad(SDValue Op) {
13520   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()))
13521     return LD->getExtensionType() == ISD::EXTLOAD &&
13522       Op.getValueType() == MVT::f64;
13523   return false;
13524 }
13525 
/// Reduces the number of fp-to-int conversions when building a vector.
13527 ///
13528 /// If this vector is built out of floating to integer conversions,
13529 /// transform it to a vector built out of floating point values followed by a
13530 /// single floating to integer conversion of the vector.
13531 /// Namely  (build_vector (fptosi $A), (fptosi $B), ...)
13532 /// becomes (fptosi (build_vector ($A, $B, ...)))
13533 SDValue PPCTargetLowering::
13534 combineElementTruncationToVectorTruncation(SDNode *N,
13535                                            DAGCombinerInfo &DCI) const {
13536   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13537          "Should be called with a BUILD_VECTOR node");
13538 
13539   SelectionDAG &DAG = DCI.DAG;
13540   SDLoc dl(N);
13541 
13542   SDValue FirstInput = N->getOperand(0);
13543   assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
13544          "The input operand must be an fp-to-int conversion.");
13545 
  // This combine happens after legalization, so the fp_to_[su]i nodes are
  // already converted to PPCISD nodes.
13548   unsigned FirstConversion = FirstInput.getOperand(0).getOpcode();
13549   if (FirstConversion == PPCISD::FCTIDZ ||
13550       FirstConversion == PPCISD::FCTIDUZ ||
13551       FirstConversion == PPCISD::FCTIWZ ||
13552       FirstConversion == PPCISD::FCTIWUZ) {
13553     bool IsSplat = true;
13554     bool Is32Bit = FirstConversion == PPCISD::FCTIWZ ||
13555       FirstConversion == PPCISD::FCTIWUZ;
13556     EVT SrcVT = FirstInput.getOperand(0).getValueType();
13557     SmallVector<SDValue, 4> Ops;
13558     EVT TargetVT = N->getValueType(0);
13559     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
13560       SDValue NextOp = N->getOperand(i);
13561       if (NextOp.getOpcode() != PPCISD::MFVSR)
13562         return SDValue();
13563       unsigned NextConversion = NextOp.getOperand(0).getOpcode();
13564       if (NextConversion != FirstConversion)
13565         return SDValue();
      // If we are converting to 32-bit integers, we need to add an FP_ROUND.
      // This is not valid if the input was originally double precision. It is
      // also not profitable to do unless this is an extending load, in which
      // case doing this combine will allow us to combine consecutive loads.
13570       if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0)))
13571         return SDValue();
13572       if (N->getOperand(i) != FirstInput)
13573         IsSplat = false;
13574     }
13575 
13576     // If this is a splat, we leave it as-is since there will be only a single
13577     // fp-to-int conversion followed by a splat of the integer. This is better
13578     // for 32-bit and smaller ints and neutral for 64-bit ints.
13579     if (IsSplat)
13580       return SDValue();
13581 
    // Now that we know we have the right type of node, get its operands.
13583     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
13584       SDValue In = N->getOperand(i).getOperand(0);
13585       if (Is32Bit) {
13586         // For 32-bit values, we need to add an FP_ROUND node (if we made it
13587         // here, we know that all inputs are extending loads so this is safe).
13588         if (In.isUndef())
13589           Ops.push_back(DAG.getUNDEF(SrcVT));
13590         else {
13591           SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl,
13592                                       MVT::f32, In.getOperand(0),
13593                                       DAG.getIntPtrConstant(1, dl));
13594           Ops.push_back(Trunc);
13595         }
13596       } else
13597         Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
13598     }
13599 
13600     unsigned Opcode;
13601     if (FirstConversion == PPCISD::FCTIDZ ||
13602         FirstConversion == PPCISD::FCTIWZ)
13603       Opcode = ISD::FP_TO_SINT;
13604     else
13605       Opcode = ISD::FP_TO_UINT;
13606 
13607     EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;
13608     SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
13609     return DAG.getNode(Opcode, dl, TargetVT, BV);
13610   }
13611   return SDValue();
13612 }
13613 
13614 /// Reduce the number of loads when building a vector.
13615 ///
13616 /// Building a vector out of multiple loads can be converted to a load
13617 /// of the vector type if the loads are consecutive. If the loads are
13618 /// consecutive but in descending order, a shuffle is added at the end
13619 /// to reorder the vector.
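/// For example (a sketch):
///   (build_vector (load p), (load p+4), (load p+8), (load p+12))
/// becomes a single vector load from p, while the descending-order variant
/// becomes that load followed by a vector_shuffle reversing the elements.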
13620 static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) {
13621   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13622          "Should be called with a BUILD_VECTOR node");
13623 
13624   SDLoc dl(N);
13625 
  // Return early for non-byte-sized types, as they can't be consecutive.
13627   if (!N->getValueType(0).getVectorElementType().isByteSized())
13628     return SDValue();
13629 
13630   bool InputsAreConsecutiveLoads = true;
13631   bool InputsAreReverseConsecutive = true;
13632   unsigned ElemSize = N->getValueType(0).getScalarType().getStoreSize();
13633   SDValue FirstInput = N->getOperand(0);
13634   bool IsRoundOfExtLoad = false;
13635 
13636   if (FirstInput.getOpcode() == ISD::FP_ROUND &&
13637       FirstInput.getOperand(0).getOpcode() == ISD::LOAD) {
13638     LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0));
13639     IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD;
13640   }
13641   // Not a build vector of (possibly fp_rounded) loads.
13642   if ((!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) ||
13643       N->getNumOperands() == 1)
13644     return SDValue();
13645 
13646   for (int i = 1, e = N->getNumOperands(); i < e; ++i) {
13647     // If any inputs are fp_round(extload), they all must be.
13648     if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND)
13649       return SDValue();
13650 
13651     SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) :
13652       N->getOperand(i);
13653     if (NextInput.getOpcode() != ISD::LOAD)
13654       return SDValue();
13655 
13656     SDValue PreviousInput =
13657       IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1);
    LoadSDNode *LD1 = cast<LoadSDNode>(PreviousInput);
    LoadSDNode *LD2 = cast<LoadSDNode>(NextInput);
13660 
13661     // If any inputs are fp_round(extload), they all must be.
13662     if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD)
13663       return SDValue();
13664 
13665     if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG))
13666       InputsAreConsecutiveLoads = false;
13667     if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG))
13668       InputsAreReverseConsecutive = false;
13669 
13670     // Exit early if the loads are neither consecutive nor reverse consecutive.
13671     if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
13672       return SDValue();
13673   }
13674 
13675   assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
13676          "The loads cannot be both consecutive and reverse consecutive.");
13677 
13678   SDValue FirstLoadOp =
13679     IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput;
13680   SDValue LastLoadOp =
13681     IsRoundOfExtLoad ? N->getOperand(N->getNumOperands()-1).getOperand(0) :
13682                        N->getOperand(N->getNumOperands()-1);
13683 
13684   LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
13685   LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
13686   if (InputsAreConsecutiveLoads) {
13687     assert(LD1 && "Input needs to be a LoadSDNode.");
13688     return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
13689                        LD1->getBasePtr(), LD1->getPointerInfo(),
13690                        LD1->getAlignment());
13691   }
13692   if (InputsAreReverseConsecutive) {
13693     assert(LDL && "Input needs to be a LoadSDNode.");
13694     SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
13695                                LDL->getBasePtr(), LDL->getPointerInfo(),
13696                                LDL->getAlignment());
13697     SmallVector<int, 16> Ops;
13698     for (int i = N->getNumOperands() - 1; i >= 0; i--)
13699       Ops.push_back(i);
13700 
13701     return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
13702                                 DAG.getUNDEF(N->getValueType(0)), Ops);
13703   }
13704   return SDValue();
13705 }
13706 
// This function adds the vector_shuffle needed to move the elements of the
// vector extracts into the positions specified by the CorrectElems encoding.
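//
// For example, on a little endian subtarget doing a byte-to-word extend,
// the LE nibbles of CorrectElems encode lanes 0x0, 0x4, 0x8 and 0xC. If
// the extracted indices recorded in Elems were 1, 5, 9 and 13 (a
// hypothetical input), the mask built below would move element 1 into
// lane 0, 5 into lane 4, 9 into lane 8 and 13 into lane 12 before the
// SIGN_EXTEND_INREG is created.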
13710 static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
13711                                       SDValue Input, uint64_t Elems,
13712                                       uint64_t CorrectElems) {
13713   SDLoc dl(N);
13714 
13715   unsigned NumElems = Input.getValueType().getVectorNumElements();
13716   SmallVector<int, 16> ShuffleMask(NumElems, -1);
13717 
  // Knowing the element indices being extracted from the original
  // vector and the order in which they're being inserted, just put
  // them at the element indices required by the instruction.
13721   for (unsigned i = 0; i < N->getNumOperands(); i++) {
13722     if (DAG.getDataLayout().isLittleEndian())
13723       ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
13724     else
13725       ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
13726     CorrectElems = CorrectElems >> 8;
13727     Elems = Elems >> 8;
13728   }
13729 
13730   SDValue Shuffle =
13731       DAG.getVectorShuffle(Input.getValueType(), dl, Input,
13732                            DAG.getUNDEF(Input.getValueType()), ShuffleMask);
13733 
13734   EVT VT = N->getValueType(0);
13735   SDValue Conv = DAG.getBitcast(VT, Shuffle);
13736 
13737   EVT ExtVT = EVT::getVectorVT(*DAG.getContext(),
13738                                Input.getValueType().getVectorElementType(),
13739                                VT.getVectorNumElements());
13740   return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Conv,
13741                      DAG.getValueType(ExtVT));
13742 }
13743 
// Look for build vector patterns where the input operands come from sign
// extended vector_extract elements of specific indices. If the correct
// indices aren't used, add a vector shuffle to fix up the indices and create
// a SIGN_EXTEND_INREG node, which selects the vector sign extend instructions
// during instruction selection.
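//
// A sketch of the shape being matched (the element indices are
// illustrative):
//   (v4i32 (build_vector (sext (extractelt v16i8:$v, 1)),
//                        (sext (extractelt v16i8:$v, 5)),
//                        (sext (extractelt v16i8:$v, 9)),
//                        (sext (extractelt v16i8:$v, 13))))
// If the indices are not the ones the hardware extracts from, a
// vector_shuffle is added first to move them into place.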
13749 static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
13750   // This array encodes the indices that the vector sign extend instructions
13751   // extract from when extending from one type to another for both BE and LE.
  // The right nibble of each byte corresponds to the LE indices,
  // and the left nibble of each byte corresponds to the BE indices.
13754   // For example: 0x3074B8FC  byte->word
13755   // For LE: the allowed indices are: 0x0,0x4,0x8,0xC
13756   // For BE: the allowed indices are: 0x3,0x7,0xB,0xF
13757   // For example: 0x000070F8  byte->double word
13758   // For LE: the allowed indices are: 0x0,0x8
13759   // For BE: the allowed indices are: 0x7,0xF
13760   uint64_t TargetElems[] = {
13761       0x3074B8FC, // b->w
13762       0x000070F8, // b->d
13763       0x10325476, // h->w
13764       0x00003074, // h->d
13765       0x00001032, // w->d
13766   };
13767 
13768   uint64_t Elems = 0;
13769   int Index;
13770   SDValue Input;
13771 
13772   auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
13773     if (!Op)
13774       return false;
13775     if (Op.getOpcode() != ISD::SIGN_EXTEND &&
13776         Op.getOpcode() != ISD::SIGN_EXTEND_INREG)
13777       return false;
13778 
13779     // A SIGN_EXTEND_INREG might be fed by an ANY_EXTEND to produce a value
13780     // of the right width.
13781     SDValue Extract = Op.getOperand(0);
13782     if (Extract.getOpcode() == ISD::ANY_EXTEND)
13783       Extract = Extract.getOperand(0);
13784     if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
13785       return false;
13786 
13787     ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
13788     if (!ExtOp)
13789       return false;
13790 
13791     Index = ExtOp->getZExtValue();
13792     if (Input && Input != Extract.getOperand(0))
13793       return false;
13794 
13795     if (!Input)
13796       Input = Extract.getOperand(0);
13797 
13798     Elems = Elems << 8;
13799     Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
13800     Elems |= Index;
13801 
13802     return true;
13803   };
13804 
  // If the build vector operands aren't sign-extended vector extracts
  // of the same input vector, then return.
13807   for (unsigned i = 0; i < N->getNumOperands(); i++) {
13808     if (!isSExtOfVecExtract(N->getOperand(i))) {
13809       return SDValue();
13810     }
13811   }
13812 
  // If the vector extract indices are not correct, add the appropriate
  // vector_shuffle.
13815   int TgtElemArrayIdx;
13816   int InputSize = Input.getValueType().getScalarSizeInBits();
13817   int OutputSize = N->getValueType(0).getScalarSizeInBits();
13818   if (InputSize + OutputSize == 40)
13819     TgtElemArrayIdx = 0;
13820   else if (InputSize + OutputSize == 72)
13821     TgtElemArrayIdx = 1;
13822   else if (InputSize + OutputSize == 48)
13823     TgtElemArrayIdx = 2;
13824   else if (InputSize + OutputSize == 80)
13825     TgtElemArrayIdx = 3;
13826   else if (InputSize + OutputSize == 96)
13827     TgtElemArrayIdx = 4;
13828   else
13829     return SDValue();
13830 
13831   uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
13832   CorrectElems = DAG.getDataLayout().isLittleEndian()
13833                      ? CorrectElems & 0x0F0F0F0F0F0F0F0F
13834                      : CorrectElems & 0xF0F0F0F0F0F0F0F0;
13835   if (Elems != CorrectElems) {
13836     return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
13837   }
13838 
13839   // Regular lowering will catch cases where a shuffle is not needed.
13840   return SDValue();
13841 }
13842 
13843 // Look for the pattern of a load from a narrow width to i128, feeding
13844 // into a BUILD_VECTOR of v1i128. Replace this sequence with a PPCISD node
13845 // (LXVRZX). This node represents a zero extending load that will be matched
13846 // to the Load VSX Vector Rightmost instructions.
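//
// An illustrative sketch (the pointer %p is hypothetical):
//   (v1i128 (build_vector (i128 (zextload<i16> %p))))
//     --> (v1i128 (PPCISD::LXVRZX %chain, %p, 16))
// where the last operand is the width of the load in bits.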
13847 static SDValue combineBVZEXTLOAD(SDNode *N, SelectionDAG &DAG) {
13848   SDLoc DL(N);
13849 
13850   // This combine is only eligible for a BUILD_VECTOR of v1i128.
13851   if (N->getValueType(0) != MVT::v1i128)
13852     return SDValue();
13853 
13854   SDValue Operand = N->getOperand(0);
13855   // Proceed with the transformation if the operand to the BUILD_VECTOR
13856   // is a load instruction.
13857   if (Operand.getOpcode() != ISD::LOAD)
13858     return SDValue();
13859 
  LoadSDNode *LD = cast<LoadSDNode>(Operand);
13861   EVT MemoryType = LD->getMemoryVT();
13862 
  // This transformation is only valid if we are loading either a byte,
  // halfword, word, or doubleword.
13865   bool ValidLDType = MemoryType == MVT::i8 || MemoryType == MVT::i16 ||
13866                      MemoryType == MVT::i32 || MemoryType == MVT::i64;
13867 
13868   // Ensure that the load from the narrow width is being zero extended to i128.
13869   if (!ValidLDType ||
13870       (LD->getExtensionType() != ISD::ZEXTLOAD &&
13871        LD->getExtensionType() != ISD::EXTLOAD))
13872     return SDValue();
13873 
13874   SDValue LoadOps[] = {
13875       LD->getChain(), LD->getBasePtr(),
13876       DAG.getIntPtrConstant(MemoryType.getScalarSizeInBits(), DL)};
13877 
13878   return DAG.getMemIntrinsicNode(PPCISD::LXVRZX, DL,
13879                                  DAG.getVTList(MVT::v1i128, MVT::Other),
13880                                  LoadOps, MemoryType, LD->getMemOperand());
13881 }
13882 
13883 SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
13884                                                  DAGCombinerInfo &DCI) const {
13885   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13886          "Should be called with a BUILD_VECTOR node");
13887 
13888   SelectionDAG &DAG = DCI.DAG;
13889   SDLoc dl(N);
13890 
13891   if (!Subtarget.hasVSX())
13892     return SDValue();
13893 
13894   // The target independent DAG combiner will leave a build_vector of
13895   // float-to-int conversions intact. We can generate MUCH better code for
13896   // a float-to-int conversion of a vector of floats.
13897   SDValue FirstInput = N->getOperand(0);
13898   if (FirstInput.getOpcode() == PPCISD::MFVSR) {
13899     SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
13900     if (Reduced)
13901       return Reduced;
13902   }
13903 
13904   // If we're building a vector out of consecutive loads, just load that
13905   // vector type.
13906   SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG);
13907   if (Reduced)
13908     return Reduced;
13909 
13910   // If we're building a vector out of extended elements from another vector
13911   // we have P9 vector integer extend instructions. The code assumes legal
13912   // input types (i.e. it can't handle things like v4i16) so do not run before
13913   // legalization.
13914   if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) {
13915     Reduced = combineBVOfVecSExt(N, DAG);
13916     if (Reduced)
13917       return Reduced;
13918   }
13919 
13920   // On Power10, the Load VSX Vector Rightmost instructions can be utilized
13921   // if this is a BUILD_VECTOR of v1i128, and if the operand to the BUILD_VECTOR
13922   // is a load from <valid narrow width> to i128.
13923   if (Subtarget.isISA3_1()) {
13924     SDValue BVOfZLoad = combineBVZEXTLOAD(N, DAG);
13925     if (BVOfZLoad)
13926       return BVOfZLoad;
13927   }
13928 
13929   if (N->getValueType(0) != MVT::v2f64)
13930     return SDValue();
13931 
13932   // Looking for:
13933   // (build_vector ([su]int_to_fp (extractelt 0)), [su]int_to_fp (extractelt 1))
13934   if (FirstInput.getOpcode() != ISD::SINT_TO_FP &&
13935       FirstInput.getOpcode() != ISD::UINT_TO_FP)
13936     return SDValue();
13937   if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
13938       N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
13939     return SDValue();
13940   if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
13941     return SDValue();
13942 
13943   SDValue Ext1 = FirstInput.getOperand(0);
13944   SDValue Ext2 = N->getOperand(1).getOperand(0);
  if (Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
13947     return SDValue();
13948 
13949   ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
13950   ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
13951   if (!Ext1Op || !Ext2Op)
13952     return SDValue();
13953   if (Ext1.getOperand(0).getValueType() != MVT::v4i32 ||
13954       Ext1.getOperand(0) != Ext2.getOperand(0))
13955     return SDValue();
13956 
13957   int FirstElem = Ext1Op->getZExtValue();
13958   int SecondElem = Ext2Op->getZExtValue();
13959   int SubvecIdx;
13960   if (FirstElem == 0 && SecondElem == 1)
13961     SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
13962   else if (FirstElem == 2 && SecondElem == 3)
13963     SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
13964   else
13965     return SDValue();
13966 
13967   SDValue SrcVec = Ext1.getOperand(0);
13968   auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
13969     PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
13970   return DAG.getNode(NodeType, dl, MVT::v2f64,
13971                      SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
13972 }
13973 
13974 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
13975                                               DAGCombinerInfo &DCI) const {
13976   assert((N->getOpcode() == ISD::SINT_TO_FP ||
13977           N->getOpcode() == ISD::UINT_TO_FP) &&
13978          "Need an int -> FP conversion node here");
13979 
13980   if (useSoftFloat() || !Subtarget.has64BitSupport())
13981     return SDValue();
13982 
13983   SelectionDAG &DAG = DCI.DAG;
13984   SDLoc dl(N);
13985   SDValue Op(N, 0);
13986 
13987   // Don't handle ppc_fp128 here or conversions that are out-of-range capable
13988   // from the hardware.
13989   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
13990     return SDValue();
13991   if (!Op.getOperand(0).getValueType().isSimple())
13992     return SDValue();
13993   if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
13994       Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
13995     return SDValue();
13996 
13997   SDValue FirstOperand(Op.getOperand(0));
13998   bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
13999     (FirstOperand.getValueType() == MVT::i8 ||
14000      FirstOperand.getValueType() == MVT::i16);
14001   if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
14002     bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
14003     bool DstDouble = Op.getValueType() == MVT::f64;
14004     unsigned ConvOp = Signed ?
14005       (DstDouble ? PPCISD::FCFID  : PPCISD::FCFIDS) :
14006       (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
14007     SDValue WidthConst =
14008       DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
14009                             dl, false);
14010     LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
14011     SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
14012     SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
14013                                          DAG.getVTList(MVT::f64, MVT::Other),
14014                                          Ops, MVT::i8, LDN->getMemOperand());
14015 
14016     // For signed conversion, we need to sign-extend the value in the VSR
14017     if (Signed) {
14018       SDValue ExtOps[] = { Ld, WidthConst };
14019       SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
14020       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
    }
    return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
14023   }
14024 
14025 
14026   // For i32 intermediate values, unfortunately, the conversion functions
  // leave the upper 32 bits of the value undefined. Within the set of
14028   // scalar instructions, we have no method for zero- or sign-extending the
14029   // value. Thus, we cannot handle i32 intermediate values here.
14030   if (Op.getOperand(0).getValueType() == MVT::i32)
14031     return SDValue();
14032 
14033   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
14034          "UINT_TO_FP is supported only with FPCVT");
14035 
14036   // If we have FCFIDS, then use it when converting to single-precision.
14037   // Otherwise, convert to double-precision and then round.
14038   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
14039                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
14040                                                             : PPCISD::FCFIDS)
14041                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
14042                                                             : PPCISD::FCFID);
14043   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
14044                   ? MVT::f32
14045                   : MVT::f64;
14046 
  // If we're converting from a float to an int and back to a float again,
14048   // then we don't need the store/load pair at all.
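  // For the signed f64 case, the rewrite is (a sketch; register names are
  // illustrative):
  //   (f64 (sint_to_fp (i64 (fp_to_sint f64:$A))))
  //     --> (f64 (PPCISD::FCFID (f64 (PPCISD::FCTIDZ f64:$A))))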
14049   if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
14050        Subtarget.hasFPCVT()) ||
14051       (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
14052     SDValue Src = Op.getOperand(0).getOperand(0);
14053     if (Src.getValueType() == MVT::f32) {
14054       Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
14055       DCI.AddToWorklist(Src.getNode());
14056     } else if (Src.getValueType() != MVT::f64) {
14057       // Make sure that we don't pick up a ppc_fp128 source value.
14058       return SDValue();
14059     }
14060 
14061     unsigned FCTOp =
14062       Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
14063                                                         PPCISD::FCTIDUZ;
14064 
14065     SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
14066     SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);
14067 
14068     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
14069       FP = DAG.getNode(ISD::FP_ROUND, dl,
14070                        MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
14071       DCI.AddToWorklist(FP.getNode());
14072     }
14073 
14074     return FP;
14075   }
14076 
14077   return SDValue();
14078 }
14079 
14080 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
14081 // builtins) into loads with swaps.
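//
// An illustrative sketch for a little endian v4i32 load (%p and the chain
// are hypothetical):
//   (v4i32 (load %p))
//     --> (v4i32 (bitcast (PPCISD::XXSWAPD (v2f64 (PPCISD::LXVD2X %p)))))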
14082 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
14083                                               DAGCombinerInfo &DCI) const {
14084   SelectionDAG &DAG = DCI.DAG;
14085   SDLoc dl(N);
14086   SDValue Chain;
14087   SDValue Base;
14088   MachineMemOperand *MMO;
14089 
14090   switch (N->getOpcode()) {
14091   default:
14092     llvm_unreachable("Unexpected opcode for little endian VSX load");
14093   case ISD::LOAD: {
14094     LoadSDNode *LD = cast<LoadSDNode>(N);
14095     Chain = LD->getChain();
14096     Base = LD->getBasePtr();
14097     MMO = LD->getMemOperand();
14098     // If the MMO suggests this isn't a load of a full vector, leave
14099     // things alone.  For a built-in, we have to make the change for
14100     // correctness, so if there is a size problem that will be a bug.
14101     if (MMO->getSize() < 16)
14102       return SDValue();
14103     break;
14104   }
14105   case ISD::INTRINSIC_W_CHAIN: {
14106     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
14107     Chain = Intrin->getChain();
14108     // Similarly to the store case below, Intrin->getBasePtr() doesn't get
14109     // us what we want. Get operand 2 instead.
14110     Base = Intrin->getOperand(2);
14111     MMO = Intrin->getMemOperand();
14112     break;
14113   }
14114   }
14115 
14116   MVT VecTy = N->getValueType(0).getSimpleVT();
14117 
  // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is
  // aligned and the type is a vector with elements up to 4 bytes.
14120   if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
14121       VecTy.getScalarSizeInBits() <= 32) {
14122     return SDValue();
14123   }
14124 
14125   SDValue LoadOps[] = { Chain, Base };
14126   SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
14127                                          DAG.getVTList(MVT::v2f64, MVT::Other),
14128                                          LoadOps, MVT::v2f64, MMO);
14129 
14130   DCI.AddToWorklist(Load.getNode());
14131   Chain = Load.getValue(1);
14132   SDValue Swap = DAG.getNode(
14133       PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load);
14134   DCI.AddToWorklist(Swap.getNode());
14135 
14136   // Add a bitcast if the resulting load type doesn't match v2f64.
14137   if (VecTy != MVT::v2f64) {
14138     SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap);
14139     DCI.AddToWorklist(N.getNode());
14140     // Package {bitcast value, swap's chain} to match Load's shape.
14141     return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other),
14142                        N, Swap.getValue(1));
14143   }
14144 
14145   return Swap;
14146 }
14147 
14148 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
14149 // builtins) into stores with swaps.
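//
// An illustrative sketch for a little endian v4i32 store (%p is
// hypothetical):
//   (store v4i32:$v, %p)
//     --> (PPCISD::STXVD2X (PPCISD::XXSWAPD (v2f64 (bitcast $v))), %p)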
14150 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
14151                                                DAGCombinerInfo &DCI) const {
14152   SelectionDAG &DAG = DCI.DAG;
14153   SDLoc dl(N);
14154   SDValue Chain;
14155   SDValue Base;
14156   unsigned SrcOpnd;
14157   MachineMemOperand *MMO;
14158 
14159   switch (N->getOpcode()) {
14160   default:
14161     llvm_unreachable("Unexpected opcode for little endian VSX store");
14162   case ISD::STORE: {
14163     StoreSDNode *ST = cast<StoreSDNode>(N);
14164     Chain = ST->getChain();
14165     Base = ST->getBasePtr();
14166     MMO = ST->getMemOperand();
14167     SrcOpnd = 1;
14168     // If the MMO suggests this isn't a store of a full vector, leave
14169     // things alone.  For a built-in, we have to make the change for
14170     // correctness, so if there is a size problem that will be a bug.
14171     if (MMO->getSize() < 16)
14172       return SDValue();
14173     break;
14174   }
14175   case ISD::INTRINSIC_VOID: {
14176     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
14177     Chain = Intrin->getChain();
14178     // Intrin->getBasePtr() oddly does not get what we want.
14179     Base = Intrin->getOperand(3);
14180     MMO = Intrin->getMemOperand();
14181     SrcOpnd = 2;
14182     break;
14183   }
14184   }
14185 
14186   SDValue Src = N->getOperand(SrcOpnd);
14187   MVT VecTy = Src.getValueType().getSimpleVT();
14188 
  // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the store is
  // aligned and the type is a vector with elements up to 4 bytes.
14191   if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
14192       VecTy.getScalarSizeInBits() <= 32) {
14193     return SDValue();
14194   }
14195 
  // All stores are done as v2f64, with a bitcast added when necessary.
14197   if (VecTy != MVT::v2f64) {
14198     Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
14199     DCI.AddToWorklist(Src.getNode());
14200   }
14201 
14202   SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
14203                              DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
14204   DCI.AddToWorklist(Swap.getNode());
14205   Chain = Swap.getValue(1);
14206   SDValue StoreOps[] = { Chain, Swap, Base };
14207   SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
14208                                           DAG.getVTList(MVT::Other),
14209                                           StoreOps, VecTy, MMO);
14210   DCI.AddToWorklist(Store.getNode());
14211   return Store;
14212 }
14213 
14214 // Handle DAG combine for STORE (FP_TO_INT F).
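// An illustrative sketch (%p is a hypothetical pointer operand):
//   (store (i32 (fp_to_sint f64:$f)), %p)
//     --> (PPCISD::ST_VSR_SCAL_INT (PPCISD::FP_TO_SINT_IN_VSR $f), %p)
// so the converted value is stored directly from the VSR rather than
// taking a round trip through a GPR.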
14215 SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
14218   SelectionDAG &DAG = DCI.DAG;
14219   SDLoc dl(N);
14220   unsigned Opcode = N->getOperand(1).getOpcode();
14221 
14222   assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT)
14223          && "Not a FP_TO_INT Instruction!");
14224 
14225   SDValue Val = N->getOperand(1).getOperand(0);
14226   EVT Op1VT = N->getOperand(1).getValueType();
14227   EVT ResVT = Val.getValueType();
14228 
14229   if (!isTypeLegal(ResVT))
14230     return SDValue();
14231 
14232   // Only perform combine for conversion to i64/i32 or power9 i16/i8.
14233   bool ValidTypeForStoreFltAsInt =
14234         (Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
14235          (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));
14236 
14237   if (ResVT == MVT::f128 && !Subtarget.hasP9Vector())
14238     return SDValue();
14239 
14240   if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Vector() ||
14241       cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
14242     return SDValue();
14243 
14244   // Extend f32 values to f64
14245   if (ResVT.getScalarSizeInBits() == 32) {
14246     Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
14247     DCI.AddToWorklist(Val.getNode());
14248   }
14249 
14250   // Set signed or unsigned conversion opcode.
14251   unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT) ?
14252                           PPCISD::FP_TO_SINT_IN_VSR :
14253                           PPCISD::FP_TO_UINT_IN_VSR;
14254 
14255   Val = DAG.getNode(ConvOpcode,
14256                     dl, ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val);
14257   DCI.AddToWorklist(Val.getNode());
14258 
14259   // Set number of bytes being converted.
14260   unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8;
14261   SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2),
14262                     DAG.getIntPtrConstant(ByteSize, dl, false),
14263                     DAG.getValueType(Op1VT) };
14264 
14265   Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl,
14266           DAG.getVTList(MVT::Other), Ops,
14267           cast<StoreSDNode>(N)->getMemoryVT(),
14268           cast<StoreSDNode>(N)->getMemOperand());
14269 
14270   DCI.AddToWorklist(Val.getNode());
14271   return Val;
14272 }
14273 
static bool isAlternatingShuffMask(ArrayRef<int> Mask, int NumElts) {
14275   // Check that the source of the element keeps flipping
  // (i.e. Mask[i] < NumElts -> Mask[i+1] >= NumElts).
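  // For example, with NumElts == 16, the mask
  // <0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23> alternates and is accepted,
  // whereas <0,1,16,17,...> is rejected.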
14277   bool PrevElemFromFirstVec = Mask[0] < NumElts;
14278   for (int i = 1, e = Mask.size(); i < e; i++) {
14279     if (PrevElemFromFirstVec && Mask[i] < NumElts)
14280       return false;
14281     if (!PrevElemFromFirstVec && Mask[i] >= NumElts)
14282       return false;
14283     PrevElemFromFirstVec = !PrevElemFromFirstVec;
14284   }
14285   return true;
14286 }
14287 
14288 static bool isSplatBV(SDValue Op) {
14289   if (Op.getOpcode() != ISD::BUILD_VECTOR)
14290     return false;
14291   SDValue FirstOp;
14292 
14293   // Find first non-undef input.
14294   for (int i = 0, e = Op.getNumOperands(); i < e; i++) {
14295     FirstOp = Op.getOperand(i);
14296     if (!FirstOp.isUndef())
14297       break;
14298   }
14299 
14300   // All inputs are undef or the same as the first non-undef input.
14301   for (int i = 1, e = Op.getNumOperands(); i < e; i++)
14302     if (Op.getOperand(i) != FirstOp && !Op.getOperand(i).isUndef())
14303       return false;
14304   return true;
14305 }
14306 
14307 static SDValue isScalarToVec(SDValue Op) {
14308   if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
14309     return Op;
14310   if (Op.getOpcode() != ISD::BITCAST)
14311     return SDValue();
14312   Op = Op.getOperand(0);
14313   if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
14314     return Op;
14315   return SDValue();
14316 }
14317 
14318 // Fix up the shuffle mask to account for the fact that the result of
14319 // scalar_to_vector is not in lane zero. This just takes all values in
14320 // the ranges specified by the min/max indices and adds the number of
14321 // elements required to ensure each element comes from the respective
14322 // position in the valid lane.
14323 // On little endian, that's just the corresponding element in the other
14324 // half of the vector. On big endian, it is in the same half but right
14325 // justified rather than left justified in that half.
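//
// For example (a hypothetical little endian v2i64 case): with HalfVec == 1,
// a mask entry of 0 that refers to the permuted input becomes 1, selecting
// the value from the half of the vector the permuted scalar_to_vector
// actually left it in.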
14326 static void fixupShuffleMaskForPermutedSToV(SmallVectorImpl<int> &ShuffV,
14327                                             int LHSMaxIdx, int RHSMinIdx,
14328                                             int RHSMaxIdx, int HalfVec,
14329                                             unsigned ValidLaneWidth,
14330                                             const PPCSubtarget &Subtarget) {
14331   for (int i = 0, e = ShuffV.size(); i < e; i++) {
14332     int Idx = ShuffV[i];
14333     if ((Idx >= 0 && Idx < LHSMaxIdx) || (Idx >= RHSMinIdx && Idx < RHSMaxIdx))
14334       ShuffV[i] +=
14335           Subtarget.isLittleEndian() ? HalfVec : HalfVec - ValidLaneWidth;
14336   }
14337 }
14338 
14339 // Replace a SCALAR_TO_VECTOR with a SCALAR_TO_VECTOR_PERMUTED except if
14340 // the original is:
14341 // (<n x Ty> (scalar_to_vector (Ty (extract_elt <n x Ty> %a, C))))
14342 // In such a case, just change the shuffle mask to extract the element
14343 // from the permuted index.
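//
// For example (illustrative, little endian v2i64):
//   (v2i64 (scalar_to_vector (i64 (extract_elt v2i64:$a, 0))))
//     --> (vector_shuffle<-1,0> $a, $a)
// i.e. no new node is needed; the shuffle simply reads the element from
// the lane where the permuted scalar_to_vector would have left it.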
14344 static SDValue getSToVPermuted(SDValue OrigSToV, SelectionDAG &DAG,
14345                                const PPCSubtarget &Subtarget) {
14346   SDLoc dl(OrigSToV);
14347   EVT VT = OrigSToV.getValueType();
14348   assert(OrigSToV.getOpcode() == ISD::SCALAR_TO_VECTOR &&
14349          "Expecting a SCALAR_TO_VECTOR here");
14350   SDValue Input = OrigSToV.getOperand(0);
14351 
14352   if (Input.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
14353     ConstantSDNode *Idx = dyn_cast<ConstantSDNode>(Input.getOperand(1));
14354     SDValue OrigVector = Input.getOperand(0);
14355 
14356     // Can't handle non-const element indices or different vector types
14357     // for the input to the extract and the output of the scalar_to_vector.
14358     if (Idx && VT == OrigVector.getValueType()) {
14359       unsigned NumElts = VT.getVectorNumElements();
14360       assert(
14361           NumElts > 1 &&
14362           "Cannot produce a permuted scalar_to_vector for one element vector");
14363       SmallVector<int, 16> NewMask(NumElts, -1);
14364       unsigned ResultInElt = NumElts / 2;
14365       ResultInElt -= Subtarget.isLittleEndian() ? 0 : 1;
14366       NewMask[ResultInElt] = Idx->getZExtValue();
14367       return DAG.getVectorShuffle(VT, dl, OrigVector, OrigVector, NewMask);
14368     }
14369   }
14370   return DAG.getNode(PPCISD::SCALAR_TO_VECTOR_PERMUTED, dl, VT,
14371                      OrigSToV.getOperand(0));
14372 }
14373 
14374 // On little endian subtargets, combine shuffles such as:
14375 // vector_shuffle<16,1,17,3,18,5,19,7,20,9,21,11,22,13,23,15>, <zero>, %b
14376 // into:
14377 // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7>, <zero>, %b
14378 // because the latter can be matched to a single instruction merge.
14379 // Furthermore, SCALAR_TO_VECTOR on little endian always involves a permute
14380 // to put the value into element zero. Adjust the shuffle mask so that the
14381 // vector can remain in permuted form (to prevent a swap prior to a shuffle).
14382 // On big endian targets, this is still useful for SCALAR_TO_VECTOR
14383 // nodes with elements smaller than doubleword because all the ways
14384 // of getting scalar data into a vector register put the value in the
14385 // rightmost element of the left half of the vector.
14386 SDValue PPCTargetLowering::combineVectorShuffle(ShuffleVectorSDNode *SVN,
14387                                                 SelectionDAG &DAG) const {
14388   SDValue LHS = SVN->getOperand(0);
14389   SDValue RHS = SVN->getOperand(1);
14390   auto Mask = SVN->getMask();
14391   int NumElts = LHS.getValueType().getVectorNumElements();
14392   SDValue Res(SVN, 0);
14393   SDLoc dl(SVN);
14394   bool IsLittleEndian = Subtarget.isLittleEndian();
14395 
14396   // On little endian targets, do these combines on all VSX targets since
14397   // canonical shuffles match efficient permutes. On big endian targets,
14398   // this is only useful for targets with direct moves.
14399   if (!Subtarget.hasDirectMove() && !(IsLittleEndian && Subtarget.hasVSX()))
14400     return Res;
14401 
14402   // If this is not a shuffle of a shuffle and the first element comes from
14403   // the second vector, canonicalize to the commuted form. This will make it
14404   // more likely to match one of the single instruction patterns.
14405   if (Mask[0] >= NumElts && LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
14406       RHS.getOpcode() != ISD::VECTOR_SHUFFLE) {
14407     std::swap(LHS, RHS);
14408     Res = DAG.getCommutedVectorShuffle(*SVN);
14409     Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
14410   }
14411 
14412   // Adjust the shuffle mask if either input vector comes from a
14413   // SCALAR_TO_VECTOR and keep the respective input vector in permuted
14414   // form (to prevent the need for a swap).
14415   SmallVector<int, 16> ShuffV(Mask.begin(), Mask.end());
14416   SDValue SToVLHS = isScalarToVec(LHS);
14417   SDValue SToVRHS = isScalarToVec(RHS);
14418   if (SToVLHS || SToVRHS) {
14419     int NumEltsIn = SToVLHS ? SToVLHS.getValueType().getVectorNumElements()
14420                             : SToVRHS.getValueType().getVectorNumElements();
14421     int NumEltsOut = ShuffV.size();
14422     unsigned InElemSizeInBits =
14423         SToVLHS ? SToVLHS.getValueType().getScalarSizeInBits()
14424                 : SToVRHS.getValueType().getScalarSizeInBits();
14425     unsigned OutElemSizeInBits = SToVLHS
14426                                      ? LHS.getValueType().getScalarSizeInBits()
14427                                      : RHS.getValueType().getScalarSizeInBits();
14428 
14429     // The width of the "valid lane" (i.e. the lane that contains the value that
14430     // is vectorized) needs to be expressed in terms of the number of elements
14431     // of the shuffle. It is thereby the ratio of the values before and after
14432     // any bitcast.
14433     unsigned ValidLaneWidth = InElemSizeInBits / OutElemSizeInBits;
14434 
14435     // Initially assume that neither input is permuted. These will be adjusted
14436     // accordingly if either input is.
14437     int LHSMaxIdx = -1;
14438     int RHSMinIdx = -1;
14439     int RHSMaxIdx = -1;
14440     int HalfVec = LHS.getValueType().getVectorNumElements() / 2;
14441 
14442     // Get the permuted scalar to vector nodes for the source(s) that come from
14443     // ISD::SCALAR_TO_VECTOR.
14444     // On big endian systems, this only makes sense for element sizes smaller
14445     // than 64 bits since for 64-bit elements, all instructions already put
14446     // the value into element zero.
14447     if (SToVLHS) {
14448       if (!IsLittleEndian && InElemSizeInBits >= 64)
14449         return Res;
14450       // Set up the values for the shuffle vector fixup.
14451       LHSMaxIdx = NumEltsOut / NumEltsIn;
14452       SToVLHS = getSToVPermuted(SToVLHS, DAG, Subtarget);
14453       if (SToVLHS.getValueType() != LHS.getValueType())
14454         SToVLHS = DAG.getBitcast(LHS.getValueType(), SToVLHS);
14455       LHS = SToVLHS;
14456     }
14457     if (SToVRHS) {
14458       if (!IsLittleEndian && InElemSizeInBits >= 64)
14459         return Res;
14460       RHSMinIdx = NumEltsOut;
14461       RHSMaxIdx = NumEltsOut / NumEltsIn + RHSMinIdx;
14462       SToVRHS = getSToVPermuted(SToVRHS, DAG, Subtarget);
14463       if (SToVRHS.getValueType() != RHS.getValueType())
14464         SToVRHS = DAG.getBitcast(RHS.getValueType(), SToVRHS);
14465       RHS = SToVRHS;
14466     }
14467 
14468     // Fix up the shuffle mask to reflect where the desired element actually is.
14469     // The minimum and maximum indices that correspond to element zero for both
14470     // the LHS and RHS are computed and will control which shuffle mask entries
14471     // are to be changed. For example, if the RHS is permuted, any shuffle mask
14472     // entries in the range [RHSMinIdx,RHSMaxIdx) will be adjusted.
14473     fixupShuffleMaskForPermutedSToV(ShuffV, LHSMaxIdx, RHSMinIdx, RHSMaxIdx,
14474                                     HalfVec, ValidLaneWidth, Subtarget);
14475     Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
14476 
14477     // We may have simplified away the shuffle. We won't be able to do anything
14478     // further with it here.
14479     if (!isa<ShuffleVectorSDNode>(Res))
14480       return Res;
14481     Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
14482   }
14483 
14484   SDValue TheSplat = IsLittleEndian ? RHS : LHS;
14485   // The common case after we commuted the shuffle is that the RHS is a splat
14486   // and we have elements coming in from the splat at indices that are not
14487   // conducive to using a merge.
14488   // Example:
14489   // vector_shuffle<0,17,1,19,2,21,3,23,4,25,5,27,6,29,7,31> t1, <zero>
14490   if (!isSplatBV(TheSplat))
14491     return Res;
14492 
14493   // We are looking for a mask such that all even elements are from
14494   // one vector and all odd elements from the other.
14495   if (!isAlternatingShuffMask(Mask, NumElts))
14496     return Res;
14497 
14498   // Adjust the mask so we are pulling in the same index from the splat
14499   // as the index from the interesting vector in consecutive elements.
14500   if (IsLittleEndian) {
14501     // Example (even elements from first vector):
14502     // vector_shuffle<0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23> t1, <zero>
14503     if (Mask[0] < NumElts)
14504       for (int i = 1, e = Mask.size(); i < e; i += 2)
14505         ShuffV[i] = (ShuffV[i - 1] + NumElts);
14506     // Example (odd elements from first vector):
14507     // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7> t1, <zero>
14508     else
14509       for (int i = 0, e = Mask.size(); i < e; i += 2)
14510         ShuffV[i] = (ShuffV[i + 1] + NumElts);
14511   } else {
14512     // Example (even elements from first vector):
14513     // vector_shuffle<0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23> <zero>, t1
14514     if (Mask[0] < NumElts)
14515       for (int i = 0, e = Mask.size(); i < e; i += 2)
14516         ShuffV[i] = ShuffV[i + 1] - NumElts;
14517     // Example (odd elements from first vector):
14518     // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7> <zero>, t1
14519     else
14520       for (int i = 1, e = Mask.size(); i < e; i += 2)
14521         ShuffV[i] = ShuffV[i - 1] - NumElts;
14522   }
14523 
14524   // If the RHS has undefs, we need to remove them since we may have created
14525   // a shuffle that adds those instead of the splat value.
14526   SDValue SplatVal =
14527       cast<BuildVectorSDNode>(TheSplat.getNode())->getSplatValue();
14528   TheSplat = DAG.getSplatBuildVector(TheSplat.getValueType(), dl, SplatVal);
14529 
14530   if (IsLittleEndian)
14531     RHS = TheSplat;
14532   else
14533     LHS = TheSplat;
14534   return DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
14535 }
14536 
14537 SDValue PPCTargetLowering::combineVReverseMemOP(ShuffleVectorSDNode *SVN,
14538                                                 LSBaseSDNode *LSBase,
14539                                                 DAGCombinerInfo &DCI) const {
14540   assert((ISD::isNormalLoad(LSBase) || ISD::isNormalStore(LSBase)) &&
14541         "Not a reverse memop pattern!");
14542 
14543   auto IsElementReverse = [](const ShuffleVectorSDNode *SVN) -> bool {
14544     auto Mask = SVN->getMask();
14545     int i = 0;
14546     auto I = Mask.rbegin();
14547     auto E = Mask.rend();
14548 
14549     for (; I != E; ++I) {
14550       if (*I != i)
14551         return false;
14552       i++;
14553     }
14554     return true;
14555   };
14556 
14557   SelectionDAG &DAG = DCI.DAG;
14558   EVT VT = SVN->getValueType(0);
14559 
14560   if (!isTypeLegal(VT) || !Subtarget.isLittleEndian() || !Subtarget.hasVSX())
14561     return SDValue();
14562 
  // Before P9, the PPCVSXSwapRemoval pass hacks the element order (see the
  // comment in PPCVSXSwapRemoval.cpp). This combine conflicts with that
  // optimization, so we do not perform it on pre-P9 subtargets.
14566   if (!Subtarget.hasP9Vector())
14567     return SDValue();
14568 
  if (!IsElementReverse(SVN))
14570     return SDValue();
14571 
14572   if (LSBase->getOpcode() == ISD::LOAD) {
    // If the load has any use other than this shufflevector, it is not
    // profitable to replace the shufflevector with a reverse load.
14575     if (!LSBase->hasOneUse())
14576       return SDValue();
14577 
14578     SDLoc dl(SVN);
14579     SDValue LoadOps[] = {LSBase->getChain(), LSBase->getBasePtr()};
14580     return DAG.getMemIntrinsicNode(
14581         PPCISD::LOAD_VEC_BE, dl, DAG.getVTList(VT, MVT::Other), LoadOps,
14582         LSBase->getMemoryVT(), LSBase->getMemOperand());
14583   }
14584 
14585   if (LSBase->getOpcode() == ISD::STORE) {
14586     // If there are other uses of the shuffle, the swap cannot be avoided.
14587     // Forcing the use of an X-Form (since swapped stores only have
14588     // X-Forms) without removing the swap is unprofitable.
14589     if (!SVN->hasOneUse())
14590       return SDValue();
14591 
14592     SDLoc dl(LSBase);
14593     SDValue StoreOps[] = {LSBase->getChain(), SVN->getOperand(0),
14594                           LSBase->getBasePtr()};
14595     return DAG.getMemIntrinsicNode(
14596         PPCISD::STORE_VEC_BE, dl, DAG.getVTList(MVT::Other), StoreOps,
14597         LSBase->getMemoryVT(), LSBase->getMemOperand());
14598   }
14599 
14600   llvm_unreachable("Expected a load or store node here");
14601 }
14602 
14603 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
14604                                              DAGCombinerInfo &DCI) const {
14605   SelectionDAG &DAG = DCI.DAG;
14606   SDLoc dl(N);
14607   switch (N->getOpcode()) {
14608   default: break;
14609   case ISD::ADD:
14610     return combineADD(N, DCI);
14611   case ISD::SHL:
14612     return combineSHL(N, DCI);
14613   case ISD::SRA:
14614     return combineSRA(N, DCI);
14615   case ISD::SRL:
14616     return combineSRL(N, DCI);
14617   case ISD::MUL:
14618     return combineMUL(N, DCI);
14619   case ISD::FMA:
14620   case PPCISD::FNMSUB:
14621     return combineFMALike(N, DCI);
14622   case PPCISD::SHL:
14623     if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
      return N->getOperand(0);
14625     break;
14626   case PPCISD::SRL:
14627     if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
      return N->getOperand(0);
14629     break;
14630   case PPCISD::SRA:
14631     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
14632       if (C->isNullValue() ||   //  0 >>s V -> 0.
14633           C->isAllOnesValue())    // -1 >>s V -> -1.
14634         return N->getOperand(0);
14635     }
14636     break;
14637   case ISD::SIGN_EXTEND:
14638   case ISD::ZERO_EXTEND:
14639   case ISD::ANY_EXTEND:
14640     return DAGCombineExtBoolTrunc(N, DCI);
14641   case ISD::TRUNCATE:
14642     return combineTRUNCATE(N, DCI);
14643   case ISD::SETCC:
14644     if (SDValue CSCC = combineSetCC(N, DCI))
14645       return CSCC;
14646     LLVM_FALLTHROUGH;
14647   case ISD::SELECT_CC:
14648     return DAGCombineTruncBoolExt(N, DCI);
14649   case ISD::SINT_TO_FP:
14650   case ISD::UINT_TO_FP:
14651     return combineFPToIntToFP(N, DCI);
14652   case ISD::VECTOR_SHUFFLE:
14653     if (ISD::isNormalLoad(N->getOperand(0).getNode())) {
14654       LSBaseSDNode* LSBase = cast<LSBaseSDNode>(N->getOperand(0));
14655       return combineVReverseMemOP(cast<ShuffleVectorSDNode>(N), LSBase, DCI);
14656     }
14657     return combineVectorShuffle(cast<ShuffleVectorSDNode>(N), DCI.DAG);
  case ISD::STORE: {
14660     EVT Op1VT = N->getOperand(1).getValueType();
14661     unsigned Opcode = N->getOperand(1).getOpcode();
14662 
14663     if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) {
      SDValue Val = combineStoreFPToInt(N, DCI);
14665       if (Val)
14666         return Val;
14667     }
14668 
14669     if (Opcode == ISD::VECTOR_SHUFFLE && ISD::isNormalStore(N)) {
14670       ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N->getOperand(1));
      SDValue Val = combineVReverseMemOP(SVN, cast<LSBaseSDNode>(N), DCI);
14672       if (Val)
14673         return Val;
14674     }
14675 
14676     // Turn STORE (BSWAP) -> sthbrx/stwbrx.
14677     if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&
14678         N->getOperand(1).getNode()->hasOneUse() &&
14679         (Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
14680          (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {
14681 
      // STBRX can only handle simple types and it makes no sense to store
      // fewer than two bytes in byte-reversed order.
14684       EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
14685       if (mVT.isExtended() || mVT.getSizeInBits() < 16)
14686         break;
14687 
14688       SDValue BSwapOp = N->getOperand(1).getOperand(0);
14689       // Do an any-extend to 32-bits if this is a half-word input.
14690       if (BSwapOp.getValueType() == MVT::i16)
14691         BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
14692 
      // If the type of the BSWAP operand is wider than the stored memory
      // width, it needs to be shifted right before the STBRX.
14695       if (Op1VT.bitsGT(mVT)) {
14696         int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
14697         BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
14698                               DAG.getConstant(Shift, dl, MVT::i32));
14699         // Need to truncate if this is a bswap of i64 stored as i32/i16.
14700         if (Op1VT == MVT::i64)
14701           BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
14702       }
14703 
14704       SDValue Ops[] = {
14705         N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
14706       };
14707       return
14708         DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
14709                                 Ops, cast<StoreSDNode>(N)->getMemoryVT(),
14710                                 cast<StoreSDNode>(N)->getMemOperand());
14711     }
14712 
14713     // STORE Constant:i32<0>  ->  STORE<trunc to i32> Constant:i64<0>
14714     // So it can increase the chance of CSE constant construction.
14715     if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
14716         isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
      // We need to sign-extend to 64 bits to handle negative values.
14718       EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
14719       uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
14720                                     MemVT.getSizeInBits());
14721       SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);
14722 
14723       // DAG.getTruncStore() can't be used here because it doesn't accept
14724       // the general (base + offset) addressing mode.
14725       // So we use UpdateNodeOperands and setTruncatingStore instead.
14726       DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
14727                              N->getOperand(3));
14728       cast<StoreSDNode>(N)->setTruncatingStore(true);
14729       return SDValue(N, 0);
14730     }
14731 
    // For little endian, VSX stores require generating xxswapd/stxvd2x.
14733     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
14734     if (Op1VT.isSimple()) {
14735       MVT StoreVT = Op1VT.getSimpleVT();
14736       if (Subtarget.needsSwapsForVSXMemOps() &&
14737           (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
14738            StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
14739         return expandVSXStoreForLE(N, DCI);
14740     }
14741     break;
14742   }
14743   case ISD::LOAD: {
14744     LoadSDNode *LD = cast<LoadSDNode>(N);
14745     EVT VT = LD->getValueType(0);
14746 
14747     // For little endian, VSX loads require generating lxvd2x/xxswapd.
14748     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
14749     if (VT.isSimple()) {
14750       MVT LoadVT = VT.getSimpleVT();
14751       if (Subtarget.needsSwapsForVSXMemOps() &&
14752           (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
14753            LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
14754         return expandVSXLoadForLE(N, DCI);
14755     }
14756 
14757     // We sometimes end up with a 64-bit integer load, from which we extract
14758     // two single-precision floating-point numbers. This happens with
    // std::complex<float> and other similar structures, because of the way we
14760     // canonicalize structure copies. However, if we lack direct moves,
14761     // then the final bitcasts from the extracted integer values to the
14762     // floating-point numbers turn into store/load pairs. Even with direct moves,
14763     // just loading the two floating-point numbers is likely better.
14764     auto ReplaceTwoFloatLoad = [&]() {
14765       if (VT != MVT::i64)
14766         return false;
14767 
14768       if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
14769           LD->isVolatile())
14770         return false;
14771 
14772       //  We're looking for a sequence like this:
14773       //  t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64
14774       //      t16: i64 = srl t13, Constant:i32<32>
14775       //    t17: i32 = truncate t16
14776       //  t18: f32 = bitcast t17
14777       //    t19: i32 = truncate t13
14778       //  t20: f32 = bitcast t19
14779 
14780       if (!LD->hasNUsesOfValue(2, 0))
14781         return false;
14782 
14783       auto UI = LD->use_begin();
14784       while (UI.getUse().getResNo() != 0) ++UI;
14785       SDNode *Trunc = *UI++;
14786       while (UI.getUse().getResNo() != 0) ++UI;
14787       SDNode *RightShift = *UI;
14788       if (Trunc->getOpcode() != ISD::TRUNCATE)
14789         std::swap(Trunc, RightShift);
14790 
14791       if (Trunc->getOpcode() != ISD::TRUNCATE ||
14792           Trunc->getValueType(0) != MVT::i32 ||
14793           !Trunc->hasOneUse())
14794         return false;
14795       if (RightShift->getOpcode() != ISD::SRL ||
14796           !isa<ConstantSDNode>(RightShift->getOperand(1)) ||
14797           RightShift->getConstantOperandVal(1) != 32 ||
14798           !RightShift->hasOneUse())
14799         return false;
14800 
14801       SDNode *Trunc2 = *RightShift->use_begin();
14802       if (Trunc2->getOpcode() != ISD::TRUNCATE ||
14803           Trunc2->getValueType(0) != MVT::i32 ||
14804           !Trunc2->hasOneUse())
14805         return false;
14806 
14807       SDNode *Bitcast = *Trunc->use_begin();
14808       SDNode *Bitcast2 = *Trunc2->use_begin();
14809 
14810       if (Bitcast->getOpcode() != ISD::BITCAST ||
14811           Bitcast->getValueType(0) != MVT::f32)
14812         return false;
14813       if (Bitcast2->getOpcode() != ISD::BITCAST ||
14814           Bitcast2->getValueType(0) != MVT::f32)
14815         return false;
14816 
14817       if (Subtarget.isLittleEndian())
14818         std::swap(Bitcast, Bitcast2);
14819 
14820       // Bitcast has the second float (in memory-layout order) and Bitcast2
14821       // has the first one.
14822 
14823       SDValue BasePtr = LD->getBasePtr();
14824       if (LD->isIndexed()) {
14825         assert(LD->getAddressingMode() == ISD::PRE_INC &&
14826                "Non-pre-inc AM on PPC?");
14827         BasePtr =
14828           DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
14829                       LD->getOffset());
14830       }
14831 
14832       auto MMOFlags =
14833           LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
14834       SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
14835                                       LD->getPointerInfo(), LD->getAlignment(),
14836                                       MMOFlags, LD->getAAInfo());
14837       SDValue AddPtr =
14838         DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
14839                     BasePtr, DAG.getIntPtrConstant(4, dl));
14840       SDValue FloatLoad2 = DAG.getLoad(
14841           MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
14842           LD->getPointerInfo().getWithOffset(4),
14843           MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo());
14844 
14845       if (LD->isIndexed()) {
14846         // Note that DAGCombine should re-form any pre-increment load(s) from
14847         // what is produced here if that makes sense.
14848         DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr);
14849       }
14850 
14851       DCI.CombineTo(Bitcast2, FloatLoad);
14852       DCI.CombineTo(Bitcast, FloatLoad2);
14853 
14854       DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 2 : 1),
14855                                     SDValue(FloatLoad2.getNode(), 1));
14856       return true;
14857     };
14858 
14859     if (ReplaceTwoFloatLoad())
14860       return SDValue(N, 0);
14861 
14862     EVT MemVT = LD->getMemoryVT();
14863     Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
14864     Align ABIAlignment = DAG.getDataLayout().getABITypeAlign(Ty);
14865     if (LD->isUnindexed() && VT.isVector() &&
14866         ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
14867           // P8 and later hardware should just use LOAD.
14868           !Subtarget.hasP8Vector() &&
14869           (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
14870            VT == MVT::v4f32))) &&
14871         LD->getAlign() < ABIAlignment) {
14872       // This is a type-legal unaligned Altivec load.
14873       SDValue Chain = LD->getChain();
14874       SDValue Ptr = LD->getBasePtr();
14875       bool isLittleEndian = Subtarget.isLittleEndian();
14876 
14877       // This implements the loading of unaligned vectors as described in
14878       // the venerable Apple Velocity Engine overview. Specifically:
14879       // https://developer.apple.com/hardwaredrivers/ve/alignment.html
14880       // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
14881       //
14882       // The general idea is to expand a sequence of one or more unaligned
14883       // loads into an alignment-based permutation-control instruction (lvsl
14884       // or lvsr), a series of regular vector loads (which always truncate
14885       // their input address to an aligned address), and a series of
14886       // permutations.  The results of these permutations are the requested
14887       // loaded values.  The trick is that the last "extra" load is not taken
14888       // from the address you might suspect (sizeof(vector) bytes after the
14889       // last requested load), but rather sizeof(vector) - 1 bytes after the
14890       // last requested vector. The point of this is to avoid a page fault if
14891       // the base address happened to be aligned. This works because if the
14892       // base address is aligned, then adding less than a full vector length
14893       // will cause the last vector in the sequence to be (re)loaded.
14894       // Otherwise, the next vector will be fetched as you might suspect was
14895       // necessary.
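      //
      // Roughly, the expansion is (a sketch, not the exact nodes built
      // below; little endian uses lvsr and swapped vperm inputs instead):
      //   %cntl = lvsl %ptr            ; permute control from the alignment
      //   %lo   = lvx %ptr             ; aligned load covering the start
      //   %hi   = lvx %ptr + 15        ; aligned load covering the end
      //   %res  = vperm %lo, %hi, %cntl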
14896 
14897       // We might be able to reuse the permutation generation from
14898       // a different base address offset from this one by an aligned amount.
14899       // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
14900       // optimization later.
14901       Intrinsic::ID Intr, IntrLD, IntrPerm;
14902       MVT PermCntlTy, PermTy, LDTy;
14903       Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr
14904                             : Intrinsic::ppc_altivec_lvsl;
14905       IntrLD = Intrinsic::ppc_altivec_lvx;
14906       IntrPerm = Intrinsic::ppc_altivec_vperm;
14907       PermCntlTy = MVT::v16i8;
14908       PermTy = MVT::v4i32;
14909       LDTy = MVT::v4i32;
14910 
14911       SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);
14912 
14913       // Create the new MMO for the new base load. It is like the original MMO,
14914       // but represents an area in memory almost twice the vector size centered
14915       // on the original address. If the address is unaligned, we might start
14916       // reading up to (sizeof(vector)-1) bytes below the address of the
14917       // original unaligned load.
14918       MachineFunction &MF = DAG.getMachineFunction();
14919       MachineMemOperand *BaseMMO =
14920         MF.getMachineMemOperand(LD->getMemOperand(),
14921                                 -(long)MemVT.getStoreSize()+1,
14922                                 2*MemVT.getStoreSize()-1);
14923 
14924       // Create the new base load.
14925       SDValue LDXIntID =
14926           DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout()));
14927       SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
14928       SDValue BaseLoad =
14929         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
14930                                 DAG.getVTList(PermTy, MVT::Other),
14931                                 BaseLoadOps, LDTy, BaseMMO);
14932 
      // Note that the value of IncOffset (which is provided to the next
      // load's pointer info offset value, and thus used to calculate the
      // alignment) and the value of IncValue (which is actually used to
      // increment the pointer value) may differ! This is because we
      // require the next load to appear to be aligned, even though it
      // is actually offset from the base pointer by a lesser amount.
14939       int IncOffset = VT.getSizeInBits() / 8;
14940       int IncValue = IncOffset;
14941 
14942       // Walk (both up and down) the chain looking for another load at the real
14943       // (aligned) offset (the alignment of the other load does not matter in
14944       // this case). If found, then do not use the offset reduction trick, as
14945       // that will prevent the loads from being later combined (as they would
14946       // otherwise be duplicates).
14947       if (!findConsecutiveLoad(LD, DAG))
14948         --IncValue;
14949 
14950       SDValue Increment =
14951           DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout()));
14952       Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
14953 
14954       MachineMemOperand *ExtraMMO =
14955         MF.getMachineMemOperand(LD->getMemOperand(),
14956                                 1, 2*MemVT.getStoreSize()-1);
14957       SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
14958       SDValue ExtraLoad =
14959         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
14960                                 DAG.getVTList(PermTy, MVT::Other),
14961                                 ExtraLoadOps, LDTy, ExtraMMO);
14962 
14963       SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
14964         BaseLoad.getValue(1), ExtraLoad.getValue(1));
14965 
14966       // Because vperm has a big-endian bias, we must reverse the order
14967       // of the input vectors and complement the permute control vector
14968       // when generating little endian code.  We have already handled the
14969       // latter by using lvsr instead of lvsl, so just reverse BaseLoad
14970       // and ExtraLoad here.
14971       SDValue Perm;
14972       if (isLittleEndian)
14973         Perm = BuildIntrinsicOp(IntrPerm,
14974                                 ExtraLoad, BaseLoad, PermCntl, DAG, dl);
14975       else
14976         Perm = BuildIntrinsicOp(IntrPerm,
14977                                 BaseLoad, ExtraLoad, PermCntl, DAG, dl);
14978 
14979       if (VT != PermTy)
14980         Perm = Subtarget.hasAltivec()
14981                    ? DAG.getNode(ISD::BITCAST, dl, VT, Perm)
14982                    : DAG.getNode(ISD::FP_ROUND, dl, VT, Perm,
14983                                  DAG.getTargetConstant(1, dl, MVT::i64));
14984                                // second argument is 1 because this rounding
14985                                // is always exact.
14986 
14987       // The output of the permutation is our loaded result, the TokenFactor is
14988       // our new chain.
14989       DCI.CombineTo(N, Perm, TF);
14990       return SDValue(N, 0);
14991     }
14992     }
14993     break;
14994     case ISD::INTRINSIC_WO_CHAIN: {
14995       bool isLittleEndian = Subtarget.isLittleEndian();
14996       unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
14997       Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
14998                                            : Intrinsic::ppc_altivec_lvsl);
14999       if (IID == Intr && N->getOperand(1)->getOpcode() == ISD::ADD) {
15000         SDValue Add = N->getOperand(1);
15001 
15002         int Bits = 4 /* 16 byte alignment */;
15003 
15004         if (DAG.MaskedValueIsZero(Add->getOperand(1),
                                  APInt::getAllOnesValue(Bits /* log2(alignment) */)
15006                                       .zext(Add.getScalarValueSizeInBits()))) {
15007           SDNode *BasePtr = Add->getOperand(0).getNode();
15008           for (SDNode::use_iterator UI = BasePtr->use_begin(),
15009                                     UE = BasePtr->use_end();
15010                UI != UE; ++UI) {
15011             if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
15012                 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() ==
15013                     IID) {
15014               // We've found another LVSL/LVSR, and this address is an aligned
15015               // multiple of that one. The results will be the same, so use the
15016               // one we've just found instead.
15017 
15018               return SDValue(*UI, 0);
15019             }
15020           }
15021         }
15022 
15023         if (isa<ConstantSDNode>(Add->getOperand(1))) {
15024           SDNode *BasePtr = Add->getOperand(0).getNode();
15025           for (SDNode::use_iterator UI = BasePtr->use_begin(),
15026                UE = BasePtr->use_end(); UI != UE; ++UI) {
15027             if (UI->getOpcode() == ISD::ADD &&
15028                 isa<ConstantSDNode>(UI->getOperand(1)) &&
15029                 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
15030                  cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
15031                 (1ULL << Bits) == 0) {
15032               SDNode *OtherAdd = *UI;
15033               for (SDNode::use_iterator VI = OtherAdd->use_begin(),
15034                    VE = OtherAdd->use_end(); VI != VE; ++VI) {
15035                 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
15036                     cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) {
15037                   return SDValue(*VI, 0);
15038                 }
15039               }
15040             }
15041           }
15042         }
15043       }
15044 
      // Combine vmaxsw/h/b(a, a's negation) into abs(a).
      // This exposes the vabsduw/h/b opportunity for downstream combines.
15047       if (!DCI.isAfterLegalizeDAG() && Subtarget.hasP9Altivec() &&
15048           (IID == Intrinsic::ppc_altivec_vmaxsw ||
15049            IID == Intrinsic::ppc_altivec_vmaxsh ||
15050            IID == Intrinsic::ppc_altivec_vmaxsb)) {
15051         SDValue V1 = N->getOperand(1);
15052         SDValue V2 = N->getOperand(2);
15053         if ((V1.getSimpleValueType() == MVT::v4i32 ||
15054              V1.getSimpleValueType() == MVT::v8i16 ||
15055              V1.getSimpleValueType() == MVT::v16i8) &&
15056             V1.getSimpleValueType() == V2.getSimpleValueType()) {
15057           // (0-a, a)
15058           if (V1.getOpcode() == ISD::SUB &&
15059               ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
15060               V1.getOperand(1) == V2) {
15061             return DAG.getNode(ISD::ABS, dl, V2.getValueType(), V2);
15062           }
15063           // (a, 0-a)
15064           if (V2.getOpcode() == ISD::SUB &&
15065               ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
15066               V2.getOperand(1) == V1) {
15067             return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
15068           }
15069           // (x-y, y-x)
15070           if (V1.getOpcode() == ISD::SUB && V2.getOpcode() == ISD::SUB &&
15071               V1.getOperand(0) == V2.getOperand(1) &&
15072               V1.getOperand(1) == V2.getOperand(0)) {
15073             return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
15074           }
15075         }
15076       }
15077     }
15078 
15079     break;
15080   case ISD::INTRINSIC_W_CHAIN:
15081     // For little endian, VSX loads require generating lxvd2x/xxswapd.
15082     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
15083     if (Subtarget.needsSwapsForVSXMemOps()) {
15084       switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
15085       default:
15086         break;
15087       case Intrinsic::ppc_vsx_lxvw4x:
15088       case Intrinsic::ppc_vsx_lxvd2x:
15089         return expandVSXLoadForLE(N, DCI);
15090       }
15091     }
15092     break;
15093   case ISD::INTRINSIC_VOID:
15094     // For little endian, VSX stores require generating xxswapd/stxvd2x.
15095     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
15096     if (Subtarget.needsSwapsForVSXMemOps()) {
15097       switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
15098       default:
15099         break;
15100       case Intrinsic::ppc_vsx_stxvw4x:
15101       case Intrinsic::ppc_vsx_stxvd2x:
15102         return expandVSXStoreForLE(N, DCI);
15103       }
15104     }
15105     break;
15106   case ISD::BSWAP:
15107     // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
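    // For example, (i32 (bswap (load X))) becomes a single lwbrx
    // (byte-reversed load) instead of a load plus a shift/rotate sequence.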
15108     if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
15109         N->getOperand(0).hasOneUse() &&
15110         (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
15111          (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
15112           N->getValueType(0) == MVT::i64))) {
15113       SDValue Load = N->getOperand(0);
15114       LoadSDNode *LD = cast<LoadSDNode>(Load);
15115       // Create the byte-swapping load.
15116       SDValue Ops[] = {
15117         LD->getChain(),    // Chain
15118         LD->getBasePtr(),  // Ptr
15119         DAG.getValueType(N->getValueType(0)) // VT
15120       };
15121       SDValue BSLoad =
15122         DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
15123                                 DAG.getVTList(N->getValueType(0) == MVT::i64 ?
15124                                               MVT::i64 : MVT::i32, MVT::Other),
15125                                 Ops, LD->getMemoryVT(), LD->getMemOperand());
15126 
15127       // If this is an i16 load, insert the truncate.
15128       SDValue ResVal = BSLoad;
15129       if (N->getValueType(0) == MVT::i16)
15130         ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
15131 
15132       // First, combine the bswap away.  This makes the value produced by the
15133       // load dead.
15134       DCI.CombineTo(N, ResVal);
15135 
15136       // Next, combine the load away, we give it a bogus result value but a real
15137       // chain result.  The result value is dead because the bswap is dead.
15138       DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
15139 
15140       // Return N so it doesn't get rechecked!
15141       return SDValue(N, 0);
15142     }
15143     break;
15144   case PPCISD::VCMP:
15145     // If a VCMP_rec node already exists with exactly the same operands as this
15146     // node, use its result instead of this node (VCMP_rec computes both a CR6
15147     // and a normal output).
15148     //
15149     if (!N->getOperand(0).hasOneUse() &&
15150         !N->getOperand(1).hasOneUse() &&
15151         !N->getOperand(2).hasOneUse()) {
15152 
15153       // Scan all of the users of the LHS, looking for VCMP_rec's that match.
15154       SDNode *VCMPrecNode = nullptr;
15155 
15156       SDNode *LHSN = N->getOperand(0).getNode();
15157       for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
15158            UI != E; ++UI)
15159         if (UI->getOpcode() == PPCISD::VCMP_rec &&
15160             UI->getOperand(1) == N->getOperand(1) &&
15161             UI->getOperand(2) == N->getOperand(2) &&
15162             UI->getOperand(0) == N->getOperand(0)) {
15163           VCMPrecNode = *UI;
15164           break;
15165         }
15166 
      // If there is no VCMP_rec node, or if its flag result is unused,
      // don't transform this.
15169       if (!VCMPrecNode || VCMPrecNode->hasNUsesOfValue(0, 1))
15170         break;
15171 
15172       // Look at the (necessarily single) use of the flag value.  If it has a
15173       // chain, this transformation is more complex.  Note that multiple things
15174       // could use the value result, which we should ignore.
15175       SDNode *FlagUser = nullptr;
15176       for (SDNode::use_iterator UI = VCMPrecNode->use_begin();
15177            FlagUser == nullptr; ++UI) {
15178         assert(UI != VCMPrecNode->use_end() && "Didn't find user!");
15179         SDNode *User = *UI;
15180         for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
15181           if (User->getOperand(i) == SDValue(VCMPrecNode, 1)) {
15182             FlagUser = User;
15183             break;
15184           }
15185         }
15186       }
15187 
      // If the user is an MFOCRF instruction, we know this is safe.
      // Otherwise we give up for now.
15190       if (FlagUser->getOpcode() == PPCISD::MFOCRF)
15191         return SDValue(VCMPrecNode, 0);
15192     }
15193     break;
15194   case ISD::BRCOND: {
15195     SDValue Cond = N->getOperand(1);
15196     SDValue Target = N->getOperand(2);
15197 
15198     if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
15199         cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
15200           Intrinsic::loop_decrement) {
15201 
15202       // We now need to make the intrinsic dead (it cannot be instruction
15203       // selected).
15204       DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0));
15205       assert(Cond.getNode()->hasOneUse() &&
15206              "Counter decrement has more than one use");
15207 
15208       return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other,
15209                          N->getOperand(0), Target);
15210     }
15211   }
15212   break;
15213   case ISD::BR_CC: {
15214     // If this is a branch on an altivec predicate comparison, lower this so
15215     // that we don't have to do a MFOCRF: instead, branch directly on CR6.  This
15216     // lowering is done pre-legalize, because the legalizer lowers the predicate
15217     // compare down to code that is difficult to reassemble.
15218     ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
15219     SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);
15220 
    // Sometimes the promoted value of the intrinsic is ANDed with some
    // non-zero value. If so, look through the AND to get to the intrinsic.
15223     if (LHS.getOpcode() == ISD::AND &&
15224         LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
15225         cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() ==
15226           Intrinsic::loop_decrement &&
15227         isa<ConstantSDNode>(LHS.getOperand(1)) &&
15228         !isNullConstant(LHS.getOperand(1)))
15229       LHS = LHS.getOperand(0);
15230 
15231     if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
15232         cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
15233           Intrinsic::loop_decrement &&
15234         isa<ConstantSDNode>(RHS)) {
15235       assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
15236              "Counter decrement comparison is not EQ or NE");
15237 
15238       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
15239       bool isBDNZ = (CC == ISD::SETEQ && Val) ||
15240                     (CC == ISD::SETNE && !Val);
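      // BDNZ branches when the decremented counter is nonzero, which is what
      // the comparison requests when it is (EQ 1) or (NE 0); otherwise we
      // need BDZ.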
15241 
15242       // We now need to make the intrinsic dead (it cannot be instruction
15243       // selected).
15244       DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
15245       assert(LHS.getNode()->hasOneUse() &&
15246              "Counter decrement has more than one use");
15247 
15248       return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
15249                          N->getOperand(0), N->getOperand(4));
15250     }
15251 
15252     int CompareOpc;
15253     bool isDot;
15254 
15255     if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
15256         isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
15257         getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
15258       assert(isDot && "Can't compare against a vector result!");
15259 
15260       // If this is a comparison against something other than 0/1, then we know
15261       // that the condition is never/always true.
15262       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
15263       if (Val != 0 && Val != 1) {
15264         if (CC == ISD::SETEQ)      // Cond never true, remove branch.
15265           return N->getOperand(0);
15266         // Always !=, turn it into an unconditional branch.
15267         return DAG.getNode(ISD::BR, dl, MVT::Other,
15268                            N->getOperand(0), N->getOperand(4));
15269       }
15270 
15271       bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
15272 
15273       // Create the PPCISD altivec 'dot' comparison node.
15274       SDValue Ops[] = {
15275         LHS.getOperand(2),  // LHS of compare
15276         LHS.getOperand(3),  // RHS of compare
15277         DAG.getConstant(CompareOpc, dl, MVT::i32)
15278       };
15279       EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
15280       SDValue CompNode = DAG.getNode(PPCISD::VCMP_rec, dl, VTs, Ops);
15281 
15282       // Unpack the result based on how the target uses it.
15283       PPC::Predicate CompOpc;
15284       switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
15285       default:  // Can't happen, don't crash on invalid number though.
15286       case 0:   // Branch on the value of the EQ bit of CR6.
15287         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
15288         break;
15289       case 1:   // Branch on the inverted value of the EQ bit of CR6.
15290         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
15291         break;
15292       case 2:   // Branch on the value of the LT bit of CR6.
15293         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
15294         break;
15295       case 3:   // Branch on the inverted value of the LT bit of CR6.
15296         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
15297         break;
15298       }
15299 
15300       return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
15301                          DAG.getConstant(CompOpc, dl, MVT::i32),
15302                          DAG.getRegister(PPC::CR6, MVT::i32),
15303                          N->getOperand(4), CompNode.getValue(1));
15304     }
15305     break;
15306   }
15307   case ISD::BUILD_VECTOR:
15308     return DAGCombineBuildVector(N, DCI);
15309   case ISD::ABS:
15310     return combineABS(N, DCI);
15311   case ISD::VSELECT:
15312     return combineVSelect(N, DCI);
15313   }
15314 
15315   return SDValue();
15316 }
15317 
15318 SDValue
15319 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
15320                                  SelectionDAG &DAG,
15321                                  SmallVectorImpl<SDNode *> &Created) const {
15322   // fold (sdiv X, pow2)
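  // For example, on 32-bit targets (sdiv X, 4) lowers to roughly:
  //   srawi r4, r3, 2   ; CA is set if X is negative and 1-bits were shifted out
  //   addze r4, r4      ; add CA to round the result toward zero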
15323   EVT VT = N->getValueType(0);
15324   if (VT == MVT::i64 && !Subtarget.isPPC64())
15325     return SDValue();
15326   if ((VT != MVT::i32 && VT != MVT::i64) ||
15327       !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
15328     return SDValue();
15329 
15330   SDLoc DL(N);
15331   SDValue N0 = N->getOperand(0);
15332 
15333   bool IsNegPow2 = (-Divisor).isPowerOf2();
15334   unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
15335   SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);
15336 
15337   SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
15338   Created.push_back(Op.getNode());
15339 
15340   if (IsNegPow2) {
15341     Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
15342     Created.push_back(Op.getNode());
15343   }
15344 
15345   return Op;
15346 }
15347 
15348 //===----------------------------------------------------------------------===//
15349 // Inline Assembly Support
15350 //===----------------------------------------------------------------------===//
15351 
15352 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
15353                                                       KnownBits &Known,
15354                                                       const APInt &DemandedElts,
15355                                                       const SelectionDAG &DAG,
15356                                                       unsigned Depth) const {
15357   Known.resetAll();
15358   switch (Op.getOpcode()) {
15359   default: break;
15360   case PPCISD::LBRX: {
15361     // lhbrx is known to have the top bits cleared out.
15362     if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
15363       Known.Zero = 0xFFFF0000;
15364     break;
15365   }
15366   case ISD::INTRINSIC_WO_CHAIN: {
15367     switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
15368     default: break;
15369     case Intrinsic::ppc_altivec_vcmpbfp_p:
15370     case Intrinsic::ppc_altivec_vcmpeqfp_p:
15371     case Intrinsic::ppc_altivec_vcmpequb_p:
15372     case Intrinsic::ppc_altivec_vcmpequh_p:
15373     case Intrinsic::ppc_altivec_vcmpequw_p:
15374     case Intrinsic::ppc_altivec_vcmpequd_p:
15375     case Intrinsic::ppc_altivec_vcmpequq_p:
15376     case Intrinsic::ppc_altivec_vcmpgefp_p:
15377     case Intrinsic::ppc_altivec_vcmpgtfp_p:
15378     case Intrinsic::ppc_altivec_vcmpgtsb_p:
15379     case Intrinsic::ppc_altivec_vcmpgtsh_p:
15380     case Intrinsic::ppc_altivec_vcmpgtsw_p:
15381     case Intrinsic::ppc_altivec_vcmpgtsd_p:
15382     case Intrinsic::ppc_altivec_vcmpgtsq_p:
15383     case Intrinsic::ppc_altivec_vcmpgtub_p:
15384     case Intrinsic::ppc_altivec_vcmpgtuh_p:
15385     case Intrinsic::ppc_altivec_vcmpgtuw_p:
15386     case Intrinsic::ppc_altivec_vcmpgtud_p:
15387     case Intrinsic::ppc_altivec_vcmpgtuq_p:
15388       Known.Zero = ~1U;  // All bits but the low one are known to be zero.
15389       break;
15390     }
15391   }
15392   }
15393 }
15394 
15395 Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
15396   switch (Subtarget.getCPUDirective()) {
15397   default: break;
15398   case PPC::DIR_970:
15399   case PPC::DIR_PWR4:
15400   case PPC::DIR_PWR5:
15401   case PPC::DIR_PWR5X:
15402   case PPC::DIR_PWR6:
15403   case PPC::DIR_PWR6X:
15404   case PPC::DIR_PWR7:
15405   case PPC::DIR_PWR8:
15406   case PPC::DIR_PWR9:
15407   case PPC::DIR_PWR10:
15408   case PPC::DIR_PWR_FUTURE: {
15409     if (!ML)
15410       break;
15411 
15412     if (!DisableInnermostLoopAlign32) {
      // If this is a nested innermost loop, prefer a 32-byte alignment so
      // that we can decrease cache misses and branch-prediction misses.
15415       // Actual alignment of the loop will depend on the hotness check and other
15416       // logic in alignBlocks.
15417       if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
15418         return Align(32);
15419     }
15420 
15421     const PPCInstrInfo *TII = Subtarget.getInstrInfo();
15422 
15423     // For small loops (between 5 and 8 instructions), align to a 32-byte
15424     // boundary so that the entire loop fits in one instruction-cache line.
15425     uint64_t LoopSize = 0;
15426     for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
15427       for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
15428         LoopSize += TII->getInstSizeInBytes(*J);
15429         if (LoopSize > 32)
15430           break;
15431       }
15432 
15433     if (LoopSize > 16 && LoopSize <= 32)
15434       return Align(32);
15435 
15436     break;
15437   }
15438   }
15439 
15440   return TargetLowering::getPrefLoopAlignment(ML);
15441 }
15442 
15443 /// getConstraintType - Given a constraint, return the type of
15444 /// constraint it is for this target.
15445 PPCTargetLowering::ConstraintType
15446 PPCTargetLowering::getConstraintType(StringRef Constraint) const {
15447   if (Constraint.size() == 1) {
15448     switch (Constraint[0]) {
15449     default: break;
15450     case 'b':
15451     case 'r':
15452     case 'f':
15453     case 'd':
15454     case 'v':
15455     case 'y':
15456       return C_RegisterClass;
15457     case 'Z':
15458       // FIXME: While Z does indicate a memory constraint, it specifically
15459       // indicates an r+r address (used in conjunction with the 'y' modifier
15460       // in the replacement string). Currently, we're forcing the base
15461       // register to be r0 in the asm printer (which is interpreted as zero)
15462       // and forming the complete address in the second register. This is
15463       // suboptimal.
15464       return C_Memory;
15465     }
15466   } else if (Constraint == "wc") { // individual CR bits.
15467     return C_RegisterClass;
15468   } else if (Constraint == "wa" || Constraint == "wd" ||
15469              Constraint == "wf" || Constraint == "ws" ||
15470              Constraint == "wi" || Constraint == "ww") {
15471     return C_RegisterClass; // VSX registers.
15472   }
15473   return TargetLowering::getConstraintType(Constraint);
15474 }
15475 
15476 /// Examine constraint type and operand type and determine a weight value.
15477 /// This object must already have been set up with the operand type
15478 /// and the current alternative constraint selected.
15479 TargetLowering::ConstraintWeight
15480 PPCTargetLowering::getSingleConstraintMatchWeight(
15481     AsmOperandInfo &info, const char *constraint) const {
15482   ConstraintWeight weight = CW_Invalid;
15483   Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
15486   if (!CallOperandVal)
15487     return CW_Default;
15488   Type *type = CallOperandVal->getType();
15489 
15490   // Look at the constraint type.
15491   if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
15492     return CW_Register; // an individual CR bit.
15493   else if ((StringRef(constraint) == "wa" ||
15494             StringRef(constraint) == "wd" ||
15495             StringRef(constraint) == "wf") &&
15496            type->isVectorTy())
15497     return CW_Register;
15498   else if (StringRef(constraint) == "wi" && type->isIntegerTy(64))
    return CW_Register; // just holds 64-bit integer data.
15500   else if (StringRef(constraint) == "ws" && type->isDoubleTy())
15501     return CW_Register;
15502   else if (StringRef(constraint) == "ww" && type->isFloatTy())
15503     return CW_Register;
15504 
15505   switch (*constraint) {
15506   default:
15507     weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
15508     break;
15509   case 'b':
15510     if (type->isIntegerTy())
15511       weight = CW_Register;
15512     break;
15513   case 'f':
15514     if (type->isFloatTy())
15515       weight = CW_Register;
15516     break;
15517   case 'd':
15518     if (type->isDoubleTy())
15519       weight = CW_Register;
15520     break;
15521   case 'v':
15522     if (type->isVectorTy())
15523       weight = CW_Register;
15524     break;
15525   case 'y':
15526     weight = CW_Register;
15527     break;
15528   case 'Z':
15529     weight = CW_Memory;
15530     break;
15531   }
15532   return weight;
15533 }
15534 
15535 std::pair<unsigned, const TargetRegisterClass *>
15536 PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
15537                                                 StringRef Constraint,
15538                                                 MVT VT) const {
15539   if (Constraint.size() == 1) {
15540     // GCC RS6000 Constraint Letters
15541     switch (Constraint[0]) {
15542     case 'b':   // R1-R31
15543       if (VT == MVT::i64 && Subtarget.isPPC64())
15544         return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
15545       return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
15546     case 'r':   // R0-R31
15547       if (VT == MVT::i64 && Subtarget.isPPC64())
15548         return std::make_pair(0U, &PPC::G8RCRegClass);
15549       return std::make_pair(0U, &PPC::GPRCRegClass);
15550     // 'd' and 'f' constraints are both defined to be "the floating point
15551     // registers", where one is for 32-bit and the other for 64-bit. We don't
15552     // really care overly much here so just give them all the same reg classes.
15553     case 'd':
15554     case 'f':
15555       if (Subtarget.hasSPE()) {
15556         if (VT == MVT::f32 || VT == MVT::i32)
15557           return std::make_pair(0U, &PPC::GPRCRegClass);
15558         if (VT == MVT::f64 || VT == MVT::i64)
15559           return std::make_pair(0U, &PPC::SPERCRegClass);
15560       } else {
15561         if (VT == MVT::f32 || VT == MVT::i32)
15562           return std::make_pair(0U, &PPC::F4RCRegClass);
15563         if (VT == MVT::f64 || VT == MVT::i64)
15564           return std::make_pair(0U, &PPC::F8RCRegClass);
15565       }
15566       break;
15567     case 'v':
15568       if (Subtarget.hasAltivec())
15569         return std::make_pair(0U, &PPC::VRRCRegClass);
15570       break;
15571     case 'y':   // crrc
15572       return std::make_pair(0U, &PPC::CRRCRegClass);
15573     }
15574   } else if (Constraint == "wc" && Subtarget.useCRBits()) {
15575     // An individual CR bit.
15576     return std::make_pair(0U, &PPC::CRBITRCRegClass);
15577   } else if ((Constraint == "wa" || Constraint == "wd" ||
15578              Constraint == "wf" || Constraint == "wi") &&
15579              Subtarget.hasVSX()) {
15580     return std::make_pair(0U, &PPC::VSRCRegClass);
15581   } else if ((Constraint == "ws" || Constraint == "ww") && Subtarget.hasVSX()) {
15582     if (VT == MVT::f32 && Subtarget.hasP8Vector())
15583       return std::make_pair(0U, &PPC::VSSRCRegClass);
15584     else
15585       return std::make_pair(0U, &PPC::VSFRCRegClass);
15586   }
15587 
15588   // Handle special cases of physical registers that are not properly handled
15589   // by the base class.
15590   if (Constraint[0] == '{' && Constraint[Constraint.size() - 1] == '}') {
15591     // If we name a VSX register, we can't defer to the base class because it
15592     // will not recognize the correct register (their names will be VSL{0-31}
15593     // and V{0-31} so they won't match). So we match them here.
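    // For example, "{vs35}" names VSX register 35, which overlaps Altivec
    // register V3, so we return V3 with the VSRC register class.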
15594     if (Constraint.size() > 3 && Constraint[1] == 'v' && Constraint[2] == 's') {
15595       int VSNum = atoi(Constraint.data() + 3);
15596       assert(VSNum >= 0 && VSNum <= 63 &&
15597              "Attempted to access a vsr out of range");
15598       if (VSNum < 32)
15599         return std::make_pair(PPC::VSL0 + VSNum, &PPC::VSRCRegClass);
15600       return std::make_pair(PPC::V0 + VSNum - 32, &PPC::VSRCRegClass);
15601     }
15602 
15603     // For float registers, we can't defer to the base class as it will match
15604     // the SPILLTOVSRRC class.
15605     if (Constraint.size() > 3 && Constraint[1] == 'f') {
15606       int RegNum = atoi(Constraint.data() + 2);
15607       if (RegNum > 31 || RegNum < 0)
15608         report_fatal_error("Invalid floating point register number");
15609       if (VT == MVT::f32 || VT == MVT::i32)
15610         return Subtarget.hasSPE()
15611                    ? std::make_pair(PPC::R0 + RegNum, &PPC::GPRCRegClass)
15612                    : std::make_pair(PPC::F0 + RegNum, &PPC::F4RCRegClass);
15613       if (VT == MVT::f64 || VT == MVT::i64)
15614         return Subtarget.hasSPE()
15615                    ? std::make_pair(PPC::S0 + RegNum, &PPC::SPERCRegClass)
15616                    : std::make_pair(PPC::F0 + RegNum, &PPC::F8RCRegClass);
15617     }
15618   }
15619 
15620   std::pair<unsigned, const TargetRegisterClass *> R =
15621       TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
15622 
15623   // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
15624   // (which we call X[0-9]+). If a 64-bit value has been requested, and a
15625   // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent
15626   // register.
15627   // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
15628   // the AsmName field from *RegisterInfo.td, then this would not be necessary.
15629   if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
15630       PPC::GPRCRegClass.contains(R.first))
15631     return std::make_pair(TRI->getMatchingSuperReg(R.first,
15632                             PPC::sub_32, &PPC::G8RCRegClass),
15633                           &PPC::G8RCRegClass);
15634 
15635   // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
15636   if (!R.second && StringRef("{cc}").equals_lower(Constraint)) {
15637     R.first = PPC::CR0;
15638     R.second = &PPC::CRRCRegClass;
15639   }
15640   // FIXME: This warning should ideally be emitted in the front end.
15641   const auto &TM = getTargetMachine();
15642   if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI()) {
15643     if (((R.first >= PPC::V20 && R.first <= PPC::V31) ||
15644          (R.first >= PPC::VF20 && R.first <= PPC::VF31)) &&
15645         (R.second == &PPC::VSRCRegClass || R.second == &PPC::VSFRCRegClass))
15646       errs() << "warning: vector registers 20 to 32 are reserved in the "
15647                 "default AIX AltiVec ABI and cannot be used\n";
15648   }
15649 
15650   return R;
15651 }
15652 
15653 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
15654 /// vector.  If it is invalid, don't add anything to Ops.
15655 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
15656                                                      std::string &Constraint,
15657                                                      std::vector<SDValue>&Ops,
15658                                                      SelectionDAG &DAG) const {
15659   SDValue Result;
15660 
15661   // Only support length 1 constraints.
15662   if (Constraint.length() > 1) return;
15663 
15664   char Letter = Constraint[0];
15665   switch (Letter) {
15666   default: break;
15667   case 'I':
15668   case 'J':
15669   case 'K':
15670   case 'L':
15671   case 'M':
15672   case 'N':
15673   case 'O':
15674   case 'P': {
15675     ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
15676     if (!CST) return; // Must be an immediate to match.
15677     SDLoc dl(Op);
15678     int64_t Value = CST->getSExtValue();
15679     EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
15680                          // numbers are printed as such.
15681     switch (Letter) {
15682     default: llvm_unreachable("Unknown constraint letter!");
15683     case 'I':  // "I" is a signed 16-bit constant.
15684       if (isInt<16>(Value))
15685         Result = DAG.getTargetConstant(Value, dl, TCVT);
15686       break;
15687     case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
15688       if (isShiftedUInt<16, 16>(Value))
15689         Result = DAG.getTargetConstant(Value, dl, TCVT);
15690       break;
15691     case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
15692       if (isShiftedInt<16, 16>(Value))
15693         Result = DAG.getTargetConstant(Value, dl, TCVT);
15694       break;
15695     case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
15696       if (isUInt<16>(Value))
15697         Result = DAG.getTargetConstant(Value, dl, TCVT);
15698       break;
15699     case 'M':  // "M" is a constant that is greater than 31.
15700       if (Value > 31)
15701         Result = DAG.getTargetConstant(Value, dl, TCVT);
15702       break;
15703     case 'N':  // "N" is a positive constant that is an exact power of two.
15704       if (Value > 0 && isPowerOf2_64(Value))
15705         Result = DAG.getTargetConstant(Value, dl, TCVT);
15706       break;
15707     case 'O':  // "O" is the constant zero.
15708       if (Value == 0)
15709         Result = DAG.getTargetConstant(Value, dl, TCVT);
15710       break;
15711     case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
15712       if (isInt<16>(-Value))
15713         Result = DAG.getTargetConstant(Value, dl, TCVT);
15714       break;
15715     }
15716     break;
15717   }
15718   }
15719 
15720   if (Result.getNode()) {
15721     Ops.push_back(Result);
15722     return;
15723   }
15724 
15725   // Handle standard constraint letters.
15726   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
15727 }
15728 
15729 // isLegalAddressingMode - Return true if the addressing mode represented
15730 // by AM is legal for this target, for a load/store of the specified type.
15731 bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
15732                                               const AddrMode &AM, Type *Ty,
15733                                               unsigned AS,
15734                                               Instruction *I) const {
  // Vector-type r+i forms are supported as DQ form starting with Power9. We
  // don't check that the offset satisfies the DQ-form requirement
  // (off % 16 == 0) because, on PowerPC, the immediate form is preferred and
  // the offset can be adjusted to use it later in the PPCLoopInstrFormPrep
  // pass. Also, since LSR checks the legality of one LSRUse using its min and
  // max offsets, we should be a little aggressive here and accept other
  // offsets for that LSRUse.
15741   if (Ty->isVectorTy() && AM.BaseOffs != 0 && !Subtarget.hasP9Vector())
15742     return false;
15743 
15744   // PPC allows a sign-extended 16-bit immediate field.
15745   if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
15746     return false;
15747 
15748   // No global is ever allowed as a base.
15749   if (AM.BaseGV)
15750     return false;
15751 
  // PPC only supports r+r or r+i addressing:
15753   switch (AM.Scale) {
15754   case 0:  // "r+i" or just "i", depending on HasBaseReg.
15755     break;
15756   case 1:
15757     if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
15758       return false;
15759     // Otherwise we have r+r or r+i.
15760     break;
15761   case 2:
15762     if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r  or  2*r+i is not allowed.
15763       return false;
15764     // Allow 2*r as r+r.
15765     break;
15766   default:
15767     // No other scales are supported.
15768     return false;
15769   }
15770 
15771   return true;
15772 }
15773 
15774 SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
15775                                            SelectionDAG &DAG) const {
15776   MachineFunction &MF = DAG.getMachineFunction();
15777   MachineFrameInfo &MFI = MF.getFrameInfo();
15778   MFI.setReturnAddressIsTaken(true);
15779 
15780   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
15781     return SDValue();
15782 
15783   SDLoc dl(Op);
15784   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
15785 
15786   // Make sure the function does not optimize away the store of the RA to
15787   // the stack.
15788   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
15789   FuncInfo->setLRStoreRequired();
15790   bool isPPC64 = Subtarget.isPPC64();
15791   auto PtrVT = getPointerTy(MF.getDataLayout());
15792 
15793   if (Depth > 0) {
15794     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
15795     SDValue Offset =
15796         DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
15797                         isPPC64 ? MVT::i64 : MVT::i32);
15798     return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
15799                        DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
15800                        MachinePointerInfo());
15801   }
15802 
15803   // Just load the return address off the stack.
15804   SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
15805   return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
15806                      MachinePointerInfo());
15807 }
15808 
15809 SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
15810                                           SelectionDAG &DAG) const {
15811   SDLoc dl(Op);
15812   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
15813 
15814   MachineFunction &MF = DAG.getMachineFunction();
15815   MachineFrameInfo &MFI = MF.getFrameInfo();
15816   MFI.setFrameAddressIsTaken(true);
15817 
15818   EVT PtrVT = getPointerTy(MF.getDataLayout());
15819   bool isPPC64 = PtrVT == MVT::i64;
15820 
15821   // Naked functions never have a frame pointer, and so we use r1. For all
15822   // other functions, this decision must be delayed until during PEI.
15823   unsigned FrameReg;
15824   if (MF.getFunction().hasFnAttribute(Attribute::Naked))
15825     FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
15826   else
15827     FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;
15828 
15829   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
15830                                          PtrVT);
15831   while (Depth--)
15832     FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
15833                             FrameAddr, MachinePointerInfo());
15834   return FrameAddr;
15835 }
15836 
15837 // FIXME? Maybe this could be a TableGen attribute on some registers and
15838 // this table could be generated automatically from RegInfo.
15839 Register PPCTargetLowering::getRegisterByName(const char* RegName, LLT VT,
15840                                               const MachineFunction &MF) const {
15841   bool isPPC64 = Subtarget.isPPC64();
15842 
15843   bool is64Bit = isPPC64 && VT == LLT::scalar(64);
15844   if (!is64Bit && VT != LLT::scalar(32))
15845     report_fatal_error("Invalid register global variable type");
15846 
15847   Register Reg = StringSwitch<Register>(RegName)
15848                      .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
15849                      .Case("r2", isPPC64 ? Register() : PPC::R2)
15850                      .Case("r13", (is64Bit ? PPC::X13 : PPC::R13))
15851                      .Default(Register());
15852 
15853   if (Reg)
15854     return Reg;
15855   report_fatal_error("Invalid register name global variable");
15856 }
15857 
15858 bool PPCTargetLowering::isAccessedAsGotIndirect(SDValue GA) const {
  // The 32-bit SVR4 ABI accesses everything as got-indirect.
15860   if (Subtarget.is32BitELFABI())
15861     return true;
15862 
15863   // AIX accesses everything indirectly through the TOC, which is similar to
15864   // the GOT.
15865   if (Subtarget.isAIXABI())
15866     return true;
15867 
15868   CodeModel::Model CModel = getTargetMachine().getCodeModel();
  // Under the small and large code models, module locals are accessed
  // indirectly by loading their address from the .toc/.got.
15871   if (CModel == CodeModel::Small || CModel == CodeModel::Large)
15872     return true;
15873 
15874   // JumpTable and BlockAddress are accessed as got-indirect.
15875   if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA))
15876     return true;
15877 
15878   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA))
15879     return Subtarget.isGVIndirectSymbol(G->getGlobal());
15880 
15881   return false;
15882 }
15883 
15884 bool
15885 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
15886   // The PowerPC target isn't yet aware of offsets.
15887   return false;
15888 }
15889 
15890 bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
15891                                            const CallInst &I,
15892                                            MachineFunction &MF,
15893                                            unsigned Intrinsic) const {
15894   switch (Intrinsic) {
15895   case Intrinsic::ppc_altivec_lvx:
15896   case Intrinsic::ppc_altivec_lvxl:
15897   case Intrinsic::ppc_altivec_lvebx:
15898   case Intrinsic::ppc_altivec_lvehx:
15899   case Intrinsic::ppc_altivec_lvewx:
15900   case Intrinsic::ppc_vsx_lxvd2x:
15901   case Intrinsic::ppc_vsx_lxvw4x:
15902   case Intrinsic::ppc_vsx_lxvd2x_be:
15903   case Intrinsic::ppc_vsx_lxvw4x_be:
15904   case Intrinsic::ppc_vsx_lxvl:
15905   case Intrinsic::ppc_vsx_lxvll: {
15906     EVT VT;
15907     switch (Intrinsic) {
15908     case Intrinsic::ppc_altivec_lvebx:
15909       VT = MVT::i8;
15910       break;
15911     case Intrinsic::ppc_altivec_lvehx:
15912       VT = MVT::i16;
15913       break;
15914     case Intrinsic::ppc_altivec_lvewx:
15915       VT = MVT::i32;
15916       break;
15917     case Intrinsic::ppc_vsx_lxvd2x:
15918     case Intrinsic::ppc_vsx_lxvd2x_be:
15919       VT = MVT::v2f64;
15920       break;
15921     default:
15922       VT = MVT::v4i32;
15923       break;
15924     }
15925 
15926     Info.opc = ISD::INTRINSIC_W_CHAIN;
15927     Info.memVT = VT;
15928     Info.ptrVal = I.getArgOperand(0);
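    // These loads may access any byte within a (2 * size - 1)-byte window
    // around the pointer (lvx, for example, ignores the low-order address
    // bits), so describe the access conservatively.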
15929     Info.offset = -VT.getStoreSize()+1;
15930     Info.size = 2*VT.getStoreSize()-1;
15931     Info.align = Align(1);
15932     Info.flags = MachineMemOperand::MOLoad;
15933     return true;
15934   }
15935   case Intrinsic::ppc_altivec_stvx:
15936   case Intrinsic::ppc_altivec_stvxl:
15937   case Intrinsic::ppc_altivec_stvebx:
15938   case Intrinsic::ppc_altivec_stvehx:
15939   case Intrinsic::ppc_altivec_stvewx:
15940   case Intrinsic::ppc_vsx_stxvd2x:
15941   case Intrinsic::ppc_vsx_stxvw4x:
15942   case Intrinsic::ppc_vsx_stxvd2x_be:
15943   case Intrinsic::ppc_vsx_stxvw4x_be:
15944   case Intrinsic::ppc_vsx_stxvl:
15945   case Intrinsic::ppc_vsx_stxvll: {
15946     EVT VT;
15947     switch (Intrinsic) {
15948     case Intrinsic::ppc_altivec_stvebx:
15949       VT = MVT::i8;
15950       break;
15951     case Intrinsic::ppc_altivec_stvehx:
15952       VT = MVT::i16;
15953       break;
15954     case Intrinsic::ppc_altivec_stvewx:
15955       VT = MVT::i32;
15956       break;
15957     case Intrinsic::ppc_vsx_stxvd2x:
15958     case Intrinsic::ppc_vsx_stxvd2x_be:
15959       VT = MVT::v2f64;
15960       break;
15961     default:
15962       VT = MVT::v4i32;
15963       break;
15964     }
15965 
15966     Info.opc = ISD::INTRINSIC_VOID;
15967     Info.memVT = VT;
15968     Info.ptrVal = I.getArgOperand(1);
15969     Info.offset = -VT.getStoreSize()+1;
15970     Info.size = 2*VT.getStoreSize()-1;
15971     Info.align = Align(1);
15972     Info.flags = MachineMemOperand::MOStore;
15973     return true;
15974   }
15975   default:
15976     break;
15977   }
15978 
15979   return false;
15980 }
15981 
/// Returns the optimal type for a memory operation (such as a memcpy or
/// memset expansion), or EVT::Other if the type should be determined using
/// generic target-independent logic.
15984 EVT PPCTargetLowering::getOptimalMemOpType(
15985     const MemOp &Op, const AttributeList &FuncAttributes) const {
15986   if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
15987     // We should use Altivec/VSX loads and stores when available. For unaligned
15988     // addresses, unaligned VSX loads are only fast starting with the P8.
15989     if (Subtarget.hasAltivec() && Op.size() >= 16 &&
15990         (Op.isAligned(Align(16)) ||
15991          ((Op.isMemset() && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
15992       return MVT::v4i32;
15993   }
15994 
15995   if (Subtarget.isPPC64()) {
15996     return MVT::i64;
15997   }
15998 
15999   return MVT::i32;
16000 }
16001 
16002 /// Returns true if it is beneficial to convert a load of a constant
16003 /// to just the constant itself.
16004 bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
16005                                                           Type *Ty) const {
16006   assert(Ty->isIntegerTy());
16007 
16008   unsigned BitSize = Ty->getPrimitiveSizeInBits();
16009   return !(BitSize == 0 || BitSize > 64);
16010 }
16011 
16012 bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
16013   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
16014     return false;
16015   unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
16016   unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
16017   return NumBits1 == 64 && NumBits2 == 32;
16018 }
16019 
16020 bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
16021   if (!VT1.isInteger() || !VT2.isInteger())
16022     return false;
16023   unsigned NumBits1 = VT1.getSizeInBits();
16024   unsigned NumBits2 = VT2.getSizeInBits();
16025   return NumBits1 == 64 && NumBits2 == 32;
16026 }
16027 
16028 bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
16029   // Generally speaking, zexts are not free, but they are free when they can be
16030   // folded with other operations.
16031   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
16032     EVT MemVT = LD->getMemoryVT();
16033     if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
16034          (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
16035         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
16036          LD->getExtensionType() == ISD::ZEXTLOAD))
16037       return true;
16038   }
16039 
16040   // FIXME: Add other cases...
16041   //  - 32-bit shifts with a zext to i64
16042   //  - zext after ctlz, bswap, etc.
16043   //  - zext after and by a constant mask
16044 
16045   return TargetLowering::isZExtFree(Val, VT2);
16046 }
16047 
16048 bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
16049   assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
16050          "invalid fpext types");
16051   // Extending to float128 is not free.
16052   if (DestVT == MVT::f128)
16053     return false;
16054   return true;
16055 }
16056 
16057 bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
16058   return isInt<16>(Imm) || isUInt<16>(Imm);
16059 }
16060 
16061 bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
16062   return isInt<16>(Imm) || isUInt<16>(Imm);
16063 }
16064 
16065 bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned, Align,
16066                                                        MachineMemOperand::Flags,
16067                                                        bool *Fast) const {
16068   if (DisablePPCUnaligned)
16069     return false;
16070 
16071   // PowerPC supports unaligned memory access for simple non-vector types.
16072   // Although accessing unaligned addresses is not as efficient as accessing
16073   // aligned addresses, it is generally more efficient than manual expansion,
  // and it generally traps to software emulation only when crossing page
  // boundaries.
16076 
16077   if (!VT.isSimple())
16078     return false;
16079 
16080   if (VT.isFloatingPoint() && !VT.isVector() &&
16081       !Subtarget.allowsUnalignedFPAccess())
16082     return false;
16083 
16084   if (VT.getSimpleVT().isVector()) {
16085     if (Subtarget.hasVSX()) {
16086       if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
16087           VT != MVT::v4f32 && VT != MVT::v4i32)
16088         return false;
16089     } else {
16090       return false;
16091     }
16092   }
16093 
16094   if (VT == MVT::ppcf128)
16095     return false;
16096 
16097   if (Fast)
16098     *Fast = true;
16099 
16100   return true;
16101 }
16102 
16103 bool PPCTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
16104                                                SDValue C) const {
16105   // Check integral scalar types.
16106   if (!VT.isScalarInteger())
16107     return false;
16108   if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
16109     if (!ConstNode->getAPIntValue().isSignedIntN(64))
16110       return false;
    // This transformation will generate >= 2 operations. But the following
    // cases will generate <= 2 instructions during ISEL, so exclude them:
    // 1. If the constant multiplier fits in 16 bits, it can be handled by one
    //    HW instruction, i.e. MULLI.
    // 2. If the multiplier fits in 16 bits after shifting out its trailing
    //    zeros, only one more instruction than case 1 is needed, i.e. MULLI
    //    and RLDICR.
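    // By contrast, 131071 (2^17 - 1) does not fit in 16 bits even after
    // shifting, but 131071 + 1 is a power of two, so the multiply can
    // profitably be decomposed into (X << 17) - X.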
16117     int64_t Imm = ConstNode->getSExtValue();
16118     unsigned Shift = countTrailingZeros<uint64_t>(Imm);
16119     Imm >>= Shift;
16120     if (isInt<16>(Imm))
16121       return false;
16122     uint64_t UImm = static_cast<uint64_t>(Imm);
16123     if (isPowerOf2_64(UImm + 1) || isPowerOf2_64(UImm - 1) ||
16124         isPowerOf2_64(1 - UImm) || isPowerOf2_64(-1 - UImm))
16125       return true;
16126   }
16127   return false;
16128 }
16129 
16130 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
16131                                                    EVT VT) const {
16132   return isFMAFasterThanFMulAndFAdd(
16133       MF.getFunction(), VT.getTypeForEVT(MF.getFunction().getContext()));
16134 }
16135 
16136 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const Function &F,
16137                                                    Type *Ty) const {
16138   switch (Ty->getScalarType()->getTypeID()) {
16139   case Type::FloatTyID:
16140   case Type::DoubleTyID:
16141     return true;
16142   case Type::FP128TyID:
16143     return Subtarget.hasP9Vector();
16144   default:
16145     return false;
16146   }
16147 }
16148 
16149 // FIXME: add more patterns which are not profitable to hoist.
16150 bool PPCTargetLowering::isProfitableToHoist(Instruction *I) const {
16151   if (!I->hasOneUse())
16152     return true;
16153 
16154   Instruction *User = I->user_back();
16155   assert(User && "A single use instruction with no uses.");
16156 
16157   switch (I->getOpcode()) {
16158   case Instruction::FMul: {
16159     // Don't break FMA, PowerPC prefers FMA.
16160     if (User->getOpcode() != Instruction::FSub &&
16161         User->getOpcode() != Instruction::FAdd)
16162       return true;
16163 
16164     const TargetOptions &Options = getTargetMachine().Options;
16165     const Function *F = I->getFunction();
16166     const DataLayout &DL = F->getParent()->getDataLayout();
16167     Type *Ty = User->getOperand(0)->getType();
16168 
16169     return !(
16170         isFMAFasterThanFMulAndFAdd(*F, Ty) &&
16171         isOperationLegalOrCustom(ISD::FMA, getValueType(DL, Ty)) &&
16172         (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath));
16173   }
16174   case Instruction::Load: {
16175     // Don't break "store (load float*)" pattern, this pattern will be combined
16176     // to "store (load int32)" in later InstCombine pass. See function
16177     // combineLoadToOperationType. On PowerPC, loading a float point takes more
16178     // cycles than loading a 32 bit integer.
16179     LoadInst *LI = cast<LoadInst>(I);
    // For loads that combineLoadToOperationType leaves alone, such as
    // ordered loads, it should be profitable to hoist them.
    // A swifterror load can only be of pointer-to-pointer type, so the type
    // check below rules that case out.
16184     if (!LI->isUnordered())
16185       return true;
16186 
16187     if (User->getOpcode() != Instruction::Store)
16188       return true;
16189 
16190     if (I->getType()->getTypeID() != Type::FloatTyID)
16191       return true;
16192 
16193     return false;
16194   }
16195   default:
16196     return true;
16197   }
16198   return true;
16199 }
16200 
16201 const MCPhysReg *
16202 PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
16203   // LR is a callee-save register, but we must treat it as clobbered by any call
16204   // site. Hence we include LR in the scratch registers, which are in turn added
16205   // as implicit-defs for stackmaps and patchpoints. The same reasoning applies
16206   // to CTR, which is used by any indirect call.
16207   static const MCPhysReg ScratchRegs[] = {
16208     PPC::X12, PPC::LR8, PPC::CTR8, 0
16209   };
16210 
16211   return ScratchRegs;
16212 }
16213 
16214 Register PPCTargetLowering::getExceptionPointerRegister(
16215     const Constant *PersonalityFn) const {
16216   return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
16217 }
16218 
16219 Register PPCTargetLowering::getExceptionSelectorRegister(
16220     const Constant *PersonalityFn) const {
16221   return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
16222 }
16223 
16224 bool
16225 PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
                     EVT VT, unsigned DefinedValues) const {
16227   if (VT == MVT::v2i64)
16228     return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves
16229 
16230   if (Subtarget.hasVSX())
16231     return true;
16232 
16233   return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
16234 }
16235 
16236 Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
16237   if (DisableILPPref || Subtarget.enableMachineScheduler())
16238     return TargetLowering::getSchedulingPreference(N);
16239 
16240   return Sched::ILP;
16241 }
16242 
16243 // Create a fast isel object.
16244 FastISel *
16245 PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
16246                                   const TargetLibraryInfo *LibInfo) const {
16247   return PPC::createFastISel(FuncInfo, LibInfo);
16248 }
16249 
16250 // 'Inverted' means the FMA opcode after negating one multiplicand.
16251 // For example, (fma -a b c) = (fnmsub a b c)
16252 static unsigned invertFMAOpcode(unsigned Opc) {
16253   switch (Opc) {
16254   default:
16255     llvm_unreachable("Invalid FMA opcode for PowerPC!");
16256   case ISD::FMA:
16257     return PPCISD::FNMSUB;
16258   case PPCISD::FNMSUB:
16259     return ISD::FMA;
16260   }
16261 }
16262 
16263 SDValue PPCTargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
16264                                                 bool LegalOps, bool OptForSize,
16265                                                 NegatibleCost &Cost,
16266                                                 unsigned Depth) const {
16267   if (Depth > SelectionDAG::MaxRecursionDepth)
16268     return SDValue();
16269 
16270   unsigned Opc = Op.getOpcode();
16271   EVT VT = Op.getValueType();
16272   SDNodeFlags Flags = Op.getNode()->getFlags();
16273 
16274   switch (Opc) {
16275   case PPCISD::FNMSUB:
16276     if (!Op.hasOneUse() || !isTypeLegal(VT))
16277       break;
16278 
16279     const TargetOptions &Options = getTargetMachine().Options;
16280     SDValue N0 = Op.getOperand(0);
16281     SDValue N1 = Op.getOperand(1);
16282     SDValue N2 = Op.getOperand(2);
16283     SDLoc Loc(Op);
16284 
16285     NegatibleCost N2Cost = NegatibleCost::Expensive;
16286     SDValue NegN2 =
16287         getNegatedExpression(N2, DAG, LegalOps, OptForSize, N2Cost, Depth + 1);
16288 
16289     if (!NegN2)
16290       return SDValue();
16291 
16292     // (fneg (fnmsub a b c)) => (fnmsub (fneg a) b (fneg c))
16293     // (fneg (fnmsub a b c)) => (fnmsub a (fneg b) (fneg c))
    // These transformations may change the sign of zeroes. For example,
    // -(-ab-(-c)) = -0 while -(-(ab-c)) = +0 when a = b = c = 1.
16296     if (Flags.hasNoSignedZeros() || Options.NoSignedZerosFPMath) {
      // Try to choose the cheaper operand to negate.
16298       NegatibleCost N0Cost = NegatibleCost::Expensive;
16299       SDValue NegN0 = getNegatedExpression(N0, DAG, LegalOps, OptForSize,
16300                                            N0Cost, Depth + 1);
16301 
16302       NegatibleCost N1Cost = NegatibleCost::Expensive;
16303       SDValue NegN1 = getNegatedExpression(N1, DAG, LegalOps, OptForSize,
16304                                            N1Cost, Depth + 1);
16305 
16306       if (NegN0 && N0Cost <= N1Cost) {
16307         Cost = std::min(N0Cost, N2Cost);
16308         return DAG.getNode(Opc, Loc, VT, NegN0, N1, NegN2, Flags);
16309       } else if (NegN1) {
16310         Cost = std::min(N1Cost, N2Cost);
16311         return DAG.getNode(Opc, Loc, VT, N0, NegN1, NegN2, Flags);
16312       }
16313     }
16314 
16315     // (fneg (fnmsub a b c)) => (fma a b (fneg c))
16316     if (isOperationLegal(ISD::FMA, VT)) {
16317       Cost = N2Cost;
16318       return DAG.getNode(ISD::FMA, Loc, VT, N0, N1, NegN2, Flags);
16319     }
16320 
16321     break;
16322   }
16323 
16324   return TargetLowering::getNegatedExpression(Op, DAG, LegalOps, OptForSize,
16325                                               Cost, Depth);
16326 }
16327 
16328 // Override to enable LOAD_STACK_GUARD lowering on Linux.
16329 bool PPCTargetLowering::useLoadStackGuardNode() const {
16330   if (!Subtarget.isTargetLinux())
16331     return TargetLowering::useLoadStackGuardNode();
16332   return true;
16333 }
16334 
// Override to skip inserting the SSP declarations on Linux, where the stack
// guard is loaded via LOAD_STACK_GUARD instead of a global variable.
16336 void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
16337   if (!Subtarget.isTargetLinux())
16338     return TargetLowering::insertSSPDeclarations(M);
16339 }
16340 
16341 bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
16342                                      bool ForCodeSize) const {
16343   if (!VT.isSimple() || !Subtarget.hasVSX())
16344     return false;
16345 
16346   switch(VT.getSimpleVT().SimpleTy) {
16347   default:
    // For FP types that are currently not supported by the PPC backend,
    // return false. Examples: f16, f80.
16350     return false;
16351   case MVT::f32:
16352   case MVT::f64:
16353     if (Subtarget.hasPrefixInstrs()) {
      // We can materialize all immediates via XXSPLTI32DX and XXSPLTIDP.
16355       return true;
16356     }
16357     LLVM_FALLTHROUGH;
16358   case MVT::ppcf128:
16359     return Imm.isPosZero();
16360   }
16361 }
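
// For example, with prefixed instructions a constant such as 3.0 can be
// splatted directly via XXSPLTIDP, whereas on older VSX subtargets only
// +0.0 is treated as a legal immediate (it is trivially materializable,
// e.g. by XORing a register with itself), so other values are better
// loaded from the constant pool.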
16362 
16363 // For vector shift operation op, fold
16364 // (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
16365 static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
16366                                   SelectionDAG &DAG) {
16367   SDValue N0 = N->getOperand(0);
16368   SDValue N1 = N->getOperand(1);
16369   EVT VT = N0.getValueType();
16370   unsigned OpSizeInBits = VT.getScalarSizeInBits();
16371   unsigned Opcode = N->getOpcode();
16372   unsigned TargetOpcode;
16373 
16374   switch (Opcode) {
16375   default:
16376     llvm_unreachable("Unexpected shift operation");
16377   case ISD::SHL:
16378     TargetOpcode = PPCISD::SHL;
16379     break;
16380   case ISD::SRL:
16381     TargetOpcode = PPCISD::SRL;
16382     break;
16383   case ISD::SRA:
16384     TargetOpcode = PPCISD::SRA;
16385     break;
16386   }
16387 
16388   if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
16389       N1->getOpcode() == ISD::AND)
16390     if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
16391       if (Mask->getZExtValue() == OpSizeInBits - 1)
16392         return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));
16393 
16394   return SDValue();
16395 }
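
// For example, for v4i32 this folds away an explicit modulo mask:
//   (shl %x, (and %y, <31,31,31,31>)) --> (PPCISD::SHL %x, %y)
// This is safe because the underlying vector shift instructions (vslw and
// friends) already interpret each shift amount modulo the element width.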
16396 
16397 SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
16398   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
16399     return Value;
16400 
16401   SDValue N0 = N->getOperand(0);
16402   ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
16403   if (!Subtarget.isISA3_0() || !Subtarget.isPPC64() ||
16404       N0.getOpcode() != ISD::SIGN_EXTEND ||
16405       N0.getOperand(0).getValueType() != MVT::i32 || CN1 == nullptr ||
16406       N->getValueType(0) != MVT::i64)
16407     return SDValue();
16408 
16409   // We can't save an operation here if the value is already extended, and
16410   // the existing shift is easier to combine.
16411   SDValue ExtsSrc = N0.getOperand(0);
16412   if (ExtsSrc.getOpcode() == ISD::TRUNCATE &&
16413       ExtsSrc.getOperand(0).getOpcode() == ISD::AssertSext)
16414     return SDValue();
16415 
16416   SDLoc DL(N0);
16417   SDValue ShiftBy = SDValue(CN1, 0);
  // We want the shift amount to be i32 on the extswsli, but the shift amount
  // could be an i64.
16420   if (ShiftBy.getValueType() == MVT::i64)
16421     ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32);
16422 
16423   return DCI.DAG.getNode(PPCISD::EXTSWSLI, DL, MVT::i64, N0->getOperand(0),
16424                          ShiftBy);
16425 }
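
// For example, on a 64-bit ISA 3.0 subtarget,
//   (shl (sign_extend i32 %x to i64), 3)
// becomes (PPCISD::EXTSWSLI %x, 3), matching the extswsli instruction that
// sign-extends and shifts left in a single operation.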
16426 
16427 SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
16428   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
16429     return Value;
16430 
16431   return SDValue();
16432 }
16433 
16434 SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
16435   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
16436     return Value;
16437 
16438   return SDValue();
16439 }
16440 
16441 // Transform (add X, (zext(setne Z, C))) -> (addze X, (addic (addi Z, -C), -1))
16442 // Transform (add X, (zext(sete  Z, C))) -> (addze X, (subfic (addi Z, -C), 0))
// When C is zero, (addi Z, -C) simplifies to Z.
// Requirement: -C is in [-32768, 32767]; X and Z are MVT::i64 types.
16445 static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG,
16446                                  const PPCSubtarget &Subtarget) {
16447   if (!Subtarget.isPPC64())
16448     return SDValue();
16449 
16450   SDValue LHS = N->getOperand(0);
16451   SDValue RHS = N->getOperand(1);
16452 
16453   auto isZextOfCompareWithConstant = [](SDValue Op) {
16454     if (Op.getOpcode() != ISD::ZERO_EXTEND || !Op.hasOneUse() ||
16455         Op.getValueType() != MVT::i64)
16456       return false;
16457 
16458     SDValue Cmp = Op.getOperand(0);
16459     if (Cmp.getOpcode() != ISD::SETCC || !Cmp.hasOneUse() ||
16460         Cmp.getOperand(0).getValueType() != MVT::i64)
16461       return false;
16462 
16463     if (auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1))) {
16464       int64_t NegConstant = 0 - Constant->getSExtValue();
      // Due to the limitations of the addi instruction,
      // -C is required to be in [-32768, 32767].
16467       return isInt<16>(NegConstant);
16468     }
16469 
16470     return false;
16471   };
16472 
16473   bool LHSHasPattern = isZextOfCompareWithConstant(LHS);
16474   bool RHSHasPattern = isZextOfCompareWithConstant(RHS);
16475 
16476   // If there is a pattern, canonicalize a zext operand to the RHS.
16477   if (LHSHasPattern && !RHSHasPattern)
16478     std::swap(LHS, RHS);
16479   else if (!LHSHasPattern && !RHSHasPattern)
16480     return SDValue();
16481 
16482   SDLoc DL(N);
16483   SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Glue);
16484   SDValue Cmp = RHS.getOperand(0);
16485   SDValue Z = Cmp.getOperand(0);
16486   auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1));
16487 
  assert(Constant && "Constant should not be a null pointer.");
16489   int64_t NegConstant = 0 - Constant->getSExtValue();
16490 
16491   switch(cast<CondCodeSDNode>(Cmp.getOperand(2))->get()) {
16492   default: break;
16493   case ISD::SETNE: {
16494     //                                 when C == 0
16495     //                             --> addze X, (addic Z, -1).carry
16496     //                            /
16497     // add X, (zext(setne Z, C))--
16498     //                            \    when -32768 <= -C <= 32767 && C != 0
16499     //                             --> addze X, (addic (addi Z, -C), -1).carry
16500     SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
16501                               DAG.getConstant(NegConstant, DL, MVT::i64));
16502     SDValue AddOrZ = NegConstant != 0 ? Add : Z;
16503     SDValue Addc = DAG.getNode(ISD::ADDC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
16504                                AddOrZ, DAG.getConstant(-1ULL, DL, MVT::i64));
16505     return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
16506                        SDValue(Addc.getNode(), 1));
16507     }
16508   case ISD::SETEQ: {
16509     //                                 when C == 0
16510     //                             --> addze X, (subfic Z, 0).carry
16511     //                            /
16512     // add X, (zext(sete  Z, C))--
16513     //                            \    when -32768 <= -C <= 32767 && C != 0
16514     //                             --> addze X, (subfic (addi Z, -C), 0).carry
16515     SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
16516                               DAG.getConstant(NegConstant, DL, MVT::i64));
16517     SDValue AddOrZ = NegConstant != 0 ? Add : Z;
16518     SDValue Subc = DAG.getNode(ISD::SUBC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
16519                                DAG.getConstant(0, DL, MVT::i64), AddOrZ);
16520     return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
16521                        SDValue(Subc.getNode(), 1));
16522     }
16523   }
16524 
16525   return SDValue();
16526 }
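
// As a concrete instance of the SETNE diagram above, with C == 0,
//   add %X, (zext (setne i64 %Z, 0))
// lowers to roughly (illustrative register names):
//   addic rT, rZ, -1   ; carry is set iff Z != 0
//   addze rD, rX       ; rD = X + carry
// avoiding a separate compare-and-select sequence.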
16527 
16528 // Transform
16529 // (add C1, (MAT_PCREL_ADDR GlobalAddr+C2)) to
16530 // (MAT_PCREL_ADDR GlobalAddr+(C1+C2))
16531 // In this case both C1 and C2 must be known constants.
16532 // C1+C2 must fit into a 34 bit signed integer.
16533 static SDValue combineADDToMAT_PCREL_ADDR(SDNode *N, SelectionDAG &DAG,
16534                                           const PPCSubtarget &Subtarget) {
16535   if (!Subtarget.isUsingPCRelativeCalls())
16536     return SDValue();
16537 
  // Check both Operand 0 and Operand 1 of the ADD node for the PCRel node.
  // If we find that node, try to cast the Global Address and the Constant.
16540   SDValue LHS = N->getOperand(0);
16541   SDValue RHS = N->getOperand(1);
16542 
16543   if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
16544     std::swap(LHS, RHS);
16545 
16546   if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
16547     return SDValue();
16548 
16549   // Operand zero of PPCISD::MAT_PCREL_ADDR is the GA node.
16550   GlobalAddressSDNode *GSDN = dyn_cast<GlobalAddressSDNode>(LHS.getOperand(0));
16551   ConstantSDNode* ConstNode = dyn_cast<ConstantSDNode>(RHS);
16552 
16553   // Check that both casts succeeded.
16554   if (!GSDN || !ConstNode)
16555     return SDValue();
16556 
16557   int64_t NewOffset = GSDN->getOffset() + ConstNode->getSExtValue();
16558   SDLoc DL(GSDN);
16559 
16560   // The signed int offset needs to fit in 34 bits.
16561   if (!isInt<34>(NewOffset))
16562     return SDValue();
16563 
16564   // The new global address is a copy of the old global address except
16565   // that it has the updated Offset.
16566   SDValue GA =
16567       DAG.getTargetGlobalAddress(GSDN->getGlobal(), DL, GSDN->getValueType(0),
16568                                  NewOffset, GSDN->getTargetFlags());
16569   SDValue MatPCRel =
16570       DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, GSDN->getValueType(0), GA);
16571   return MatPCRel;
16572 }
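
// For example, with PC-relative addressing enabled,
//   (add (MAT_PCREL_ADDR @g+4), 8) --> (MAT_PCREL_ADDR @g+12)
// so the whole address can still be materialized by a single paddi-style
// PC-relative instruction rather than a materialization followed by an add.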
16573 
16574 SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {
16575   if (auto Value = combineADDToADDZE(N, DCI.DAG, Subtarget))
16576     return Value;
16577 
16578   if (auto Value = combineADDToMAT_PCREL_ADDR(N, DCI.DAG, Subtarget))
16579     return Value;
16580 
16581   return SDValue();
16582 }
16583 
// Detect TRUNCATE operations on bitcasts of float128 values.
// What we are looking for here is the situation where we extract a subset
// of bits from a 128-bit float.
// This can be of two forms:
// 1) BITCAST of f128 feeding TRUNCATE
// 2) BITCAST of f128 feeding SRL (a shift) feeding TRUNCATE
// This is required because we do not have a legal i128 type, so we want to
// avoid storing the f128 and then reloading part of it.
16593 SDValue PPCTargetLowering::combineTRUNCATE(SDNode *N,
16594                                            DAGCombinerInfo &DCI) const {
16595   // If we are using CRBits then try that first.
16596   if (Subtarget.useCRBits()) {
16597     // Check if CRBits did anything and return that if it did.
16598     if (SDValue CRTruncValue = DAGCombineTruncBoolExt(N, DCI))
16599       return CRTruncValue;
16600   }
16601 
16602   SDLoc dl(N);
16603   SDValue Op0 = N->getOperand(0);
16604 
16605   // fold (truncate (abs (sub (zext a), (zext b)))) -> (vabsd a, b)
16606   if (Subtarget.hasP9Altivec() && Op0.getOpcode() == ISD::ABS) {
16607     EVT VT = N->getValueType(0);
16608     if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
16609       return SDValue();
16610     SDValue Sub = Op0.getOperand(0);
16611     if (Sub.getOpcode() == ISD::SUB) {
16612       SDValue SubOp0 = Sub.getOperand(0);
16613       SDValue SubOp1 = Sub.getOperand(1);
16614       if ((SubOp0.getOpcode() == ISD::ZERO_EXTEND) &&
16615           (SubOp1.getOpcode() == ISD::ZERO_EXTEND)) {
16616         return DCI.DAG.getNode(PPCISD::VABSD, dl, VT, SubOp0.getOperand(0),
16617                                SubOp1.getOperand(0),
16618                                DCI.DAG.getTargetConstant(0, dl, MVT::i32));
16619       }
16620     }
16621   }
16622 
16623   // Looking for a truncate of i128 to i64.
16624   if (Op0.getValueType() != MVT::i128 || N->getValueType(0) != MVT::i64)
16625     return SDValue();
16626 
16627   int EltToExtract = DCI.DAG.getDataLayout().isBigEndian() ? 1 : 0;
16628 
16629   // SRL feeding TRUNCATE.
16630   if (Op0.getOpcode() == ISD::SRL) {
16631     ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
16632     // The right shift has to be by 64 bits.
16633     if (!ConstNode || ConstNode->getZExtValue() != 64)
16634       return SDValue();
16635 
16636     // Switch the element number to extract.
16637     EltToExtract = EltToExtract ? 0 : 1;
16638     // Update Op0 past the SRL.
16639     Op0 = Op0.getOperand(0);
16640   }
16641 
16642   // BITCAST feeding a TRUNCATE possibly via SRL.
16643   if (Op0.getOpcode() == ISD::BITCAST &&
16644       Op0.getValueType() == MVT::i128 &&
16645       Op0.getOperand(0).getValueType() == MVT::f128) {
16646     SDValue Bitcast = DCI.DAG.getBitcast(MVT::v2i64, Op0.getOperand(0));
16647     return DCI.DAG.getNode(
16648         ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Bitcast,
16649         DCI.DAG.getTargetConstant(EltToExtract, dl, MVT::i32));
16650   }
16651   return SDValue();
16652 }
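
// For example, on a big-endian target,
//   (trunc (srl (bitcast f128 %v to i128), 64) to i64)
// becomes an EXTRACT_VECTOR_ELT of element 0 of (bitcast %v to v2i64),
// i.e. a direct register extraction instead of storing the f128 and
// reloading 64 bits of it.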
16653 
16654 SDValue PPCTargetLowering::combineMUL(SDNode *N, DAGCombinerInfo &DCI) const {
16655   SelectionDAG &DAG = DCI.DAG;
16656 
16657   ConstantSDNode *ConstOpOrElement = isConstOrConstSplat(N->getOperand(1));
16658   if (!ConstOpOrElement)
16659     return SDValue();
16660 
  // An imul is usually smaller than the alternative sequence for legal types.
16662   if (DAG.getMachineFunction().getFunction().hasMinSize() &&
16663       isOperationLegal(ISD::MUL, N->getValueType(0)))
16664     return SDValue();
16665 
16666   auto IsProfitable = [this](bool IsNeg, bool IsAddOne, EVT VT) -> bool {
16667     switch (this->Subtarget.getCPUDirective()) {
16668     default:
      // TODO: enhance the condition for subtargets before pwr8.
16670       return false;
16671     case PPC::DIR_PWR8:
16672       //  type        mul     add    shl
16673       // scalar        4       1      1
16674       // vector        7       2      2
16675       return true;
16676     case PPC::DIR_PWR9:
16677     case PPC::DIR_PWR10:
16678     case PPC::DIR_PWR_FUTURE:
16679       //  type        mul     add    shl
16680       // scalar        5       2      2
16681       // vector        7       2      2
16682 
      // The cycle counts of the relevant operations are shown in the table
      // above. Because mul is 5 (scalar) / 7 (vector) while add/sub/shl are
      // all 2 for both scalar and vector types, the two-instruction patterns
      // (add/sub + shl, 4 cycles in total) are always profitable; but the
      // three-instruction pattern
      // (mul x, -(2^N + 1)) => -(add (shl x, N), x)
      // costs 6 cycles (sub + add + shl), so it only beats the multiply for
      // vector types.
16689       return IsAddOne && IsNeg ? VT.isVector() : true;
16690     }
16691   };
16692 
16693   EVT VT = N->getValueType(0);
16694   SDLoc DL(N);
16695 
16696   const APInt &MulAmt = ConstOpOrElement->getAPIntValue();
16697   bool IsNeg = MulAmt.isNegative();
16698   APInt MulAmtAbs = MulAmt.abs();
16699 
16700   if ((MulAmtAbs - 1).isPowerOf2()) {
16701     // (mul x, 2^N + 1) => (add (shl x, N), x)
16702     // (mul x, -(2^N + 1)) => -(add (shl x, N), x)
16703 
16704     if (!IsProfitable(IsNeg, true, VT))
16705       return SDValue();
16706 
16707     SDValue Op0 = N->getOperand(0);
16708     SDValue Op1 =
16709         DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
16710                     DAG.getConstant((MulAmtAbs - 1).logBase2(), DL, VT));
16711     SDValue Res = DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);
16712 
16713     if (!IsNeg)
16714       return Res;
16715 
16716     return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
16717   } else if ((MulAmtAbs + 1).isPowerOf2()) {
16718     // (mul x, 2^N - 1) => (sub (shl x, N), x)
16719     // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
16720 
16721     if (!IsProfitable(IsNeg, false, VT))
16722       return SDValue();
16723 
16724     SDValue Op0 = N->getOperand(0);
16725     SDValue Op1 =
16726         DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
16727                     DAG.getConstant((MulAmtAbs + 1).logBase2(), DL, VT));
16728 
16729     if (!IsNeg)
16730       return DAG.getNode(ISD::SUB, DL, VT, Op1, Op0);
16731     else
16732       return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);
16733 
16734   } else {
16735     return SDValue();
16736   }
16737 }
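
// As a worked example of the 2^N + 1 case: on a profitable subtarget,
//   (mul %x, 5)   ; 5 == 2^2 + 1
// becomes
//   (add (shl %x, 2), %x)
// trading the multiply for a shift plus an add, which the cycle table in
// IsProfitable rates as cheaper overall.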
16738 
// Combine an FMA-like op (such as fnmsub) with fnegs into the appropriate op.
// Do this in the combiner since we need to check SD flags and other subtarget
// features.
16741 SDValue PPCTargetLowering::combineFMALike(SDNode *N,
16742                                           DAGCombinerInfo &DCI) const {
16743   SDValue N0 = N->getOperand(0);
16744   SDValue N1 = N->getOperand(1);
16745   SDValue N2 = N->getOperand(2);
16746   SDNodeFlags Flags = N->getFlags();
16747   EVT VT = N->getValueType(0);
16748   SelectionDAG &DAG = DCI.DAG;
16749   const TargetOptions &Options = getTargetMachine().Options;
16750   unsigned Opc = N->getOpcode();
16751   bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
16752   bool LegalOps = !DCI.isBeforeLegalizeOps();
16753   SDLoc Loc(N);
16754 
16755   if (!isOperationLegal(ISD::FMA, VT))
16756     return SDValue();
16757 
  // Allowing the transformation to FNMSUB may change the sign of zeroes when
  // ab-c=0, since (fnmsub a b c)=-0 while c-ab=+0.
16760   if (!Flags.hasNoSignedZeros() && !Options.NoSignedZerosFPMath)
16761     return SDValue();
16762 
16763   // (fma (fneg a) b c) => (fnmsub a b c)
16764   // (fnmsub (fneg a) b c) => (fma a b c)
16765   if (SDValue NegN0 = getCheaperNegatedExpression(N0, DAG, LegalOps, CodeSize))
16766     return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, NegN0, N1, N2, Flags);
16767 
16768   // (fma a (fneg b) c) => (fnmsub a b c)
16769   // (fnmsub a (fneg b) c) => (fma a b c)
16770   if (SDValue NegN1 = getCheaperNegatedExpression(N1, DAG, LegalOps, CodeSize))
16771     return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, N0, NegN1, N2, Flags);
16772 
16773   return SDValue();
16774 }
16775 
16776 bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  // Only duplicate to increase tail-calls for the 64-bit SysV ABIs.
16778   if (!Subtarget.is64BitELFABI())
16779     return false;
16780 
16781   // If not a tail call then no need to proceed.
16782   if (!CI->isTailCall())
16783     return false;
16784 
  // If sibling calls have been disabled and tail-calls aren't guaranteed,
  // there is no reason to duplicate.
16787   auto &TM = getTargetMachine();
16788   if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
16789     return false;
16790 
16791   // Can't tail call a function called indirectly, or if it has variadic args.
16792   const Function *Callee = CI->getCalledFunction();
16793   if (!Callee || Callee->isVarArg())
16794     return false;
16795 
16796   // Make sure the callee and caller calling conventions are eligible for tco.
16797   const Function *Caller = CI->getParent()->getParent();
16798   if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
16799                                            CI->getCallingConv()))
16800       return false;
16801 
  // If the function is local then we have a good chance at tail-calling it.
16803   return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
16804 }
16805 
16806 bool PPCTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
16807   if (!Subtarget.hasVSX())
16808     return false;
16809   if (Subtarget.hasP9Vector() && VT == MVT::f128)
16810     return true;
16811   return VT == MVT::f32 || VT == MVT::f64 ||
16812     VT == MVT::v4f32 || VT == MVT::v2f64;
16813 }
16814 
16815 bool PPCTargetLowering::
16816 isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
16817   const Value *Mask = AndI.getOperand(1);
16818   // If the mask is suitable for andi. or andis. we should sink the and.
16819   if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
16820     // Can't handle constants wider than 64-bits.
16821     if (CI->getBitWidth() > 64)
16822       return false;
16823     int64_t ConstVal = CI->getZExtValue();
16824     return isUInt<16>(ConstVal) ||
16825       (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
16826   }
16827 
16828   // For non-constant masks, we can always use the record-form and.
16829   return true;
16830 }
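
// For example, a mask of 0x0000FFFF can be matched by a single record-form
// andi. (and 0xFFFF0000 by andis.), so sinking the 'and' next to the
// compare lets the zero test come for free from CR0.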
16831 
16832 // Transform (abs (sub (zext a), (zext b))) to (vabsd a b 0)
16833 // Transform (abs (sub (zext a), (zext_invec b))) to (vabsd a b 0)
16834 // Transform (abs (sub (zext_invec a), (zext_invec b))) to (vabsd a b 0)
16835 // Transform (abs (sub (zext_invec a), (zext b))) to (vabsd a b 0)
// Transform (abs (sub a, b)) to (vabsd a b 1) if a & b are of type v4i32
16837 SDValue PPCTargetLowering::combineABS(SDNode *N, DAGCombinerInfo &DCI) const {
16838   assert((N->getOpcode() == ISD::ABS) && "Need ABS node here");
16839   assert(Subtarget.hasP9Altivec() &&
16840          "Only combine this when P9 altivec supported!");
16841   EVT VT = N->getValueType(0);
16842   if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
16843     return SDValue();
16844 
16845   SelectionDAG &DAG = DCI.DAG;
16846   SDLoc dl(N);
16847   if (N->getOperand(0).getOpcode() == ISD::SUB) {
    // Even though ABS is a signed operation, the unsigned VABSD applies:
    // zero-extended inputs are known non-negative as signed integers, so
    // the signed and unsigned absolute differences agree.
16850     unsigned SubOpcd0 = N->getOperand(0)->getOperand(0).getOpcode();
16851     unsigned SubOpcd1 = N->getOperand(0)->getOperand(1).getOpcode();
16852     if ((SubOpcd0 == ISD::ZERO_EXTEND ||
16853          SubOpcd0 == ISD::ZERO_EXTEND_VECTOR_INREG) &&
16854         (SubOpcd1 == ISD::ZERO_EXTEND ||
16855          SubOpcd1 == ISD::ZERO_EXTEND_VECTOR_INREG)) {
16856       return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
16857                          N->getOperand(0)->getOperand(0),
16858                          N->getOperand(0)->getOperand(1),
16859                          DAG.getTargetConstant(0, dl, MVT::i32));
16860     }
16861 
    // For type v4i32, this can be optimized with xvnegsp + vabsduw.
16863     if (N->getOperand(0).getValueType() == MVT::v4i32 &&
16864         N->getOperand(0).hasOneUse()) {
16865       return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
16866                          N->getOperand(0)->getOperand(0),
16867                          N->getOperand(0)->getOperand(1),
16868                          DAG.getTargetConstant(1, dl, MVT::i32));
16869     }
16870   }
16871 
16872   return SDValue();
16873 }
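
// For example, when the v4i32 sub above has a single use,
//   (abs (sub %a, %b)) --> (VABSD %a, %b, 1)
// which can be implemented as xvnegsp on both inputs followed by vabsduw:
// flipping the sign bits biases both operands by 2^31, turning the signed
// absolute difference into an unsigned one.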
16874 
// For type v4i32/v8i16/v16i8, transform
16876 // from (vselect (setcc a, b, setugt), (sub a, b), (sub b, a)) to (vabsd a, b)
16877 // from (vselect (setcc a, b, setuge), (sub a, b), (sub b, a)) to (vabsd a, b)
16878 // from (vselect (setcc a, b, setult), (sub b, a), (sub a, b)) to (vabsd a, b)
16879 // from (vselect (setcc a, b, setule), (sub b, a), (sub a, b)) to (vabsd a, b)
16880 SDValue PPCTargetLowering::combineVSelect(SDNode *N,
16881                                           DAGCombinerInfo &DCI) const {
16882   assert((N->getOpcode() == ISD::VSELECT) && "Need VSELECT node here");
16883   assert(Subtarget.hasP9Altivec() &&
16884          "Only combine this when P9 altivec supported!");
16885 
16886   SelectionDAG &DAG = DCI.DAG;
16887   SDLoc dl(N);
16888   SDValue Cond = N->getOperand(0);
16889   SDValue TrueOpnd = N->getOperand(1);
16890   SDValue FalseOpnd = N->getOperand(2);
16891   EVT VT = N->getOperand(1).getValueType();
16892 
16893   if (Cond.getOpcode() != ISD::SETCC || TrueOpnd.getOpcode() != ISD::SUB ||
16894       FalseOpnd.getOpcode() != ISD::SUB)
16895     return SDValue();
16896 
  // ABSD is only available for types v4i32/v8i16/v16i8.
16898   if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
16899     return SDValue();
16900 
  // Only combine this when it saves at least one dependent computation.
16902   if (!(Cond.hasOneUse() || TrueOpnd.hasOneUse() || FalseOpnd.hasOneUse()))
16903     return SDValue();
16904 
16905   ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
16906 
16907   // Can only handle unsigned comparison here
16908   switch (CC) {
16909   default:
16910     return SDValue();
16911   case ISD::SETUGT:
16912   case ISD::SETUGE:
16913     break;
16914   case ISD::SETULT:
16915   case ISD::SETULE:
16916     std::swap(TrueOpnd, FalseOpnd);
16917     break;
16918   }
16919 
16920   SDValue CmpOpnd1 = Cond.getOperand(0);
16921   SDValue CmpOpnd2 = Cond.getOperand(1);
16922 
16923   // SETCC CmpOpnd1 CmpOpnd2 cond
16924   // TrueOpnd = CmpOpnd1 - CmpOpnd2
16925   // FalseOpnd = CmpOpnd2 - CmpOpnd1
16926   if (TrueOpnd.getOperand(0) == CmpOpnd1 &&
16927       TrueOpnd.getOperand(1) == CmpOpnd2 &&
16928       FalseOpnd.getOperand(0) == CmpOpnd2 &&
16929       FalseOpnd.getOperand(1) == CmpOpnd1) {
16930     return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(1).getValueType(),
16931                        CmpOpnd1, CmpOpnd2,
16932                        DAG.getTargetConstant(0, dl, MVT::i32));
16933   }
16934 
16935   return SDValue();
16936 }
16937 
/// getAddrModeForFlags - Based on the set of address flags, select the
/// optimal instruction format to match by.
16940 PPC::AddrMode PPCTargetLowering::getAddrModeForFlags(unsigned Flags) const {
16941   // This is not a node we should be handling here.
16942   if (Flags == PPC::MOF_None)
16943     return PPC::AM_None;
  // The unaligned D-Form is tried first, followed by the aligned DS- and
  // DQ-Forms.
16945   for (auto FlagSet : AddrModesMap.at(PPC::AM_DForm))
16946     if ((Flags & FlagSet) == FlagSet)
16947       return PPC::AM_DForm;
16948   for (auto FlagSet : AddrModesMap.at(PPC::AM_DSForm))
16949     if ((Flags & FlagSet) == FlagSet)
16950       return PPC::AM_DSForm;
16951   for (auto FlagSet : AddrModesMap.at(PPC::AM_DQForm))
16952     if ((Flags & FlagSet) == FlagSet)
16953       return PPC::AM_DQForm;
16954   // If no other forms are selected, return an X-Form as it is the most
16955   // general addressing mode.
16956   return PPC::AM_XForm;
16957 }
16958 
16959 /// Set alignment flags based on whether or not the Frame Index is aligned.
16960 /// Utilized when computing flags for address computation when selecting
16961 /// load and store instructions.
16962 static void setAlignFlagsForFI(SDValue N, unsigned &FlagSet,
16963                                SelectionDAG &DAG) {
16964   bool IsAdd = ((N.getOpcode() == ISD::ADD) || (N.getOpcode() == ISD::OR));
16965   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(IsAdd ? N.getOperand(0) : N);
16966   if (!FI)
16967     return;
16968   const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
16969   unsigned FrameIndexAlign = MFI.getObjectAlign(FI->getIndex()).value();
16970   // If this is (add $FI, $S16Imm), the alignment flags are already set
16971   // based on the immediate. We just need to clear the alignment flags
16972   // if the FI alignment is weaker.
16973   if ((FrameIndexAlign % 4) != 0)
16974     FlagSet &= ~PPC::MOF_RPlusSImm16Mult4;
16975   if ((FrameIndexAlign % 16) != 0)
16976     FlagSet &= ~PPC::MOF_RPlusSImm16Mult16;
16977   // If the address is a plain FrameIndex, set alignment flags based on
16978   // FI alignment.
16979   if (!IsAdd) {
16980     if ((FrameIndexAlign % 4) == 0)
16981       FlagSet |= PPC::MOF_RPlusSImm16Mult4;
16982     if ((FrameIndexAlign % 16) == 0)
16983       FlagSet |= PPC::MOF_RPlusSImm16Mult16;
16984   }
16985 }
16986 
16987 /// Given a node, compute flags that are used for address computation when
16988 /// selecting load and store instructions. The flags computed are stored in
16989 /// FlagSet. This function takes into account whether the node is a constant,
/// an ADD or OR, or neither, and computes the address flags accordingly.
16991 static void computeFlagsForAddressComputation(SDValue N, unsigned &FlagSet,
16992                                               SelectionDAG &DAG) {
  // Set the alignment flags for the node depending on whether the immediate
  // is a multiple of 4 or 16.
16995   auto SetAlignFlagsForImm = [&](uint64_t Imm) {
16996     if ((Imm & 0x3) == 0)
16997       FlagSet |= PPC::MOF_RPlusSImm16Mult4;
16998     if ((Imm & 0xf) == 0)
16999       FlagSet |= PPC::MOF_RPlusSImm16Mult16;
17000   };
17001 
17002   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
17003     // All 32-bit constants can be computed as LIS + Disp.
17004     const APInt &ConstImm = CN->getAPIntValue();
17005     if (ConstImm.isSignedIntN(32)) { // Flag to handle 32-bit constants.
17006       FlagSet |= PPC::MOF_AddrIsSImm32;
17007       SetAlignFlagsForImm(ConstImm.getZExtValue());
17008       setAlignFlagsForFI(N, FlagSet, DAG);
17009     }
17010     if (ConstImm.isSignedIntN(34)) // Flag to handle 34-bit constants.
17011       FlagSet |= PPC::MOF_RPlusSImm34;
17012     else // Let constant materialization handle large constants.
17013       FlagSet |= PPC::MOF_NotAddNorCst;
17014   } else if (N.getOpcode() == ISD::ADD || provablyDisjointOr(DAG, N)) {
17015     // This address can be represented as an addition of:
17016     // - Register + Imm16 (possibly a multiple of 4/16)
17017     // - Register + Imm34
17018     // - Register + PPCISD::Lo
17019     // - Register + Register
17020     // In any case, we won't have to match this as Base + Zero.
17021     SDValue RHS = N.getOperand(1);
17022     if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(RHS)) {
17023       const APInt &ConstImm = CN->getAPIntValue();
17024       if (ConstImm.isSignedIntN(16)) {
17025         FlagSet |= PPC::MOF_RPlusSImm16; // Signed 16-bit immediates.
17026         SetAlignFlagsForImm(ConstImm.getZExtValue());
17027         setAlignFlagsForFI(N, FlagSet, DAG);
17028       }
17029       if (ConstImm.isSignedIntN(34))
17030         FlagSet |= PPC::MOF_RPlusSImm34; // Signed 34-bit immediates.
17031       else
17032         FlagSet |= PPC::MOF_RPlusR; // Register.
17033     } else if (RHS.getOpcode() == PPCISD::Lo &&
17034                !cast<ConstantSDNode>(RHS.getOperand(1))->getZExtValue())
17035       FlagSet |= PPC::MOF_RPlusLo; // PPCISD::Lo.
17036     else
17037       FlagSet |= PPC::MOF_RPlusR;
17038   } else { // The address computation is not a constant or an addition.
17039     setAlignFlagsForFI(N, FlagSet, DAG);
17040     FlagSet |= PPC::MOF_NotAddNorCst;
17041   }
17042 }
17043 
/// computeMOFlags - Given a node N and its Parent (a MemSDNode), compute
17045 /// the address flags of the load/store instruction that is to be matched.
17046 unsigned PPCTargetLowering::computeMOFlags(const SDNode *Parent, SDValue N,
17047                                            SelectionDAG &DAG) const {
17048   unsigned FlagSet = PPC::MOF_None;
17049 
17050   // Compute subtarget flags.
17051   if (!Subtarget.hasP9Vector())
17052     FlagSet |= PPC::MOF_SubtargetBeforeP9;
17053   else {
17054     FlagSet |= PPC::MOF_SubtargetP9;
17055     if (Subtarget.hasPrefixInstrs())
17056       FlagSet |= PPC::MOF_SubtargetP10;
17057   }
17058   if (Subtarget.hasSPE())
17059     FlagSet |= PPC::MOF_SubtargetSPE;
17060 
  // Mark this as something we don't want to handle here if it is an atomic
  // or pre-increment instruction.
17063   if (const LSBaseSDNode *LSB = dyn_cast<LSBaseSDNode>(Parent))
17064     if (LSB->isIndexed())
17065       return PPC::MOF_None;
17066 
  // Compute in-memory type flags. This is based on whether the type is a
  // scalar integer, a float, or a vector.
17069   const MemSDNode *MN = dyn_cast<MemSDNode>(Parent);
17070   assert(MN && "Parent should be a MemSDNode!");
17071   EVT MemVT = MN->getMemoryVT();
17072   unsigned Size = MemVT.getSizeInBits();
17073   if (MemVT.isScalarInteger()) {
17074     assert(Size <= 64 && "Not expecting scalar integers larger than 8 bytes!");
17075     if (Size < 32)
17076       FlagSet |= PPC::MOF_SubWordInt;
17077     else if (Size == 32)
17078       FlagSet |= PPC::MOF_WordInt;
17079     else
17080       FlagSet |= PPC::MOF_DoubleWordInt;
17081   } else if (MemVT.isVector() && !MemVT.isFloatingPoint()) { // Integer vectors.
17082     if (Size == 128)
17083       FlagSet |= PPC::MOF_Vector;
17084     else if (Size == 256)
17085       FlagSet |= PPC::MOF_Vector256;
17086     else
17087       llvm_unreachable("Not expecting illegal vectors!");
17088   } else { // Floating point type: can be scalar, f128 or vector types.
17089     if (Size == 32 || Size == 64)
17090       FlagSet |= PPC::MOF_ScalarFloat;
17091     else if (MemVT == MVT::f128 || MemVT.isVector())
17092       FlagSet |= PPC::MOF_Vector;
17093     else
17094       llvm_unreachable("Not expecting illegal scalar floats!");
17095   }
17096 
17097   // Compute flags for address computation.
17098   computeFlagsForAddressComputation(N, FlagSet, DAG);
17099 
17100   // Compute type extension flags.
17101   if (const LoadSDNode *LN = dyn_cast<LoadSDNode>(Parent)) {
17102     switch (LN->getExtensionType()) {
17103     case ISD::SEXTLOAD:
17104       FlagSet |= PPC::MOF_SExt;
17105       break;
17106     case ISD::EXTLOAD:
17107     case ISD::ZEXTLOAD:
17108       FlagSet |= PPC::MOF_ZExt;
17109       break;
17110     case ISD::NON_EXTLOAD:
17111       FlagSet |= PPC::MOF_NoExt;
17112       break;
17113     }
17114   } else
17115     FlagSet |= PPC::MOF_NoExt;
17116 
17117   // For integers, no extension is the same as zero extension.
17118   // We set the extension mode to zero extension so we don't have
17119   // to add separate entries in AddrModesMap for loads and stores.
17120   if (MemVT.isScalarInteger() && (FlagSet & PPC::MOF_NoExt)) {
17121     FlagSet |= PPC::MOF_ZExt;
17122     FlagSet &= ~PPC::MOF_NoExt;
17123   }
17124 
17125   // If we don't have prefixed instructions, 34-bit constants should be
17126   // treated as PPC::MOF_NotAddNorCst so they can match D-Forms.
17127   bool IsNonP1034BitConst =
17128       ((PPC::MOF_RPlusSImm34 | PPC::MOF_AddrIsSImm32 | PPC::MOF_SubtargetP10) &
17129        FlagSet) == PPC::MOF_RPlusSImm34;
17130   if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::OR &&
17131       IsNonP1034BitConst)
17132     FlagSet |= PPC::MOF_NotAddNorCst;
17133 
17134   return FlagSet;
17135 }
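
// For example, a 4-byte zero-extending integer load from (add %reg, 8) on a
// Power9 subtarget would yield a flag set including MOF_SubtargetP9,
// MOF_WordInt, MOF_ZExt, MOF_RPlusSImm16 and MOF_RPlusSImm16Mult4, which
// getAddrModeForFlags then maps to a D-Form load such as lwz.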
17136 
17137 /// SelectForceXFormMode - Given the specified address, force it to be
17138 /// represented as an indexed [r+r] operation (an XForm instruction).
17139 PPC::AddrMode PPCTargetLowering::SelectForceXFormMode(SDValue N, SDValue &Disp,
17140                                                       SDValue &Base,
                                                      SelectionDAG &DAG) const {
17143   PPC::AddrMode Mode = PPC::AM_XForm;
17144   int16_t ForceXFormImm = 0;
17145   if (provablyDisjointOr(DAG, N) &&
17146       !isIntS16Immediate(N.getOperand(1), ForceXFormImm)) {
17147     Disp = N.getOperand(0);
17148     Base = N.getOperand(1);
17149     return Mode;
17150   }
17151 
17152   // If the address is the result of an add, we will utilize the fact that the
17153   // address calculation includes an implicit add.  However, we can reduce
17154   // register pressure if we do not materialize a constant just for use as the
17155   // index register.  We only get rid of the add if it is not an add of a
17156   // value and a 16-bit signed constant and both have a single use.
17157   if (N.getOpcode() == ISD::ADD &&
17158       (!isIntS16Immediate(N.getOperand(1), ForceXFormImm) ||
17159        !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
17160     Disp = N.getOperand(0);
17161     Base = N.getOperand(1);
17162     return Mode;
17163   }
17164 
17165   // Otherwise, use R0 as the base register.
17166   Disp = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
17167                          N.getValueType());
17168   Base = N;
17169 
17170   return Mode;
17171 }
17172 
/// SelectOptimalAddrMode - Based on a node N and its Parent (a MemSDNode),
17174 /// compute the address flags of the node, get the optimal address mode based
17175 /// on the flags, and set the Base and Disp based on the address mode.
17176 PPC::AddrMode PPCTargetLowering::SelectOptimalAddrMode(const SDNode *Parent,
17177                                                        SDValue N, SDValue &Disp,
17178                                                        SDValue &Base,
17179                                                        SelectionDAG &DAG,
17180                                                        MaybeAlign Align) const {
17181   SDLoc DL(Parent);
17182 
17183   // Compute the address flags.
17184   unsigned Flags = computeMOFlags(Parent, N, DAG);
17185 
17186   // Get the optimal address mode based on the Flags.
17187   PPC::AddrMode Mode = getAddrModeForFlags(Flags);
17188 
17189   // Set Base and Disp accordingly depending on the address mode.
17190   switch (Mode) {
17191   case PPC::AM_DForm:
17192   case PPC::AM_DSForm:
17193   case PPC::AM_DQForm: {
    // This is a register plus a 16-bit immediate. The base will be the
    // register and the displacement will be the immediate, provided it is
    // sufficiently aligned.
17197     if (Flags & PPC::MOF_RPlusSImm16) {
17198       SDValue Op0 = N.getOperand(0);
17199       SDValue Op1 = N.getOperand(1);
17200       ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1);
17201       int16_t Imm = CN->getAPIntValue().getZExtValue();
17202       if (!Align || isAligned(*Align, Imm)) {
17203         Disp = DAG.getTargetConstant(Imm, DL, N.getValueType());
17204         Base = Op0;
17205         if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op0)) {
17206           Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
17207           fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
17208         }
17209         break;
17210       }
17211     }
17212     // This is a register plus the @lo relocation. The base is the register
17213     // and the displacement is the global address.
17214     else if (Flags & PPC::MOF_RPlusLo) {
17215       Disp = N.getOperand(1).getOperand(0); // The global address.
17216       assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
17217              Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
17218              Disp.getOpcode() == ISD::TargetConstantPool ||
17219              Disp.getOpcode() == ISD::TargetJumpTable);
17220       Base = N.getOperand(0);
17221       break;
17222     }
    // This is a constant address of at most 32 bits. The base will be
    // zero or load-immediate-shifted and the displacement will be
    // the low 16 bits of the address.
17226     else if (Flags & PPC::MOF_AddrIsSImm32) {
17227       ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
17228       EVT CNType = CN->getValueType(0);
17229       uint64_t CNImm = CN->getZExtValue();
17230       // If this address fits entirely in a 16-bit sext immediate field, codegen
17231       // this as "d, 0".
17232       int16_t Imm;
17233       if (isIntS16Immediate(CN, Imm) && (!Align || isAligned(*Align, Imm))) {
17234         Disp = DAG.getTargetConstant(Imm, DL, CNType);
17235         Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
17236                                CNType);
17237         break;
17238       }
17239       // Handle 32-bit sext immediate with LIS + Addr mode.
17240       if ((CNType == MVT::i32 || isInt<32>(CNImm)) &&
17241           (!Align || isAligned(*Align, CNImm))) {
17242         int32_t Addr = (int32_t)CNImm;
17243         // Otherwise, break this down into LIS + Disp.
17244         Disp = DAG.getTargetConstant((int16_t)Addr, DL, MVT::i32);
17245         Base =
17246             DAG.getTargetConstant((Addr - (int16_t)Addr) >> 16, DL, MVT::i32);
17247         uint32_t LIS = CNType == MVT::i32 ? PPC::LIS : PPC::LIS8;
17248         Base = SDValue(DAG.getMachineNode(LIS, DL, CNType, Base), 0);
17249         break;
17250       }
17251     }
    // Otherwise, the PPC::MOF_NotAddNorCst flag is set. The load/store is
    // non-foldable.
17253     Disp = DAG.getTargetConstant(0, DL, getPointerTy(DAG.getDataLayout()));
17254     if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
17255       Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
17256       fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
17257     } else
17258       Base = N;
17259     break;
17260   }
17261   case PPC::AM_None:
17262     break;
17263   default: { // By default, X-Form is always available to be selected.
    // When a frame index is not aligned, we also match by X-Form.
17265     FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N);
17266     Base = FI ? N : N.getOperand(1);
17267     Disp = FI ? DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
17268                                 N.getValueType())
17269               : N.getOperand(0);
17270     break;
17271   }
17272   }
17273   return Mode;
17274 }
17275