1 //===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the PPCISelLowering class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "PPCISelLowering.h"
14 #include "MCTargetDesc/PPCPredicates.h"
15 #include "PPC.h"
16 #include "PPCCCState.h"
17 #include "PPCCallingConv.h"
18 #include "PPCFrameLowering.h"
19 #include "PPCInstrInfo.h"
20 #include "PPCMachineFunctionInfo.h"
21 #include "PPCPerfectShuffle.h"
22 #include "PPCRegisterInfo.h"
23 #include "PPCSubtarget.h"
24 #include "PPCTargetMachine.h"
25 #include "llvm/ADT/APFloat.h"
26 #include "llvm/ADT/APInt.h"
27 #include "llvm/ADT/ArrayRef.h"
28 #include "llvm/ADT/DenseMap.h"
29 #include "llvm/ADT/None.h"
30 #include "llvm/ADT/STLExtras.h"
31 #include "llvm/ADT/SmallPtrSet.h"
32 #include "llvm/ADT/SmallSet.h"
33 #include "llvm/ADT/SmallVector.h"
34 #include "llvm/ADT/Statistic.h"
35 #include "llvm/ADT/StringRef.h"
36 #include "llvm/ADT/StringSwitch.h"
37 #include "llvm/CodeGen/CallingConvLower.h"
38 #include "llvm/CodeGen/ISDOpcodes.h"
39 #include "llvm/CodeGen/MachineBasicBlock.h"
40 #include "llvm/CodeGen/MachineFrameInfo.h"
41 #include "llvm/CodeGen/MachineFunction.h"
42 #include "llvm/CodeGen/MachineInstr.h"
43 #include "llvm/CodeGen/MachineInstrBuilder.h"
44 #include "llvm/CodeGen/MachineJumpTableInfo.h"
45 #include "llvm/CodeGen/MachineLoopInfo.h"
46 #include "llvm/CodeGen/MachineMemOperand.h"
47 #include "llvm/CodeGen/MachineModuleInfo.h"
48 #include "llvm/CodeGen/MachineOperand.h"
49 #include "llvm/CodeGen/MachineRegisterInfo.h"
50 #include "llvm/CodeGen/RuntimeLibcalls.h"
51 #include "llvm/CodeGen/SelectionDAG.h"
52 #include "llvm/CodeGen/SelectionDAGNodes.h"
53 #include "llvm/CodeGen/TargetInstrInfo.h"
54 #include "llvm/CodeGen/TargetLowering.h"
55 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
56 #include "llvm/CodeGen/TargetRegisterInfo.h"
57 #include "llvm/CodeGen/ValueTypes.h"
58 #include "llvm/IR/CallingConv.h"
59 #include "llvm/IR/Constant.h"
60 #include "llvm/IR/Constants.h"
61 #include "llvm/IR/DataLayout.h"
62 #include "llvm/IR/DebugLoc.h"
63 #include "llvm/IR/DerivedTypes.h"
64 #include "llvm/IR/Function.h"
65 #include "llvm/IR/GlobalValue.h"
66 #include "llvm/IR/IRBuilder.h"
67 #include "llvm/IR/Instructions.h"
68 #include "llvm/IR/Intrinsics.h"
69 #include "llvm/IR/IntrinsicsPowerPC.h"
70 #include "llvm/IR/Module.h"
71 #include "llvm/IR/Type.h"
72 #include "llvm/IR/Use.h"
73 #include "llvm/IR/Value.h"
74 #include "llvm/MC/MCContext.h"
75 #include "llvm/MC/MCExpr.h"
76 #include "llvm/MC/MCRegisterInfo.h"
77 #include "llvm/MC/MCSectionXCOFF.h"
78 #include "llvm/MC/MCSymbolXCOFF.h"
79 #include "llvm/Support/AtomicOrdering.h"
80 #include "llvm/Support/BranchProbability.h"
81 #include "llvm/Support/Casting.h"
82 #include "llvm/Support/CodeGen.h"
83 #include "llvm/Support/CommandLine.h"
84 #include "llvm/Support/Compiler.h"
85 #include "llvm/Support/Debug.h"
86 #include "llvm/Support/ErrorHandling.h"
87 #include "llvm/Support/Format.h"
88 #include "llvm/Support/KnownBits.h"
89 #include "llvm/Support/MachineValueType.h"
90 #include "llvm/Support/MathExtras.h"
91 #include "llvm/Support/raw_ostream.h"
92 #include "llvm/Target/TargetMachine.h"
93 #include "llvm/Target/TargetOptions.h"
94 #include <algorithm>
95 #include <cassert>
96 #include <cstdint>
97 #include <iterator>
98 #include <list>
99 #include <utility>
100 #include <vector>
101 
102 using namespace llvm;
103 
104 #define DEBUG_TYPE "ppc-lowering"
105 
106 static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
107 cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);
108 
109 static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
110 cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden);
111 
112 static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
113 cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);
114 
115 static cl::opt<bool> DisableSCO("disable-ppc-sco",
116 cl::desc("disable sibling call optimization on ppc"), cl::Hidden);
117 
118 static cl::opt<bool> DisableInnermostLoopAlign32("disable-ppc-innermost-loop-align32",
119 cl::desc("don't always align innermost loop to 32 bytes on ppc"), cl::Hidden);
120 
121 static cl::opt<bool> UseAbsoluteJumpTables("ppc-use-absolute-jumptables",
122 cl::desc("use absolute jump tables on ppc"), cl::Hidden);
123 
124 // TODO - Remove this option if soft fp128 has been fully supported .
125 static cl::opt<bool>
126     EnableSoftFP128("enable-soft-fp128",
127                     cl::desc("temp option to enable soft fp128"), cl::Hidden);
128 
129 STATISTIC(NumTailCalls, "Number of tail calls");
130 STATISTIC(NumSiblingCalls, "Number of sibling calls");
131 STATISTIC(ShufflesHandledWithVPERM, "Number of shuffles lowered to a VPERM");
132 STATISTIC(NumDynamicAllocaProbed, "Number of dynamic stack allocation probed");
133 
134 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int);
135 
136 static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl);
137 
138 // FIXME: Remove this once the bug has been fixed!
139 extern cl::opt<bool> ANDIGlueBug;
140 
141 PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
142                                      const PPCSubtarget &STI)
143     : TargetLowering(TM), Subtarget(STI) {
144   // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
145   // arguments are at least 4/8 bytes aligned.
146   bool isPPC64 = Subtarget.isPPC64();
147   setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4));
148 
149   // Set up the register classes.
150   addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
151   if (!useSoftFloat()) {
152     if (hasSPE()) {
153       addRegisterClass(MVT::f32, &PPC::GPRCRegClass);
154       // EFPU2 APU only supports f32
155       if (!Subtarget.hasEFPU2())
156         addRegisterClass(MVT::f64, &PPC::SPERCRegClass);
157     } else {
158       addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
159       addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
160     }
161   }
162 
163   // Match BITREVERSE to customized fast code sequence in the td file.
164   setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
165   setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
166 
167   // Sub-word ATOMIC_CMP_SWAP need to ensure that the input is zero-extended.
168   setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
169 
170   // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
171   for (MVT VT : MVT::integer_valuetypes()) {
172     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
173     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
174   }
175 
176   if (Subtarget.isISA3_0()) {
177     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Legal);
178     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Legal);
179     setTruncStoreAction(MVT::f64, MVT::f16, Legal);
180     setTruncStoreAction(MVT::f32, MVT::f16, Legal);
181   } else {
182     // No extending loads from f16 or HW conversions back and forth.
183     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
184     setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
185     setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
186     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
187     setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
188     setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
189     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
190     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
191   }
192 
193   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
194 
195   // PowerPC has pre-inc load and store's.
196   setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
197   setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
198   setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
199   setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
200   setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
201   setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
202   setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
203   setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
204   setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
205   setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
206   if (!Subtarget.hasSPE()) {
207     setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
208     setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
209     setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
210     setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
211   }
212 
213   // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
214   const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
215   for (MVT VT : ScalarIntVTs) {
216     setOperationAction(ISD::ADDC, VT, Legal);
217     setOperationAction(ISD::ADDE, VT, Legal);
218     setOperationAction(ISD::SUBC, VT, Legal);
219     setOperationAction(ISD::SUBE, VT, Legal);
220   }
221 
222   if (Subtarget.useCRBits()) {
223     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
224 
225     if (isPPC64 || Subtarget.hasFPCVT()) {
226       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Promote);
227       AddPromotedToType(ISD::STRICT_SINT_TO_FP, MVT::i1,
228                         isPPC64 ? MVT::i64 : MVT::i32);
229       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Promote);
230       AddPromotedToType(ISD::STRICT_UINT_TO_FP, MVT::i1,
231                         isPPC64 ? MVT::i64 : MVT::i32);
232 
233       setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
234       AddPromotedToType (ISD::SINT_TO_FP, MVT::i1,
235                          isPPC64 ? MVT::i64 : MVT::i32);
236       setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
237       AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
238                         isPPC64 ? MVT::i64 : MVT::i32);
239 
240       setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i1, Promote);
241       AddPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::i1,
242                         isPPC64 ? MVT::i64 : MVT::i32);
243       setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i1, Promote);
244       AddPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::i1,
245                         isPPC64 ? MVT::i64 : MVT::i32);
246 
247       setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
248       AddPromotedToType(ISD::FP_TO_SINT, MVT::i1,
249                         isPPC64 ? MVT::i64 : MVT::i32);
250       setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
251       AddPromotedToType(ISD::FP_TO_UINT, MVT::i1,
252                         isPPC64 ? MVT::i64 : MVT::i32);
253     } else {
254       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Custom);
255       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Custom);
256       setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
257       setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
258     }
259 
260     // PowerPC does not support direct load/store of condition registers.
261     setOperationAction(ISD::LOAD, MVT::i1, Custom);
262     setOperationAction(ISD::STORE, MVT::i1, Custom);
263 
264     // FIXME: Remove this once the ANDI glue bug is fixed:
265     if (ANDIGlueBug)
266       setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
267 
268     for (MVT VT : MVT::integer_valuetypes()) {
269       setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
270       setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
271       setTruncStoreAction(VT, MVT::i1, Expand);
272     }
273 
274     addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
275   }
276 
277   // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
278   // PPC (the libcall is not available).
279   setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
280   setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);
281   setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::ppcf128, Custom);
282   setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::ppcf128, Custom);
283 
284   // We do not currently implement these libm ops for PowerPC.
285   setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
286   setOperationAction(ISD::FCEIL,  MVT::ppcf128, Expand);
287   setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
288   setOperationAction(ISD::FRINT,  MVT::ppcf128, Expand);
289   setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
290   setOperationAction(ISD::FREM, MVT::ppcf128, Expand);
291 
292   // PowerPC has no SREM/UREM instructions unless we are on P9
293   // On P9 we may use a hardware instruction to compute the remainder.
294   // When the result of both the remainder and the division is required it is
295   // more efficient to compute the remainder from the result of the division
296   // rather than use the remainder instruction. The instructions are legalized
297   // directly because the DivRemPairsPass performs the transformation at the IR
298   // level.
299   if (Subtarget.isISA3_0()) {
300     setOperationAction(ISD::SREM, MVT::i32, Legal);
301     setOperationAction(ISD::UREM, MVT::i32, Legal);
302     setOperationAction(ISD::SREM, MVT::i64, Legal);
303     setOperationAction(ISD::UREM, MVT::i64, Legal);
304   } else {
305     setOperationAction(ISD::SREM, MVT::i32, Expand);
306     setOperationAction(ISD::UREM, MVT::i32, Expand);
307     setOperationAction(ISD::SREM, MVT::i64, Expand);
308     setOperationAction(ISD::UREM, MVT::i64, Expand);
309   }
310 
311   // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
312   setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
313   setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
314   setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
315   setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
316   setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
317   setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
318   setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
319   setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
320 
321   // Handle constrained floating-point operations of scalar.
322   // TODO: Handle SPE specific operation.
323   setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
324   setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
325   setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
326   setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
327   setOperationAction(ISD::STRICT_FMA, MVT::f32, Legal);
328   setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
329 
330   setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
331   setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
332   setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
333   setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);
334   setOperationAction(ISD::STRICT_FMA, MVT::f64, Legal);
335   if (Subtarget.hasVSX()) {
336     setOperationAction(ISD::STRICT_FRINT, MVT::f32, Legal);
337     setOperationAction(ISD::STRICT_FRINT, MVT::f64, Legal);
338   }
339 
340   if (Subtarget.hasFSQRT()) {
341     setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
342     setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);
343   }
344 
345   if (Subtarget.hasFPRND()) {
346     setOperationAction(ISD::STRICT_FFLOOR, MVT::f32, Legal);
347     setOperationAction(ISD::STRICT_FCEIL,  MVT::f32, Legal);
348     setOperationAction(ISD::STRICT_FTRUNC, MVT::f32, Legal);
349     setOperationAction(ISD::STRICT_FROUND, MVT::f32, Legal);
350 
351     setOperationAction(ISD::STRICT_FFLOOR, MVT::f64, Legal);
352     setOperationAction(ISD::STRICT_FCEIL,  MVT::f64, Legal);
353     setOperationAction(ISD::STRICT_FTRUNC, MVT::f64, Legal);
354     setOperationAction(ISD::STRICT_FROUND, MVT::f64, Legal);
355   }
356 
357   // We don't support sin/cos/sqrt/fmod/pow
358   setOperationAction(ISD::FSIN , MVT::f64, Expand);
359   setOperationAction(ISD::FCOS , MVT::f64, Expand);
360   setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
361   setOperationAction(ISD::FREM , MVT::f64, Expand);
362   setOperationAction(ISD::FPOW , MVT::f64, Expand);
363   setOperationAction(ISD::FSIN , MVT::f32, Expand);
364   setOperationAction(ISD::FCOS , MVT::f32, Expand);
365   setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
366   setOperationAction(ISD::FREM , MVT::f32, Expand);
367   setOperationAction(ISD::FPOW , MVT::f32, Expand);
368   if (Subtarget.hasSPE()) {
369     setOperationAction(ISD::FMA  , MVT::f64, Expand);
370     setOperationAction(ISD::FMA  , MVT::f32, Expand);
371   } else {
372     setOperationAction(ISD::FMA  , MVT::f64, Legal);
373     setOperationAction(ISD::FMA  , MVT::f32, Legal);
374   }
375 
376   if (Subtarget.hasSPE())
377     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
378 
379   setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
380 
381   // If we're enabling GP optimizations, use hardware square root
382   if (!Subtarget.hasFSQRT() &&
383       !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
384         Subtarget.hasFRE()))
385     setOperationAction(ISD::FSQRT, MVT::f64, Expand);
386 
387   if (!Subtarget.hasFSQRT() &&
388       !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
389         Subtarget.hasFRES()))
390     setOperationAction(ISD::FSQRT, MVT::f32, Expand);
391 
392   if (Subtarget.hasFCPSGN()) {
393     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
394     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
395   } else {
396     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
397     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
398   }
399 
400   if (Subtarget.hasFPRND()) {
401     setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
402     setOperationAction(ISD::FCEIL,  MVT::f64, Legal);
403     setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
404     setOperationAction(ISD::FROUND, MVT::f64, Legal);
405 
406     setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
407     setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
408     setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
409     setOperationAction(ISD::FROUND, MVT::f32, Legal);
410   }
411 
412   // PowerPC does not have BSWAP, but we can use vector BSWAP instruction xxbrd
413   // to speed up scalar BSWAP64.
414   // CTPOP or CTTZ were introduced in P8/P9 respectively
415   setOperationAction(ISD::BSWAP, MVT::i32  , Expand);
416   if (Subtarget.hasP9Vector())
417     setOperationAction(ISD::BSWAP, MVT::i64  , Custom);
418   else
419     setOperationAction(ISD::BSWAP, MVT::i64  , Expand);
420   if (Subtarget.isISA3_0()) {
421     setOperationAction(ISD::CTTZ , MVT::i32  , Legal);
422     setOperationAction(ISD::CTTZ , MVT::i64  , Legal);
423   } else {
424     setOperationAction(ISD::CTTZ , MVT::i32  , Expand);
425     setOperationAction(ISD::CTTZ , MVT::i64  , Expand);
426   }
427 
428   if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
429     setOperationAction(ISD::CTPOP, MVT::i32  , Legal);
430     setOperationAction(ISD::CTPOP, MVT::i64  , Legal);
431   } else {
432     setOperationAction(ISD::CTPOP, MVT::i32  , Expand);
433     setOperationAction(ISD::CTPOP, MVT::i64  , Expand);
434   }
435 
436   // PowerPC does not have ROTR
437   setOperationAction(ISD::ROTR, MVT::i32   , Expand);
438   setOperationAction(ISD::ROTR, MVT::i64   , Expand);
439 
440   if (!Subtarget.useCRBits()) {
441     // PowerPC does not have Select
442     setOperationAction(ISD::SELECT, MVT::i32, Expand);
443     setOperationAction(ISD::SELECT, MVT::i64, Expand);
444     setOperationAction(ISD::SELECT, MVT::f32, Expand);
445     setOperationAction(ISD::SELECT, MVT::f64, Expand);
446   }
447 
448   // PowerPC wants to turn select_cc of FP into fsel when possible.
449   setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
450   setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
451 
452   // PowerPC wants to optimize integer setcc a bit
453   if (!Subtarget.useCRBits())
454     setOperationAction(ISD::SETCC, MVT::i32, Custom);
455 
456   if (Subtarget.hasFPU()) {
457     setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Legal);
458     setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Legal);
459     setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Legal);
460 
461     setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal);
462     setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal);
463     setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Legal);
464   }
465 
466   // PowerPC does not have BRCOND which requires SetCC
467   if (!Subtarget.useCRBits())
468     setOperationAction(ISD::BRCOND, MVT::Other, Expand);
469 
470   setOperationAction(ISD::BR_JT,  MVT::Other, Expand);
471 
472   if (Subtarget.hasSPE()) {
473     // SPE has built-in conversions
474     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Legal);
475     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Legal);
476     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Legal);
477     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
478     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
479     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
480   } else {
481     // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
482     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
483     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
484 
485     // PowerPC does not have [U|S]INT_TO_FP
486     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Expand);
487     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Expand);
488     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
489     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
490   }
491 
492   if (Subtarget.hasDirectMove() && isPPC64) {
493     setOperationAction(ISD::BITCAST, MVT::f32, Legal);
494     setOperationAction(ISD::BITCAST, MVT::i32, Legal);
495     setOperationAction(ISD::BITCAST, MVT::i64, Legal);
496     setOperationAction(ISD::BITCAST, MVT::f64, Legal);
497     if (TM.Options.UnsafeFPMath) {
498       setOperationAction(ISD::LRINT, MVT::f64, Legal);
499       setOperationAction(ISD::LRINT, MVT::f32, Legal);
500       setOperationAction(ISD::LLRINT, MVT::f64, Legal);
501       setOperationAction(ISD::LLRINT, MVT::f32, Legal);
502       setOperationAction(ISD::LROUND, MVT::f64, Legal);
503       setOperationAction(ISD::LROUND, MVT::f32, Legal);
504       setOperationAction(ISD::LLROUND, MVT::f64, Legal);
505       setOperationAction(ISD::LLROUND, MVT::f32, Legal);
506     }
507   } else {
508     setOperationAction(ISD::BITCAST, MVT::f32, Expand);
509     setOperationAction(ISD::BITCAST, MVT::i32, Expand);
510     setOperationAction(ISD::BITCAST, MVT::i64, Expand);
511     setOperationAction(ISD::BITCAST, MVT::f64, Expand);
512   }
513 
514   // We cannot sextinreg(i1).  Expand to shifts.
515   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
516 
517   // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
518   // SjLj exception handling but a light-weight setjmp/longjmp replacement to
519   // support continuation, user-level threading, and etc.. As a result, no
520   // other SjLj exception interfaces are implemented and please don't build
521   // your own exception handling based on them.
522   // LLVM/Clang supports zero-cost DWARF exception handling.
523   setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
524   setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
525 
526   // We want to legalize GlobalAddress and ConstantPool nodes into the
527   // appropriate instructions to materialize the address.
528   setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
529   setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
530   setOperationAction(ISD::BlockAddress,  MVT::i32, Custom);
531   setOperationAction(ISD::ConstantPool,  MVT::i32, Custom);
532   setOperationAction(ISD::JumpTable,     MVT::i32, Custom);
533   setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
534   setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
535   setOperationAction(ISD::BlockAddress,  MVT::i64, Custom);
536   setOperationAction(ISD::ConstantPool,  MVT::i64, Custom);
537   setOperationAction(ISD::JumpTable,     MVT::i64, Custom);
538 
539   // TRAP is legal.
540   setOperationAction(ISD::TRAP, MVT::Other, Legal);
541 
542   // TRAMPOLINE is custom lowered.
543   setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
544   setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
545 
546   // VASTART needs to be custom lowered to use the VarArgsFrameIndex
547   setOperationAction(ISD::VASTART           , MVT::Other, Custom);
548 
549   if (Subtarget.is64BitELFABI()) {
550     // VAARG always uses double-word chunks, so promote anything smaller.
551     setOperationAction(ISD::VAARG, MVT::i1, Promote);
552     AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
553     setOperationAction(ISD::VAARG, MVT::i8, Promote);
554     AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
555     setOperationAction(ISD::VAARG, MVT::i16, Promote);
556     AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
557     setOperationAction(ISD::VAARG, MVT::i32, Promote);
558     AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
559     setOperationAction(ISD::VAARG, MVT::Other, Expand);
560   } else if (Subtarget.is32BitELFABI()) {
561     // VAARG is custom lowered with the 32-bit SVR4 ABI.
562     setOperationAction(ISD::VAARG, MVT::Other, Custom);
563     setOperationAction(ISD::VAARG, MVT::i64, Custom);
564   } else
565     setOperationAction(ISD::VAARG, MVT::Other, Expand);
566 
567   // VACOPY is custom lowered with the 32-bit SVR4 ABI.
568   if (Subtarget.is32BitELFABI())
569     setOperationAction(ISD::VACOPY            , MVT::Other, Custom);
570   else
571     setOperationAction(ISD::VACOPY            , MVT::Other, Expand);
572 
573   // Use the default implementation.
574   setOperationAction(ISD::VAEND             , MVT::Other, Expand);
575   setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
576   setOperationAction(ISD::STACKRESTORE      , MVT::Other, Custom);
577   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
578   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64  , Custom);
579   setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
580   setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
581   setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
582   setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);
583 
584   // We want to custom lower some of our intrinsics.
585   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
586 
587   // To handle counter-based loop conditions.
588   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);
589 
590   setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
591   setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
592   setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
593   setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
594 
595   // Comparisons that require checking two conditions.
596   if (Subtarget.hasSPE()) {
597     setCondCodeAction(ISD::SETO, MVT::f32, Expand);
598     setCondCodeAction(ISD::SETO, MVT::f64, Expand);
599     setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
600     setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
601   }
602   setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
603   setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
604   setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
605   setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
606   setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
607   setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
608   setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
609   setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
610   setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
611   setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
612   setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
613   setCondCodeAction(ISD::SETONE, MVT::f64, Expand);
614 
615   setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
616   setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
617 
618   if (Subtarget.has64BitSupport()) {
619     // They also have instructions for converting between i64 and fp.
620     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
621     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Expand);
622     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
623     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Expand);
624     setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
625     setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
626     setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
627     setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
628     // This is just the low 32 bits of a (signed) fp->i64 conversion.
629     // We cannot do this with Promote because i64 is not a legal type.
630     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
631     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
632 
633     if (Subtarget.hasLFIWAX() || Subtarget.isPPC64()) {
634       setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
635       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
636     }
637   } else {
638     // PowerPC does not have FP_TO_UINT on 32-bit implementations.
639     if (Subtarget.hasSPE()) {
640       setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Legal);
641       setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
642     } else {
643       setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Expand);
644       setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
645     }
646   }
647 
648   // With the instructions enabled under FPCVT, we can do everything.
649   if (Subtarget.hasFPCVT()) {
650     if (Subtarget.has64BitSupport()) {
651       setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
652       setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
653       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
654       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);
655       setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
656       setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
657       setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
658       setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
659     }
660 
661     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
662     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
663     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
664     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
665     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
666     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
667     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
668     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
669   }
670 
671   if (Subtarget.use64BitRegs()) {
672     // 64-bit PowerPC implementations can support i64 types directly
673     addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
674     // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
675     setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
676     // 64-bit PowerPC wants to expand i128 shifts itself.
677     setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
678     setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
679     setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
680   } else {
681     // 32-bit PowerPC wants to expand i64 shifts itself.
682     setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
683     setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
684     setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
685   }
686 
687   // PowerPC has better expansions for funnel shifts than the generic
688   // TargetLowering::expandFunnelShift.
689   if (Subtarget.has64BitSupport()) {
690     setOperationAction(ISD::FSHL, MVT::i64, Custom);
691     setOperationAction(ISD::FSHR, MVT::i64, Custom);
692   }
693   setOperationAction(ISD::FSHL, MVT::i32, Custom);
694   setOperationAction(ISD::FSHR, MVT::i32, Custom);
695 
696   if (Subtarget.hasVSX()) {
697     setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
698     setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
699     setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
700     setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
701   }
702 
703   if (Subtarget.hasAltivec()) {
704     for (MVT VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
705       setOperationAction(ISD::SADDSAT, VT, Legal);
706       setOperationAction(ISD::SSUBSAT, VT, Legal);
707       setOperationAction(ISD::UADDSAT, VT, Legal);
708       setOperationAction(ISD::USUBSAT, VT, Legal);
709     }
710     // First set operation action for all vector types to expand. Then we
711     // will selectively turn on ones that can be effectively codegen'd.
712     for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
713       // add/sub are legal for all supported vector VT's.
714       setOperationAction(ISD::ADD, VT, Legal);
715       setOperationAction(ISD::SUB, VT, Legal);
716 
717       // For v2i64, these are only valid with P8Vector. This is corrected after
718       // the loop.
719       if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) {
720         setOperationAction(ISD::SMAX, VT, Legal);
721         setOperationAction(ISD::SMIN, VT, Legal);
722         setOperationAction(ISD::UMAX, VT, Legal);
723         setOperationAction(ISD::UMIN, VT, Legal);
724       }
725       else {
726         setOperationAction(ISD::SMAX, VT, Expand);
727         setOperationAction(ISD::SMIN, VT, Expand);
728         setOperationAction(ISD::UMAX, VT, Expand);
729         setOperationAction(ISD::UMIN, VT, Expand);
730       }
731 
732       if (Subtarget.hasVSX()) {
733         setOperationAction(ISD::FMAXNUM, VT, Legal);
734         setOperationAction(ISD::FMINNUM, VT, Legal);
735       }
736 
737       // Vector instructions introduced in P8
738       if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
739         setOperationAction(ISD::CTPOP, VT, Legal);
740         setOperationAction(ISD::CTLZ, VT, Legal);
741       }
742       else {
743         setOperationAction(ISD::CTPOP, VT, Expand);
744         setOperationAction(ISD::CTLZ, VT, Expand);
745       }
746 
747       // Vector instructions introduced in P9
748       if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
749         setOperationAction(ISD::CTTZ, VT, Legal);
750       else
751         setOperationAction(ISD::CTTZ, VT, Expand);
752 
753       // We promote all shuffles to v16i8.
754       setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
755       AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);
756 
757       // We promote all non-typed operations to v4i32.
758       setOperationAction(ISD::AND   , VT, Promote);
759       AddPromotedToType (ISD::AND   , VT, MVT::v4i32);
760       setOperationAction(ISD::OR    , VT, Promote);
761       AddPromotedToType (ISD::OR    , VT, MVT::v4i32);
762       setOperationAction(ISD::XOR   , VT, Promote);
763       AddPromotedToType (ISD::XOR   , VT, MVT::v4i32);
764       setOperationAction(ISD::LOAD  , VT, Promote);
765       AddPromotedToType (ISD::LOAD  , VT, MVT::v4i32);
766       setOperationAction(ISD::SELECT, VT, Promote);
767       AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
768       setOperationAction(ISD::VSELECT, VT, Legal);
769       setOperationAction(ISD::SELECT_CC, VT, Promote);
770       AddPromotedToType (ISD::SELECT_CC, VT, MVT::v4i32);
771       setOperationAction(ISD::STORE, VT, Promote);
772       AddPromotedToType (ISD::STORE, VT, MVT::v4i32);
773 
774       // No other operations are legal.
775       setOperationAction(ISD::MUL , VT, Expand);
776       setOperationAction(ISD::SDIV, VT, Expand);
777       setOperationAction(ISD::SREM, VT, Expand);
778       setOperationAction(ISD::UDIV, VT, Expand);
779       setOperationAction(ISD::UREM, VT, Expand);
780       setOperationAction(ISD::FDIV, VT, Expand);
781       setOperationAction(ISD::FREM, VT, Expand);
782       setOperationAction(ISD::FNEG, VT, Expand);
783       setOperationAction(ISD::FSQRT, VT, Expand);
784       setOperationAction(ISD::FLOG, VT, Expand);
785       setOperationAction(ISD::FLOG10, VT, Expand);
786       setOperationAction(ISD::FLOG2, VT, Expand);
787       setOperationAction(ISD::FEXP, VT, Expand);
788       setOperationAction(ISD::FEXP2, VT, Expand);
789       setOperationAction(ISD::FSIN, VT, Expand);
790       setOperationAction(ISD::FCOS, VT, Expand);
791       setOperationAction(ISD::FABS, VT, Expand);
792       setOperationAction(ISD::FFLOOR, VT, Expand);
793       setOperationAction(ISD::FCEIL,  VT, Expand);
794       setOperationAction(ISD::FTRUNC, VT, Expand);
795       setOperationAction(ISD::FRINT,  VT, Expand);
796       setOperationAction(ISD::FNEARBYINT, VT, Expand);
797       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
798       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
799       setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
800       setOperationAction(ISD::MULHU, VT, Expand);
801       setOperationAction(ISD::MULHS, VT, Expand);
802       setOperationAction(ISD::UMUL_LOHI, VT, Expand);
803       setOperationAction(ISD::SMUL_LOHI, VT, Expand);
804       setOperationAction(ISD::UDIVREM, VT, Expand);
805       setOperationAction(ISD::SDIVREM, VT, Expand);
806       setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
807       setOperationAction(ISD::FPOW, VT, Expand);
808       setOperationAction(ISD::BSWAP, VT, Expand);
809       setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
810       setOperationAction(ISD::ROTL, VT, Expand);
811       setOperationAction(ISD::ROTR, VT, Expand);
812 
813       for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
814         setTruncStoreAction(VT, InnerVT, Expand);
815         setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
816         setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
817         setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
818       }
819     }
820     setOperationAction(ISD::SELECT_CC, MVT::v4i32, Expand);
821     if (!Subtarget.hasP8Vector()) {
822       setOperationAction(ISD::SMAX, MVT::v2i64, Expand);
823       setOperationAction(ISD::SMIN, MVT::v2i64, Expand);
824       setOperationAction(ISD::UMAX, MVT::v2i64, Expand);
825       setOperationAction(ISD::UMIN, MVT::v2i64, Expand);
826     }
827 
828     // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
829     // with merges, splats, etc.
830     setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
831 
832     // Vector truncates to sub-word integer that fit in an Altivec/VSX register
833     // are cheap, so handle them before they get expanded to scalar.
834     setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
835     setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
836     setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
837     setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
838     setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);
839 
840     setOperationAction(ISD::AND   , MVT::v4i32, Legal);
841     setOperationAction(ISD::OR    , MVT::v4i32, Legal);
842     setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
843     setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
844     setOperationAction(ISD::SELECT, MVT::v4i32,
845                        Subtarget.useCRBits() ? Legal : Expand);
846     setOperationAction(ISD::STORE , MVT::v4i32, Legal);
847     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal);
848     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32, Legal);
849     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i32, Legal);
850     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Legal);
851     setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
852     setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
853     setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
854     setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
855     setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
856     setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
857     setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
858     setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
859 
860     // Custom lowering ROTL v1i128 to VECTOR_SHUFFLE v16i8.
861     setOperationAction(ISD::ROTL, MVT::v1i128, Custom);
862     // With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w).
863     if (Subtarget.hasAltivec())
864       for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})
865         setOperationAction(ISD::ROTL, VT, Legal);
866     // With hasP8Altivec set, we can lower ISD::ROTL to vrld.
867     if (Subtarget.hasP8Altivec())
868       setOperationAction(ISD::ROTL, MVT::v2i64, Legal);
869 
870     addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
871     addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
872     addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
873     addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);
874 
875     setOperationAction(ISD::MUL, MVT::v4f32, Legal);
876     setOperationAction(ISD::FMA, MVT::v4f32, Legal);
877 
878     if (Subtarget.hasVSX()) {
879       setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
880       setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
881     }
882 
883     if (Subtarget.hasP8Altivec())
884       setOperationAction(ISD::MUL, MVT::v4i32, Legal);
885     else
886       setOperationAction(ISD::MUL, MVT::v4i32, Custom);
887 
888     if (Subtarget.isISA3_1()) {
889       setOperationAction(ISD::MUL, MVT::v2i64, Legal);
890       setOperationAction(ISD::MULHS, MVT::v2i64, Legal);
891       setOperationAction(ISD::MULHU, MVT::v2i64, Legal);
892       setOperationAction(ISD::MULHS, MVT::v4i32, Legal);
893       setOperationAction(ISD::MULHU, MVT::v4i32, Legal);
894       setOperationAction(ISD::UDIV, MVT::v2i64, Legal);
895       setOperationAction(ISD::SDIV, MVT::v2i64, Legal);
896       setOperationAction(ISD::UDIV, MVT::v4i32, Legal);
897       setOperationAction(ISD::SDIV, MVT::v4i32, Legal);
898       setOperationAction(ISD::UREM, MVT::v2i64, Legal);
899       setOperationAction(ISD::SREM, MVT::v2i64, Legal);
900       setOperationAction(ISD::UREM, MVT::v4i32, Legal);
901       setOperationAction(ISD::SREM, MVT::v4i32, Legal);
902       setOperationAction(ISD::UREM, MVT::v1i128, Legal);
903       setOperationAction(ISD::SREM, MVT::v1i128, Legal);
904       setOperationAction(ISD::UDIV, MVT::v1i128, Legal);
905       setOperationAction(ISD::SDIV, MVT::v1i128, Legal);
906       setOperationAction(ISD::ROTL, MVT::v1i128, Legal);
907     }
908 
909     setOperationAction(ISD::MUL, MVT::v8i16, Legal);
910     setOperationAction(ISD::MUL, MVT::v16i8, Custom);
911 
912     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
913     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);
914 
915     setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
916     setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
917     setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
918     setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
919 
920     // Altivec does not contain unordered floating-point compare instructions
921     setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
922     setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
923     setCondCodeAction(ISD::SETO,   MVT::v4f32, Expand);
924     setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);
925 
926     if (Subtarget.hasVSX()) {
927       setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
928       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
929       if (Subtarget.hasP8Vector()) {
930         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
931         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
932       }
933       if (Subtarget.hasDirectMove() && isPPC64) {
934         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
935         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
936         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
937         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
938         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
939         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
940         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
941         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
942       }
943       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
944 
945       // The nearbyint variants are not allowed to raise the inexact exception
946       // so we can only code-gen them with unsafe math.
947       if (TM.Options.UnsafeFPMath) {
948         setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
949         setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
950       }
951 
952       setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
953       setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
954       setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
955       setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
956       setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
957       setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
958       setOperationAction(ISD::FROUND, MVT::f64, Legal);
959       setOperationAction(ISD::FRINT, MVT::f64, Legal);
960 
961       setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
962       setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
963       setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
964       setOperationAction(ISD::FROUND, MVT::f32, Legal);
965       setOperationAction(ISD::FRINT, MVT::f32, Legal);
966 
967       setOperationAction(ISD::MUL, MVT::v2f64, Legal);
968       setOperationAction(ISD::FMA, MVT::v2f64, Legal);
969 
970       setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
971       setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
972 
973       // Share the Altivec comparison restrictions.
974       setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
975       setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
976       setCondCodeAction(ISD::SETO,   MVT::v2f64, Expand);
977       setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);
978 
979       setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
980       setOperationAction(ISD::STORE, MVT::v2f64, Legal);
981 
982       setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);
983 
984       if (Subtarget.hasP8Vector())
985         addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);
986 
987       addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);
988 
989       addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
990       addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
991       addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);
992 
993       if (Subtarget.hasP8Altivec()) {
994         setOperationAction(ISD::SHL, MVT::v2i64, Legal);
995         setOperationAction(ISD::SRA, MVT::v2i64, Legal);
996         setOperationAction(ISD::SRL, MVT::v2i64, Legal);
997 
998         // 128 bit shifts can be accomplished via 3 instructions for SHL and
999         // SRL, but not for SRA because of the instructions available:
1000         // VS{RL} and VS{RL}O. However due to direct move costs, it's not worth
1001         // doing
1002         setOperationAction(ISD::SHL, MVT::v1i128, Expand);
1003         setOperationAction(ISD::SRL, MVT::v1i128, Expand);
1004         setOperationAction(ISD::SRA, MVT::v1i128, Expand);
1005 
1006         setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
1007       }
1008       else {
1009         setOperationAction(ISD::SHL, MVT::v2i64, Expand);
1010         setOperationAction(ISD::SRA, MVT::v2i64, Expand);
1011         setOperationAction(ISD::SRL, MVT::v2i64, Expand);
1012 
1013         setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
1014 
1015         // VSX v2i64 only supports non-arithmetic operations.
1016         setOperationAction(ISD::ADD, MVT::v2i64, Expand);
1017         setOperationAction(ISD::SUB, MVT::v2i64, Expand);
1018       }
1019 
1020       if (Subtarget.isISA3_1())
1021         setOperationAction(ISD::SETCC, MVT::v1i128, Legal);
1022       else
1023         setOperationAction(ISD::SETCC, MVT::v1i128, Expand);
1024 
1025       setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
1026       AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64);
1027       setOperationAction(ISD::STORE, MVT::v2i64, Promote);
1028       AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64);
1029 
1030       setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);
1031 
1032       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i64, Legal);
1033       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i64, Legal);
1034       setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i64, Legal);
1035       setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i64, Legal);
1036       setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
1037       setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
1038       setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
1039       setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
1040 
1041       // Custom handling for partial vectors of integers converted to
1042       // floating point. We already have optimal handling for v2i32 through
1043       // the DAG combine, so those aren't necessary.
1044       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i8, Custom);
1045       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i8, Custom);
1046       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i16, Custom);
1047       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i16, Custom);
1048       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i8, Custom);
1049       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i8, Custom);
1050       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i16, Custom);
1051       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i16, Custom);
1052       setOperationAction(ISD::UINT_TO_FP, MVT::v2i8, Custom);
1053       setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
1054       setOperationAction(ISD::UINT_TO_FP, MVT::v2i16, Custom);
1055       setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
1056       setOperationAction(ISD::SINT_TO_FP, MVT::v2i8, Custom);
1057       setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Custom);
1058       setOperationAction(ISD::SINT_TO_FP, MVT::v2i16, Custom);
1059       setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
1060 
1061       setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
1062       setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
1063       setOperationAction(ISD::FABS, MVT::v4f32, Legal);
1064       setOperationAction(ISD::FABS, MVT::v2f64, Legal);
1065       setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
1066       setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Legal);
1067 
1068       if (Subtarget.hasDirectMove())
1069         setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
1070       setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
1071 
1072       // Handle constrained floating-point operations of vector.
1073       // The predictor is `hasVSX` because altivec instruction has
1074       // no exception but VSX vector instruction has.
1075       setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
1076       setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
1077       setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
1078       setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
1079       setOperationAction(ISD::STRICT_FMA, MVT::v4f32, Legal);
1080       setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
1081       setOperationAction(ISD::STRICT_FMAXNUM, MVT::v4f32, Legal);
1082       setOperationAction(ISD::STRICT_FMINNUM, MVT::v4f32, Legal);
1083       setOperationAction(ISD::STRICT_FRINT, MVT::v4f32, Legal);
1084       setOperationAction(ISD::STRICT_FFLOOR, MVT::v4f32, Legal);
1085       setOperationAction(ISD::STRICT_FCEIL,  MVT::v4f32, Legal);
1086       setOperationAction(ISD::STRICT_FTRUNC, MVT::v4f32, Legal);
1087       setOperationAction(ISD::STRICT_FROUND, MVT::v4f32, Legal);
1088 
1089       setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
1090       setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
1091       setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
1092       setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
1093       setOperationAction(ISD::STRICT_FMA, MVT::v2f64, Legal);
1094       setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
1095       setOperationAction(ISD::STRICT_FMAXNUM, MVT::v2f64, Legal);
1096       setOperationAction(ISD::STRICT_FMINNUM, MVT::v2f64, Legal);
1097       setOperationAction(ISD::STRICT_FRINT, MVT::v2f64, Legal);
1098       setOperationAction(ISD::STRICT_FFLOOR, MVT::v2f64, Legal);
1099       setOperationAction(ISD::STRICT_FCEIL,  MVT::v2f64, Legal);
1100       setOperationAction(ISD::STRICT_FTRUNC, MVT::v2f64, Legal);
1101       setOperationAction(ISD::STRICT_FROUND, MVT::v2f64, Legal);
1102 
1103       addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
1104     }
1105 
1106     if (Subtarget.hasP8Altivec()) {
1107       addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
1108       addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
1109     }
1110 
1111     if (Subtarget.hasP9Vector()) {
1112       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
1113       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
1114 
1115       // 128 bit shifts can be accomplished via 3 instructions for SHL and
1116       // SRL, but not for SRA because of the instructions available:
1117       // VS{RL} and VS{RL}O.
1118       setOperationAction(ISD::SHL, MVT::v1i128, Legal);
1119       setOperationAction(ISD::SRL, MVT::v1i128, Legal);
1120       setOperationAction(ISD::SRA, MVT::v1i128, Expand);
1121 
1122       addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
1123       setOperationAction(ISD::FADD, MVT::f128, Legal);
1124       setOperationAction(ISD::FSUB, MVT::f128, Legal);
1125       setOperationAction(ISD::FDIV, MVT::f128, Legal);
1126       setOperationAction(ISD::FMUL, MVT::f128, Legal);
1127       setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
1128       // No extending loads to f128 on PPC.
1129       for (MVT FPT : MVT::fp_valuetypes())
1130         setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
1131       setOperationAction(ISD::FMA, MVT::f128, Legal);
1132       setCondCodeAction(ISD::SETULT, MVT::f128, Expand);
1133       setCondCodeAction(ISD::SETUGT, MVT::f128, Expand);
1134       setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand);
1135       setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
1136       setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
1137       setCondCodeAction(ISD::SETONE, MVT::f128, Expand);
1138 
1139       setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
1140       setOperationAction(ISD::FRINT, MVT::f128, Legal);
1141       setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
1142       setOperationAction(ISD::FCEIL, MVT::f128, Legal);
1143       setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
1144       setOperationAction(ISD::FROUND, MVT::f128, Legal);
1145 
1146       setOperationAction(ISD::SELECT, MVT::f128, Expand);
1147       setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
1148       setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
1149       setTruncStoreAction(MVT::f128, MVT::f64, Expand);
1150       setTruncStoreAction(MVT::f128, MVT::f32, Expand);
1151       setOperationAction(ISD::BITCAST, MVT::i128, Custom);
1152       // No implementation for these ops for PowerPC.
1153       setOperationAction(ISD::FSIN, MVT::f128, Expand);
1154       setOperationAction(ISD::FCOS, MVT::f128, Expand);
1155       setOperationAction(ISD::FPOW, MVT::f128, Expand);
1156       setOperationAction(ISD::FPOWI, MVT::f128, Expand);
1157       setOperationAction(ISD::FREM, MVT::f128, Expand);
1158 
1159       // Handle constrained floating-point operations of fp128
1160       setOperationAction(ISD::STRICT_FADD, MVT::f128, Legal);
1161       setOperationAction(ISD::STRICT_FSUB, MVT::f128, Legal);
1162       setOperationAction(ISD::STRICT_FMUL, MVT::f128, Legal);
1163       setOperationAction(ISD::STRICT_FDIV, MVT::f128, Legal);
1164       setOperationAction(ISD::STRICT_FMA, MVT::f128, Legal);
1165       setOperationAction(ISD::STRICT_FSQRT, MVT::f128, Legal);
1166       setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Legal);
1167       setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Legal);
1168       setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
1169       setOperationAction(ISD::STRICT_FRINT, MVT::f128, Legal);
1170       setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f128, Legal);
1171       setOperationAction(ISD::STRICT_FFLOOR, MVT::f128, Legal);
1172       setOperationAction(ISD::STRICT_FCEIL, MVT::f128, Legal);
1173       setOperationAction(ISD::STRICT_FTRUNC, MVT::f128, Legal);
1174       setOperationAction(ISD::STRICT_FROUND, MVT::f128, Legal);
1175       setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
1176       setOperationAction(ISD::BSWAP, MVT::v8i16, Legal);
1177       setOperationAction(ISD::BSWAP, MVT::v4i32, Legal);
1178       setOperationAction(ISD::BSWAP, MVT::v2i64, Legal);
1179       setOperationAction(ISD::BSWAP, MVT::v1i128, Legal);
1180     } else if (Subtarget.hasAltivec() && EnableSoftFP128) {
1181       addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
1182 
1183       for (MVT FPT : MVT::fp_valuetypes())
1184         setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
1185 
1186       setOperationAction(ISD::LOAD, MVT::f128, Promote);
1187       setOperationAction(ISD::STORE, MVT::f128, Promote);
1188 
1189       AddPromotedToType(ISD::LOAD, MVT::f128, MVT::v4i32);
1190       AddPromotedToType(ISD::STORE, MVT::f128, MVT::v4i32);
1191 
1192       // Set FADD/FSUB as libcall to avoid the legalizer to expand the
1193       // fp_to_uint and int_to_fp.
1194       setOperationAction(ISD::FADD, MVT::f128, LibCall);
1195       setOperationAction(ISD::FSUB, MVT::f128, LibCall);
1196 
1197       setOperationAction(ISD::FMUL, MVT::f128, Expand);
1198       setOperationAction(ISD::FDIV, MVT::f128, Expand);
1199       setOperationAction(ISD::FNEG, MVT::f128, Expand);
1200       setOperationAction(ISD::FABS, MVT::f128, Expand);
1201       setOperationAction(ISD::FSIN, MVT::f128, Expand);
1202       setOperationAction(ISD::FCOS, MVT::f128, Expand);
1203       setOperationAction(ISD::FPOW, MVT::f128, Expand);
1204       setOperationAction(ISD::FPOWI, MVT::f128, Expand);
1205       setOperationAction(ISD::FREM, MVT::f128, Expand);
1206       setOperationAction(ISD::FSQRT, MVT::f128, Expand);
1207       setOperationAction(ISD::FMA, MVT::f128, Expand);
1208       setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
1209 
1210       setTruncStoreAction(MVT::f128, MVT::f64, Expand);
1211       setTruncStoreAction(MVT::f128, MVT::f32, Expand);
1212 
1213       // Expand the fp_extend if the target type is fp128.
1214       setOperationAction(ISD::FP_EXTEND, MVT::f128, Expand);
1215       setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Expand);
1216 
1217       // Expand the fp_round if the source type is fp128.
1218       for (MVT VT : {MVT::f32, MVT::f64}) {
1219         setOperationAction(ISD::FP_ROUND, VT, Custom);
1220         setOperationAction(ISD::STRICT_FP_ROUND, VT, Custom);
1221       }
1222     }
1223 
1224     if (Subtarget.hasP9Altivec()) {
1225       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
1226       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
1227 
1228       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8,  Legal);
1229       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
1230       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
1231       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8,  Legal);
1232       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal);
1233       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
1234       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
1235     }
1236   }
1237 
1238   if (Subtarget.pairedVectorMemops()) {
1239     addRegisterClass(MVT::v256i1, &PPC::VSRpRCRegClass);
1240     setOperationAction(ISD::LOAD, MVT::v256i1, Custom);
1241     setOperationAction(ISD::STORE, MVT::v256i1, Custom);
1242   }
1243   if (Subtarget.hasMMA()) {
1244     addRegisterClass(MVT::v512i1, &PPC::UACCRCRegClass);
1245     setOperationAction(ISD::LOAD, MVT::v512i1, Custom);
1246     setOperationAction(ISD::STORE, MVT::v512i1, Custom);
1247     setOperationAction(ISD::BUILD_VECTOR, MVT::v512i1, Custom);
1248   }
1249 
1250   if (Subtarget.has64BitSupport())
1251     setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
1252 
1253   if (Subtarget.isISA3_1())
1254     setOperationAction(ISD::SRA, MVT::v1i128, Legal);
1255 
1256   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);
1257 
1258   if (!isPPC64) {
1259     setOperationAction(ISD::ATOMIC_LOAD,  MVT::i64, Expand);
1260     setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
1261   }
1262 
1263   setBooleanContents(ZeroOrOneBooleanContent);
1264 
1265   if (Subtarget.hasAltivec()) {
1266     // Altivec instructions set fields to all zeros or all ones.
1267     setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
1268   }
1269 
1270   if (!isPPC64) {
1271     // These libcalls are not available in 32-bit.
1272     setLibcallName(RTLIB::SHL_I128, nullptr);
1273     setLibcallName(RTLIB::SRL_I128, nullptr);
1274     setLibcallName(RTLIB::SRA_I128, nullptr);
1275   }
1276 
1277   if (!isPPC64)
1278     setMaxAtomicSizeInBitsSupported(32);
1279 
1280   setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);
1281 
1282   // We have target-specific dag combine patterns for the following nodes:
1283   setTargetDAGCombine(ISD::ADD);
1284   setTargetDAGCombine(ISD::SHL);
1285   setTargetDAGCombine(ISD::SRA);
1286   setTargetDAGCombine(ISD::SRL);
1287   setTargetDAGCombine(ISD::MUL);
1288   setTargetDAGCombine(ISD::FMA);
1289   setTargetDAGCombine(ISD::SINT_TO_FP);
1290   setTargetDAGCombine(ISD::BUILD_VECTOR);
1291   if (Subtarget.hasFPCVT())
1292     setTargetDAGCombine(ISD::UINT_TO_FP);
1293   setTargetDAGCombine(ISD::LOAD);
1294   setTargetDAGCombine(ISD::STORE);
1295   setTargetDAGCombine(ISD::BR_CC);
1296   if (Subtarget.useCRBits())
1297     setTargetDAGCombine(ISD::BRCOND);
1298   setTargetDAGCombine(ISD::BSWAP);
1299   setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1300   setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
1301   setTargetDAGCombine(ISD::INTRINSIC_VOID);
1302 
1303   setTargetDAGCombine(ISD::SIGN_EXTEND);
1304   setTargetDAGCombine(ISD::ZERO_EXTEND);
1305   setTargetDAGCombine(ISD::ANY_EXTEND);
1306 
1307   setTargetDAGCombine(ISD::TRUNCATE);
1308   setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1309 
1310 
1311   if (Subtarget.useCRBits()) {
1312     setTargetDAGCombine(ISD::TRUNCATE);
1313     setTargetDAGCombine(ISD::SETCC);
1314     setTargetDAGCombine(ISD::SELECT_CC);
1315   }
1316 
1317   if (Subtarget.hasP9Altivec()) {
1318     setTargetDAGCombine(ISD::ABS);
1319     setTargetDAGCombine(ISD::VSELECT);
1320   }
1321 
1322   setLibcallName(RTLIB::LOG_F128, "logf128");
1323   setLibcallName(RTLIB::LOG2_F128, "log2f128");
1324   setLibcallName(RTLIB::LOG10_F128, "log10f128");
1325   setLibcallName(RTLIB::EXP_F128, "expf128");
1326   setLibcallName(RTLIB::EXP2_F128, "exp2f128");
1327   setLibcallName(RTLIB::SIN_F128, "sinf128");
1328   setLibcallName(RTLIB::COS_F128, "cosf128");
1329   setLibcallName(RTLIB::POW_F128, "powf128");
1330   setLibcallName(RTLIB::FMIN_F128, "fminf128");
1331   setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
1332   setLibcallName(RTLIB::REM_F128, "fmodf128");
1333   setLibcallName(RTLIB::SQRT_F128, "sqrtf128");
1334   setLibcallName(RTLIB::CEIL_F128, "ceilf128");
1335   setLibcallName(RTLIB::FLOOR_F128, "floorf128");
1336   setLibcallName(RTLIB::TRUNC_F128, "truncf128");
1337   setLibcallName(RTLIB::ROUND_F128, "roundf128");
1338   setLibcallName(RTLIB::LROUND_F128, "lroundf128");
1339   setLibcallName(RTLIB::LLROUND_F128, "llroundf128");
1340   setLibcallName(RTLIB::RINT_F128, "rintf128");
1341   setLibcallName(RTLIB::LRINT_F128, "lrintf128");
1342   setLibcallName(RTLIB::LLRINT_F128, "llrintf128");
1343   setLibcallName(RTLIB::NEARBYINT_F128, "nearbyintf128");
1344   setLibcallName(RTLIB::FMA_F128, "fmaf128");
1345 
1346   // With 32 condition bits, we don't need to sink (and duplicate) compares
1347   // aggressively in CodeGenPrep.
1348   if (Subtarget.useCRBits()) {
1349     setHasMultipleConditionRegisters();
1350     setJumpIsExpensive();
1351   }
1352 
1353   setMinFunctionAlignment(Align(4));
1354 
1355   switch (Subtarget.getCPUDirective()) {
1356   default: break;
1357   case PPC::DIR_970:
1358   case PPC::DIR_A2:
1359   case PPC::DIR_E500:
1360   case PPC::DIR_E500mc:
1361   case PPC::DIR_E5500:
1362   case PPC::DIR_PWR4:
1363   case PPC::DIR_PWR5:
1364   case PPC::DIR_PWR5X:
1365   case PPC::DIR_PWR6:
1366   case PPC::DIR_PWR6X:
1367   case PPC::DIR_PWR7:
1368   case PPC::DIR_PWR8:
1369   case PPC::DIR_PWR9:
1370   case PPC::DIR_PWR10:
1371   case PPC::DIR_PWR_FUTURE:
1372     setPrefLoopAlignment(Align(16));
1373     setPrefFunctionAlignment(Align(16));
1374     break;
1375   }
1376 
1377   if (Subtarget.enableMachineScheduler())
1378     setSchedulingPreference(Sched::Source);
1379   else
1380     setSchedulingPreference(Sched::Hybrid);
1381 
1382   computeRegisterProperties(STI.getRegisterInfo());
1383 
1384   // The Freescale cores do better with aggressive inlining of memcpy and
1385   // friends. GCC uses same threshold of 128 bytes (= 32 word stores).
1386   if (Subtarget.getCPUDirective() == PPC::DIR_E500mc ||
1387       Subtarget.getCPUDirective() == PPC::DIR_E5500) {
1388     MaxStoresPerMemset = 32;
1389     MaxStoresPerMemsetOptSize = 16;
1390     MaxStoresPerMemcpy = 32;
1391     MaxStoresPerMemcpyOptSize = 8;
1392     MaxStoresPerMemmove = 32;
1393     MaxStoresPerMemmoveOptSize = 8;
1394   } else if (Subtarget.getCPUDirective() == PPC::DIR_A2) {
1395     // The A2 also benefits from (very) aggressive inlining of memcpy and
1396     // friends. The overhead of a the function call, even when warm, can be
1397     // over one hundred cycles.
1398     MaxStoresPerMemset = 128;
1399     MaxStoresPerMemcpy = 128;
1400     MaxStoresPerMemmove = 128;
1401     MaxLoadsPerMemcmp = 128;
1402   } else {
1403     MaxLoadsPerMemcmp = 8;
1404     MaxLoadsPerMemcmpOptSize = 4;
1405   }
1406 
1407   IsStrictFPEnabled = true;
1408 
1409   // Let the subtarget (CPU) decide if a predictable select is more expensive
1410   // than the corresponding branch. This information is used in CGP to decide
1411   // when to convert selects into branches.
1412   PredictableSelectIsExpensive = Subtarget.isPredictableSelectIsExpensive();
1413 }
1414 
1415 /// getMaxByValAlign - Helper for getByValTypeAlignment to determine
1416 /// the desired ByVal argument alignment.
1417 static void getMaxByValAlign(Type *Ty, Align &MaxAlign, Align MaxMaxAlign) {
1418   if (MaxAlign == MaxMaxAlign)
1419     return;
1420   if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1421     if (MaxMaxAlign >= 32 &&
1422         VTy->getPrimitiveSizeInBits().getFixedSize() >= 256)
1423       MaxAlign = Align(32);
1424     else if (VTy->getPrimitiveSizeInBits().getFixedSize() >= 128 &&
1425              MaxAlign < 16)
1426       MaxAlign = Align(16);
1427   } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1428     Align EltAlign;
1429     getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
1430     if (EltAlign > MaxAlign)
1431       MaxAlign = EltAlign;
1432   } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1433     for (auto *EltTy : STy->elements()) {
1434       Align EltAlign;
1435       getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
1436       if (EltAlign > MaxAlign)
1437         MaxAlign = EltAlign;
1438       if (MaxAlign == MaxMaxAlign)
1439         break;
1440     }
1441   }
1442 }
1443 
1444 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1445 /// function arguments in the caller parameter area.
1446 unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
1447                                                   const DataLayout &DL) const {
1448   // 16byte and wider vectors are passed on 16byte boundary.
1449   // The rest is 8 on PPC64 and 4 on PPC32 boundary.
1450   Align Alignment = Subtarget.isPPC64() ? Align(8) : Align(4);
1451   if (Subtarget.hasAltivec())
1452     getMaxByValAlign(Ty, Alignment, Align(16));
1453   return Alignment.value();
1454 }
1455 
1456 bool PPCTargetLowering::useSoftFloat() const {
1457   return Subtarget.useSoftFloat();
1458 }
1459 
1460 bool PPCTargetLowering::hasSPE() const {
1461   return Subtarget.hasSPE();
1462 }
1463 
1464 bool PPCTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
1465   return VT.isScalarInteger();
1466 }
1467 
1468 const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
1469   switch ((PPCISD::NodeType)Opcode) {
1470   case PPCISD::FIRST_NUMBER:    break;
1471   case PPCISD::FSEL:            return "PPCISD::FSEL";
1472   case PPCISD::XSMAXCDP:        return "PPCISD::XSMAXCDP";
1473   case PPCISD::XSMINCDP:        return "PPCISD::XSMINCDP";
1474   case PPCISD::FCFID:           return "PPCISD::FCFID";
1475   case PPCISD::FCFIDU:          return "PPCISD::FCFIDU";
1476   case PPCISD::FCFIDS:          return "PPCISD::FCFIDS";
1477   case PPCISD::FCFIDUS:         return "PPCISD::FCFIDUS";
1478   case PPCISD::FCTIDZ:          return "PPCISD::FCTIDZ";
1479   case PPCISD::FCTIWZ:          return "PPCISD::FCTIWZ";
1480   case PPCISD::FCTIDUZ:         return "PPCISD::FCTIDUZ";
1481   case PPCISD::FCTIWUZ:         return "PPCISD::FCTIWUZ";
1482   case PPCISD::FP_TO_UINT_IN_VSR:
1483                                 return "PPCISD::FP_TO_UINT_IN_VSR,";
1484   case PPCISD::FP_TO_SINT_IN_VSR:
1485                                 return "PPCISD::FP_TO_SINT_IN_VSR";
1486   case PPCISD::FRE:             return "PPCISD::FRE";
1487   case PPCISD::FRSQRTE:         return "PPCISD::FRSQRTE";
1488   case PPCISD::FTSQRT:
1489     return "PPCISD::FTSQRT";
1490   case PPCISD::FSQRT:
1491     return "PPCISD::FSQRT";
1492   case PPCISD::STFIWX:          return "PPCISD::STFIWX";
1493   case PPCISD::VPERM:           return "PPCISD::VPERM";
1494   case PPCISD::XXSPLT:          return "PPCISD::XXSPLT";
1495   case PPCISD::XXSPLTI_SP_TO_DP:
1496     return "PPCISD::XXSPLTI_SP_TO_DP";
1497   case PPCISD::XXSPLTI32DX:
1498     return "PPCISD::XXSPLTI32DX";
1499   case PPCISD::VECINSERT:       return "PPCISD::VECINSERT";
1500   case PPCISD::XXPERMDI:        return "PPCISD::XXPERMDI";
1501   case PPCISD::VECSHL:          return "PPCISD::VECSHL";
1502   case PPCISD::CMPB:            return "PPCISD::CMPB";
1503   case PPCISD::Hi:              return "PPCISD::Hi";
1504   case PPCISD::Lo:              return "PPCISD::Lo";
1505   case PPCISD::TOC_ENTRY:       return "PPCISD::TOC_ENTRY";
1506   case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
1507   case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
1508   case PPCISD::DYNALLOC:        return "PPCISD::DYNALLOC";
1509   case PPCISD::DYNAREAOFFSET:   return "PPCISD::DYNAREAOFFSET";
1510   case PPCISD::PROBED_ALLOCA:   return "PPCISD::PROBED_ALLOCA";
1511   case PPCISD::GlobalBaseReg:   return "PPCISD::GlobalBaseReg";
1512   case PPCISD::SRL:             return "PPCISD::SRL";
1513   case PPCISD::SRA:             return "PPCISD::SRA";
1514   case PPCISD::SHL:             return "PPCISD::SHL";
1515   case PPCISD::SRA_ADDZE:       return "PPCISD::SRA_ADDZE";
1516   case PPCISD::CALL:            return "PPCISD::CALL";
1517   case PPCISD::CALL_NOP:        return "PPCISD::CALL_NOP";
1518   case PPCISD::CALL_NOTOC:      return "PPCISD::CALL_NOTOC";
1519   case PPCISD::MTCTR:           return "PPCISD::MTCTR";
1520   case PPCISD::BCTRL:           return "PPCISD::BCTRL";
1521   case PPCISD::BCTRL_LOAD_TOC:  return "PPCISD::BCTRL_LOAD_TOC";
1522   case PPCISD::RET_FLAG:        return "PPCISD::RET_FLAG";
1523   case PPCISD::READ_TIME_BASE:  return "PPCISD::READ_TIME_BASE";
1524   case PPCISD::EH_SJLJ_SETJMP:  return "PPCISD::EH_SJLJ_SETJMP";
1525   case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
1526   case PPCISD::MFOCRF:          return "PPCISD::MFOCRF";
1527   case PPCISD::MFVSR:           return "PPCISD::MFVSR";
1528   case PPCISD::MTVSRA:          return "PPCISD::MTVSRA";
1529   case PPCISD::MTVSRZ:          return "PPCISD::MTVSRZ";
1530   case PPCISD::SINT_VEC_TO_FP:  return "PPCISD::SINT_VEC_TO_FP";
1531   case PPCISD::UINT_VEC_TO_FP:  return "PPCISD::UINT_VEC_TO_FP";
1532   case PPCISD::SCALAR_TO_VECTOR_PERMUTED:
1533     return "PPCISD::SCALAR_TO_VECTOR_PERMUTED";
1534   case PPCISD::ANDI_rec_1_EQ_BIT:
1535     return "PPCISD::ANDI_rec_1_EQ_BIT";
1536   case PPCISD::ANDI_rec_1_GT_BIT:
1537     return "PPCISD::ANDI_rec_1_GT_BIT";
1538   case PPCISD::VCMP:            return "PPCISD::VCMP";
1539   case PPCISD::VCMP_rec:        return "PPCISD::VCMP_rec";
1540   case PPCISD::LBRX:            return "PPCISD::LBRX";
1541   case PPCISD::STBRX:           return "PPCISD::STBRX";
1542   case PPCISD::LFIWAX:          return "PPCISD::LFIWAX";
1543   case PPCISD::LFIWZX:          return "PPCISD::LFIWZX";
1544   case PPCISD::LXSIZX:          return "PPCISD::LXSIZX";
1545   case PPCISD::STXSIX:          return "PPCISD::STXSIX";
1546   case PPCISD::VEXTS:           return "PPCISD::VEXTS";
1547   case PPCISD::LXVD2X:          return "PPCISD::LXVD2X";
1548   case PPCISD::STXVD2X:         return "PPCISD::STXVD2X";
1549   case PPCISD::LOAD_VEC_BE:     return "PPCISD::LOAD_VEC_BE";
1550   case PPCISD::STORE_VEC_BE:    return "PPCISD::STORE_VEC_BE";
1551   case PPCISD::ST_VSR_SCAL_INT:
1552                                 return "PPCISD::ST_VSR_SCAL_INT";
1553   case PPCISD::COND_BRANCH:     return "PPCISD::COND_BRANCH";
1554   case PPCISD::BDNZ:            return "PPCISD::BDNZ";
1555   case PPCISD::BDZ:             return "PPCISD::BDZ";
1556   case PPCISD::MFFS:            return "PPCISD::MFFS";
1557   case PPCISD::FADDRTZ:         return "PPCISD::FADDRTZ";
1558   case PPCISD::TC_RETURN:       return "PPCISD::TC_RETURN";
1559   case PPCISD::CR6SET:          return "PPCISD::CR6SET";
1560   case PPCISD::CR6UNSET:        return "PPCISD::CR6UNSET";
1561   case PPCISD::PPC32_GOT:       return "PPCISD::PPC32_GOT";
1562   case PPCISD::PPC32_PICGOT:    return "PPCISD::PPC32_PICGOT";
1563   case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
1564   case PPCISD::LD_GOT_TPREL_L:  return "PPCISD::LD_GOT_TPREL_L";
1565   case PPCISD::ADD_TLS:         return "PPCISD::ADD_TLS";
1566   case PPCISD::ADDIS_TLSGD_HA:  return "PPCISD::ADDIS_TLSGD_HA";
1567   case PPCISD::ADDI_TLSGD_L:    return "PPCISD::ADDI_TLSGD_L";
1568   case PPCISD::GET_TLS_ADDR:    return "PPCISD::GET_TLS_ADDR";
1569   case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
1570   case PPCISD::ADDIS_TLSLD_HA:  return "PPCISD::ADDIS_TLSLD_HA";
1571   case PPCISD::ADDI_TLSLD_L:    return "PPCISD::ADDI_TLSLD_L";
1572   case PPCISD::GET_TLSLD_ADDR:  return "PPCISD::GET_TLSLD_ADDR";
1573   case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
1574   case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
1575   case PPCISD::ADDI_DTPREL_L:   return "PPCISD::ADDI_DTPREL_L";
1576   case PPCISD::PADDI_DTPREL:
1577     return "PPCISD::PADDI_DTPREL";
1578   case PPCISD::VADD_SPLAT:      return "PPCISD::VADD_SPLAT";
1579   case PPCISD::SC:              return "PPCISD::SC";
1580   case PPCISD::CLRBHRB:         return "PPCISD::CLRBHRB";
1581   case PPCISD::MFBHRBE:         return "PPCISD::MFBHRBE";
1582   case PPCISD::RFEBB:           return "PPCISD::RFEBB";
1583   case PPCISD::XXSWAPD:         return "PPCISD::XXSWAPD";
1584   case PPCISD::SWAP_NO_CHAIN:   return "PPCISD::SWAP_NO_CHAIN";
1585   case PPCISD::VABSD:           return "PPCISD::VABSD";
1586   case PPCISD::BUILD_FP128:     return "PPCISD::BUILD_FP128";
1587   case PPCISD::BUILD_SPE64:     return "PPCISD::BUILD_SPE64";
1588   case PPCISD::EXTRACT_SPE:     return "PPCISD::EXTRACT_SPE";
1589   case PPCISD::EXTSWSLI:        return "PPCISD::EXTSWSLI";
1590   case PPCISD::LD_VSX_LH:       return "PPCISD::LD_VSX_LH";
1591   case PPCISD::FP_EXTEND_HALF:  return "PPCISD::FP_EXTEND_HALF";
1592   case PPCISD::MAT_PCREL_ADDR:  return "PPCISD::MAT_PCREL_ADDR";
1593   case PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR:
1594     return "PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR";
1595   case PPCISD::TLS_LOCAL_EXEC_MAT_ADDR:
1596     return "PPCISD::TLS_LOCAL_EXEC_MAT_ADDR";
1597   case PPCISD::ACC_BUILD:       return "PPCISD::ACC_BUILD";
1598   case PPCISD::PAIR_BUILD:      return "PPCISD::PAIR_BUILD";
1599   case PPCISD::EXTRACT_VSX_REG: return "PPCISD::EXTRACT_VSX_REG";
1600   case PPCISD::XXMFACC:         return "PPCISD::XXMFACC";
1601   case PPCISD::LD_SPLAT:        return "PPCISD::LD_SPLAT";
1602   case PPCISD::FNMSUB:          return "PPCISD::FNMSUB";
1603   case PPCISD::STRICT_FADDRTZ:
1604     return "PPCISD::STRICT_FADDRTZ";
1605   case PPCISD::STRICT_FCTIDZ:
1606     return "PPCISD::STRICT_FCTIDZ";
1607   case PPCISD::STRICT_FCTIWZ:
1608     return "PPCISD::STRICT_FCTIWZ";
1609   case PPCISD::STRICT_FCTIDUZ:
1610     return "PPCISD::STRICT_FCTIDUZ";
1611   case PPCISD::STRICT_FCTIWUZ:
1612     return "PPCISD::STRICT_FCTIWUZ";
1613   case PPCISD::STRICT_FCFID:
1614     return "PPCISD::STRICT_FCFID";
1615   case PPCISD::STRICT_FCFIDU:
1616     return "PPCISD::STRICT_FCFIDU";
1617   case PPCISD::STRICT_FCFIDS:
1618     return "PPCISD::STRICT_FCFIDS";
1619   case PPCISD::STRICT_FCFIDUS:
1620     return "PPCISD::STRICT_FCFIDUS";
1621   case PPCISD::LXVRZX:          return "PPCISD::LXVRZX";
1622   }
1623   return nullptr;
1624 }
1625 
1626 EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
1627                                           EVT VT) const {
1628   if (!VT.isVector())
1629     return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;
1630 
1631   return VT.changeVectorElementTypeToInteger();
1632 }
1633 
1634 bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
1635   assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
1636   return true;
1637 }
1638 
1639 //===----------------------------------------------------------------------===//
1640 // Node matching predicates, for use by the tblgen matching code.
1641 //===----------------------------------------------------------------------===//
1642 
1643 /// isFloatingPointZero - Return true if this is 0.0 or -0.0.
1644 static bool isFloatingPointZero(SDValue Op) {
1645   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
1646     return CFP->getValueAPF().isZero();
1647   else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
1648     // Maybe this has already been legalized into the constant pool?
1649     if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
1650       if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
1651         return CFP->getValueAPF().isZero();
1652   }
1653   return false;
1654 }
1655 
1656 /// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
1657 /// true if Op is undef or if it matches the specified value.
1658 static bool isConstantOrUndef(int Op, int Val) {
1659   return Op < 0 || Op == Val;
1660 }
1661 
1662 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
1663 /// VPKUHUM instruction.
1664 /// The ShuffleKind distinguishes between big-endian operations with
1665 /// two different inputs (0), either-endian operations with two identical
1666 /// inputs (1), and little-endian operations with two different inputs (2).
1667 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1668 bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1669                                SelectionDAG &DAG) {
1670   bool IsLE = DAG.getDataLayout().isLittleEndian();
1671   if (ShuffleKind == 0) {
1672     if (IsLE)
1673       return false;
1674     for (unsigned i = 0; i != 16; ++i)
1675       if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
1676         return false;
1677   } else if (ShuffleKind == 2) {
1678     if (!IsLE)
1679       return false;
1680     for (unsigned i = 0; i != 16; ++i)
1681       if (!isConstantOrUndef(N->getMaskElt(i), i*2))
1682         return false;
1683   } else if (ShuffleKind == 1) {
1684     unsigned j = IsLE ? 0 : 1;
1685     for (unsigned i = 0; i != 8; ++i)
1686       if (!isConstantOrUndef(N->getMaskElt(i),    i*2+j) ||
1687           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j))
1688         return false;
1689   }
1690   return true;
1691 }
1692 
1693 /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
1694 /// VPKUWUM instruction.
1695 /// The ShuffleKind distinguishes between big-endian operations with
1696 /// two different inputs (0), either-endian operations with two identical
1697 /// inputs (1), and little-endian operations with two different inputs (2).
1698 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1699 bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1700                                SelectionDAG &DAG) {
1701   bool IsLE = DAG.getDataLayout().isLittleEndian();
1702   if (ShuffleKind == 0) {
1703     if (IsLE)
1704       return false;
1705     for (unsigned i = 0; i != 16; i += 2)
1706       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+2) ||
1707           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+3))
1708         return false;
1709   } else if (ShuffleKind == 2) {
1710     if (!IsLE)
1711       return false;
1712     for (unsigned i = 0; i != 16; i += 2)
1713       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
1714           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1))
1715         return false;
1716   } else if (ShuffleKind == 1) {
1717     unsigned j = IsLE ? 0 : 2;
1718     for (unsigned i = 0; i != 8; i += 2)
1719       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
1720           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
1721           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
1722           !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1))
1723         return false;
1724   }
1725   return true;
1726 }
1727 
1728 /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
1729 /// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
1730 /// current subtarget.
1731 ///
1732 /// The ShuffleKind distinguishes between big-endian operations with
1733 /// two different inputs (0), either-endian operations with two identical
1734 /// inputs (1), and little-endian operations with two different inputs (2).
1735 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1736 bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1737                                SelectionDAG &DAG) {
1738   const PPCSubtarget& Subtarget =
1739       static_cast<const PPCSubtarget&>(DAG.getSubtarget());
1740   if (!Subtarget.hasP8Vector())
1741     return false;
1742 
1743   bool IsLE = DAG.getDataLayout().isLittleEndian();
1744   if (ShuffleKind == 0) {
1745     if (IsLE)
1746       return false;
1747     for (unsigned i = 0; i != 16; i += 4)
1748       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+4) ||
1749           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+5) ||
1750           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+6) ||
1751           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+7))
1752         return false;
1753   } else if (ShuffleKind == 2) {
1754     if (!IsLE)
1755       return false;
1756     for (unsigned i = 0; i != 16; i += 4)
1757       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
1758           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1) ||
1759           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+2) ||
1760           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+3))
1761         return false;
1762   } else if (ShuffleKind == 1) {
1763     unsigned j = IsLE ? 0 : 4;
1764     for (unsigned i = 0; i != 8; i += 4)
1765       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
1766           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
1767           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+j+2) ||
1768           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+j+3) ||
1769           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
1770           !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1) ||
1771           !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
1772           !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
1773         return false;
1774   }
1775   return true;
1776 }
1777 
1778 /// isVMerge - Common function, used to match vmrg* shuffles.
1779 ///
1780 static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
1781                      unsigned LHSStart, unsigned RHSStart) {
1782   if (N->getValueType(0) != MVT::v16i8)
1783     return false;
1784   assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
1785          "Unsupported merge size!");
1786 
1787   for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
1788     for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
1789       if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
1790                              LHSStart+j+i*UnitSize) ||
1791           !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
1792                              RHSStart+j+i*UnitSize))
1793         return false;
1794     }
1795   return true;
1796 }
1797 
1798 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
1799 /// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
1800 /// The ShuffleKind distinguishes between big-endian merges with two
1801 /// different inputs (0), either-endian merges with two identical inputs (1),
1802 /// and little-endian merges with two different inputs (2).  For the latter,
1803 /// the input operands are swapped (see PPCInstrAltivec.td).
1804 bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1805                              unsigned ShuffleKind, SelectionDAG &DAG) {
1806   if (DAG.getDataLayout().isLittleEndian()) {
1807     if (ShuffleKind == 1) // unary
1808       return isVMerge(N, UnitSize, 0, 0);
1809     else if (ShuffleKind == 2) // swapped
1810       return isVMerge(N, UnitSize, 0, 16);
1811     else
1812       return false;
1813   } else {
1814     if (ShuffleKind == 1) // unary
1815       return isVMerge(N, UnitSize, 8, 8);
1816     else if (ShuffleKind == 0) // normal
1817       return isVMerge(N, UnitSize, 8, 24);
1818     else
1819       return false;
1820   }
1821 }
1822 
1823 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
1824 /// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
1825 /// The ShuffleKind distinguishes between big-endian merges with two
1826 /// different inputs (0), either-endian merges with two identical inputs (1),
1827 /// and little-endian merges with two different inputs (2).  For the latter,
1828 /// the input operands are swapped (see PPCInstrAltivec.td).
1829 bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1830                              unsigned ShuffleKind, SelectionDAG &DAG) {
1831   if (DAG.getDataLayout().isLittleEndian()) {
1832     if (ShuffleKind == 1) // unary
1833       return isVMerge(N, UnitSize, 8, 8);
1834     else if (ShuffleKind == 2) // swapped
1835       return isVMerge(N, UnitSize, 8, 24);
1836     else
1837       return false;
1838   } else {
1839     if (ShuffleKind == 1) // unary
1840       return isVMerge(N, UnitSize, 0, 0);
1841     else if (ShuffleKind == 0) // normal
1842       return isVMerge(N, UnitSize, 0, 16);
1843     else
1844       return false;
1845   }
1846 }
1847 
1848 /**
1849  * Common function used to match vmrgew and vmrgow shuffles
1850  *
1851  * The indexOffset determines whether to look for even or odd words in
1852  * the shuffle mask. This is based on the of the endianness of the target
1853  * machine.
1854  *   - Little Endian:
1855  *     - Use offset of 0 to check for odd elements
1856  *     - Use offset of 4 to check for even elements
1857  *   - Big Endian:
1858  *     - Use offset of 0 to check for even elements
1859  *     - Use offset of 4 to check for odd elements
1860  * A detailed description of the vector element ordering for little endian and
1861  * big endian can be found at
1862  * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
1863  * Targeting your applications - what little endian and big endian IBM XL C/C++
1864  * compiler differences mean to you
1865  *
1866  * The mask to the shuffle vector instruction specifies the indices of the
1867  * elements from the two input vectors to place in the result. The elements are
1868  * numbered in array-access order, starting with the first vector. These vectors
1869  * are always of type v16i8, thus each vector will contain 16 elements of size
1870  * 8. More info on the shuffle vector can be found in the
1871  * http://llvm.org/docs/LangRef.html#shufflevector-instruction
1872  * Language Reference.
1873  *
1874  * The RHSStartValue indicates whether the same input vectors are used (unary)
1875  * or two different input vectors are used, based on the following:
1876  *   - If the instruction uses the same vector for both inputs, the range of the
1877  *     indices will be 0 to 15. In this case, the RHSStart value passed should
1878  *     be 0.
1879  *   - If the instruction has two different vectors then the range of the
1880  *     indices will be 0 to 31. In this case, the RHSStart value passed should
1881  *     be 16 (indices 0-15 specify elements in the first vector while indices 16
1882  *     to 31 specify elements in the second vector).
1883  *
1884  * \param[in] N The shuffle vector SD Node to analyze
1885  * \param[in] IndexOffset Specifies whether to look for even or odd elements
1886  * \param[in] RHSStartValue Specifies the starting index for the righthand input
1887  * vector to the shuffle_vector instruction
1888  * \return true iff this shuffle vector represents an even or odd word merge
1889  */
1890 static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
1891                      unsigned RHSStartValue) {
1892   if (N->getValueType(0) != MVT::v16i8)
1893     return false;
1894 
1895   for (unsigned i = 0; i < 2; ++i)
1896     for (unsigned j = 0; j < 4; ++j)
1897       if (!isConstantOrUndef(N->getMaskElt(i*4+j),
1898                              i*RHSStartValue+j+IndexOffset) ||
1899           !isConstantOrUndef(N->getMaskElt(i*4+j+8),
1900                              i*RHSStartValue+j+IndexOffset+8))
1901         return false;
1902   return true;
1903 }
1904 
1905 /**
1906  * Determine if the specified shuffle mask is suitable for the vmrgew or
1907  * vmrgow instructions.
1908  *
1909  * \param[in] N The shuffle vector SD Node to analyze
1910  * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
1911  * \param[in] ShuffleKind Identify the type of merge:
1912  *   - 0 = big-endian merge with two different inputs;
1913  *   - 1 = either-endian merge with two identical inputs;
1914  *   - 2 = little-endian merge with two different inputs (inputs are swapped for
1915  *     little-endian merges).
1916  * \param[in] DAG The current SelectionDAG
1917  * \return true iff this shuffle mask
1918  */
1919 bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
1920                               unsigned ShuffleKind, SelectionDAG &DAG) {
1921   if (DAG.getDataLayout().isLittleEndian()) {
1922     unsigned indexOffset = CheckEven ? 4 : 0;
1923     if (ShuffleKind == 1) // Unary
1924       return isVMerge(N, indexOffset, 0);
1925     else if (ShuffleKind == 2) // swapped
1926       return isVMerge(N, indexOffset, 16);
1927     else
1928       return false;
1929   }
1930   else {
1931     unsigned indexOffset = CheckEven ? 0 : 4;
1932     if (ShuffleKind == 1) // Unary
1933       return isVMerge(N, indexOffset, 0);
1934     else if (ShuffleKind == 0) // Normal
1935       return isVMerge(N, indexOffset, 16);
1936     else
1937       return false;
1938   }
1939   return false;
1940 }
1941 
1942 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
1943 /// amount, otherwise return -1.
1944 /// The ShuffleKind distinguishes between big-endian operations with two
1945 /// different inputs (0), either-endian operations with two identical inputs
1946 /// (1), and little-endian operations with two different inputs (2).  For the
1947 /// latter, the input operands are swapped (see PPCInstrAltivec.td).
1948 int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
1949                              SelectionDAG &DAG) {
1950   if (N->getValueType(0) != MVT::v16i8)
1951     return -1;
1952 
1953   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1954 
1955   // Find the first non-undef value in the shuffle mask.
1956   unsigned i;
1957   for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
1958     /*search*/;
1959 
1960   if (i == 16) return -1;  // all undef.
1961 
1962   // Otherwise, check to see if the rest of the elements are consecutively
1963   // numbered from this value.
1964   unsigned ShiftAmt = SVOp->getMaskElt(i);
1965   if (ShiftAmt < i) return -1;
1966 
1967   ShiftAmt -= i;
1968   bool isLE = DAG.getDataLayout().isLittleEndian();
1969 
1970   if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
1971     // Check the rest of the elements to see if they are consecutive.
1972     for (++i; i != 16; ++i)
1973       if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
1974         return -1;
1975   } else if (ShuffleKind == 1) {
1976     // Check the rest of the elements to see if they are consecutive.
1977     for (++i; i != 16; ++i)
1978       if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
1979         return -1;
1980   } else
1981     return -1;
1982 
1983   if (isLE)
1984     ShiftAmt = 16 - ShiftAmt;
1985 
1986   return ShiftAmt;
1987 }
1988 
1989 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
1990 /// specifies a splat of a single element that is suitable for input to
1991 /// one of the splat operations (VSPLTB/VSPLTH/VSPLTW/XXSPLTW/LXVDSX/etc.).
1992 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
1993   assert(N->getValueType(0) == MVT::v16i8 && isPowerOf2_32(EltSize) &&
1994          EltSize <= 8 && "Can only handle 1,2,4,8 byte element sizes");
1995 
1996   // The consecutive indices need to specify an element, not part of two
1997   // different elements.  So abandon ship early if this isn't the case.
1998   if (N->getMaskElt(0) % EltSize != 0)
1999     return false;
2000 
2001   // This is a splat operation if each element of the permute is the same, and
2002   // if the value doesn't reference the second vector.
2003   unsigned ElementBase = N->getMaskElt(0);
2004 
2005   // FIXME: Handle UNDEF elements too!
2006   if (ElementBase >= 16)
2007     return false;
2008 
2009   // Check that the indices are consecutive, in the case of a multi-byte element
2010   // splatted with a v16i8 mask.
2011   for (unsigned i = 1; i != EltSize; ++i)
2012     if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
2013       return false;
2014 
2015   for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
2016     if (N->getMaskElt(i) < 0) continue;
2017     for (unsigned j = 0; j != EltSize; ++j)
2018       if (N->getMaskElt(i+j) != N->getMaskElt(j))
2019         return false;
2020   }
2021   return true;
2022 }
2023 
2024 /// Check that the mask is shuffling N byte elements. Within each N byte
2025 /// element of the mask, the indices could be either in increasing or
2026 /// decreasing order as long as they are consecutive.
2027 /// \param[in] N the shuffle vector SD Node to analyze
2028 /// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/
2029 /// Word/DoubleWord/QuadWord).
2030 /// \param[in] StepLen the delta indices number among the N byte element, if
2031 /// the mask is in increasing/decreasing order then it is 1/-1.
2032 /// \return true iff the mask is shuffling N byte elements.
2033 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
2034                                    int StepLen) {
2035   assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
2036          "Unexpected element width.");
2037   assert((StepLen == 1 || StepLen == -1) && "Unexpected element width.");
2038 
2039   unsigned NumOfElem = 16 / Width;
2040   unsigned MaskVal[16]; //  Width is never greater than 16
2041   for (unsigned i = 0; i < NumOfElem; ++i) {
2042     MaskVal[0] = N->getMaskElt(i * Width);
2043     if ((StepLen == 1) && (MaskVal[0] % Width)) {
2044       return false;
2045     } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
2046       return false;
2047     }
2048 
2049     for (unsigned int j = 1; j < Width; ++j) {
2050       MaskVal[j] = N->getMaskElt(i * Width + j);
2051       if (MaskVal[j] != MaskVal[j-1] + StepLen) {
2052         return false;
2053       }
2054     }
2055   }
2056 
2057   return true;
2058 }
2059 
2060 bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
2061                           unsigned &InsertAtByte, bool &Swap, bool IsLE) {
2062   if (!isNByteElemShuffleMask(N, 4, 1))
2063     return false;
2064 
2065   // Now we look at mask elements 0,4,8,12
2066   unsigned M0 = N->getMaskElt(0) / 4;
2067   unsigned M1 = N->getMaskElt(4) / 4;
2068   unsigned M2 = N->getMaskElt(8) / 4;
2069   unsigned M3 = N->getMaskElt(12) / 4;
2070   unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
2071   unsigned BigEndianShifts[] = { 3, 0, 1, 2 };
2072 
2073   // Below, let H and L be arbitrary elements of the shuffle mask
2074   // where H is in the range [4,7] and L is in the range [0,3].
2075   // H, 1, 2, 3 or L, 5, 6, 7
2076   if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
2077       (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
2078     ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
2079     InsertAtByte = IsLE ? 12 : 0;
2080     Swap = M0 < 4;
2081     return true;
2082   }
2083   // 0, H, 2, 3 or 4, L, 6, 7
2084   if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
2085       (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
2086     ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
2087     InsertAtByte = IsLE ? 8 : 4;
2088     Swap = M1 < 4;
2089     return true;
2090   }
2091   // 0, 1, H, 3 or 4, 5, L, 7
2092   if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
2093       (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
2094     ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
2095     InsertAtByte = IsLE ? 4 : 8;
2096     Swap = M2 < 4;
2097     return true;
2098   }
2099   // 0, 1, 2, H or 4, 5, 6, L
2100   if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
2101       (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
2102     ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
2103     InsertAtByte = IsLE ? 0 : 12;
2104     Swap = M3 < 4;
2105     return true;
2106   }
2107 
2108   // If both vector operands for the shuffle are the same vector, the mask will
2109   // contain only elements from the first one and the second one will be undef.
2110   if (N->getOperand(1).isUndef()) {
2111     ShiftElts = 0;
2112     Swap = true;
2113     unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
2114     if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
2115       InsertAtByte = IsLE ? 12 : 0;
2116       return true;
2117     }
2118     if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
2119       InsertAtByte = IsLE ? 8 : 4;
2120       return true;
2121     }
2122     if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
2123       InsertAtByte = IsLE ? 4 : 8;
2124       return true;
2125     }
2126     if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
2127       InsertAtByte = IsLE ? 0 : 12;
2128       return true;
2129     }
2130   }
2131 
2132   return false;
2133 }
2134 
2135 bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
2136                                bool &Swap, bool IsLE) {
2137   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2138   // Ensure each byte index of the word is consecutive.
2139   if (!isNByteElemShuffleMask(N, 4, 1))
2140     return false;
2141 
2142   // Now we look at mask elements 0,4,8,12, which are the beginning of words.
2143   unsigned M0 = N->getMaskElt(0) / 4;
2144   unsigned M1 = N->getMaskElt(4) / 4;
2145   unsigned M2 = N->getMaskElt(8) / 4;
2146   unsigned M3 = N->getMaskElt(12) / 4;
2147 
2148   // If both vector operands for the shuffle are the same vector, the mask will
2149   // contain only elements from the first one and the second one will be undef.
2150   if (N->getOperand(1).isUndef()) {
2151     assert(M0 < 4 && "Indexing into an undef vector?");
2152     if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
2153       return false;
2154 
2155     ShiftElts = IsLE ? (4 - M0) % 4 : M0;
2156     Swap = false;
2157     return true;
2158   }
2159 
2160   // Ensure each word index of the ShuffleVector Mask is consecutive.
2161   if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)
2162     return false;
2163 
2164   if (IsLE) {
2165     if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
2166       // Input vectors don't need to be swapped if the leading element
2167       // of the result is one of the 3 left elements of the second vector
2168       // (or if there is no shift to be done at all).
2169       Swap = false;
2170       ShiftElts = (8 - M0) % 8;
2171     } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
2172       // Input vectors need to be swapped if the leading element
2173       // of the result is one of the 3 left elements of the first vector
2174       // (or if we're shifting by 4 - thereby simply swapping the vectors).
2175       Swap = true;
2176       ShiftElts = (4 - M0) % 4;
2177     }
2178 
2179     return true;
2180   } else {                                          // BE
2181     if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
2182       // Input vectors don't need to be swapped if the leading element
2183       // of the result is one of the 4 elements of the first vector.
2184       Swap = false;
2185       ShiftElts = M0;
2186     } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
2187       // Input vectors need to be swapped if the leading element
2188       // of the result is one of the 4 elements of the right vector.
2189       Swap = true;
2190       ShiftElts = M0 - 4;
2191     }
2192 
2193     return true;
2194   }
2195 }
2196 
2197 bool static isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) {
2198   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2199 
2200   if (!isNByteElemShuffleMask(N, Width, -1))
2201     return false;
2202 
2203   for (int i = 0; i < 16; i += Width)
2204     if (N->getMaskElt(i) != i + Width - 1)
2205       return false;
2206 
2207   return true;
2208 }
2209 
2210 bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) {
2211   return isXXBRShuffleMaskHelper(N, 2);
2212 }
2213 
2214 bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) {
2215   return isXXBRShuffleMaskHelper(N, 4);
2216 }
2217 
2218 bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) {
2219   return isXXBRShuffleMaskHelper(N, 8);
2220 }
2221 
2222 bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) {
2223   return isXXBRShuffleMaskHelper(N, 16);
2224 }
2225 
2226 /// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap
2227 /// if the inputs to the instruction should be swapped and set \p DM to the
2228 /// value for the immediate.
2229 /// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI
2230 /// AND element 0 of the result comes from the first input (LE) or second input
2231 /// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered.
2232 /// \return true iff the given mask of shuffle node \p N is a XXPERMDI shuffle
2233 /// mask.
2234 bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
2235                                bool &Swap, bool IsLE) {
2236   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2237 
2238   // Ensure each byte index of the double word is consecutive.
2239   if (!isNByteElemShuffleMask(N, 8, 1))
2240     return false;
2241 
2242   unsigned M0 = N->getMaskElt(0) / 8;
2243   unsigned M1 = N->getMaskElt(8) / 8;
2244   assert(((M0 | M1) < 4) && "A mask element out of bounds?");
2245 
2246   // If both vector operands for the shuffle are the same vector, the mask will
2247   // contain only elements from the first one and the second one will be undef.
2248   if (N->getOperand(1).isUndef()) {
2249     if ((M0 | M1) < 2) {
2250       DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
2251       Swap = false;
2252       return true;
2253     } else
2254       return false;
2255   }
2256 
2257   if (IsLE) {
2258     if (M0 > 1 && M1 < 2) {
2259       Swap = false;
2260     } else if (M0 < 2 && M1 > 1) {
2261       M0 = (M0 + 2) % 4;
2262       M1 = (M1 + 2) % 4;
2263       Swap = true;
2264     } else
2265       return false;
2266 
2267     // Note: if control flow comes here that means Swap is already set above
2268     DM = (((~M1) & 1) << 1) + ((~M0) & 1);
2269     return true;
2270   } else { // BE
2271     if (M0 < 2 && M1 > 1) {
2272       Swap = false;
2273     } else if (M0 > 1 && M1 < 2) {
2274       M0 = (M0 + 2) % 4;
2275       M1 = (M1 + 2) % 4;
2276       Swap = true;
2277     } else
2278       return false;
2279 
2280     // Note: if control flow comes here that means Swap is already set above
2281     DM = (M0 << 1) + (M1 & 1);
2282     return true;
2283   }
2284 }
2285 
2286 
2287 /// getSplatIdxForPPCMnemonics - Return the splat index as a value that is
2288 /// appropriate for PPC mnemonics (which have a big endian bias - namely
2289 /// elements are counted from the left of the vector register).
2290 unsigned PPC::getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
2291                                          SelectionDAG &DAG) {
2292   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2293   assert(isSplatShuffleMask(SVOp, EltSize));
2294   if (DAG.getDataLayout().isLittleEndian())
2295     return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
2296   else
2297     return SVOp->getMaskElt(0) / EltSize;
2298 }
2299 
2300 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
2301 /// by using a vspltis[bhw] instruction of the specified element size, return
2302 /// the constant being splatted.  The ByteSize field indicates the number of
2303 /// bytes of each element [124] -> [bhw].
2304 SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
2305   SDValue OpVal(nullptr, 0);
2306 
2307   // If ByteSize of the splat is bigger than the element size of the
2308   // build_vector, then we have a case where we are checking for a splat where
2309   // multiple elements of the buildvector are folded together into a single
2310   // logical element of the splat (e.g. "vsplish 1" to splat {0,1}*8).
2311   unsigned EltSize = 16/N->getNumOperands();
2312   if (EltSize < ByteSize) {
2313     unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
2314     SDValue UniquedVals[4];
2315     assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
2316 
2317     // See if all of the elements in the buildvector agree across.
2318     for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2319       if (N->getOperand(i).isUndef()) continue;
2320       // If the element isn't a constant, bail fully out.
2321       if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();
2322 
2323       if (!UniquedVals[i&(Multiple-1)].getNode())
2324         UniquedVals[i&(Multiple-1)] = N->getOperand(i);
2325       else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
2326         return SDValue();  // no match.
2327     }
2328 
2329     // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
2330     // either constant or undef values that are identical for each chunk.  See
2331     // if these chunks can form into a larger vspltis*.
2332 
2333     // Check to see if all of the leading entries are either 0 or -1.  If
2334     // neither, then this won't fit into the immediate field.
2335     bool LeadingZero = true;
2336     bool LeadingOnes = true;
2337     for (unsigned i = 0; i != Multiple-1; ++i) {
2338       if (!UniquedVals[i].getNode()) continue;  // Must have been undefs.
2339 
2340       LeadingZero &= isNullConstant(UniquedVals[i]);
2341       LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
2342     }
2343     // Finally, check the least significant entry.
2344     if (LeadingZero) {
2345       if (!UniquedVals[Multiple-1].getNode())
2346         return DAG.getTargetConstant(0, SDLoc(N), MVT::i32);  // 0,0,0,undef
2347       int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
2348       if (Val < 16)                                   // 0,0,0,4 -> vspltisw(4)
2349         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2350     }
2351     if (LeadingOnes) {
2352       if (!UniquedVals[Multiple-1].getNode())
2353         return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
2354       int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
2355       if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
2356         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2357     }
2358 
2359     return SDValue();
2360   }
2361 
2362   // Check to see if this buildvec has a single non-undef value in its elements.
2363   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2364     if (N->getOperand(i).isUndef()) continue;
2365     if (!OpVal.getNode())
2366       OpVal = N->getOperand(i);
2367     else if (OpVal != N->getOperand(i))
2368       return SDValue();
2369   }
2370 
2371   if (!OpVal.getNode()) return SDValue();  // All UNDEF: use implicit def.
2372 
2373   unsigned ValSizeInBytes = EltSize;
2374   uint64_t Value = 0;
2375   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
2376     Value = CN->getZExtValue();
2377   } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
2378     assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
2379     Value = FloatToBits(CN->getValueAPF().convertToFloat());
2380   }
2381 
2382   // If the splat value is larger than the element value, then we can never do
2383   // this splat.  The only case that we could fit the replicated bits into our
2384   // immediate field for would be zero, and we prefer to use vxor for it.
2385   if (ValSizeInBytes < ByteSize) return SDValue();
2386 
2387   // If the element value is larger than the splat value, check if it consists
2388   // of a repeated bit pattern of size ByteSize.
2389   if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
2390     return SDValue();
2391 
2392   // Properly sign extend the value.
2393   int MaskVal = SignExtend32(Value, ByteSize * 8);
2394 
2395   // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
2396   if (MaskVal == 0) return SDValue();
2397 
2398   // Finally, if this value fits in a 5 bit sext field, return it
2399   if (SignExtend32<5>(MaskVal) == MaskVal)
2400     return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
2401   return SDValue();
2402 }
2403 
2404 //===----------------------------------------------------------------------===//
2405 //  Addressing Mode Selection
2406 //===----------------------------------------------------------------------===//
2407 
2408 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit
2409 /// or 64-bit immediate, and if the value can be accurately represented as a
2410 /// sign extension from a 16-bit value.  If so, this returns true and the
2411 /// immediate.
2412 bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
2413   if (!isa<ConstantSDNode>(N))
2414     return false;
2415 
2416   Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
2417   if (N->getValueType(0) == MVT::i32)
2418     return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
2419   else
2420     return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
2421 }
2422 bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
2423   return isIntS16Immediate(Op.getNode(), Imm);
2424 }
2425 
2426 
2427 /// SelectAddressEVXRegReg - Given the specified address, check to see if it can
2428 /// be represented as an indexed [r+r] operation.
2429 bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base,
2430                                                SDValue &Index,
2431                                                SelectionDAG &DAG) const {
2432   for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
2433       UI != E; ++UI) {
2434     if (MemSDNode *Memop = dyn_cast<MemSDNode>(*UI)) {
2435       if (Memop->getMemoryVT() == MVT::f64) {
2436           Base = N.getOperand(0);
2437           Index = N.getOperand(1);
2438           return true;
2439       }
2440     }
2441   }
2442   return false;
2443 }
2444 
2445 /// isIntS34Immediate - This method tests if value of node given can be
2446 /// accurately represented as a sign extension from a 34-bit value.  If so,
2447 /// this returns true and the immediate.
2448 bool llvm::isIntS34Immediate(SDNode *N, int64_t &Imm) {
2449   if (!isa<ConstantSDNode>(N))
2450     return false;
2451 
2452   Imm = (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
2453   return isInt<34>(Imm);
2454 }
2455 bool llvm::isIntS34Immediate(SDValue Op, int64_t &Imm) {
2456   return isIntS34Immediate(Op.getNode(), Imm);
2457 }
2458 
2459 /// SelectAddressRegReg - Given the specified addressed, check to see if it
2460 /// can be represented as an indexed [r+r] operation.  Returns false if it
2461 /// can be more efficiently represented as [r+imm]. If \p EncodingAlignment is
2462 /// non-zero and N can be represented by a base register plus a signed 16-bit
2463 /// displacement, make a more precise judgement by checking (displacement % \p
2464 /// EncodingAlignment).
2465 bool PPCTargetLowering::SelectAddressRegReg(
2466     SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG,
2467     MaybeAlign EncodingAlignment) const {
2468   // If we have a PC Relative target flag don't select as [reg+reg]. It will be
2469   // a [pc+imm].
2470   if (SelectAddressPCRel(N, Base))
2471     return false;
2472 
2473   int16_t Imm = 0;
2474   if (N.getOpcode() == ISD::ADD) {
2475     // Is there any SPE load/store (f64), which can't handle 16bit offset?
2476     // SPE load/store can only handle 8-bit offsets.
2477     if (hasSPE() && SelectAddressEVXRegReg(N, Base, Index, DAG))
2478         return true;
2479     if (isIntS16Immediate(N.getOperand(1), Imm) &&
2480         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
2481       return false; // r+i
2482     if (N.getOperand(1).getOpcode() == PPCISD::Lo)
2483       return false;    // r+i
2484 
2485     Base = N.getOperand(0);
2486     Index = N.getOperand(1);
2487     return true;
2488   } else if (N.getOpcode() == ISD::OR) {
2489     if (isIntS16Immediate(N.getOperand(1), Imm) &&
2490         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
2491       return false; // r+i can fold it if we can.
2492 
2493     // If this is an or of disjoint bitfields, we can codegen this as an add
2494     // (for better address arithmetic) if the LHS and RHS of the OR are provably
2495     // disjoint.
2496     KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2497 
2498     if (LHSKnown.Zero.getBoolValue()) {
2499       KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
2500       // If all of the bits are known zero on the LHS or RHS, the add won't
2501       // carry.
2502       if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
2503         Base = N.getOperand(0);
2504         Index = N.getOperand(1);
2505         return true;
2506       }
2507     }
2508   }
2509 
2510   return false;
2511 }
2512 
2513 // If we happen to be doing an i64 load or store into a stack slot that has
2514 // less than a 4-byte alignment, then the frame-index elimination may need to
2515 // use an indexed load or store instruction (because the offset may not be a
2516 // multiple of 4). The extra register needed to hold the offset comes from the
2517 // register scavenger, and it is possible that the scavenger will need to use
2518 // an emergency spill slot. As a result, we need to make sure that a spill slot
2519 // is allocated when doing an i64 load/store into a less-than-4-byte-aligned
2520 // stack slot.
2521 static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
2522   // FIXME: This does not handle the LWA case.
2523   if (VT != MVT::i64)
2524     return;
2525 
2526   // NOTE: We'll exclude negative FIs here, which come from argument
2527   // lowering, because there are no known test cases triggering this problem
2528   // using packed structures (or similar). We can remove this exclusion if
2529   // we find such a test case. The reason why this is so test-case driven is
2530   // because this entire 'fixup' is only to prevent crashes (from the
2531   // register scavenger) on not-really-valid inputs. For example, if we have:
2532   //   %a = alloca i1
2533   //   %b = bitcast i1* %a to i64*
2534   //   store i64* a, i64 b
2535   // then the store should really be marked as 'align 1', but is not. If it
2536   // were marked as 'align 1' then the indexed form would have been
2537   // instruction-selected initially, and the problem this 'fixup' is preventing
2538   // won't happen regardless.
2539   if (FrameIdx < 0)
2540     return;
2541 
2542   MachineFunction &MF = DAG.getMachineFunction();
2543   MachineFrameInfo &MFI = MF.getFrameInfo();
2544 
2545   if (MFI.getObjectAlign(FrameIdx) >= Align(4))
2546     return;
2547 
2548   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2549   FuncInfo->setHasNonRISpills();
2550 }
2551 
2552 /// Returns true if the address N can be represented by a base register plus
2553 /// a signed 16-bit displacement [r+imm], and if it is not better
2554 /// represented as reg+reg.  If \p EncodingAlignment is non-zero, only accept
2555 /// displacements that are multiples of that value.
2556 bool PPCTargetLowering::SelectAddressRegImm(
2557     SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG,
2558     MaybeAlign EncodingAlignment) const {
2559   // FIXME dl should come from parent load or store, not from address
2560   SDLoc dl(N);
2561 
2562   // If we have a PC Relative target flag don't select as [reg+imm]. It will be
2563   // a [pc+imm].
2564   if (SelectAddressPCRel(N, Base))
2565     return false;
2566 
2567   // If this can be more profitably realized as r+r, fail.
2568   if (SelectAddressRegReg(N, Disp, Base, DAG, EncodingAlignment))
2569     return false;
2570 
2571   if (N.getOpcode() == ISD::ADD) {
2572     int16_t imm = 0;
2573     if (isIntS16Immediate(N.getOperand(1), imm) &&
2574         (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
2575       Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2576       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2577         Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2578         fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2579       } else {
2580         Base = N.getOperand(0);
2581       }
2582       return true; // [r+i]
2583     } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
2584       // Match LOAD (ADD (X, Lo(G))).
2585       assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
2586              && "Cannot handle constant offsets yet!");
2587       Disp = N.getOperand(1).getOperand(0);  // The global address.
2588       assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
2589              Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
2590              Disp.getOpcode() == ISD::TargetConstantPool ||
2591              Disp.getOpcode() == ISD::TargetJumpTable);
2592       Base = N.getOperand(0);
2593       return true;  // [&g+r]
2594     }
2595   } else if (N.getOpcode() == ISD::OR) {
2596     int16_t imm = 0;
2597     if (isIntS16Immediate(N.getOperand(1), imm) &&
2598         (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
2599       // If this is an or of disjoint bitfields, we can codegen this as an add
2600       // (for better address arithmetic) if the LHS and RHS of the OR are
2601       // provably disjoint.
2602       KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2603 
2604       if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
2605         // If all of the bits are known zero on the LHS or RHS, the add won't
2606         // carry.
2607         if (FrameIndexSDNode *FI =
2608               dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2609           Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2610           fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2611         } else {
2612           Base = N.getOperand(0);
2613         }
2614         Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2615         return true;
2616       }
2617     }
2618   } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
2619     // Loading from a constant address.
2620 
2621     // If this address fits entirely in a 16-bit sext immediate field, codegen
2622     // this as "d, 0"
2623     int16_t Imm;
2624     if (isIntS16Immediate(CN, Imm) &&
2625         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm))) {
2626       Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
2627       Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2628                              CN->getValueType(0));
2629       return true;
2630     }
2631 
2632     // Handle 32-bit sext immediates with LIS + addr mode.
2633     if ((CN->getValueType(0) == MVT::i32 ||
2634          (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
2635         (!EncodingAlignment ||
2636          isAligned(*EncodingAlignment, CN->getZExtValue()))) {
2637       int Addr = (int)CN->getZExtValue();
2638 
2639       // Otherwise, break this down into an LIS + disp.
2640       Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);
2641 
2642       Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
2643                                    MVT::i32);
2644       unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
2645       Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
2646       return true;
2647     }
2648   }
2649 
2650   Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
2651   if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
2652     Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2653     fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2654   } else
2655     Base = N;
2656   return true;      // [r+0]
2657 }
2658 
2659 /// Similar to the 16-bit case but for instructions that take a 34-bit
2660 /// displacement field (prefixed loads/stores).
2661 bool PPCTargetLowering::SelectAddressRegImm34(SDValue N, SDValue &Disp,
2662                                               SDValue &Base,
2663                                               SelectionDAG &DAG) const {
2664   // Only on 64-bit targets.
2665   if (N.getValueType() != MVT::i64)
2666     return false;
2667 
2668   SDLoc dl(N);
2669   int64_t Imm = 0;
2670 
2671   if (N.getOpcode() == ISD::ADD) {
2672     if (!isIntS34Immediate(N.getOperand(1), Imm))
2673       return false;
2674     Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
2675     if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0)))
2676       Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2677     else
2678       Base = N.getOperand(0);
2679     return true;
2680   }
2681 
2682   if (N.getOpcode() == ISD::OR) {
2683     if (!isIntS34Immediate(N.getOperand(1), Imm))
2684       return false;
2685     // If this is an or of disjoint bitfields, we can codegen this as an add
2686     // (for better address arithmetic) if the LHS and RHS of the OR are
2687     // provably disjoint.
2688     KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2689     if ((LHSKnown.Zero.getZExtValue() | ~(uint64_t)Imm) != ~0ULL)
2690       return false;
2691     if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0)))
2692       Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2693     else
2694       Base = N.getOperand(0);
2695     Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
2696     return true;
2697   }
2698 
2699   if (isIntS34Immediate(N, Imm)) { // If the address is a 34-bit const.
2700     Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
2701     Base = DAG.getRegister(PPC::ZERO8, N.getValueType());
2702     return true;
2703   }
2704 
2705   return false;
2706 }
2707 
2708 /// SelectAddressRegRegOnly - Given the specified addressed, force it to be
2709 /// represented as an indexed [r+r] operation.
2710 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
2711                                                 SDValue &Index,
2712                                                 SelectionDAG &DAG) const {
2713   // Check to see if we can easily represent this as an [r+r] address.  This
2714   // will fail if it thinks that the address is more profitably represented as
2715   // reg+imm, e.g. where imm = 0.
2716   if (SelectAddressRegReg(N, Base, Index, DAG))
2717     return true;
2718 
2719   // If the address is the result of an add, we will utilize the fact that the
2720   // address calculation includes an implicit add.  However, we can reduce
2721   // register pressure if we do not materialize a constant just for use as the
2722   // index register.  We only get rid of the add if it is not an add of a
2723   // value and a 16-bit signed constant and both have a single use.
2724   int16_t imm = 0;
2725   if (N.getOpcode() == ISD::ADD &&
2726       (!isIntS16Immediate(N.getOperand(1), imm) ||
2727        !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
2728     Base = N.getOperand(0);
2729     Index = N.getOperand(1);
2730     return true;
2731   }
2732 
2733   // Otherwise, do it the hard way, using R0 as the base register.
2734   Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2735                          N.getValueType());
2736   Index = N;
2737   return true;
2738 }
2739 
2740 template <typename Ty> static bool isValidPCRelNode(SDValue N) {
2741   Ty *PCRelCand = dyn_cast<Ty>(N);
2742   return PCRelCand && (PCRelCand->getTargetFlags() & PPCII::MO_PCREL_FLAG);
2743 }
2744 
2745 /// Returns true if this address is a PC Relative address.
2746 /// PC Relative addresses are marked with the flag PPCII::MO_PCREL_FLAG
2747 /// or if the node opcode is PPCISD::MAT_PCREL_ADDR.
2748 bool PPCTargetLowering::SelectAddressPCRel(SDValue N, SDValue &Base) const {
2749   // This is a materialize PC Relative node. Always select this as PC Relative.
2750   Base = N;
2751   if (N.getOpcode() == PPCISD::MAT_PCREL_ADDR)
2752     return true;
2753   if (isValidPCRelNode<ConstantPoolSDNode>(N) ||
2754       isValidPCRelNode<GlobalAddressSDNode>(N) ||
2755       isValidPCRelNode<JumpTableSDNode>(N) ||
2756       isValidPCRelNode<BlockAddressSDNode>(N))
2757     return true;
2758   return false;
2759 }
2760 
2761 /// Returns true if we should use a direct load into vector instruction
2762 /// (such as lxsd or lfd), instead of a load into gpr + direct move sequence.
2763 static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget& ST) {
2764 
2765   // If there are any other uses other than scalar to vector, then we should
2766   // keep it as a scalar load -> direct move pattern to prevent multiple
2767   // loads.
2768   LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
2769   if (!LD)
2770     return false;
2771 
2772   EVT MemVT = LD->getMemoryVT();
2773   if (!MemVT.isSimple())
2774     return false;
2775   switch(MemVT.getSimpleVT().SimpleTy) {
2776   case MVT::i64:
2777     break;
2778   case MVT::i32:
2779     if (!ST.hasP8Vector())
2780       return false;
2781     break;
2782   case MVT::i16:
2783   case MVT::i8:
2784     if (!ST.hasP9Vector())
2785       return false;
2786     break;
2787   default:
2788     return false;
2789   }
2790 
2791   SDValue LoadedVal(N, 0);
2792   if (!LoadedVal.hasOneUse())
2793     return false;
2794 
2795   for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end();
2796        UI != UE; ++UI)
2797     if (UI.getUse().get().getResNo() == 0 &&
2798         UI->getOpcode() != ISD::SCALAR_TO_VECTOR &&
2799         UI->getOpcode() != PPCISD::SCALAR_TO_VECTOR_PERMUTED)
2800       return false;
2801 
2802   return true;
2803 }
2804 
2805 /// getPreIndexedAddressParts - returns true by value, base pointer and
2806 /// offset pointer and addressing mode by reference if the node's address
2807 /// can be legally represented as pre-indexed load / store address.
2808 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
2809                                                   SDValue &Offset,
2810                                                   ISD::MemIndexedMode &AM,
2811                                                   SelectionDAG &DAG) const {
2812   if (DisablePPCPreinc) return false;
2813 
2814   bool isLoad = true;
2815   SDValue Ptr;
2816   EVT VT;
2817   unsigned Alignment;
2818   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2819     Ptr = LD->getBasePtr();
2820     VT = LD->getMemoryVT();
2821     Alignment = LD->getAlignment();
2822   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
2823     Ptr = ST->getBasePtr();
2824     VT  = ST->getMemoryVT();
2825     Alignment = ST->getAlignment();
2826     isLoad = false;
2827   } else
2828     return false;
2829 
2830   // Do not generate pre-inc forms for specific loads that feed scalar_to_vector
2831   // instructions because we can fold these into a more efficient instruction
2832   // instead, (such as LXSD).
2833   if (isLoad && usePartialVectorLoads(N, Subtarget)) {
2834     return false;
2835   }
2836 
2837   // PowerPC doesn't have preinc load/store instructions for vectors
2838   if (VT.isVector())
2839     return false;
2840 
2841   if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
2842     // Common code will reject creating a pre-inc form if the base pointer
2843     // is a frame index, or if N is a store and the base pointer is either
2844     // the same as or a predecessor of the value being stored.  Check for
2845     // those situations here, and try with swapped Base/Offset instead.
2846     bool Swap = false;
2847 
2848     if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
2849       Swap = true;
2850     else if (!isLoad) {
2851       SDValue Val = cast<StoreSDNode>(N)->getValue();
2852       if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
2853         Swap = true;
2854     }
2855 
2856     if (Swap)
2857       std::swap(Base, Offset);
2858 
2859     AM = ISD::PRE_INC;
2860     return true;
2861   }
2862 
2863   // LDU/STU can only handle immediates that are a multiple of 4.
2864   if (VT != MVT::i64) {
2865     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, None))
2866       return false;
2867   } else {
2868     // LDU/STU need an address with at least 4-byte alignment.
2869     if (Alignment < 4)
2870       return false;
2871 
2872     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, Align(4)))
2873       return false;
2874   }
2875 
2876   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2877     // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
2878     // sext i32 to i64 when addr mode is r+i.
2879     if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
2880         LD->getExtensionType() == ISD::SEXTLOAD &&
2881         isa<ConstantSDNode>(Offset))
2882       return false;
2883   }
2884 
2885   AM = ISD::PRE_INC;
2886   return true;
2887 }
2888 
2889 //===----------------------------------------------------------------------===//
2890 //  LowerOperation implementation
2891 //===----------------------------------------------------------------------===//
2892 
2893 /// Return true if we should reference labels using a PICBase, set the HiOpFlags
2894 /// and LoOpFlags to the target MO flags.
2895 static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
2896                                unsigned &HiOpFlags, unsigned &LoOpFlags,
2897                                const GlobalValue *GV = nullptr) {
2898   HiOpFlags = PPCII::MO_HA;
2899   LoOpFlags = PPCII::MO_LO;
2900 
2901   // Don't use the pic base if not in PIC relocation model.
2902   if (IsPIC) {
2903     HiOpFlags |= PPCII::MO_PIC_FLAG;
2904     LoOpFlags |= PPCII::MO_PIC_FLAG;
2905   }
2906 }
2907 
2908 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
2909                              SelectionDAG &DAG) {
2910   SDLoc DL(HiPart);
2911   EVT PtrVT = HiPart.getValueType();
2912   SDValue Zero = DAG.getConstant(0, DL, PtrVT);
2913 
2914   SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
2915   SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);
2916 
2917   // With PIC, the first instruction is actually "GR+hi(&G)".
2918   if (isPIC)
2919     Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
2920                      DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);
2921 
2922   // Generate non-pic code that has direct accesses to the constant pool.
2923   // The address of the global is just (hi(&g)+lo(&g)).
2924   return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
2925 }
2926 
2927 static void setUsesTOCBasePtr(MachineFunction &MF) {
2928   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2929   FuncInfo->setUsesTOCBasePtr();
2930 }
2931 
2932 static void setUsesTOCBasePtr(SelectionDAG &DAG) {
2933   setUsesTOCBasePtr(DAG.getMachineFunction());
2934 }
2935 
2936 SDValue PPCTargetLowering::getTOCEntry(SelectionDAG &DAG, const SDLoc &dl,
2937                                        SDValue GA) const {
2938   const bool Is64Bit = Subtarget.isPPC64();
2939   EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
2940   SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT)
2941                         : Subtarget.isAIXABI()
2942                               ? DAG.getRegister(PPC::R2, VT)
2943                               : DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);
2944   SDValue Ops[] = { GA, Reg };
2945   return DAG.getMemIntrinsicNode(
2946       PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
2947       MachinePointerInfo::getGOT(DAG.getMachineFunction()), None,
2948       MachineMemOperand::MOLoad);
2949 }
2950 
2951 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
2952                                              SelectionDAG &DAG) const {
2953   EVT PtrVT = Op.getValueType();
2954   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
2955   const Constant *C = CP->getConstVal();
2956 
2957   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
2958   // The actual address of the GlobalValue is stored in the TOC.
2959   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
2960     if (Subtarget.isUsingPCRelativeCalls()) {
2961       SDLoc DL(CP);
2962       EVT Ty = getPointerTy(DAG.getDataLayout());
2963       SDValue ConstPool = DAG.getTargetConstantPool(
2964           C, Ty, CP->getAlign(), CP->getOffset(), PPCII::MO_PCREL_FLAG);
2965       return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, ConstPool);
2966     }
2967     setUsesTOCBasePtr(DAG);
2968     SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0);
2969     return getTOCEntry(DAG, SDLoc(CP), GA);
2970   }
2971 
2972   unsigned MOHiFlag, MOLoFlag;
2973   bool IsPIC = isPositionIndependent();
2974   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2975 
2976   if (IsPIC && Subtarget.isSVR4ABI()) {
2977     SDValue GA =
2978         DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), PPCII::MO_PIC_FLAG);
2979     return getTOCEntry(DAG, SDLoc(CP), GA);
2980   }
2981 
2982   SDValue CPIHi =
2983       DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOHiFlag);
2984   SDValue CPILo =
2985       DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOLoFlag);
2986   return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG);
2987 }
2988 
2989 // For 64-bit PowerPC, prefer the more compact relative encodings.
2990 // This trades 32 bits per jump table entry for one or two instructions
2991 // on the jump site.
2992 unsigned PPCTargetLowering::getJumpTableEncoding() const {
2993   if (isJumpTableRelative())
2994     return MachineJumpTableInfo::EK_LabelDifference32;
2995 
2996   return TargetLowering::getJumpTableEncoding();
2997 }
2998 
2999 bool PPCTargetLowering::isJumpTableRelative() const {
3000   if (UseAbsoluteJumpTables)
3001     return false;
3002   if (Subtarget.isPPC64() || Subtarget.isAIXABI())
3003     return true;
3004   return TargetLowering::isJumpTableRelative();
3005 }
3006 
3007 SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table,
3008                                                     SelectionDAG &DAG) const {
3009   if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
3010     return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
3011 
3012   switch (getTargetMachine().getCodeModel()) {
3013   case CodeModel::Small:
3014   case CodeModel::Medium:
3015     return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
3016   default:
3017     return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(),
3018                        getPointerTy(DAG.getDataLayout()));
3019   }
3020 }
3021 
3022 const MCExpr *
3023 PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
3024                                                 unsigned JTI,
3025                                                 MCContext &Ctx) const {
3026   if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
3027     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
3028 
3029   switch (getTargetMachine().getCodeModel()) {
3030   case CodeModel::Small:
3031   case CodeModel::Medium:
3032     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
3033   default:
3034     return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
3035   }
3036 }
3037 
3038 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
3039   EVT PtrVT = Op.getValueType();
3040   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
3041 
3042   // isUsingPCRelativeCalls() returns true when PCRelative is enabled
3043   if (Subtarget.isUsingPCRelativeCalls()) {
3044     SDLoc DL(JT);
3045     EVT Ty = getPointerTy(DAG.getDataLayout());
3046     SDValue GA =
3047         DAG.getTargetJumpTable(JT->getIndex(), Ty, PPCII::MO_PCREL_FLAG);
3048     SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3049     return MatAddr;
3050   }
3051 
3052   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
3053   // The actual address of the GlobalValue is stored in the TOC.
3054   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3055     setUsesTOCBasePtr(DAG);
3056     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
3057     return getTOCEntry(DAG, SDLoc(JT), GA);
3058   }
3059 
3060   unsigned MOHiFlag, MOLoFlag;
3061   bool IsPIC = isPositionIndependent();
3062   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
3063 
3064   if (IsPIC && Subtarget.isSVR4ABI()) {
3065     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
3066                                         PPCII::MO_PIC_FLAG);
3067     return getTOCEntry(DAG, SDLoc(GA), GA);
3068   }
3069 
3070   SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
3071   SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
3072   return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG);
3073 }
3074 
3075 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
3076                                              SelectionDAG &DAG) const {
3077   EVT PtrVT = Op.getValueType();
3078   BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
3079   const BlockAddress *BA = BASDN->getBlockAddress();
3080 
3081   // isUsingPCRelativeCalls() returns true when PCRelative is enabled
3082   if (Subtarget.isUsingPCRelativeCalls()) {
3083     SDLoc DL(BASDN);
3084     EVT Ty = getPointerTy(DAG.getDataLayout());
3085     SDValue GA = DAG.getTargetBlockAddress(BA, Ty, BASDN->getOffset(),
3086                                            PPCII::MO_PCREL_FLAG);
3087     SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3088     return MatAddr;
3089   }
3090 
3091   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
3092   // The actual BlockAddress is stored in the TOC.
3093   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3094     setUsesTOCBasePtr(DAG);
3095     SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
3096     return getTOCEntry(DAG, SDLoc(BASDN), GA);
3097   }
3098 
3099   // 32-bit position-independent ELF stores the BlockAddress in the .got.
3100   if (Subtarget.is32BitELFABI() && isPositionIndependent())
3101     return getTOCEntry(
3102         DAG, SDLoc(BASDN),
3103         DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()));
3104 
3105   unsigned MOHiFlag, MOLoFlag;
3106   bool IsPIC = isPositionIndependent();
3107   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
3108   SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
3109   SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
3110   return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG);
3111 }
3112 
3113 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
3114                                               SelectionDAG &DAG) const {
3115   // FIXME: TLS addresses currently use medium model code sequences,
3116   // which is the most useful form.  Eventually support for small and
3117   // large models could be added if users need it, at the cost of
3118   // additional complexity.
3119   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
3120   if (DAG.getTarget().useEmulatedTLS())
3121     return LowerToTLSEmulatedModel(GA, DAG);
3122 
3123   SDLoc dl(GA);
3124   const GlobalValue *GV = GA->getGlobal();
3125   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3126   bool is64bit = Subtarget.isPPC64();
3127   const Module *M = DAG.getMachineFunction().getFunction().getParent();
3128   PICLevel::Level picLevel = M->getPICLevel();
3129 
3130   const TargetMachine &TM = getTargetMachine();
3131   TLSModel::Model Model = TM.getTLSModel(GV);
3132 
3133   if (Model == TLSModel::LocalExec) {
3134     if (Subtarget.isUsingPCRelativeCalls()) {
3135       SDValue TLSReg = DAG.getRegister(PPC::X13, MVT::i64);
3136       SDValue TGA = DAG.getTargetGlobalAddress(
3137           GV, dl, PtrVT, 0, (PPCII::MO_PCREL_FLAG | PPCII::MO_TPREL_FLAG));
3138       SDValue MatAddr =
3139           DAG.getNode(PPCISD::TLS_LOCAL_EXEC_MAT_ADDR, dl, PtrVT, TGA);
3140       return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TLSReg, MatAddr);
3141     }
3142 
3143     SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3144                                                PPCII::MO_TPREL_HA);
3145     SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3146                                                PPCII::MO_TPREL_LO);
3147     SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64)
3148                              : DAG.getRegister(PPC::R2, MVT::i32);
3149 
3150     SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
3151     return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
3152   }
3153 
3154   if (Model == TLSModel::InitialExec) {
3155     bool IsPCRel = Subtarget.isUsingPCRelativeCalls();
3156     SDValue TGA = DAG.getTargetGlobalAddress(
3157         GV, dl, PtrVT, 0, IsPCRel ? PPCII::MO_GOT_TPREL_PCREL_FLAG : 0);
3158     SDValue TGATLS = DAG.getTargetGlobalAddress(
3159         GV, dl, PtrVT, 0,
3160         IsPCRel ? (PPCII::MO_TLS | PPCII::MO_PCREL_FLAG) : PPCII::MO_TLS);
3161     SDValue TPOffset;
3162     if (IsPCRel) {
3163       SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, dl, PtrVT, TGA);
3164       TPOffset = DAG.getLoad(MVT::i64, dl, DAG.getEntryNode(), MatPCRel,
3165                              MachinePointerInfo());
3166     } else {
3167       SDValue GOTPtr;
3168       if (is64bit) {
3169         setUsesTOCBasePtr(DAG);
3170         SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3171         GOTPtr =
3172             DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, PtrVT, GOTReg, TGA);
3173       } else {
3174         if (!TM.isPositionIndependent())
3175           GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT);
3176         else if (picLevel == PICLevel::SmallPIC)
3177           GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3178         else
3179           GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3180       }
3181       TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, PtrVT, TGA, GOTPtr);
3182     }
3183     return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
3184   }
3185 
3186   if (Model == TLSModel::GeneralDynamic) {
3187     if (Subtarget.isUsingPCRelativeCalls()) {
3188       SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3189                                                PPCII::MO_GOT_TLSGD_PCREL_FLAG);
3190       return DAG.getNode(PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR, dl, PtrVT, TGA);
3191     }
3192 
3193     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
3194     SDValue GOTPtr;
3195     if (is64bit) {
3196       setUsesTOCBasePtr(DAG);
3197       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3198       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
3199                                    GOTReg, TGA);
3200     } else {
3201       if (picLevel == PICLevel::SmallPIC)
3202         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3203       else
3204         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3205     }
3206     return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT,
3207                        GOTPtr, TGA, TGA);
3208   }
3209 
3210   if (Model == TLSModel::LocalDynamic) {
3211     if (Subtarget.isUsingPCRelativeCalls()) {
3212       SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3213                                                PPCII::MO_GOT_TLSLD_PCREL_FLAG);
3214       SDValue MatPCRel =
3215           DAG.getNode(PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR, dl, PtrVT, TGA);
3216       return DAG.getNode(PPCISD::PADDI_DTPREL, dl, PtrVT, MatPCRel, TGA);
3217     }
3218 
3219     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
3220     SDValue GOTPtr;
3221     if (is64bit) {
3222       setUsesTOCBasePtr(DAG);
3223       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3224       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
3225                            GOTReg, TGA);
3226     } else {
3227       if (picLevel == PICLevel::SmallPIC)
3228         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3229       else
3230         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3231     }
3232     SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
3233                                   PtrVT, GOTPtr, TGA, TGA);
3234     SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
3235                                       PtrVT, TLSAddr, TGA);
3236     return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
3237   }
3238 
3239   llvm_unreachable("Unknown TLS model!");
3240 }
3241 
3242 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
3243                                               SelectionDAG &DAG) const {
3244   EVT PtrVT = Op.getValueType();
3245   GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
3246   SDLoc DL(GSDN);
3247   const GlobalValue *GV = GSDN->getGlobal();
3248 
3249   // 64-bit SVR4 ABI & AIX ABI code is always position-independent.
3250   // The actual address of the GlobalValue is stored in the TOC.
3251   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3252     if (Subtarget.isUsingPCRelativeCalls()) {
3253       EVT Ty = getPointerTy(DAG.getDataLayout());
3254       if (isAccessedAsGotIndirect(Op)) {
3255         SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
3256                                                 PPCII::MO_PCREL_FLAG |
3257                                                     PPCII::MO_GOT_FLAG);
3258         SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3259         SDValue Load = DAG.getLoad(MVT::i64, DL, DAG.getEntryNode(), MatPCRel,
3260                                    MachinePointerInfo());
3261         return Load;
3262       } else {
3263         SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
3264                                                 PPCII::MO_PCREL_FLAG);
3265         return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3266       }
3267     }
3268     setUsesTOCBasePtr(DAG);
3269     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
3270     return getTOCEntry(DAG, DL, GA);
3271   }
3272 
3273   unsigned MOHiFlag, MOLoFlag;
3274   bool IsPIC = isPositionIndependent();
3275   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV);
3276 
3277   if (IsPIC && Subtarget.isSVR4ABI()) {
3278     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
3279                                             GSDN->getOffset(),
3280                                             PPCII::MO_PIC_FLAG);
3281     return getTOCEntry(DAG, DL, GA);
3282   }
3283 
3284   SDValue GAHi =
3285     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
3286   SDValue GALo =
3287     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);
3288 
3289   return LowerLabelRef(GAHi, GALo, IsPIC, DAG);
3290 }
3291 
3292 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
3293   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
3294   SDLoc dl(Op);
3295 
3296   if (Op.getValueType() == MVT::v2i64) {
3297     // When the operands themselves are v2i64 values, we need to do something
3298     // special because VSX has no underlying comparison operations for these.
3299     if (Op.getOperand(0).getValueType() == MVT::v2i64) {
3300       // Equality can be handled by casting to the legal type for Altivec
3301       // comparisons, everything else needs to be expanded.
3302       if (CC == ISD::SETEQ || CC == ISD::SETNE) {
3303         return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
3304                  DAG.getSetCC(dl, MVT::v4i32,
3305                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)),
3306                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)),
3307                    CC));
3308       }
3309 
3310       return SDValue();
3311     }
3312 
3313     // We handle most of these in the usual way.
3314     return Op;
3315   }
3316 
3317   // If we're comparing for equality to zero, expose the fact that this is
3318   // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
3319   // fold the new nodes.
3320   if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG))
3321     return V;
3322 
3323   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
3324     // Leave comparisons against 0 and -1 alone for now, since they're usually
3325     // optimized.  FIXME: revisit this when we can custom lower all setcc
3326     // optimizations.
3327     if (C->isAllOnesValue() || C->isNullValue())
3328       return SDValue();
3329   }
3330 
3331   // If we have an integer seteq/setne, turn it into a compare against zero
3332   // by xor'ing the rhs with the lhs, which is faster than setting a
3333   // condition register, reading it back out, and masking the correct bit.  The
3334   // normal approach here uses sub to do this instead of xor.  Using xor exposes
3335   // the result to other bit-twiddling opportunities.
3336   EVT LHSVT = Op.getOperand(0).getValueType();
3337   if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
3338     EVT VT = Op.getValueType();
3339     SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
3340                                 Op.getOperand(1));
3341     return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
3342   }
3343   return SDValue();
3344 }
3345 
3346 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
3347   SDNode *Node = Op.getNode();
3348   EVT VT = Node->getValueType(0);
3349   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3350   SDValue InChain = Node->getOperand(0);
3351   SDValue VAListPtr = Node->getOperand(1);
3352   const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
3353   SDLoc dl(Node);
3354 
3355   assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
3356 
3357   // gpr_index
3358   SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3359                                     VAListPtr, MachinePointerInfo(SV), MVT::i8);
3360   InChain = GprIndex.getValue(1);
3361 
3362   if (VT == MVT::i64) {
3363     // Check if GprIndex is even
3364     SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
3365                                  DAG.getConstant(1, dl, MVT::i32));
3366     SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
3367                                 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
3368     SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
3369                                           DAG.getConstant(1, dl, MVT::i32));
3370     // Align GprIndex to be even if it isn't
3371     GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
3372                            GprIndex);
3373   }
3374 
3375   // fpr index is 1 byte after gpr
3376   SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3377                                DAG.getConstant(1, dl, MVT::i32));
3378 
3379   // fpr
3380   SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3381                                     FprPtr, MachinePointerInfo(SV), MVT::i8);
3382   InChain = FprIndex.getValue(1);
3383 
3384   SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3385                                        DAG.getConstant(8, dl, MVT::i32));
3386 
3387   SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3388                                         DAG.getConstant(4, dl, MVT::i32));
3389 
3390   // areas
3391   SDValue OverflowArea =
3392       DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
3393   InChain = OverflowArea.getValue(1);
3394 
3395   SDValue RegSaveArea =
3396       DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
3397   InChain = RegSaveArea.getValue(1);
3398 
3399   // select overflow_area if index > 8
3400   SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
3401                             DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);
3402 
3403   // adjustment constant gpr_index * 4/8
3404   SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
3405                                     VT.isInteger() ? GprIndex : FprIndex,
3406                                     DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
3407                                                     MVT::i32));
3408 
3409   // OurReg = RegSaveArea + RegConstant
3410   SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
3411                                RegConstant);
3412 
3413   // Floating types are 32 bytes into RegSaveArea
3414   if (VT.isFloatingPoint())
3415     OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
3416                          DAG.getConstant(32, dl, MVT::i32));
3417 
3418   // increase {f,g}pr_index by 1 (or 2 if VT is i64)
3419   SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3420                                    VT.isInteger() ? GprIndex : FprIndex,
3421                                    DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
3422                                                    MVT::i32));
3423 
3424   InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
3425                               VT.isInteger() ? VAListPtr : FprPtr,
3426                               MachinePointerInfo(SV), MVT::i8);
3427 
3428   // determine if we should load from reg_save_area or overflow_area
3429   SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
3430 
3431   // increase overflow_area by 4/8 if gpr/fpr > 8
3432   SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
3433                                           DAG.getConstant(VT.isInteger() ? 4 : 8,
3434                                           dl, MVT::i32));
3435 
3436   OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
3437                              OverflowAreaPlusN);
3438 
3439   InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
3440                               MachinePointerInfo(), MVT::i32);
3441 
3442   return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
3443 }
3444 
3445 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
3446   assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
3447 
3448   // We have to copy the entire va_list struct:
3449   // 2*sizeof(char) + 2 Byte alignment + 2*sizeof(char*) = 12 Byte
3450   return DAG.getMemcpy(Op.getOperand(0), Op, Op.getOperand(1), Op.getOperand(2),
3451                        DAG.getConstant(12, SDLoc(Op), MVT::i32), Align(8),
3452                        false, true, false, MachinePointerInfo(),
3453                        MachinePointerInfo());
3454 }
3455 
3456 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
3457                                                   SelectionDAG &DAG) const {
3458   if (Subtarget.isAIXABI())
3459     report_fatal_error("ADJUST_TRAMPOLINE operation is not supported on AIX.");
3460 
3461   return Op.getOperand(0);
3462 }
3463 
3464 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
3465                                                 SelectionDAG &DAG) const {
3466   if (Subtarget.isAIXABI())
3467     report_fatal_error("INIT_TRAMPOLINE operation is not supported on AIX.");
3468 
3469   SDValue Chain = Op.getOperand(0);
3470   SDValue Trmp = Op.getOperand(1); // trampoline
3471   SDValue FPtr = Op.getOperand(2); // nested function
3472   SDValue Nest = Op.getOperand(3); // 'nest' parameter value
3473   SDLoc dl(Op);
3474 
3475   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3476   bool isPPC64 = (PtrVT == MVT::i64);
3477   Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
3478 
3479   TargetLowering::ArgListTy Args;
3480   TargetLowering::ArgListEntry Entry;
3481 
3482   Entry.Ty = IntPtrTy;
3483   Entry.Node = Trmp; Args.push_back(Entry);
3484 
3485   // TrampSize == (isPPC64 ? 48 : 40);
3486   Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
3487                                isPPC64 ? MVT::i64 : MVT::i32);
3488   Args.push_back(Entry);
3489 
3490   Entry.Node = FPtr; Args.push_back(Entry);
3491   Entry.Node = Nest; Args.push_back(Entry);
3492 
3493   // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
3494   TargetLowering::CallLoweringInfo CLI(DAG);
3495   CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
3496       CallingConv::C, Type::getVoidTy(*DAG.getContext()),
3497       DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args));
3498 
3499   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
3500   return CallResult.second;
3501 }
3502 
3503 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3504   MachineFunction &MF = DAG.getMachineFunction();
3505   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3506   EVT PtrVT = getPointerTy(MF.getDataLayout());
3507 
3508   SDLoc dl(Op);
3509 
3510   if (Subtarget.isPPC64() || Subtarget.isAIXABI()) {
3511     // vastart just stores the address of the VarArgsFrameIndex slot into the
3512     // memory location argument.
3513     SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3514     const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3515     return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
3516                         MachinePointerInfo(SV));
3517   }
3518 
3519   // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
3520   // We suppose the given va_list is already allocated.
3521   //
3522   // typedef struct {
3523   //  char gpr;     /* index into the array of 8 GPRs
3524   //                 * stored in the register save area
3525   //                 * gpr=0 corresponds to r3,
3526   //                 * gpr=1 to r4, etc.
3527   //                 */
3528   //  char fpr;     /* index into the array of 8 FPRs
3529   //                 * stored in the register save area
3530   //                 * fpr=0 corresponds to f1,
3531   //                 * fpr=1 to f2, etc.
3532   //                 */
3533   //  char *overflow_arg_area;
3534   //                /* location on stack that holds
3535   //                 * the next overflow argument
3536   //                 */
3537   //  char *reg_save_area;
3538   //               /* where r3:r10 and f1:f8 (if saved)
3539   //                * are stored
3540   //                */
3541   // } va_list[1];
3542 
3543   SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
3544   SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);
3545   SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
3546                                             PtrVT);
3547   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3548                                  PtrVT);
3549 
3550   uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
3551   SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT);
3552 
3553   uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
3554   SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT);
3555 
3556   uint64_t FPROffset = 1;
3557   SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT);
3558 
3559   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3560 
3561   // Store first byte : number of int regs
3562   SDValue firstStore =
3563       DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1),
3564                         MachinePointerInfo(SV), MVT::i8);
3565   uint64_t nextOffset = FPROffset;
3566   SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
3567                                   ConstFPROffset);
3568 
3569   // Store second byte : number of float regs
3570   SDValue secondStore =
3571       DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
3572                         MachinePointerInfo(SV, nextOffset), MVT::i8);
3573   nextOffset += StackOffset;
3574   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
3575 
3576   // Store second word : arguments given on stack
3577   SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
3578                                     MachinePointerInfo(SV, nextOffset));
3579   nextOffset += FrameOffset;
3580   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
3581 
3582   // Store third word : arguments given in registers
3583   return DAG.getStore(thirdStore, dl, FR, nextPtr,
3584                       MachinePointerInfo(SV, nextOffset));
3585 }
3586 
3587 /// FPR - The set of FP registers that should be allocated for arguments
3588 /// on Darwin and AIX.
3589 static const MCPhysReg FPR[] = {PPC::F1,  PPC::F2,  PPC::F3, PPC::F4, PPC::F5,
3590                                 PPC::F6,  PPC::F7,  PPC::F8, PPC::F9, PPC::F10,
3591                                 PPC::F11, PPC::F12, PPC::F13};
3592 
3593 /// CalculateStackSlotSize - Calculates the size reserved for this argument on
3594 /// the stack.
3595 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
3596                                        unsigned PtrByteSize) {
3597   unsigned ArgSize = ArgVT.getStoreSize();
3598   if (Flags.isByVal())
3599     ArgSize = Flags.getByValSize();
3600 
3601   // Round up to multiples of the pointer size, except for array members,
3602   // which are always packed.
3603   if (!Flags.isInConsecutiveRegs())
3604     ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3605 
3606   return ArgSize;
3607 }
3608 
3609 /// CalculateStackSlotAlignment - Calculates the alignment of this argument
3610 /// on the stack.
3611 static Align CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
3612                                          ISD::ArgFlagsTy Flags,
3613                                          unsigned PtrByteSize) {
3614   Align Alignment(PtrByteSize);
3615 
3616   // Altivec parameters are padded to a 16 byte boundary.
3617   if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3618       ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3619       ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3620       ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3621     Alignment = Align(16);
3622 
3623   // ByVal parameters are aligned as requested.
3624   if (Flags.isByVal()) {
3625     auto BVAlign = Flags.getNonZeroByValAlign();
3626     if (BVAlign > PtrByteSize) {
3627       if (BVAlign.value() % PtrByteSize != 0)
3628         llvm_unreachable(
3629             "ByVal alignment is not a multiple of the pointer size");
3630 
3631       Alignment = BVAlign;
3632     }
3633   }
3634 
3635   // Array members are always packed to their original alignment.
3636   if (Flags.isInConsecutiveRegs()) {
3637     // If the array member was split into multiple registers, the first
3638     // needs to be aligned to the size of the full type.  (Except for
3639     // ppcf128, which is only aligned as its f64 components.)
3640     if (Flags.isSplit() && OrigVT != MVT::ppcf128)
3641       Alignment = Align(OrigVT.getStoreSize());
3642     else
3643       Alignment = Align(ArgVT.getStoreSize());
3644   }
3645 
3646   return Alignment;
3647 }
3648 
3649 /// CalculateStackSlotUsed - Return whether this argument will use its
3650 /// stack slot (instead of being passed in registers).  ArgOffset,
3651 /// AvailableFPRs, and AvailableVRs must hold the current argument
3652 /// position, and will be updated to account for this argument.
3653 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, ISD::ArgFlagsTy Flags,
3654                                    unsigned PtrByteSize, unsigned LinkageSize,
3655                                    unsigned ParamAreaSize, unsigned &ArgOffset,
3656                                    unsigned &AvailableFPRs,
3657                                    unsigned &AvailableVRs) {
3658   bool UseMemory = false;
3659 
3660   // Respect alignment of argument on the stack.
3661   Align Alignment =
3662       CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
3663   ArgOffset = alignTo(ArgOffset, Alignment);
3664   // If there's no space left in the argument save area, we must
3665   // use memory (this check also catches zero-sized arguments).
3666   if (ArgOffset >= LinkageSize + ParamAreaSize)
3667     UseMemory = true;
3668 
3669   // Allocate argument on the stack.
3670   ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
3671   if (Flags.isInConsecutiveRegsLast())
3672     ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3673   // If we overran the argument save area, we must use memory
3674   // (this check catches arguments passed partially in memory)
3675   if (ArgOffset > LinkageSize + ParamAreaSize)
3676     UseMemory = true;
3677 
3678   // However, if the argument is actually passed in an FPR or a VR,
3679   // we don't use memory after all.
3680   if (!Flags.isByVal()) {
3681     if (ArgVT == MVT::f32 || ArgVT == MVT::f64)
3682       if (AvailableFPRs > 0) {
3683         --AvailableFPRs;
3684         return false;
3685       }
3686     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3687         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3688         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3689         ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3690       if (AvailableVRs > 0) {
3691         --AvailableVRs;
3692         return false;
3693       }
3694   }
3695 
3696   return UseMemory;
3697 }
3698 
3699 /// EnsureStackAlignment - Round stack frame size up from NumBytes to
3700 /// ensure minimum alignment required for target.
3701 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
3702                                      unsigned NumBytes) {
3703   return alignTo(NumBytes, Lowering->getStackAlign());
3704 }
3705 
3706 SDValue PPCTargetLowering::LowerFormalArguments(
3707     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3708     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3709     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3710   if (Subtarget.isAIXABI())
3711     return LowerFormalArguments_AIX(Chain, CallConv, isVarArg, Ins, dl, DAG,
3712                                     InVals);
3713   if (Subtarget.is64BitELFABI())
3714     return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3715                                        InVals);
3716   assert(Subtarget.is32BitELFABI());
3717   return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3718                                      InVals);
3719 }
3720 
3721 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
3722     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3723     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3724     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3725 
3726   // 32-bit SVR4 ABI Stack Frame Layout:
3727   //              +-----------------------------------+
3728   //        +-->  |            Back chain             |
3729   //        |     +-----------------------------------+
3730   //        |     | Floating-point register save area |
3731   //        |     +-----------------------------------+
3732   //        |     |    General register save area     |
3733   //        |     +-----------------------------------+
3734   //        |     |          CR save word             |
3735   //        |     +-----------------------------------+
3736   //        |     |         VRSAVE save word          |
3737   //        |     +-----------------------------------+
3738   //        |     |         Alignment padding         |
3739   //        |     +-----------------------------------+
3740   //        |     |     Vector register save area     |
3741   //        |     +-----------------------------------+
3742   //        |     |       Local variable space        |
3743   //        |     +-----------------------------------+
3744   //        |     |        Parameter list area        |
3745   //        |     +-----------------------------------+
3746   //        |     |           LR save word            |
3747   //        |     +-----------------------------------+
3748   // SP-->  +---  |            Back chain             |
3749   //              +-----------------------------------+
3750   //
3751   // Specifications:
3752   //   System V Application Binary Interface PowerPC Processor Supplement
3753   //   AltiVec Technology Programming Interface Manual
3754 
3755   MachineFunction &MF = DAG.getMachineFunction();
3756   MachineFrameInfo &MFI = MF.getFrameInfo();
3757   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3758 
3759   EVT PtrVT = getPointerTy(MF.getDataLayout());
3760   // Potential tail calls could cause overwriting of argument stack slots.
3761   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3762                        (CallConv == CallingConv::Fast));
3763   const Align PtrAlign(4);
3764 
3765   // Assign locations to all of the incoming arguments.
3766   SmallVector<CCValAssign, 16> ArgLocs;
3767   PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
3768                  *DAG.getContext());
3769 
3770   // Reserve space for the linkage area on the stack.
3771   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3772   CCInfo.AllocateStack(LinkageSize, PtrAlign);
3773   if (useSoftFloat())
3774     CCInfo.PreAnalyzeFormalArguments(Ins);
3775 
3776   CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
3777   CCInfo.clearWasPPCF128();
3778 
3779   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3780     CCValAssign &VA = ArgLocs[i];
3781 
3782     // Arguments stored in registers.
3783     if (VA.isRegLoc()) {
3784       const TargetRegisterClass *RC;
3785       EVT ValVT = VA.getValVT();
3786 
3787       switch (ValVT.getSimpleVT().SimpleTy) {
3788         default:
3789           llvm_unreachable("ValVT not supported by formal arguments Lowering");
3790         case MVT::i1:
3791         case MVT::i32:
3792           RC = &PPC::GPRCRegClass;
3793           break;
3794         case MVT::f32:
3795           if (Subtarget.hasP8Vector())
3796             RC = &PPC::VSSRCRegClass;
3797           else if (Subtarget.hasSPE())
3798             RC = &PPC::GPRCRegClass;
3799           else
3800             RC = &PPC::F4RCRegClass;
3801           break;
3802         case MVT::f64:
3803           if (Subtarget.hasVSX())
3804             RC = &PPC::VSFRCRegClass;
3805           else if (Subtarget.hasSPE())
3806             // SPE passes doubles in GPR pairs.
3807             RC = &PPC::GPRCRegClass;
3808           else
3809             RC = &PPC::F8RCRegClass;
3810           break;
3811         case MVT::v16i8:
3812         case MVT::v8i16:
3813         case MVT::v4i32:
3814           RC = &PPC::VRRCRegClass;
3815           break;
3816         case MVT::v4f32:
3817           RC = &PPC::VRRCRegClass;
3818           break;
3819         case MVT::v2f64:
3820         case MVT::v2i64:
3821           RC = &PPC::VRRCRegClass;
3822           break;
3823       }
3824 
3825       SDValue ArgValue;
3826       // Transform the arguments stored in physical registers into
3827       // virtual ones.
3828       if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) {
3829         assert(i + 1 < e && "No second half of double precision argument");
3830         unsigned RegLo = MF.addLiveIn(VA.getLocReg(), RC);
3831         unsigned RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC);
3832         SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32);
3833         SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32);
3834         if (!Subtarget.isLittleEndian())
3835           std::swap (ArgValueLo, ArgValueHi);
3836         ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo,
3837                                ArgValueHi);
3838       } else {
3839         unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3840         ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
3841                                       ValVT == MVT::i1 ? MVT::i32 : ValVT);
3842         if (ValVT == MVT::i1)
3843           ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);
3844       }
3845 
3846       InVals.push_back(ArgValue);
3847     } else {
3848       // Argument stored in memory.
3849       assert(VA.isMemLoc());
3850 
3851       // Get the extended size of the argument type in stack
3852       unsigned ArgSize = VA.getLocVT().getStoreSize();
3853       // Get the actual size of the argument type
3854       unsigned ObjSize = VA.getValVT().getStoreSize();
3855       unsigned ArgOffset = VA.getLocMemOffset();
3856       // Stack objects in PPC32 are right justified.
3857       ArgOffset += ArgSize - ObjSize;
3858       int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, isImmutable);
3859 
3860       // Create load nodes to retrieve arguments from the stack.
3861       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3862       InVals.push_back(
3863           DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo()));
3864     }
3865   }
3866 
3867   // Assign locations to all of the incoming aggregate by value arguments.
3868   // Aggregates passed by value are stored in the local variable space of the
3869   // caller's stack frame, right above the parameter list area.
3870   SmallVector<CCValAssign, 16> ByValArgLocs;
3871   CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
3872                       ByValArgLocs, *DAG.getContext());
3873 
3874   // Reserve stack space for the allocations in CCInfo.
3875   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
3876 
3877   CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);
3878 
3879   // Area that is at least reserved in the caller of this function.
3880   unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
3881   MinReservedArea = std::max(MinReservedArea, LinkageSize);
3882 
3883   // Set the size that is at least reserved in caller of this function.  Tail
3884   // call optimized function's reserved stack space needs to be aligned so that
3885   // taking the difference between two stack areas will result in an aligned
3886   // stack.
3887   MinReservedArea =
3888       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
3889   FuncInfo->setMinReservedArea(MinReservedArea);
3890 
3891   SmallVector<SDValue, 8> MemOps;
3892 
3893   // If the function takes variable number of arguments, make a frame index for
3894   // the start of the first vararg value... for expansion of llvm.va_start.
3895   if (isVarArg) {
3896     static const MCPhysReg GPArgRegs[] = {
3897       PPC::R3, PPC::R4, PPC::R5, PPC::R6,
3898       PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3899     };
3900     const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
3901 
3902     static const MCPhysReg FPArgRegs[] = {
3903       PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
3904       PPC::F8
3905     };
3906     unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
3907 
3908     if (useSoftFloat() || hasSPE())
3909        NumFPArgRegs = 0;
3910 
3911     FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
3912     FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));
3913 
3914     // Make room for NumGPArgRegs and NumFPArgRegs.
3915     int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
3916                 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;
3917 
3918     FuncInfo->setVarArgsStackOffset(
3919       MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
3920                             CCInfo.getNextStackOffset(), true));
3921 
3922     FuncInfo->setVarArgsFrameIndex(
3923         MFI.CreateStackObject(Depth, Align(8), false));
3924     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3925 
3926     // The fixed integer arguments of a variadic function are stored to the
3927     // VarArgsFrameIndex on the stack so that they may be loaded by
3928     // dereferencing the result of va_next.
3929     for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
3930       // Get an existing live-in vreg, or add a new one.
3931       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
3932       if (!VReg)
3933         VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
3934 
3935       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3936       SDValue Store =
3937           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3938       MemOps.push_back(Store);
3939       // Increment the address by four for the next argument to store
3940       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
3941       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3942     }
3943 
3944     // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
3945     // is set.
3946     // The double arguments are stored to the VarArgsFrameIndex
3947     // on the stack.
3948     for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
3949       // Get an existing live-in vreg, or add a new one.
3950       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
3951       if (!VReg)
3952         VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
3953 
3954       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
3955       SDValue Store =
3956           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3957       MemOps.push_back(Store);
3958       // Increment the address by eight for the next argument to store
3959       SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
3960                                          PtrVT);
3961       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3962     }
3963   }
3964 
3965   if (!MemOps.empty())
3966     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3967 
3968   return Chain;
3969 }
3970 
3971 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
3972 // value to MVT::i64 and then truncate to the correct register size.
3973 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
3974                                              EVT ObjectVT, SelectionDAG &DAG,
3975                                              SDValue ArgVal,
3976                                              const SDLoc &dl) const {
3977   if (Flags.isSExt())
3978     ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
3979                          DAG.getValueType(ObjectVT));
3980   else if (Flags.isZExt())
3981     ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
3982                          DAG.getValueType(ObjectVT));
3983 
3984   return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
3985 }
3986 
3987 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
3988     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3989     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3990     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3991   // TODO: add description of PPC stack frame format, or at least some docs.
3992   //
3993   bool isELFv2ABI = Subtarget.isELFv2ABI();
3994   bool isLittleEndian = Subtarget.isLittleEndian();
3995   MachineFunction &MF = DAG.getMachineFunction();
3996   MachineFrameInfo &MFI = MF.getFrameInfo();
3997   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3998 
3999   assert(!(CallConv == CallingConv::Fast && isVarArg) &&
4000          "fastcc not supported on varargs functions");
4001 
4002   EVT PtrVT = getPointerTy(MF.getDataLayout());
4003   // Potential tail calls could cause overwriting of argument stack slots.
4004   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
4005                        (CallConv == CallingConv::Fast));
4006   unsigned PtrByteSize = 8;
4007   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4008 
4009   static const MCPhysReg GPR[] = {
4010     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4011     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4012   };
4013   static const MCPhysReg VR[] = {
4014     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4015     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4016   };
4017 
4018   const unsigned Num_GPR_Regs = array_lengthof(GPR);
4019   const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
4020   const unsigned Num_VR_Regs  = array_lengthof(VR);
4021 
4022   // Do a first pass over the arguments to determine whether the ABI
4023   // guarantees that our caller has allocated the parameter save area
4024   // on its stack frame.  In the ELFv1 ABI, this is always the case;
4025   // in the ELFv2 ABI, it is true if this is a vararg function or if
4026   // any parameter is located in a stack slot.
4027 
4028   bool HasParameterArea = !isELFv2ABI || isVarArg;
4029   unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
4030   unsigned NumBytes = LinkageSize;
4031   unsigned AvailableFPRs = Num_FPR_Regs;
4032   unsigned AvailableVRs = Num_VR_Regs;
4033   for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
4034     if (Ins[i].Flags.isNest())
4035       continue;
4036 
4037     if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
4038                                PtrByteSize, LinkageSize, ParamAreaSize,
4039                                NumBytes, AvailableFPRs, AvailableVRs))
4040       HasParameterArea = true;
4041   }
4042 
4043   // Add DAG nodes to load the arguments or copy them out of registers.  On
4044   // entry to a function on PPC, the arguments start after the linkage area,
4045   // although the first ones are often in registers.
4046 
4047   unsigned ArgOffset = LinkageSize;
4048   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
4049   SmallVector<SDValue, 8> MemOps;
4050   Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
4051   unsigned CurArgIdx = 0;
4052   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
4053     SDValue ArgVal;
4054     bool needsLoad = false;
4055     EVT ObjectVT = Ins[ArgNo].VT;
4056     EVT OrigVT = Ins[ArgNo].ArgVT;
4057     unsigned ObjSize = ObjectVT.getStoreSize();
4058     unsigned ArgSize = ObjSize;
4059     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4060     if (Ins[ArgNo].isOrigArg()) {
4061       std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
4062       CurArgIdx = Ins[ArgNo].getOrigArgIndex();
4063     }
4064     // We re-align the argument offset for each argument, except when using the
4065     // fast calling convention, when we need to make sure we do that only when
4066     // we'll actually use a stack slot.
4067     unsigned CurArgOffset;
4068     Align Alignment;
4069     auto ComputeArgOffset = [&]() {
4070       /* Respect alignment of argument on the stack.  */
4071       Alignment =
4072           CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
4073       ArgOffset = alignTo(ArgOffset, Alignment);
4074       CurArgOffset = ArgOffset;
4075     };
4076 
4077     if (CallConv != CallingConv::Fast) {
4078       ComputeArgOffset();
4079 
4080       /* Compute GPR index associated with argument offset.  */
4081       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4082       GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
4083     }
4084 
4085     // FIXME the codegen can be much improved in some cases.
4086     // We do not have to keep everything in memory.
4087     if (Flags.isByVal()) {
4088       assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
4089 
4090       if (CallConv == CallingConv::Fast)
4091         ComputeArgOffset();
4092 
4093       // ObjSize is the true size, ArgSize rounded up to multiple of registers.
4094       ObjSize = Flags.getByValSize();
4095       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4096       // Empty aggregate parameters do not take up registers.  Examples:
4097       //   struct { } a;
4098       //   union  { } b;
4099       //   int c[0];
4100       // etc.  However, we have to provide a place-holder in InVals, so
4101       // pretend we have an 8-byte item at the current address for that
4102       // purpose.
4103       if (!ObjSize) {
4104         int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
4105         SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4106         InVals.push_back(FIN);
4107         continue;
4108       }
4109 
4110       // Create a stack object covering all stack doublewords occupied
4111       // by the argument.  If the argument is (fully or partially) on
4112       // the stack, or if the argument is fully in registers but the
4113       // caller has allocated the parameter save anyway, we can refer
4114       // directly to the caller's stack frame.  Otherwise, create a
4115       // local copy in our own frame.
4116       int FI;
4117       if (HasParameterArea ||
4118           ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
4119         FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true);
4120       else
4121         FI = MFI.CreateStackObject(ArgSize, Alignment, false);
4122       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4123 
4124       // Handle aggregates smaller than 8 bytes.
4125       if (ObjSize < PtrByteSize) {
4126         // The value of the object is its address, which differs from the
4127         // address of the enclosing doubleword on big-endian systems.
4128         SDValue Arg = FIN;
4129         if (!isLittleEndian) {
4130           SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
4131           Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
4132         }
4133         InVals.push_back(Arg);
4134 
4135         if (GPR_idx != Num_GPR_Regs) {
4136           unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4137           FuncInfo->addLiveInAttr(VReg, Flags);
4138           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4139           SDValue Store;
4140 
4141           if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
4142             EVT ObjType = (ObjSize == 1 ? MVT::i8 :
4143                            (ObjSize == 2 ? MVT::i16 : MVT::i32));
4144             Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
4145                                       MachinePointerInfo(&*FuncArg), ObjType);
4146           } else {
4147             // For sizes that don't fit a truncating store (3, 5, 6, 7),
4148             // store the whole register as-is to the parameter save area
4149             // slot.
4150             Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
4151                                  MachinePointerInfo(&*FuncArg));
4152           }
4153 
4154           MemOps.push_back(Store);
4155         }
4156         // Whether we copied from a register or not, advance the offset
4157         // into the parameter save area by a full doubleword.
4158         ArgOffset += PtrByteSize;
4159         continue;
4160       }
4161 
4162       // The value of the object is its address, which is the address of
4163       // its first stack doubleword.
4164       InVals.push_back(FIN);
4165 
4166       // Store whatever pieces of the object are in registers to memory.
4167       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
4168         if (GPR_idx == Num_GPR_Regs)
4169           break;
4170 
4171         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4172         FuncInfo->addLiveInAttr(VReg, Flags);
4173         SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4174         SDValue Addr = FIN;
4175         if (j) {
4176           SDValue Off = DAG.getConstant(j, dl, PtrVT);
4177           Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
4178         }
4179         SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
4180                                      MachinePointerInfo(&*FuncArg, j));
4181         MemOps.push_back(Store);
4182         ++GPR_idx;
4183       }
4184       ArgOffset += ArgSize;
4185       continue;
4186     }
4187 
4188     switch (ObjectVT.getSimpleVT().SimpleTy) {
4189     default: llvm_unreachable("Unhandled argument type!");
4190     case MVT::i1:
4191     case MVT::i32:
4192     case MVT::i64:
4193       if (Flags.isNest()) {
4194         // The 'nest' parameter, if any, is passed in R11.
4195         unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
4196         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4197 
4198         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4199           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4200 
4201         break;
4202       }
4203 
4204       // These can be scalar arguments or elements of an integer array type
4205       // passed directly.  Clang may use those instead of "byval" aggregate
4206       // types to avoid forcing arguments to memory unnecessarily.
4207       if (GPR_idx != Num_GPR_Regs) {
4208         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4209         FuncInfo->addLiveInAttr(VReg, Flags);
4210         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4211 
4212         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4213           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4214           // value to MVT::i64 and then truncate to the correct register size.
4215           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4216       } else {
4217         if (CallConv == CallingConv::Fast)
4218           ComputeArgOffset();
4219 
4220         needsLoad = true;
4221         ArgSize = PtrByteSize;
4222       }
4223       if (CallConv != CallingConv::Fast || needsLoad)
4224         ArgOffset += 8;
4225       break;
4226 
4227     case MVT::f32:
4228     case MVT::f64:
4229       // These can be scalar arguments or elements of a float array type
4230       // passed directly.  The latter are used to implement ELFv2 homogenous
4231       // float aggregates.
4232       if (FPR_idx != Num_FPR_Regs) {
4233         unsigned VReg;
4234 
4235         if (ObjectVT == MVT::f32)
4236           VReg = MF.addLiveIn(FPR[FPR_idx],
4237                               Subtarget.hasP8Vector()
4238                                   ? &PPC::VSSRCRegClass
4239                                   : &PPC::F4RCRegClass);
4240         else
4241           VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
4242                                                 ? &PPC::VSFRCRegClass
4243                                                 : &PPC::F8RCRegClass);
4244 
4245         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4246         ++FPR_idx;
4247       } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
4248         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
4249         // once we support fp <-> gpr moves.
4250 
4251         // This can only ever happen in the presence of f32 array types,
4252         // since otherwise we never run out of FPRs before running out
4253         // of GPRs.
4254         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4255         FuncInfo->addLiveInAttr(VReg, Flags);
4256         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4257 
4258         if (ObjectVT == MVT::f32) {
4259           if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
4260             ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
4261                                  DAG.getConstant(32, dl, MVT::i32));
4262           ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
4263         }
4264 
4265         ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
4266       } else {
4267         if (CallConv == CallingConv::Fast)
4268           ComputeArgOffset();
4269 
4270         needsLoad = true;
4271       }
4272 
4273       // When passing an array of floats, the array occupies consecutive
4274       // space in the argument area; only round up to the next doubleword
4275       // at the end of the array.  Otherwise, each float takes 8 bytes.
4276       if (CallConv != CallingConv::Fast || needsLoad) {
4277         ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
4278         ArgOffset += ArgSize;
4279         if (Flags.isInConsecutiveRegsLast())
4280           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4281       }
4282       break;
4283     case MVT::v4f32:
4284     case MVT::v4i32:
4285     case MVT::v8i16:
4286     case MVT::v16i8:
4287     case MVT::v2f64:
4288     case MVT::v2i64:
4289     case MVT::v1i128:
4290     case MVT::f128:
4291       // These can be scalar arguments or elements of a vector array type
4292       // passed directly.  The latter are used to implement ELFv2 homogenous
4293       // vector aggregates.
4294       if (VR_idx != Num_VR_Regs) {
4295         unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
4296         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4297         ++VR_idx;
4298       } else {
4299         if (CallConv == CallingConv::Fast)
4300           ComputeArgOffset();
4301         needsLoad = true;
4302       }
4303       if (CallConv != CallingConv::Fast || needsLoad)
4304         ArgOffset += 16;
4305       break;
4306     }
4307 
4308     // We need to load the argument to a virtual register if we determined
4309     // above that we ran out of physical registers of the appropriate type.
4310     if (needsLoad) {
4311       if (ObjSize < ArgSize && !isLittleEndian)
4312         CurArgOffset += ArgSize - ObjSize;
4313       int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
4314       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4315       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4316     }
4317 
4318     InVals.push_back(ArgVal);
4319   }
4320 
4321   // Area that is at least reserved in the caller of this function.
4322   unsigned MinReservedArea;
4323   if (HasParameterArea)
4324     MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
4325   else
4326     MinReservedArea = LinkageSize;
4327 
4328   // Set the size that is at least reserved in caller of this function.  Tail
4329   // call optimized functions' reserved stack space needs to be aligned so that
4330   // taking the difference between two stack areas will result in an aligned
4331   // stack.
4332   MinReservedArea =
4333       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4334   FuncInfo->setMinReservedArea(MinReservedArea);
4335 
4336   // If the function takes variable number of arguments, make a frame index for
4337   // the start of the first vararg value... for expansion of llvm.va_start.
4338   // On ELFv2ABI spec, it writes:
4339   // C programs that are intended to be *portable* across different compilers
4340   // and architectures must use the header file <stdarg.h> to deal with variable
4341   // argument lists.
4342   if (isVarArg && MFI.hasVAStart()) {
4343     int Depth = ArgOffset;
4344 
4345     FuncInfo->setVarArgsFrameIndex(
4346       MFI.CreateFixedObject(PtrByteSize, Depth, true));
4347     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4348 
4349     // If this function is vararg, store any remaining integer argument regs
4350     // to their spots on the stack so that they may be loaded by dereferencing
4351     // the result of va_next.
4352     for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4353          GPR_idx < Num_GPR_Regs; ++GPR_idx) {
4354       unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4355       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4356       SDValue Store =
4357           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4358       MemOps.push_back(Store);
4359       // Increment the address by four for the next argument to store
4360       SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
4361       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4362     }
4363   }
4364 
4365   if (!MemOps.empty())
4366     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4367 
4368   return Chain;
4369 }
4370 
4371 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
4372 /// adjusted to accommodate the arguments for the tailcall.
4373 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
4374                                    unsigned ParamSize) {
4375 
4376   if (!isTailCall) return 0;
4377 
4378   PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
4379   unsigned CallerMinReservedArea = FI->getMinReservedArea();
4380   int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
4381   // Remember only if the new adjustment is bigger.
4382   if (SPDiff < FI->getTailCallSPDelta())
4383     FI->setTailCallSPDelta(SPDiff);
4384 
4385   return SPDiff;
4386 }
4387 
4388 static bool isFunctionGlobalAddress(SDValue Callee);
4389 
4390 static bool callsShareTOCBase(const Function *Caller, SDValue Callee,
4391                               const TargetMachine &TM) {
4392   // It does not make sense to call callsShareTOCBase() with a caller that
4393   // is PC Relative since PC Relative callers do not have a TOC.
4394 #ifndef NDEBUG
4395   const PPCSubtarget *STICaller = &TM.getSubtarget<PPCSubtarget>(*Caller);
4396   assert(!STICaller->isUsingPCRelativeCalls() &&
4397          "PC Relative callers do not have a TOC and cannot share a TOC Base");
4398 #endif
4399 
4400   // Callee is either a GlobalAddress or an ExternalSymbol. ExternalSymbols
4401   // don't have enough information to determine if the caller and callee share
4402   // the same  TOC base, so we have to pessimistically assume they don't for
4403   // correctness.
4404   GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4405   if (!G)
4406     return false;
4407 
4408   const GlobalValue *GV = G->getGlobal();
4409 
4410   // If the callee is preemptable, then the static linker will use a plt-stub
4411   // which saves the toc to the stack, and needs a nop after the call
4412   // instruction to convert to a toc-restore.
4413   if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
4414     return false;
4415 
4416   // Functions with PC Relative enabled may clobber the TOC in the same DSO.
4417   // We may need a TOC restore in the situation where the caller requires a
4418   // valid TOC but the callee is PC Relative and does not.
4419   const Function *F = dyn_cast<Function>(GV);
4420   const GlobalAlias *Alias = dyn_cast<GlobalAlias>(GV);
4421 
4422   // If we have an Alias we can try to get the function from there.
4423   if (Alias) {
4424     const GlobalObject *GlobalObj = Alias->getBaseObject();
4425     F = dyn_cast<Function>(GlobalObj);
4426   }
4427 
4428   // If we still have no valid function pointer we do not have enough
4429   // information to determine if the callee uses PC Relative calls so we must
4430   // assume that it does.
4431   if (!F)
4432     return false;
4433 
4434   // If the callee uses PC Relative we cannot guarantee that the callee won't
4435   // clobber the TOC of the caller and so we must assume that the two
4436   // functions do not share a TOC base.
4437   const PPCSubtarget *STICallee = &TM.getSubtarget<PPCSubtarget>(*F);
4438   if (STICallee->isUsingPCRelativeCalls())
4439     return false;
4440 
4441   // If the GV is not a strong definition then we need to assume it can be
4442   // replaced by another function at link time. The function that replaces
4443   // it may not share the same TOC as the caller since the callee may be
4444   // replaced by a PC Relative version of the same function.
4445   if (!GV->isStrongDefinitionForLinker())
4446     return false;
4447 
4448   // The medium and large code models are expected to provide a sufficiently
4449   // large TOC to provide all data addressing needs of a module with a
4450   // single TOC.
4451   if (CodeModel::Medium == TM.getCodeModel() ||
4452       CodeModel::Large == TM.getCodeModel())
4453     return true;
4454 
4455   // Any explicitly-specified sections and section prefixes must also match.
4456   // Also, if we're using -ffunction-sections, then each function is always in
4457   // a different section (the same is true for COMDAT functions).
4458   if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
4459       GV->getSection() != Caller->getSection())
4460     return false;
4461   if (const auto *F = dyn_cast<Function>(GV)) {
4462     if (F->getSectionPrefix() != Caller->getSectionPrefix())
4463       return false;
4464   }
4465 
4466   return true;
4467 }
4468 
4469 static bool
4470 needStackSlotPassParameters(const PPCSubtarget &Subtarget,
4471                             const SmallVectorImpl<ISD::OutputArg> &Outs) {
4472   assert(Subtarget.is64BitELFABI());
4473 
4474   const unsigned PtrByteSize = 8;
4475   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4476 
4477   static const MCPhysReg GPR[] = {
4478     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4479     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4480   };
4481   static const MCPhysReg VR[] = {
4482     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4483     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4484   };
4485 
4486   const unsigned NumGPRs = array_lengthof(GPR);
4487   const unsigned NumFPRs = 13;
4488   const unsigned NumVRs = array_lengthof(VR);
4489   const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
4490 
4491   unsigned NumBytes = LinkageSize;
4492   unsigned AvailableFPRs = NumFPRs;
4493   unsigned AvailableVRs = NumVRs;
4494 
4495   for (const ISD::OutputArg& Param : Outs) {
4496     if (Param.Flags.isNest()) continue;
4497 
4498     if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags, PtrByteSize,
4499                                LinkageSize, ParamAreaSize, NumBytes,
4500                                AvailableFPRs, AvailableVRs))
4501       return true;
4502   }
4503   return false;
4504 }
4505 
4506 static bool hasSameArgumentList(const Function *CallerFn, const CallBase &CB) {
4507   if (CB.arg_size() != CallerFn->arg_size())
4508     return false;
4509 
4510   auto CalleeArgIter = CB.arg_begin();
4511   auto CalleeArgEnd = CB.arg_end();
4512   Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();
4513 
4514   for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
4515     const Value* CalleeArg = *CalleeArgIter;
4516     const Value* CallerArg = &(*CallerArgIter);
4517     if (CalleeArg == CallerArg)
4518       continue;
4519 
4520     // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
4521     //        tail call @callee([4 x i64] undef, [4 x i64] %b)
4522     //      }
4523     // 1st argument of callee is undef and has the same type as caller.
4524     if (CalleeArg->getType() == CallerArg->getType() &&
4525         isa<UndefValue>(CalleeArg))
4526       continue;
4527 
4528     return false;
4529   }
4530 
4531   return true;
4532 }
4533 
4534 // Returns true if TCO is possible between the callers and callees
4535 // calling conventions.
4536 static bool
4537 areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
4538                                     CallingConv::ID CalleeCC) {
4539   // Tail calls are possible with fastcc and ccc.
4540   auto isTailCallableCC  = [] (CallingConv::ID CC){
4541       return  CC == CallingConv::C || CC == CallingConv::Fast;
4542   };
4543   if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
4544     return false;
4545 
4546   // We can safely tail call both fastcc and ccc callees from a c calling
4547   // convention caller. If the caller is fastcc, we may have less stack space
4548   // than a non-fastcc caller with the same signature so disable tail-calls in
4549   // that case.
4550   return CallerCC == CallingConv::C || CallerCC == CalleeCC;
4551 }
4552 
4553 bool PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
4554     SDValue Callee, CallingConv::ID CalleeCC, const CallBase *CB, bool isVarArg,
4555     const SmallVectorImpl<ISD::OutputArg> &Outs,
4556     const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
4557   bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
4558 
4559   if (DisableSCO && !TailCallOpt) return false;
4560 
4561   // Variadic argument functions are not supported.
4562   if (isVarArg) return false;
4563 
4564   auto &Caller = DAG.getMachineFunction().getFunction();
4565   // Check that the calling conventions are compatible for tco.
4566   if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
4567     return false;
4568 
4569   // Caller contains any byval parameter is not supported.
4570   if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
4571     return false;
4572 
4573   // Callee contains any byval parameter is not supported, too.
4574   // Note: This is a quick work around, because in some cases, e.g.
4575   // caller's stack size > callee's stack size, we are still able to apply
4576   // sibling call optimization. For example, gcc is able to do SCO for caller1
4577   // in the following example, but not for caller2.
4578   //   struct test {
4579   //     long int a;
4580   //     char ary[56];
4581   //   } gTest;
4582   //   __attribute__((noinline)) int callee(struct test v, struct test *b) {
4583   //     b->a = v.a;
4584   //     return 0;
4585   //   }
4586   //   void caller1(struct test a, struct test c, struct test *b) {
4587   //     callee(gTest, b); }
4588   //   void caller2(struct test *b) { callee(gTest, b); }
4589   if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
4590     return false;
4591 
4592   // If callee and caller use different calling conventions, we cannot pass
4593   // parameters on stack since offsets for the parameter area may be different.
4594   if (Caller.getCallingConv() != CalleeCC &&
4595       needStackSlotPassParameters(Subtarget, Outs))
4596     return false;
4597 
4598   // All variants of 64-bit ELF ABIs without PC-Relative addressing require that
4599   // the caller and callee share the same TOC for TCO/SCO. If the caller and
4600   // callee potentially have different TOC bases then we cannot tail call since
4601   // we need to restore the TOC pointer after the call.
4602   // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
4603   // We cannot guarantee this for indirect calls or calls to external functions.
4604   // When PC-Relative addressing is used, the concept of the TOC is no longer
4605   // applicable so this check is not required.
4606   // Check first for indirect calls.
4607   if (!Subtarget.isUsingPCRelativeCalls() &&
4608       !isFunctionGlobalAddress(Callee) && !isa<ExternalSymbolSDNode>(Callee))
4609     return false;
4610 
4611   // Check if we share the TOC base.
4612   if (!Subtarget.isUsingPCRelativeCalls() &&
4613       !callsShareTOCBase(&Caller, Callee, getTargetMachine()))
4614     return false;
4615 
4616   // TCO allows altering callee ABI, so we don't have to check further.
4617   if (CalleeCC == CallingConv::Fast && TailCallOpt)
4618     return true;
4619 
4620   if (DisableSCO) return false;
4621 
4622   // If callee use the same argument list that caller is using, then we can
4623   // apply SCO on this case. If it is not, then we need to check if callee needs
4624   // stack for passing arguments.
4625   // PC Relative tail calls may not have a CallBase.
4626   // If there is no CallBase we cannot verify if we have the same argument
4627   // list so assume that we don't have the same argument list.
4628   if (CB && !hasSameArgumentList(&Caller, *CB) &&
4629       needStackSlotPassParameters(Subtarget, Outs))
4630     return false;
4631   else if (!CB && needStackSlotPassParameters(Subtarget, Outs))
4632     return false;
4633 
4634   return true;
4635 }
4636 
4637 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
4638 /// for tail call optimization. Targets which want to do tail call
4639 /// optimization should implement this function.
4640 bool
4641 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
4642                                                      CallingConv::ID CalleeCC,
4643                                                      bool isVarArg,
4644                                       const SmallVectorImpl<ISD::InputArg> &Ins,
4645                                                      SelectionDAG& DAG) const {
4646   if (!getTargetMachine().Options.GuaranteedTailCallOpt)
4647     return false;
4648 
4649   // Variable argument functions are not supported.
4650   if (isVarArg)
4651     return false;
4652 
4653   MachineFunction &MF = DAG.getMachineFunction();
4654   CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
4655   if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
4656     // Functions containing by val parameters are not supported.
4657     for (unsigned i = 0; i != Ins.size(); i++) {
4658        ISD::ArgFlagsTy Flags = Ins[i].Flags;
4659        if (Flags.isByVal()) return false;
4660     }
4661 
4662     // Non-PIC/GOT tail calls are supported.
4663     if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
4664       return true;
4665 
4666     // At the moment we can only do local tail calls (in same module, hidden
4667     // or protected) if we are generating PIC.
4668     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
4669       return G->getGlobal()->hasHiddenVisibility()
4670           || G->getGlobal()->hasProtectedVisibility();
4671   }
4672 
4673   return false;
4674 }
4675 
4676 /// isCallCompatibleAddress - Return the immediate to use if the specified
4677 /// 32-bit value is representable in the immediate field of a BxA instruction.
4678 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
4679   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
4680   if (!C) return nullptr;
4681 
4682   int Addr = C->getZExtValue();
4683   if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
4684       SignExtend32<26>(Addr) != Addr)
4685     return nullptr;  // Top 6 bits have to be sext of immediate.
4686 
4687   return DAG
4688       .getConstant(
4689           (int)C->getZExtValue() >> 2, SDLoc(Op),
4690           DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
4691       .getNode();
4692 }
4693 
4694 namespace {
4695 
4696 struct TailCallArgumentInfo {
4697   SDValue Arg;
4698   SDValue FrameIdxOp;
4699   int FrameIdx = 0;
4700 
4701   TailCallArgumentInfo() = default;
4702 };
4703 
4704 } // end anonymous namespace
4705 
4706 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
4707 static void StoreTailCallArgumentsToStackSlot(
4708     SelectionDAG &DAG, SDValue Chain,
4709     const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
4710     SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
4711   for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
4712     SDValue Arg = TailCallArgs[i].Arg;
4713     SDValue FIN = TailCallArgs[i].FrameIdxOp;
4714     int FI = TailCallArgs[i].FrameIdx;
4715     // Store relative to framepointer.
4716     MemOpChains.push_back(DAG.getStore(
4717         Chain, dl, Arg, FIN,
4718         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
4719   }
4720 }
4721 
4722 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
4723 /// the appropriate stack slot for the tail call optimized function call.
4724 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
4725                                              SDValue OldRetAddr, SDValue OldFP,
4726                                              int SPDiff, const SDLoc &dl) {
4727   if (SPDiff) {
4728     // Calculate the new stack slot for the return address.
4729     MachineFunction &MF = DAG.getMachineFunction();
4730     const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
4731     const PPCFrameLowering *FL = Subtarget.getFrameLowering();
4732     bool isPPC64 = Subtarget.isPPC64();
4733     int SlotSize = isPPC64 ? 8 : 4;
4734     int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
4735     int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize,
4736                                                          NewRetAddrLoc, true);
4737     EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4738     SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
4739     Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
4740                          MachinePointerInfo::getFixedStack(MF, NewRetAddr));
4741   }
4742   return Chain;
4743 }
4744 
4745 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate
4746 /// the position of the argument.
4747 static void
4748 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
4749                          SDValue Arg, int SPDiff, unsigned ArgOffset,
4750                      SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
4751   int Offset = ArgOffset + SPDiff;
4752   uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
4753   int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
4754   EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4755   SDValue FIN = DAG.getFrameIndex(FI, VT);
4756   TailCallArgumentInfo Info;
4757   Info.Arg = Arg;
4758   Info.FrameIdxOp = FIN;
4759   Info.FrameIdx = FI;
4760   TailCallArguments.push_back(Info);
4761 }
4762 
4763 /// EmitTCFPAndRetAddrLoad - Emit load from frame pointer and return address
4764 /// stack slot. Returns the chain as result and the loaded frame pointers in
4765 /// LROpOut/FPOpout. Used when tail calling.
4766 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
4767     SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
4768     SDValue &FPOpOut, const SDLoc &dl) const {
4769   if (SPDiff) {
4770     // Load the LR and FP stack slot for later adjusting.
4771     EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
4772     LROpOut = getReturnAddrFrameIndex(DAG);
4773     LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
4774     Chain = SDValue(LROpOut.getNode(), 1);
4775   }
4776   return Chain;
4777 }
4778 
4779 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
4780 /// by "Src" to address "Dst" of size "Size".  Alignment information is
4781 /// specified by the specific parameter attribute. The copy will be passed as
4782 /// a byval function parameter.
4783 /// Sometimes what we are copying is the end of a larger object, the part that
4784 /// does not fit in registers.
4785 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
4786                                          SDValue Chain, ISD::ArgFlagsTy Flags,
4787                                          SelectionDAG &DAG, const SDLoc &dl) {
4788   SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
4789   return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode,
4790                        Flags.getNonZeroByValAlign(), false, false, false,
4791                        MachinePointerInfo(), MachinePointerInfo());
4792 }
4793 
4794 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
4795 /// tail calls.
4796 static void LowerMemOpCallTo(
4797     SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
4798     SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
4799     bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
4800     SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
4801   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4802   if (!isTailCall) {
4803     if (isVector) {
4804       SDValue StackPtr;
4805       if (isPPC64)
4806         StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
4807       else
4808         StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
4809       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
4810                            DAG.getConstant(ArgOffset, dl, PtrVT));
4811     }
4812     MemOpChains.push_back(
4813         DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
4814     // Calculate and remember argument location.
4815   } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
4816                                   TailCallArguments);
4817 }
4818 
4819 static void
4820 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
4821                 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp,
4822                 SDValue FPOp,
4823                 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
4824   // Emit a sequence of copyto/copyfrom virtual registers for arguments that
4825   // might overwrite each other in case of tail call optimization.
4826   SmallVector<SDValue, 8> MemOpChains2;
4827   // Do not flag preceding copytoreg stuff together with the following stuff.
4828   InFlag = SDValue();
4829   StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
4830                                     MemOpChains2, dl);
4831   if (!MemOpChains2.empty())
4832     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
4833 
4834   // Store the return address to the appropriate stack slot.
4835   Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl);
4836 
4837   // Emit callseq_end just before tailcall node.
4838   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
4839                              DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
4840   InFlag = Chain.getValue(1);
4841 }
4842 
4843 // Is this global address that of a function that can be called by name? (as
4844 // opposed to something that must hold a descriptor for an indirect call).
4845 static bool isFunctionGlobalAddress(SDValue Callee) {
4846   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
4847     if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
4848         Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
4849       return false;
4850 
4851     return G->getGlobal()->getValueType()->isFunctionTy();
4852   }
4853 
4854   return false;
4855 }
4856 
4857 SDValue PPCTargetLowering::LowerCallResult(
4858     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
4859     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4860     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4861   SmallVector<CCValAssign, 16> RVLocs;
4862   CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
4863                     *DAG.getContext());
4864 
4865   CCRetInfo.AnalyzeCallResult(
4866       Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
4867                ? RetCC_PPC_Cold
4868                : RetCC_PPC);
4869 
4870   // Copy all of the result registers out of their specified physreg.
4871   for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
4872     CCValAssign &VA = RVLocs[i];
4873     assert(VA.isRegLoc() && "Can only return in registers!");
4874 
4875     SDValue Val;
4876 
4877     if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
4878       SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
4879                                       InFlag);
4880       Chain = Lo.getValue(1);
4881       InFlag = Lo.getValue(2);
4882       VA = RVLocs[++i]; // skip ahead to next loc
4883       SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
4884                                       InFlag);
4885       Chain = Hi.getValue(1);
4886       InFlag = Hi.getValue(2);
4887       if (!Subtarget.isLittleEndian())
4888         std::swap (Lo, Hi);
4889       Val = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, Lo, Hi);
4890     } else {
4891       Val = DAG.getCopyFromReg(Chain, dl,
4892                                VA.getLocReg(), VA.getLocVT(), InFlag);
4893       Chain = Val.getValue(1);
4894       InFlag = Val.getValue(2);
4895     }
4896 
4897     switch (VA.getLocInfo()) {
4898     default: llvm_unreachable("Unknown loc info!");
4899     case CCValAssign::Full: break;
4900     case CCValAssign::AExt:
4901       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
4902       break;
4903     case CCValAssign::ZExt:
4904       Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
4905                         DAG.getValueType(VA.getValVT()));
4906       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
4907       break;
4908     case CCValAssign::SExt:
4909       Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
4910                         DAG.getValueType(VA.getValVT()));
4911       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
4912       break;
4913     }
4914 
4915     InVals.push_back(Val);
4916   }
4917 
4918   return Chain;
4919 }
4920 
4921 static bool isIndirectCall(const SDValue &Callee, SelectionDAG &DAG,
4922                            const PPCSubtarget &Subtarget, bool isPatchPoint) {
4923   // PatchPoint calls are not indirect.
4924   if (isPatchPoint)
4925     return false;
4926 
4927   if (isFunctionGlobalAddress(Callee) || dyn_cast<ExternalSymbolSDNode>(Callee))
4928     return false;
4929 
4930   // Darwin, and 32-bit ELF can use a BLA. The descriptor based ABIs can not
4931   // becuase the immediate function pointer points to a descriptor instead of
4932   // a function entry point. The ELFv2 ABI cannot use a BLA because the function
4933   // pointer immediate points to the global entry point, while the BLA would
4934   // need to jump to the local entry point (see rL211174).
4935   if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI() &&
4936       isBLACompatibleAddress(Callee, DAG))
4937     return false;
4938 
4939   return true;
4940 }
4941 
4942 // AIX and 64-bit ELF ABIs w/o PCRel require a TOC save/restore around calls.
4943 static inline bool isTOCSaveRestoreRequired(const PPCSubtarget &Subtarget) {
4944   return Subtarget.isAIXABI() ||
4945          (Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls());
4946 }
4947 
4948 static unsigned getCallOpcode(PPCTargetLowering::CallFlags CFlags,
4949                               const Function &Caller,
4950                               const SDValue &Callee,
4951                               const PPCSubtarget &Subtarget,
4952                               const TargetMachine &TM) {
4953   if (CFlags.IsTailCall)
4954     return PPCISD::TC_RETURN;
4955 
4956   // This is a call through a function pointer.
4957   if (CFlags.IsIndirect) {
4958     // AIX and the 64-bit ELF ABIs need to maintain the TOC pointer accross
4959     // indirect calls. The save of the caller's TOC pointer to the stack will be
4960     // inserted into the DAG as part of call lowering. The restore of the TOC
4961     // pointer is modeled by using a pseudo instruction for the call opcode that
4962     // represents the 2 instruction sequence of an indirect branch and link,
4963     // immediately followed by a load of the TOC pointer from the the stack save
4964     // slot into gpr2. For 64-bit ELFv2 ABI with PCRel, do not restore the TOC
4965     // as it is not saved or used.
4966     return isTOCSaveRestoreRequired(Subtarget) ? PPCISD::BCTRL_LOAD_TOC
4967                                                : PPCISD::BCTRL;
4968   }
4969 
4970   if (Subtarget.isUsingPCRelativeCalls()) {
4971     assert(Subtarget.is64BitELFABI() && "PC Relative is only on ELF ABI.");
4972     return PPCISD::CALL_NOTOC;
4973   }
4974 
4975   // The ABIs that maintain a TOC pointer accross calls need to have a nop
4976   // immediately following the call instruction if the caller and callee may
4977   // have different TOC bases. At link time if the linker determines the calls
4978   // may not share a TOC base, the call is redirected to a trampoline inserted
4979   // by the linker. The trampoline will (among other things) save the callers
4980   // TOC pointer at an ABI designated offset in the linkage area and the linker
4981   // will rewrite the nop to be a load of the TOC pointer from the linkage area
4982   // into gpr2.
4983   if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI())
4984     return callsShareTOCBase(&Caller, Callee, TM) ? PPCISD::CALL
4985                                                   : PPCISD::CALL_NOP;
4986 
4987   return PPCISD::CALL;
4988 }
4989 
4990 static SDValue transformCallee(const SDValue &Callee, SelectionDAG &DAG,
4991                                const SDLoc &dl, const PPCSubtarget &Subtarget) {
4992   if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI())
4993     if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
4994       return SDValue(Dest, 0);
4995 
4996   // Returns true if the callee is local, and false otherwise.
4997   auto isLocalCallee = [&]() {
4998     const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4999     const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
5000     const GlobalValue *GV = G ? G->getGlobal() : nullptr;
5001 
5002     return DAG.getTarget().shouldAssumeDSOLocal(*Mod, GV) &&
5003            !dyn_cast_or_null<GlobalIFunc>(GV);
5004   };
5005 
5006   // The PLT is only used in 32-bit ELF PIC mode.  Attempting to use the PLT in
5007   // a static relocation model causes some versions of GNU LD (2.17.50, at
5008   // least) to force BSS-PLT, instead of secure-PLT, even if all objects are
5009   // built with secure-PLT.
5010   bool UsePlt =
5011       Subtarget.is32BitELFABI() && !isLocalCallee() &&
5012       Subtarget.getTargetMachine().getRelocationModel() == Reloc::PIC_;
5013 
5014   const auto getAIXFuncEntryPointSymbolSDNode = [&](const GlobalValue *GV) {
5015     const TargetMachine &TM = Subtarget.getTargetMachine();
5016     const TargetLoweringObjectFile *TLOF = TM.getObjFileLowering();
5017     MCSymbolXCOFF *S =
5018         cast<MCSymbolXCOFF>(TLOF->getFunctionEntryPointSymbol(GV, TM));
5019 
5020     MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
5021     return DAG.getMCSymbol(S, PtrVT);
5022   };
5023 
5024   if (isFunctionGlobalAddress(Callee)) {
5025     const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
5026 
5027     if (Subtarget.isAIXABI()) {
5028       assert(!isa<GlobalIFunc>(GV) && "IFunc is not supported on AIX.");
5029       return getAIXFuncEntryPointSymbolSDNode(GV);
5030     }
5031     return DAG.getTargetGlobalAddress(GV, dl, Callee.getValueType(), 0,
5032                                       UsePlt ? PPCII::MO_PLT : 0);
5033   }
5034 
5035   if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
5036     const char *SymName = S->getSymbol();
5037     if (Subtarget.isAIXABI()) {
5038       // If there exists a user-declared function whose name is the same as the
5039       // ExternalSymbol's, then we pick up the user-declared version.
5040       const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
5041       if (const Function *F =
5042               dyn_cast_or_null<Function>(Mod->getNamedValue(SymName)))
5043         return getAIXFuncEntryPointSymbolSDNode(F);
5044 
5045       // On AIX, direct function calls reference the symbol for the function's
5046       // entry point, which is named by prepending a "." before the function's
5047       // C-linkage name. A Qualname is returned here because an external
5048       // function entry point is a csect with XTY_ER property.
5049       const auto getExternalFunctionEntryPointSymbol = [&](StringRef SymName) {
5050         auto &Context = DAG.getMachineFunction().getMMI().getContext();
5051         MCSectionXCOFF *Sec = Context.getXCOFFSection(
5052             (Twine(".") + Twine(SymName)).str(), XCOFF::XMC_PR, XCOFF::XTY_ER,
5053             SectionKind::getMetadata());
5054         return Sec->getQualNameSymbol();
5055       };
5056 
5057       SymName = getExternalFunctionEntryPointSymbol(SymName)->getName().data();
5058     }
5059     return DAG.getTargetExternalSymbol(SymName, Callee.getValueType(),
5060                                        UsePlt ? PPCII::MO_PLT : 0);
5061   }
5062 
5063   // No transformation needed.
5064   assert(Callee.getNode() && "What no callee?");
5065   return Callee;
5066 }
5067 
5068 static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart) {
5069   assert(CallSeqStart.getOpcode() == ISD::CALLSEQ_START &&
5070          "Expected a CALLSEQ_STARTSDNode.");
5071 
5072   // The last operand is the chain, except when the node has glue. If the node
5073   // has glue, then the last operand is the glue, and the chain is the second
5074   // last operand.
5075   SDValue LastValue = CallSeqStart.getValue(CallSeqStart->getNumValues() - 1);
5076   if (LastValue.getValueType() != MVT::Glue)
5077     return LastValue;
5078 
5079   return CallSeqStart.getValue(CallSeqStart->getNumValues() - 2);
5080 }
5081 
5082 // Creates the node that moves a functions address into the count register
5083 // to prepare for an indirect call instruction.
5084 static void prepareIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5085                                 SDValue &Glue, SDValue &Chain,
5086                                 const SDLoc &dl) {
5087   SDValue MTCTROps[] = {Chain, Callee, Glue};
5088   EVT ReturnTypes[] = {MVT::Other, MVT::Glue};
5089   Chain = DAG.getNode(PPCISD::MTCTR, dl, makeArrayRef(ReturnTypes, 2),
5090                       makeArrayRef(MTCTROps, Glue.getNode() ? 3 : 2));
5091   // The glue is the second value produced.
5092   Glue = Chain.getValue(1);
5093 }
5094 
5095 static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5096                                           SDValue &Glue, SDValue &Chain,
5097                                           SDValue CallSeqStart,
5098                                           const CallBase *CB, const SDLoc &dl,
5099                                           bool hasNest,
5100                                           const PPCSubtarget &Subtarget) {
5101   // Function pointers in the 64-bit SVR4 ABI do not point to the function
5102   // entry point, but to the function descriptor (the function entry point
5103   // address is part of the function descriptor though).
5104   // The function descriptor is a three doubleword structure with the
5105   // following fields: function entry point, TOC base address and
5106   // environment pointer.
5107   // Thus for a call through a function pointer, the following actions need
5108   // to be performed:
5109   //   1. Save the TOC of the caller in the TOC save area of its stack
5110   //      frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
5111   //   2. Load the address of the function entry point from the function
5112   //      descriptor.
5113   //   3. Load the TOC of the callee from the function descriptor into r2.
5114   //   4. Load the environment pointer from the function descriptor into
5115   //      r11.
5116   //   5. Branch to the function entry point address.
5117   //   6. On return of the callee, the TOC of the caller needs to be
5118   //      restored (this is done in FinishCall()).
5119   //
5120   // The loads are scheduled at the beginning of the call sequence, and the
5121   // register copies are flagged together to ensure that no other
5122   // operations can be scheduled in between. E.g. without flagging the
5123   // copies together, a TOC access in the caller could be scheduled between
5124   // the assignment of the callee TOC and the branch to the callee, which leads
5125   // to incorrect code.
5126 
5127   // Start by loading the function address from the descriptor.
5128   SDValue LDChain = getOutputChainFromCallSeq(CallSeqStart);
5129   auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
5130                       ? (MachineMemOperand::MODereferenceable |
5131                          MachineMemOperand::MOInvariant)
5132                       : MachineMemOperand::MONone;
5133 
5134   MachinePointerInfo MPI(CB ? CB->getCalledOperand() : nullptr);
5135 
5136   // Registers used in building the DAG.
5137   const MCRegister EnvPtrReg = Subtarget.getEnvironmentPointerRegister();
5138   const MCRegister TOCReg = Subtarget.getTOCPointerRegister();
5139 
5140   // Offsets of descriptor members.
5141   const unsigned TOCAnchorOffset = Subtarget.descriptorTOCAnchorOffset();
5142   const unsigned EnvPtrOffset = Subtarget.descriptorEnvironmentPointerOffset();
5143 
5144   const MVT RegVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
5145   const unsigned Alignment = Subtarget.isPPC64() ? 8 : 4;
5146 
5147   // One load for the functions entry point address.
5148   SDValue LoadFuncPtr = DAG.getLoad(RegVT, dl, LDChain, Callee, MPI,
5149                                     Alignment, MMOFlags);
5150 
5151   // One for loading the TOC anchor for the module that contains the called
5152   // function.
5153   SDValue TOCOff = DAG.getIntPtrConstant(TOCAnchorOffset, dl);
5154   SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, Callee, TOCOff);
5155   SDValue TOCPtr =
5156       DAG.getLoad(RegVT, dl, LDChain, AddTOC,
5157                   MPI.getWithOffset(TOCAnchorOffset), Alignment, MMOFlags);
5158 
5159   // One for loading the environment pointer.
5160   SDValue PtrOff = DAG.getIntPtrConstant(EnvPtrOffset, dl);
5161   SDValue AddPtr = DAG.getNode(ISD::ADD, dl, RegVT, Callee, PtrOff);
5162   SDValue LoadEnvPtr =
5163       DAG.getLoad(RegVT, dl, LDChain, AddPtr,
5164                   MPI.getWithOffset(EnvPtrOffset), Alignment, MMOFlags);
5165 
5166 
5167   // Then copy the newly loaded TOC anchor to the TOC pointer.
5168   SDValue TOCVal = DAG.getCopyToReg(Chain, dl, TOCReg, TOCPtr, Glue);
5169   Chain = TOCVal.getValue(0);
5170   Glue = TOCVal.getValue(1);
5171 
5172   // If the function call has an explicit 'nest' parameter, it takes the
5173   // place of the environment pointer.
5174   assert((!hasNest || !Subtarget.isAIXABI()) &&
5175          "Nest parameter is not supported on AIX.");
5176   if (!hasNest) {
5177     SDValue EnvVal = DAG.getCopyToReg(Chain, dl, EnvPtrReg, LoadEnvPtr, Glue);
5178     Chain = EnvVal.getValue(0);
5179     Glue = EnvVal.getValue(1);
5180   }
5181 
5182   // The rest of the indirect call sequence is the same as the non-descriptor
5183   // DAG.
5184   prepareIndirectCall(DAG, LoadFuncPtr, Glue, Chain, dl);
5185 }
5186 
5187 static void
5188 buildCallOperands(SmallVectorImpl<SDValue> &Ops,
5189                   PPCTargetLowering::CallFlags CFlags, const SDLoc &dl,
5190                   SelectionDAG &DAG,
5191                   SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
5192                   SDValue Glue, SDValue Chain, SDValue &Callee, int SPDiff,
5193                   const PPCSubtarget &Subtarget) {
5194   const bool IsPPC64 = Subtarget.isPPC64();
5195   // MVT for a general purpose register.
5196   const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
5197 
5198   // First operand is always the chain.
5199   Ops.push_back(Chain);
5200 
5201   // If it's a direct call pass the callee as the second operand.
5202   if (!CFlags.IsIndirect)
5203     Ops.push_back(Callee);
5204   else {
5205     assert(!CFlags.IsPatchPoint && "Patch point calls are not indirect.");
5206 
5207     // For the TOC based ABIs, we have saved the TOC pointer to the linkage area
5208     // on the stack (this would have been done in `LowerCall_64SVR4` or
5209     // `LowerCall_AIX`). The call instruction is a pseudo instruction that
5210     // represents both the indirect branch and a load that restores the TOC
5211     // pointer from the linkage area. The operand for the TOC restore is an add
5212     // of the TOC save offset to the stack pointer. This must be the second
5213     // operand: after the chain input but before any other variadic arguments.
5214     // For 64-bit ELFv2 ABI with PCRel, do not restore the TOC as it is not
5215     // saved or used.
5216     if (isTOCSaveRestoreRequired(Subtarget)) {
5217       const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
5218 
5219       SDValue StackPtr = DAG.getRegister(StackPtrReg, RegVT);
5220       unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
5221       SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
5222       SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, StackPtr, TOCOff);
5223       Ops.push_back(AddTOC);
5224     }
5225 
5226     // Add the register used for the environment pointer.
5227     if (Subtarget.usesFunctionDescriptors() && !CFlags.HasNest)
5228       Ops.push_back(DAG.getRegister(Subtarget.getEnvironmentPointerRegister(),
5229                                     RegVT));
5230 
5231 
5232     // Add CTR register as callee so a bctr can be emitted later.
5233     if (CFlags.IsTailCall)
5234       Ops.push_back(DAG.getRegister(IsPPC64 ? PPC::CTR8 : PPC::CTR, RegVT));
5235   }
5236 
5237   // If this is a tail call add stack pointer delta.
5238   if (CFlags.IsTailCall)
5239     Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32));
5240 
5241   // Add argument registers to the end of the list so that they are known live
5242   // into the call.
5243   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
5244     Ops.push_back(DAG.getRegister(RegsToPass[i].first,
5245                                   RegsToPass[i].second.getValueType()));
5246 
5247   // We cannot add R2/X2 as an operand here for PATCHPOINT, because there is
5248   // no way to mark dependencies as implicit here.
5249   // We will add the R2/X2 dependency in EmitInstrWithCustomInserter.
5250   if ((Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) &&
5251        !CFlags.IsPatchPoint && !Subtarget.isUsingPCRelativeCalls())
5252     Ops.push_back(DAG.getRegister(Subtarget.getTOCPointerRegister(), RegVT));
5253 
5254   // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls
5255   if (CFlags.IsVarArg && Subtarget.is32BitELFABI())
5256     Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));
5257 
5258   // Add a register mask operand representing the call-preserved registers.
5259   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
5260   const uint32_t *Mask =
5261       TRI->getCallPreservedMask(DAG.getMachineFunction(), CFlags.CallConv);
5262   assert(Mask && "Missing call preserved mask for calling convention");
5263   Ops.push_back(DAG.getRegisterMask(Mask));
5264 
5265   // If the glue is valid, it is the last operand.
5266   if (Glue.getNode())
5267     Ops.push_back(Glue);
5268 }
5269 
5270 SDValue PPCTargetLowering::FinishCall(
5271     CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG,
5272     SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue Glue,
5273     SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
5274     unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
5275     SmallVectorImpl<SDValue> &InVals, const CallBase *CB) const {
5276 
5277   if ((Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls()) ||
5278       Subtarget.isAIXABI())
5279     setUsesTOCBasePtr(DAG);
5280 
5281   unsigned CallOpc =
5282       getCallOpcode(CFlags, DAG.getMachineFunction().getFunction(), Callee,
5283                     Subtarget, DAG.getTarget());
5284 
5285   if (!CFlags.IsIndirect)
5286     Callee = transformCallee(Callee, DAG, dl, Subtarget);
5287   else if (Subtarget.usesFunctionDescriptors())
5288     prepareDescriptorIndirectCall(DAG, Callee, Glue, Chain, CallSeqStart, CB,
5289                                   dl, CFlags.HasNest, Subtarget);
5290   else
5291     prepareIndirectCall(DAG, Callee, Glue, Chain, dl);
5292 
5293   // Build the operand list for the call instruction.
5294   SmallVector<SDValue, 8> Ops;
5295   buildCallOperands(Ops, CFlags, dl, DAG, RegsToPass, Glue, Chain, Callee,
5296                     SPDiff, Subtarget);
5297 
5298   // Emit tail call.
5299   if (CFlags.IsTailCall) {
5300     // Indirect tail call when using PC Relative calls do not have the same
5301     // constraints.
5302     assert(((Callee.getOpcode() == ISD::Register &&
5303              cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
5304             Callee.getOpcode() == ISD::TargetExternalSymbol ||
5305             Callee.getOpcode() == ISD::TargetGlobalAddress ||
5306             isa<ConstantSDNode>(Callee) ||
5307             (CFlags.IsIndirect && Subtarget.isUsingPCRelativeCalls())) &&
5308            "Expecting a global address, external symbol, absolute value, "
5309            "register or an indirect tail call when PC Relative calls are "
5310            "used.");
5311     // PC Relative calls also use TC_RETURN as the way to mark tail calls.
5312     assert(CallOpc == PPCISD::TC_RETURN &&
5313            "Unexpected call opcode for a tail call.");
5314     DAG.getMachineFunction().getFrameInfo().setHasTailCall();
5315     return DAG.getNode(CallOpc, dl, MVT::Other, Ops);
5316   }
5317 
5318   std::array<EVT, 2> ReturnTypes = {{MVT::Other, MVT::Glue}};
5319   Chain = DAG.getNode(CallOpc, dl, ReturnTypes, Ops);
5320   DAG.addNoMergeSiteInfo(Chain.getNode(), CFlags.NoMerge);
5321   Glue = Chain.getValue(1);
5322 
5323   // When performing tail call optimization the callee pops its arguments off
5324   // the stack. Account for this here so these bytes can be pushed back on in
5325   // PPCFrameLowering::eliminateCallFramePseudoInstr.
5326   int BytesCalleePops = (CFlags.CallConv == CallingConv::Fast &&
5327                          getTargetMachine().Options.GuaranteedTailCallOpt)
5328                             ? NumBytes
5329                             : 0;
5330 
5331   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
5332                              DAG.getIntPtrConstant(BytesCalleePops, dl, true),
5333                              Glue, dl);
5334   Glue = Chain.getValue(1);
5335 
5336   return LowerCallResult(Chain, Glue, CFlags.CallConv, CFlags.IsVarArg, Ins, dl,
5337                          DAG, InVals);
5338 }
5339 
5340 SDValue
5341 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
5342                              SmallVectorImpl<SDValue> &InVals) const {
5343   SelectionDAG &DAG                     = CLI.DAG;
5344   SDLoc &dl                             = CLI.DL;
5345   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
5346   SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
5347   SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
5348   SDValue Chain                         = CLI.Chain;
5349   SDValue Callee                        = CLI.Callee;
5350   bool &isTailCall                      = CLI.IsTailCall;
5351   CallingConv::ID CallConv              = CLI.CallConv;
5352   bool isVarArg                         = CLI.IsVarArg;
5353   bool isPatchPoint                     = CLI.IsPatchPoint;
5354   const CallBase *CB                    = CLI.CB;
5355 
5356   if (isTailCall) {
5357     if (Subtarget.useLongCalls() && !(CB && CB->isMustTailCall()))
5358       isTailCall = false;
5359     else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
5360       isTailCall = IsEligibleForTailCallOptimization_64SVR4(
5361           Callee, CallConv, CB, isVarArg, Outs, Ins, DAG);
5362     else
5363       isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
5364                                                      Ins, DAG);
5365     if (isTailCall) {
5366       ++NumTailCalls;
5367       if (!getTargetMachine().Options.GuaranteedTailCallOpt)
5368         ++NumSiblingCalls;
5369 
5370       // PC Relative calls no longer guarantee that the callee is a Global
5371       // Address Node. The callee could be an indirect tail call in which
5372       // case the SDValue for the callee could be a load (to load the address
5373       // of a function pointer) or it may be a register copy (to move the
5374       // address of the callee from a function parameter into a virtual
5375       // register). It may also be an ExternalSymbolSDNode (ex memcopy).
5376       assert((Subtarget.isUsingPCRelativeCalls() ||
5377               isa<GlobalAddressSDNode>(Callee)) &&
5378              "Callee should be an llvm::Function object.");
5379 
5380       LLVM_DEBUG(dbgs() << "TCO caller: " << DAG.getMachineFunction().getName()
5381                         << "\nTCO callee: ");
5382       LLVM_DEBUG(Callee.dump());
5383     }
5384   }
5385 
5386   if (!isTailCall && CB && CB->isMustTailCall())
5387     report_fatal_error("failed to perform tail call elimination on a call "
5388                        "site marked musttail");
5389 
5390   // When long calls (i.e. indirect calls) are always used, calls are always
5391   // made via function pointer. If we have a function name, first translate it
5392   // into a pointer.
5393   if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
5394       !isTailCall)
5395     Callee = LowerGlobalAddress(Callee, DAG);
5396 
5397   CallFlags CFlags(
5398       CallConv, isTailCall, isVarArg, isPatchPoint,
5399       isIndirectCall(Callee, DAG, Subtarget, isPatchPoint),
5400       // hasNest
5401       Subtarget.is64BitELFABI() &&
5402           any_of(Outs, [](ISD::OutputArg Arg) { return Arg.Flags.isNest(); }),
5403       CLI.NoMerge);
5404 
5405   if (Subtarget.isAIXABI())
5406     return LowerCall_AIX(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5407                          InVals, CB);
5408 
5409   assert(Subtarget.isSVR4ABI());
5410   if (Subtarget.isPPC64())
5411     return LowerCall_64SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5412                             InVals, CB);
5413   return LowerCall_32SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5414                           InVals, CB);
5415 }
5416 
5417 SDValue PPCTargetLowering::LowerCall_32SVR4(
5418     SDValue Chain, SDValue Callee, CallFlags CFlags,
5419     const SmallVectorImpl<ISD::OutputArg> &Outs,
5420     const SmallVectorImpl<SDValue> &OutVals,
5421     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5422     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5423     const CallBase *CB) const {
5424   // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
5425   // of the 32-bit SVR4 ABI stack frame layout.
5426 
5427   const CallingConv::ID CallConv = CFlags.CallConv;
5428   const bool IsVarArg = CFlags.IsVarArg;
5429   const bool IsTailCall = CFlags.IsTailCall;
5430 
5431   assert((CallConv == CallingConv::C ||
5432           CallConv == CallingConv::Cold ||
5433           CallConv == CallingConv::Fast) && "Unknown calling convention!");
5434 
5435   const Align PtrAlign(4);
5436 
5437   MachineFunction &MF = DAG.getMachineFunction();
5438 
5439   // Mark this function as potentially containing a function that contains a
5440   // tail call. As a consequence the frame pointer will be used for dynamicalloc
5441   // and restoring the callers stack pointer in this functions epilog. This is
5442   // done because by tail calling the called function might overwrite the value
5443   // in this function's (MF) stack pointer stack slot 0(SP).
5444   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5445       CallConv == CallingConv::Fast)
5446     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5447 
5448   // Count how many bytes are to be pushed on the stack, including the linkage
5449   // area, parameter list area and the part of the local variable space which
5450   // contains copies of aggregates which are passed by value.
5451 
5452   // Assign locations to all of the outgoing arguments.
5453   SmallVector<CCValAssign, 16> ArgLocs;
5454   PPCCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
5455 
5456   // Reserve space for the linkage area on the stack.
5457   CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
5458                        PtrAlign);
5459   if (useSoftFloat())
5460     CCInfo.PreAnalyzeCallOperands(Outs);
5461 
5462   if (IsVarArg) {
5463     // Handle fixed and variable vector arguments differently.
5464     // Fixed vector arguments go into registers as long as registers are
5465     // available. Variable vector arguments always go into memory.
5466     unsigned NumArgs = Outs.size();
5467 
5468     for (unsigned i = 0; i != NumArgs; ++i) {
5469       MVT ArgVT = Outs[i].VT;
5470       ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
5471       bool Result;
5472 
5473       if (Outs[i].IsFixed) {
5474         Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
5475                                CCInfo);
5476       } else {
5477         Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
5478                                       ArgFlags, CCInfo);
5479       }
5480 
5481       if (Result) {
5482 #ifndef NDEBUG
5483         errs() << "Call operand #" << i << " has unhandled type "
5484              << EVT(ArgVT).getEVTString() << "\n";
5485 #endif
5486         llvm_unreachable(nullptr);
5487       }
5488     }
5489   } else {
5490     // All arguments are treated the same.
5491     CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
5492   }
5493   CCInfo.clearWasPPCF128();
5494 
5495   // Assign locations to all of the outgoing aggregate by value arguments.
5496   SmallVector<CCValAssign, 16> ByValArgLocs;
5497   CCState CCByValInfo(CallConv, IsVarArg, MF, ByValArgLocs, *DAG.getContext());
5498 
5499   // Reserve stack space for the allocations in CCInfo.
5500   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
5501 
5502   CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
5503 
5504   // Size of the linkage area, parameter list area and the part of the local
5505   // space variable where copies of aggregates which are passed by value are
5506   // stored.
5507   unsigned NumBytes = CCByValInfo.getNextStackOffset();
5508 
5509   // Calculate by how many bytes the stack has to be adjusted in case of tail
5510   // call optimization.
5511   int SPDiff = CalculateTailCallSPDiff(DAG, IsTailCall, NumBytes);
5512 
5513   // Adjust the stack pointer for the new arguments...
5514   // These operations are automatically eliminated by the prolog/epilog pass
5515   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5516   SDValue CallSeqStart = Chain;
5517 
5518   // Load the return address and frame pointer so it can be moved somewhere else
5519   // later.
5520   SDValue LROp, FPOp;
5521   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5522 
5523   // Set up a copy of the stack pointer for use loading and storing any
5524   // arguments that may not fit in the registers available for argument
5525   // passing.
5526   SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5527 
5528   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5529   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5530   SmallVector<SDValue, 8> MemOpChains;
5531 
5532   bool seenFloatArg = false;
5533   // Walk the register/memloc assignments, inserting copies/loads.
5534   // i - Tracks the index into the list of registers allocated for the call
5535   // RealArgIdx - Tracks the index into the list of actual function arguments
5536   // j - Tracks the index into the list of byval arguments
5537   for (unsigned i = 0, RealArgIdx = 0, j = 0, e = ArgLocs.size();
5538        i != e;
5539        ++i, ++RealArgIdx) {
5540     CCValAssign &VA = ArgLocs[i];
5541     SDValue Arg = OutVals[RealArgIdx];
5542     ISD::ArgFlagsTy Flags = Outs[RealArgIdx].Flags;
5543 
5544     if (Flags.isByVal()) {
5545       // Argument is an aggregate which is passed by value, thus we need to
5546       // create a copy of it in the local variable space of the current stack
5547       // frame (which is the stack frame of the caller) and pass the address of
5548       // this copy to the callee.
5549       assert((j < ByValArgLocs.size()) && "Index out of bounds!");
5550       CCValAssign &ByValVA = ByValArgLocs[j++];
5551       assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
5552 
5553       // Memory reserved in the local variable space of the callers stack frame.
5554       unsigned LocMemOffset = ByValVA.getLocMemOffset();
5555 
5556       SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5557       PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5558                            StackPtr, PtrOff);
5559 
5560       // Create a copy of the argument in the local area of the current
5561       // stack frame.
5562       SDValue MemcpyCall =
5563         CreateCopyOfByValArgument(Arg, PtrOff,
5564                                   CallSeqStart.getNode()->getOperand(0),
5565                                   Flags, DAG, dl);
5566 
5567       // This must go outside the CALLSEQ_START..END.
5568       SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0,
5569                                                      SDLoc(MemcpyCall));
5570       DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5571                              NewCallSeqStart.getNode());
5572       Chain = CallSeqStart = NewCallSeqStart;
5573 
5574       // Pass the address of the aggregate copy on the stack either in a
5575       // physical register or in the parameter list area of the current stack
5576       // frame to the callee.
5577       Arg = PtrOff;
5578     }
5579 
5580     // When useCRBits() is true, there can be i1 arguments.
5581     // It is because getRegisterType(MVT::i1) => MVT::i1,
5582     // and for other integer types getRegisterType() => MVT::i32.
5583     // Extend i1 and ensure callee will get i32.
5584     if (Arg.getValueType() == MVT::i1)
5585       Arg = DAG.getNode(Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
5586                         dl, MVT::i32, Arg);
5587 
5588     if (VA.isRegLoc()) {
5589       seenFloatArg |= VA.getLocVT().isFloatingPoint();
5590       // Put argument in a physical register.
5591       if (Subtarget.hasSPE() && Arg.getValueType() == MVT::f64) {
5592         bool IsLE = Subtarget.isLittleEndian();
5593         SDValue SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5594                         DAG.getIntPtrConstant(IsLE ? 0 : 1, dl));
5595         RegsToPass.push_back(std::make_pair(VA.getLocReg(), SVal.getValue(0)));
5596         SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5597                            DAG.getIntPtrConstant(IsLE ? 1 : 0, dl));
5598         RegsToPass.push_back(std::make_pair(ArgLocs[++i].getLocReg(),
5599                              SVal.getValue(0)));
5600       } else
5601         RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
5602     } else {
5603       // Put argument in the parameter list area of the current stack frame.
5604       assert(VA.isMemLoc());
5605       unsigned LocMemOffset = VA.getLocMemOffset();
5606 
5607       if (!IsTailCall) {
5608         SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5609         PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5610                              StackPtr, PtrOff);
5611 
5612         MemOpChains.push_back(
5613             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
5614       } else {
5615         // Calculate and remember argument location.
5616         CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
5617                                  TailCallArguments);
5618       }
5619     }
5620   }
5621 
5622   if (!MemOpChains.empty())
5623     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
5624 
5625   // Build a sequence of copy-to-reg nodes chained together with token chain
5626   // and flag operands which copy the outgoing args into the appropriate regs.
5627   SDValue InFlag;
5628   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
5629     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
5630                              RegsToPass[i].second, InFlag);
5631     InFlag = Chain.getValue(1);
5632   }
5633 
5634   // Set CR bit 6 to true if this is a vararg call with floating args passed in
5635   // registers.
5636   if (IsVarArg) {
5637     SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
5638     SDValue Ops[] = { Chain, InFlag };
5639 
5640     Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
5641                         dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));
5642 
5643     InFlag = Chain.getValue(1);
5644   }
5645 
5646   if (IsTailCall)
5647     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
5648                     TailCallArguments);
5649 
5650   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
5651                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
5652 }
5653 
5654 // Copy an argument into memory, being careful to do this outside the
5655 // call sequence for the call to which the argument belongs.
5656 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
5657     SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
5658     SelectionDAG &DAG, const SDLoc &dl) const {
5659   SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
5660                         CallSeqStart.getNode()->getOperand(0),
5661                         Flags, DAG, dl);
5662   // The MEMCPY must go outside the CALLSEQ_START..END.
5663   int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
5664   SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
5665                                                  SDLoc(MemcpyCall));
5666   DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5667                          NewCallSeqStart.getNode());
5668   return NewCallSeqStart;
5669 }
5670 
5671 SDValue PPCTargetLowering::LowerCall_64SVR4(
5672     SDValue Chain, SDValue Callee, CallFlags CFlags,
5673     const SmallVectorImpl<ISD::OutputArg> &Outs,
5674     const SmallVectorImpl<SDValue> &OutVals,
5675     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5676     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5677     const CallBase *CB) const {
5678   bool isELFv2ABI = Subtarget.isELFv2ABI();
5679   bool isLittleEndian = Subtarget.isLittleEndian();
5680   unsigned NumOps = Outs.size();
5681   bool IsSibCall = false;
5682   bool IsFastCall = CFlags.CallConv == CallingConv::Fast;
5683 
5684   EVT PtrVT = getPointerTy(DAG.getDataLayout());
5685   unsigned PtrByteSize = 8;
5686 
5687   MachineFunction &MF = DAG.getMachineFunction();
5688 
5689   if (CFlags.IsTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
5690     IsSibCall = true;
5691 
5692   // Mark this function as potentially containing a function that contains a
5693   // tail call. As a consequence the frame pointer will be used for dynamicalloc
5694   // and restoring the callers stack pointer in this functions epilog. This is
5695   // done because by tail calling the called function might overwrite the value
5696   // in this function's (MF) stack pointer stack slot 0(SP).
5697   if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
5698     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5699 
5700   assert(!(IsFastCall && CFlags.IsVarArg) &&
5701          "fastcc not supported on varargs functions");
5702 
5703   // Count how many bytes are to be pushed on the stack, including the linkage
5704   // area, and parameter passing area.  On ELFv1, the linkage area is 48 bytes
5705   // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
5706   // area is 32 bytes reserved space for [SP][CR][LR][TOC].
5707   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
5708   unsigned NumBytes = LinkageSize;
5709   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
5710 
5711   static const MCPhysReg GPR[] = {
5712     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
5713     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
5714   };
5715   static const MCPhysReg VR[] = {
5716     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
5717     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
5718   };
5719 
5720   const unsigned NumGPRs = array_lengthof(GPR);
5721   const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
5722   const unsigned NumVRs  = array_lengthof(VR);
5723 
5724   // On ELFv2, we can avoid allocating the parameter area if all the arguments
5725   // can be passed to the callee in registers.
5726   // For the fast calling convention, there is another check below.
5727   // Note: We should keep consistent with LowerFormalArguments_64SVR4()
5728   bool HasParameterArea = !isELFv2ABI || CFlags.IsVarArg || IsFastCall;
5729   if (!HasParameterArea) {
5730     unsigned ParamAreaSize = NumGPRs * PtrByteSize;
5731     unsigned AvailableFPRs = NumFPRs;
5732     unsigned AvailableVRs = NumVRs;
5733     unsigned NumBytesTmp = NumBytes;
5734     for (unsigned i = 0; i != NumOps; ++i) {
5735       if (Outs[i].Flags.isNest()) continue;
5736       if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags,
5737                                  PtrByteSize, LinkageSize, ParamAreaSize,
5738                                  NumBytesTmp, AvailableFPRs, AvailableVRs))
5739         HasParameterArea = true;
5740     }
5741   }
5742 
5743   // When using the fast calling convention, we don't provide backing for
5744   // arguments that will be in registers.
5745   unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
5746 
5747   // Avoid allocating parameter area for fastcc functions if all the arguments
5748   // can be passed in the registers.
5749   if (IsFastCall)
5750     HasParameterArea = false;
5751 
5752   // Add up all the space actually used.
5753   for (unsigned i = 0; i != NumOps; ++i) {
5754     ISD::ArgFlagsTy Flags = Outs[i].Flags;
5755     EVT ArgVT = Outs[i].VT;
5756     EVT OrigVT = Outs[i].ArgVT;
5757 
5758     if (Flags.isNest())
5759       continue;
5760 
5761     if (IsFastCall) {
5762       if (Flags.isByVal()) {
5763         NumGPRsUsed += (Flags.getByValSize()+7)/8;
5764         if (NumGPRsUsed > NumGPRs)
5765           HasParameterArea = true;
5766       } else {
5767         switch (ArgVT.getSimpleVT().SimpleTy) {
5768         default: llvm_unreachable("Unexpected ValueType for argument!");
5769         case MVT::i1:
5770         case MVT::i32:
5771         case MVT::i64:
5772           if (++NumGPRsUsed <= NumGPRs)
5773             continue;
5774           break;
5775         case MVT::v4i32:
5776         case MVT::v8i16:
5777         case MVT::v16i8:
5778         case MVT::v2f64:
5779         case MVT::v2i64:
5780         case MVT::v1i128:
5781         case MVT::f128:
5782           if (++NumVRsUsed <= NumVRs)
5783             continue;
5784           break;
5785         case MVT::v4f32:
5786           if (++NumVRsUsed <= NumVRs)
5787             continue;
5788           break;
5789         case MVT::f32:
5790         case MVT::f64:
5791           if (++NumFPRsUsed <= NumFPRs)
5792             continue;
5793           break;
5794         }
5795         HasParameterArea = true;
5796       }
5797     }
5798 
5799     /* Respect alignment of argument on the stack.  */
5800     auto Alignement =
5801         CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
5802     NumBytes = alignTo(NumBytes, Alignement);
5803 
5804     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
5805     if (Flags.isInConsecutiveRegsLast())
5806       NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
5807   }
5808 
5809   unsigned NumBytesActuallyUsed = NumBytes;
5810 
5811   // In the old ELFv1 ABI,
5812   // the prolog code of the callee may store up to 8 GPR argument registers to
5813   // the stack, allowing va_start to index over them in memory if its varargs.
5814   // Because we cannot tell if this is needed on the caller side, we have to
5815   // conservatively assume that it is needed.  As such, make sure we have at
5816   // least enough stack space for the caller to store the 8 GPRs.
5817   // In the ELFv2 ABI, we allocate the parameter area iff a callee
5818   // really requires memory operands, e.g. a vararg function.
5819   if (HasParameterArea)
5820     NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
5821   else
5822     NumBytes = LinkageSize;
5823 
5824   // Tail call needs the stack to be aligned.
5825   if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
5826     NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
5827 
5828   int SPDiff = 0;
5829 
5830   // Calculate by how many bytes the stack has to be adjusted in case of tail
5831   // call optimization.
5832   if (!IsSibCall)
5833     SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);
5834 
5835   // To protect arguments on the stack from being clobbered in a tail call,
5836   // force all the loads to happen before doing any other lowering.
5837   if (CFlags.IsTailCall)
5838     Chain = DAG.getStackArgumentTokenFactor(Chain);
5839 
5840   // Adjust the stack pointer for the new arguments...
5841   // These operations are automatically eliminated by the prolog/epilog pass
5842   if (!IsSibCall)
5843     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5844   SDValue CallSeqStart = Chain;
5845 
5846   // Load the return address and frame pointer so it can be move somewhere else
5847   // later.
5848   SDValue LROp, FPOp;
5849   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5850 
5851   // Set up a copy of the stack pointer for use loading and storing any
5852   // arguments that may not fit in the registers available for argument
5853   // passing.
5854   SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
5855 
5856   // Figure out which arguments are going to go in registers, and which in
5857   // memory.  Also, if this is a vararg function, floating point operations
5858   // must be stored to our stack, and loaded into integer regs as well, if
5859   // any integer regs are available for argument passing.
5860   unsigned ArgOffset = LinkageSize;
5861 
5862   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5863   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5864 
5865   SmallVector<SDValue, 8> MemOpChains;
5866   for (unsigned i = 0; i != NumOps; ++i) {
5867     SDValue Arg = OutVals[i];
5868     ISD::ArgFlagsTy Flags = Outs[i].Flags;
5869     EVT ArgVT = Outs[i].VT;
5870     EVT OrigVT = Outs[i].ArgVT;
5871 
5872     // PtrOff will be used to store the current argument to the stack if a
5873     // register cannot be found for it.
5874     SDValue PtrOff;
5875 
5876     // We re-align the argument offset for each argument, except when using the
5877     // fast calling convention, when we need to make sure we do that only when
5878     // we'll actually use a stack slot.
5879     auto ComputePtrOff = [&]() {
5880       /* Respect alignment of argument on the stack.  */
5881       auto Alignment =
5882           CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
5883       ArgOffset = alignTo(ArgOffset, Alignment);
5884 
5885       PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
5886 
5887       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
5888     };
5889 
5890     if (!IsFastCall) {
5891       ComputePtrOff();
5892 
5893       /* Compute GPR index associated with argument offset.  */
5894       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
5895       GPR_idx = std::min(GPR_idx, NumGPRs);
5896     }
5897 
5898     // Promote integers to 64-bit values.
5899     if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
5900       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
5901       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
5902       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
5903     }
5904 
5905     // FIXME memcpy is used way more than necessary.  Correctness first.
5906     // Note: "by value" is code for passing a structure by value, not
5907     // basic types.
5908     if (Flags.isByVal()) {
5909       // Note: Size includes alignment padding, so
5910       //   struct x { short a; char b; }
5911       // will have Size = 4.  With #pragma pack(1), it will have Size = 3.
5912       // These are the proper values we need for right-justifying the
5913       // aggregate in a parameter register.
5914       unsigned Size = Flags.getByValSize();
5915 
5916       // An empty aggregate parameter takes up no storage and no
5917       // registers.
5918       if (Size == 0)
5919         continue;
5920 
5921       if (IsFastCall)
5922         ComputePtrOff();
5923 
5924       // All aggregates smaller than 8 bytes must be passed right-justified.
5925       if (Size==1 || Size==2 || Size==4) {
5926         EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
5927         if (GPR_idx != NumGPRs) {
5928           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
5929                                         MachinePointerInfo(), VT);
5930           MemOpChains.push_back(Load.getValue(1));
5931           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5932 
5933           ArgOffset += PtrByteSize;
5934           continue;
5935         }
5936       }
5937 
5938       if (GPR_idx == NumGPRs && Size < 8) {
5939         SDValue AddPtr = PtrOff;
5940         if (!isLittleEndian) {
5941           SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
5942                                           PtrOff.getValueType());
5943           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
5944         }
5945         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
5946                                                           CallSeqStart,
5947                                                           Flags, DAG, dl);
5948         ArgOffset += PtrByteSize;
5949         continue;
5950       }
5951       // Copy entire object into memory.  There are cases where gcc-generated
5952       // code assumes it is there, even if it could be put entirely into
5953       // registers.  (This is not what the doc says.)
5954 
5955       // FIXME: The above statement is likely due to a misunderstanding of the
5956       // documents.  All arguments must be copied into the parameter area BY
5957       // THE CALLEE in the event that the callee takes the address of any
5958       // formal argument.  That has not yet been implemented.  However, it is
5959       // reasonable to use the stack area as a staging area for the register
5960       // load.
5961 
5962       // Skip this for small aggregates, as we will use the same slot for a
5963       // right-justified copy, below.
5964       if (Size >= 8)
5965         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
5966                                                           CallSeqStart,
5967                                                           Flags, DAG, dl);
5968 
5969       // When a register is available, pass a small aggregate right-justified.
5970       if (Size < 8 && GPR_idx != NumGPRs) {
5971         // The easiest way to get this right-justified in a register
5972         // is to copy the structure into the rightmost portion of a
5973         // local variable slot, then load the whole slot into the
5974         // register.
5975         // FIXME: The memcpy seems to produce pretty awful code for
5976         // small aggregates, particularly for packed ones.
5977         // FIXME: It would be preferable to use the slot in the
5978         // parameter save area instead of a new local variable.
5979         SDValue AddPtr = PtrOff;
5980         if (!isLittleEndian) {
5981           SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType());
5982           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
5983         }
5984         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
5985                                                           CallSeqStart,
5986                                                           Flags, DAG, dl);
5987 
5988         // Load the slot into the register.
5989         SDValue Load =
5990             DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo());
5991         MemOpChains.push_back(Load.getValue(1));
5992         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5993 
5994         // Done with this argument.
5995         ArgOffset += PtrByteSize;
5996         continue;
5997       }
5998 
5999       // For aggregates larger than PtrByteSize, copy the pieces of the
6000       // object that fit into registers from the parameter save area.
6001       for (unsigned j=0; j<Size; j+=PtrByteSize) {
6002         SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6003         SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6004         if (GPR_idx != NumGPRs) {
6005           SDValue Load =
6006               DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
6007           MemOpChains.push_back(Load.getValue(1));
6008           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6009           ArgOffset += PtrByteSize;
6010         } else {
6011           ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
6012           break;
6013         }
6014       }
6015       continue;
6016     }
6017 
6018     switch (Arg.getSimpleValueType().SimpleTy) {
6019     default: llvm_unreachable("Unexpected ValueType for argument!");
6020     case MVT::i1:
6021     case MVT::i32:
6022     case MVT::i64:
6023       if (Flags.isNest()) {
6024         // The 'nest' parameter, if any, is passed in R11.
6025         RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
6026         break;
6027       }
6028 
6029       // These can be scalar arguments or elements of an integer array type
6030       // passed directly.  Clang may use those instead of "byval" aggregate
6031       // types to avoid forcing arguments to memory unnecessarily.
6032       if (GPR_idx != NumGPRs) {
6033         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
6034       } else {
6035         if (IsFastCall)
6036           ComputePtrOff();
6037 
6038         assert(HasParameterArea &&
6039                "Parameter area must exist to pass an argument in memory.");
6040         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6041                          true, CFlags.IsTailCall, false, MemOpChains,
6042                          TailCallArguments, dl);
6043         if (IsFastCall)
6044           ArgOffset += PtrByteSize;
6045       }
6046       if (!IsFastCall)
6047         ArgOffset += PtrByteSize;
6048       break;
6049     case MVT::f32:
6050     case MVT::f64: {
6051       // These can be scalar arguments or elements of a float array type
6052       // passed directly.  The latter are used to implement ELFv2 homogenous
6053       // float aggregates.
6054 
6055       // Named arguments go into FPRs first, and once they overflow, the
6056       // remaining arguments go into GPRs and then the parameter save area.
6057       // Unnamed arguments for vararg functions always go to GPRs and
6058       // then the parameter save area.  For now, put all arguments to vararg
6059       // routines always in both locations (FPR *and* GPR or stack slot).
6060       bool NeedGPROrStack = CFlags.IsVarArg || FPR_idx == NumFPRs;
6061       bool NeededLoad = false;
6062 
6063       // First load the argument into the next available FPR.
6064       if (FPR_idx != NumFPRs)
6065         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6066 
6067       // Next, load the argument into GPR or stack slot if needed.
6068       if (!NeedGPROrStack)
6069         ;
6070       else if (GPR_idx != NumGPRs && !IsFastCall) {
6071         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
6072         // once we support fp <-> gpr moves.
6073 
6074         // In the non-vararg case, this can only ever happen in the
6075         // presence of f32 array types, since otherwise we never run
6076         // out of FPRs before running out of GPRs.
6077         SDValue ArgVal;
6078 
6079         // Double values are always passed in a single GPR.
6080         if (Arg.getValueType() != MVT::f32) {
6081           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
6082 
6083         // Non-array float values are extended and passed in a GPR.
6084         } else if (!Flags.isInConsecutiveRegs()) {
6085           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6086           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6087 
6088         // If we have an array of floats, we collect every odd element
6089         // together with its predecessor into one GPR.
6090         } else if (ArgOffset % PtrByteSize != 0) {
6091           SDValue Lo, Hi;
6092           Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
6093           Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6094           if (!isLittleEndian)
6095             std::swap(Lo, Hi);
6096           ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
6097 
6098         // The final element, if even, goes into the first half of a GPR.
6099         } else if (Flags.isInConsecutiveRegsLast()) {
6100           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6101           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6102           if (!isLittleEndian)
6103             ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
6104                                  DAG.getConstant(32, dl, MVT::i32));
6105 
6106         // Non-final even elements are skipped; they will be handled
6107         // together the with subsequent argument on the next go-around.
6108         } else
6109           ArgVal = SDValue();
6110 
6111         if (ArgVal.getNode())
6112           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
6113       } else {
6114         if (IsFastCall)
6115           ComputePtrOff();
6116 
6117         // Single-precision floating-point values are mapped to the
6118         // second (rightmost) word of the stack doubleword.
6119         if (Arg.getValueType() == MVT::f32 &&
6120             !isLittleEndian && !Flags.isInConsecutiveRegs()) {
6121           SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6122           PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6123         }
6124 
6125         assert(HasParameterArea &&
6126                "Parameter area must exist to pass an argument in memory.");
6127         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6128                          true, CFlags.IsTailCall, false, MemOpChains,
6129                          TailCallArguments, dl);
6130 
6131         NeededLoad = true;
6132       }
6133       // When passing an array of floats, the array occupies consecutive
6134       // space in the argument area; only round up to the next doubleword
6135       // at the end of the array.  Otherwise, each float takes 8 bytes.
6136       if (!IsFastCall || NeededLoad) {
6137         ArgOffset += (Arg.getValueType() == MVT::f32 &&
6138                       Flags.isInConsecutiveRegs()) ? 4 : 8;
6139         if (Flags.isInConsecutiveRegsLast())
6140           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
6141       }
6142       break;
6143     }
6144     case MVT::v4f32:
6145     case MVT::v4i32:
6146     case MVT::v8i16:
6147     case MVT::v16i8:
6148     case MVT::v2f64:
6149     case MVT::v2i64:
6150     case MVT::v1i128:
6151     case MVT::f128:
6152       // These can be scalar arguments or elements of a vector array type
6153       // passed directly.  The latter are used to implement ELFv2 homogenous
6154       // vector aggregates.
6155 
6156       // For a varargs call, named arguments go into VRs or on the stack as
6157       // usual; unnamed arguments always go to the stack or the corresponding
6158       // GPRs when within range.  For now, we always put the value in both
6159       // locations (or even all three).
6160       if (CFlags.IsVarArg) {
6161         assert(HasParameterArea &&
6162                "Parameter area must exist if we have a varargs call.");
6163         // We could elide this store in the case where the object fits
6164         // entirely in R registers.  Maybe later.
6165         SDValue Store =
6166             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6167         MemOpChains.push_back(Store);
6168         if (VR_idx != NumVRs) {
6169           SDValue Load =
6170               DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6171           MemOpChains.push_back(Load.getValue(1));
6172           RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6173         }
6174         ArgOffset += 16;
6175         for (unsigned i=0; i<16; i+=PtrByteSize) {
6176           if (GPR_idx == NumGPRs)
6177             break;
6178           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6179                                    DAG.getConstant(i, dl, PtrVT));
6180           SDValue Load =
6181               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6182           MemOpChains.push_back(Load.getValue(1));
6183           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6184         }
6185         break;
6186       }
6187 
6188       // Non-varargs Altivec params go into VRs or on the stack.
6189       if (VR_idx != NumVRs) {
6190         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6191       } else {
6192         if (IsFastCall)
6193           ComputePtrOff();
6194 
6195         assert(HasParameterArea &&
6196                "Parameter area must exist to pass an argument in memory.");
6197         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6198                          true, CFlags.IsTailCall, true, MemOpChains,
6199                          TailCallArguments, dl);
6200         if (IsFastCall)
6201           ArgOffset += 16;
6202       }
6203 
6204       if (!IsFastCall)
6205         ArgOffset += 16;
6206       break;
6207     }
6208   }
6209 
6210   assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
6211          "mismatch in size of parameter area");
6212   (void)NumBytesActuallyUsed;
6213 
6214   if (!MemOpChains.empty())
6215     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6216 
6217   // Check if this is an indirect call (MTCTR/BCTRL).
6218   // See prepareDescriptorIndirectCall and buildCallOperands for more
6219   // information about calls through function pointers in the 64-bit SVR4 ABI.
6220   if (CFlags.IsIndirect) {
6221     // For 64-bit ELFv2 ABI with PCRel, do not save the TOC of the
6222     // caller in the TOC save area.
6223     if (isTOCSaveRestoreRequired(Subtarget)) {
6224       assert(!CFlags.IsTailCall && "Indirect tails calls not supported");
6225       // Load r2 into a virtual register and store it to the TOC save area.
6226       setUsesTOCBasePtr(DAG);
6227       SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
6228       // TOC save area offset.
6229       unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
6230       SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
6231       SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6232       Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr,
6233                            MachinePointerInfo::getStack(
6234                                DAG.getMachineFunction(), TOCSaveOffset));
6235     }
6236     // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
6237     // This does not mean the MTCTR instruction must use R12; it's easier
6238     // to model this as an extra parameter, so do that.
6239     if (isELFv2ABI && !CFlags.IsPatchPoint)
6240       RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
6241   }
6242 
6243   // Build a sequence of copy-to-reg nodes chained together with token chain
6244   // and flag operands which copy the outgoing args into the appropriate regs.
6245   SDValue InFlag;
6246   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6247     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6248                              RegsToPass[i].second, InFlag);
6249     InFlag = Chain.getValue(1);
6250   }
6251 
6252   if (CFlags.IsTailCall && !IsSibCall)
6253     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6254                     TailCallArguments);
6255 
6256   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
6257                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
6258 }
6259 
6260 static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
6261                    CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
6262                    CCState &State) {
6263 
6264   const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>(
6265       State.getMachineFunction().getSubtarget());
6266   const bool IsPPC64 = Subtarget.isPPC64();
6267   const Align PtrAlign = IsPPC64 ? Align(8) : Align(4);
6268   const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
6269 
6270   if (ValVT.isVector() && !State.getMachineFunction()
6271                                .getTarget()
6272                                .Options.EnableAIXExtendedAltivecABI)
6273     report_fatal_error("the default Altivec AIX ABI is not yet supported");
6274 
6275   if (ValVT == MVT::f128)
6276     report_fatal_error("f128 is unimplemented on AIX.");
6277 
6278   if (ArgFlags.isNest())
6279     report_fatal_error("Nest arguments are unimplemented.");
6280 
6281   static const MCPhysReg GPR_32[] = {// 32-bit registers.
6282                                      PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6283                                      PPC::R7, PPC::R8, PPC::R9, PPC::R10};
6284   static const MCPhysReg GPR_64[] = {// 64-bit registers.
6285                                      PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6286                                      PPC::X7, PPC::X8, PPC::X9, PPC::X10};
6287 
6288   static const MCPhysReg VR[] = {// Vector registers.
6289                                  PPC::V2,  PPC::V3,  PPC::V4,  PPC::V5,
6290                                  PPC::V6,  PPC::V7,  PPC::V8,  PPC::V9,
6291                                  PPC::V10, PPC::V11, PPC::V12, PPC::V13};
6292 
6293   if (ArgFlags.isByVal()) {
6294     if (ArgFlags.getNonZeroByValAlign() > PtrAlign)
6295       report_fatal_error("Pass-by-value arguments with alignment greater than "
6296                          "register width are not supported.");
6297 
6298     const unsigned ByValSize = ArgFlags.getByValSize();
6299 
6300     // An empty aggregate parameter takes up no storage and no registers,
6301     // but needs a MemLoc for a stack slot for the formal arguments side.
6302     if (ByValSize == 0) {
6303       State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
6304                                        State.getNextStackOffset(), RegVT,
6305                                        LocInfo));
6306       return false;
6307     }
6308 
6309     const unsigned StackSize = alignTo(ByValSize, PtrAlign);
6310     unsigned Offset = State.AllocateStack(StackSize, PtrAlign);
6311     for (const unsigned E = Offset + StackSize; Offset < E;
6312          Offset += PtrAlign.value()) {
6313       if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
6314         State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6315       else {
6316         State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
6317                                          Offset, MVT::INVALID_SIMPLE_VALUE_TYPE,
6318                                          LocInfo));
6319         break;
6320       }
6321     }
6322     return false;
6323   }
6324 
6325   // Arguments always reserve parameter save area.
6326   switch (ValVT.SimpleTy) {
6327   default:
6328     report_fatal_error("Unhandled value type for argument.");
6329   case MVT::i64:
6330     // i64 arguments should have been split to i32 for PPC32.
6331     assert(IsPPC64 && "PPC32 should have split i64 values.");
6332     LLVM_FALLTHROUGH;
6333   case MVT::i1:
6334   case MVT::i32: {
6335     const unsigned Offset = State.AllocateStack(PtrAlign.value(), PtrAlign);
6336     // AIX integer arguments are always passed in register width.
6337     if (ValVT.getFixedSizeInBits() < RegVT.getFixedSizeInBits())
6338       LocInfo = ArgFlags.isSExt() ? CCValAssign::LocInfo::SExt
6339                                   : CCValAssign::LocInfo::ZExt;
6340     if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
6341       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6342     else
6343       State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, RegVT, LocInfo));
6344 
6345     return false;
6346   }
6347   case MVT::f32:
6348   case MVT::f64: {
6349     // Parameter save area (PSA) is reserved even if the float passes in fpr.
6350     const unsigned StoreSize = LocVT.getStoreSize();
6351     // Floats are always 4-byte aligned in the PSA on AIX.
6352     // This includes f64 in 64-bit mode for ABI compatibility.
6353     const unsigned Offset =
6354         State.AllocateStack(IsPPC64 ? 8 : StoreSize, Align(4));
6355     unsigned FReg = State.AllocateReg(FPR);
6356     if (FReg)
6357       State.addLoc(CCValAssign::getReg(ValNo, ValVT, FReg, LocVT, LocInfo));
6358 
6359     // Reserve and initialize GPRs or initialize the PSA as required.
6360     for (unsigned I = 0; I < StoreSize; I += PtrAlign.value()) {
6361       if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) {
6362         assert(FReg && "An FPR should be available when a GPR is reserved.");
6363         if (State.isVarArg()) {
6364           // Successfully reserved GPRs are only initialized for vararg calls.
6365           // Custom handling is required for:
6366           //   f64 in PPC32 needs to be split into 2 GPRs.
6367           //   f32 in PPC64 needs to occupy only lower 32 bits of 64-bit GPR.
6368           State.addLoc(
6369               CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6370         }
6371       } else {
6372         // If there are insufficient GPRs, the PSA needs to be initialized.
6373         // Initialization occurs even if an FPR was initialized for
6374         // compatibility with the AIX XL compiler. The full memory for the
6375         // argument will be initialized even if a prior word is saved in GPR.
6376         // A custom memLoc is used when the argument also passes in FPR so
6377         // that the callee handling can skip over it easily.
6378         State.addLoc(
6379             FReg ? CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT,
6380                                              LocInfo)
6381                  : CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6382         break;
6383       }
6384     }
6385 
6386     return false;
6387   }
6388   case MVT::v4f32:
6389   case MVT::v4i32:
6390   case MVT::v8i16:
6391   case MVT::v16i8:
6392   case MVT::v2i64:
6393   case MVT::v2f64:
6394   case MVT::v1i128: {
6395     if (State.isVarArg())
6396       report_fatal_error(
6397           "variadic arguments for vector types are unimplemented for AIX");
6398 
6399     if (unsigned VReg = State.AllocateReg(VR))
6400       State.addLoc(CCValAssign::getReg(ValNo, ValVT, VReg, LocVT, LocInfo));
6401     else {
6402       report_fatal_error(
6403           "passing vector parameters to the stack is unimplemented for AIX");
6404     }
6405     return false;
6406   }
6407   }
6408   return true;
6409 }
6410 
6411 static const TargetRegisterClass *getRegClassForSVT(MVT::SimpleValueType SVT,
6412                                                     bool IsPPC64) {
6413   assert((IsPPC64 || SVT != MVT::i64) &&
6414          "i64 should have been split for 32-bit codegen.");
6415 
6416   switch (SVT) {
6417   default:
6418     report_fatal_error("Unexpected value type for formal argument");
6419   case MVT::i1:
6420   case MVT::i32:
6421   case MVT::i64:
6422     return IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
6423   case MVT::f32:
6424     return &PPC::F4RCRegClass;
6425   case MVT::f64:
6426     return &PPC::F8RCRegClass;
6427   case MVT::v4f32:
6428   case MVT::v4i32:
6429   case MVT::v8i16:
6430   case MVT::v16i8:
6431   case MVT::v2i64:
6432   case MVT::v2f64:
6433   case MVT::v1i128:
6434     return &PPC::VRRCRegClass;
6435   }
6436 }
6437 
6438 static SDValue truncateScalarIntegerArg(ISD::ArgFlagsTy Flags, EVT ValVT,
6439                                         SelectionDAG &DAG, SDValue ArgValue,
6440                                         MVT LocVT, const SDLoc &dl) {
6441   assert(ValVT.isScalarInteger() && LocVT.isScalarInteger());
6442   assert(ValVT.getFixedSizeInBits() < LocVT.getFixedSizeInBits());
6443 
6444   if (Flags.isSExt())
6445     ArgValue = DAG.getNode(ISD::AssertSext, dl, LocVT, ArgValue,
6446                            DAG.getValueType(ValVT));
6447   else if (Flags.isZExt())
6448     ArgValue = DAG.getNode(ISD::AssertZext, dl, LocVT, ArgValue,
6449                            DAG.getValueType(ValVT));
6450 
6451   return DAG.getNode(ISD::TRUNCATE, dl, ValVT, ArgValue);
6452 }
6453 
6454 static unsigned mapArgRegToOffsetAIX(unsigned Reg, const PPCFrameLowering *FL) {
6455   const unsigned LASize = FL->getLinkageSize();
6456 
6457   if (PPC::GPRCRegClass.contains(Reg)) {
6458     assert(Reg >= PPC::R3 && Reg <= PPC::R10 &&
6459            "Reg must be a valid argument register!");
6460     return LASize + 4 * (Reg - PPC::R3);
6461   }
6462 
6463   if (PPC::G8RCRegClass.contains(Reg)) {
6464     assert(Reg >= PPC::X3 && Reg <= PPC::X10 &&
6465            "Reg must be a valid argument register!");
6466     return LASize + 8 * (Reg - PPC::X3);
6467   }
6468 
6469   llvm_unreachable("Only general purpose registers expected.");
6470 }
6471 
6472 //   AIX ABI Stack Frame Layout:
6473 //
6474 //   Low Memory +--------------------------------------------+
6475 //   SP   +---> | Back chain                                 | ---+
6476 //        |     +--------------------------------------------+    |
6477 //        |     | Saved Condition Register                   |    |
6478 //        |     +--------------------------------------------+    |
6479 //        |     | Saved Linkage Register                     |    |
6480 //        |     +--------------------------------------------+    | Linkage Area
6481 //        |     | Reserved for compilers                     |    |
6482 //        |     +--------------------------------------------+    |
6483 //        |     | Reserved for binders                       |    |
6484 //        |     +--------------------------------------------+    |
6485 //        |     | Saved TOC pointer                          | ---+
6486 //        |     +--------------------------------------------+
6487 //        |     | Parameter save area                        |
6488 //        |     +--------------------------------------------+
6489 //        |     | Alloca space                               |
6490 //        |     +--------------------------------------------+
6491 //        |     | Local variable space                       |
6492 //        |     +--------------------------------------------+
6493 //        |     | Float/int conversion temporary             |
6494 //        |     +--------------------------------------------+
6495 //        |     | Save area for AltiVec registers            |
6496 //        |     +--------------------------------------------+
6497 //        |     | AltiVec alignment padding                  |
6498 //        |     +--------------------------------------------+
6499 //        |     | Save area for VRSAVE register              |
6500 //        |     +--------------------------------------------+
6501 //        |     | Save area for General Purpose registers    |
6502 //        |     +--------------------------------------------+
6503 //        |     | Save area for Floating Point registers     |
6504 //        |     +--------------------------------------------+
6505 //        +---- | Back chain                                 |
6506 // High Memory  +--------------------------------------------+
6507 //
6508 //  Specifications:
6509 //  AIX 7.2 Assembler Language Reference
6510 //  Subroutine linkage convention
6511 
6512 SDValue PPCTargetLowering::LowerFormalArguments_AIX(
6513     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
6514     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6515     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
6516 
6517   assert((CallConv == CallingConv::C || CallConv == CallingConv::Cold ||
6518           CallConv == CallingConv::Fast) &&
6519          "Unexpected calling convention!");
6520 
6521   if (getTargetMachine().Options.GuaranteedTailCallOpt)
6522     report_fatal_error("Tail call support is unimplemented on AIX.");
6523 
6524   if (useSoftFloat())
6525     report_fatal_error("Soft float support is unimplemented on AIX.");
6526 
6527   const PPCSubtarget &Subtarget =
6528       static_cast<const PPCSubtarget &>(DAG.getSubtarget());
6529 
6530   const bool IsPPC64 = Subtarget.isPPC64();
6531   const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
6532 
6533   // Assign locations to all of the incoming arguments.
6534   SmallVector<CCValAssign, 16> ArgLocs;
6535   MachineFunction &MF = DAG.getMachineFunction();
6536   MachineFrameInfo &MFI = MF.getFrameInfo();
6537   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
6538   CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
6539 
6540   const EVT PtrVT = getPointerTy(MF.getDataLayout());
6541   // Reserve space for the linkage area on the stack.
6542   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6543   CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
6544   CCInfo.AnalyzeFormalArguments(Ins, CC_AIX);
6545 
6546   SmallVector<SDValue, 8> MemOps;
6547 
6548   for (size_t I = 0, End = ArgLocs.size(); I != End; /* No increment here */) {
6549     CCValAssign &VA = ArgLocs[I++];
6550     MVT LocVT = VA.getLocVT();
6551     ISD::ArgFlagsTy Flags = Ins[VA.getValNo()].Flags;
6552     if (VA.isMemLoc() && VA.getValVT().isVector())
6553       report_fatal_error(
6554           "passing vector parameters to the stack is unimplemented for AIX");
6555 
6556     // For compatibility with the AIX XL compiler, the float args in the
6557     // parameter save area are initialized even if the argument is available
6558     // in register.  The caller is required to initialize both the register
6559     // and memory, however, the callee can choose to expect it in either.
6560     // The memloc is dismissed here because the argument is retrieved from
6561     // the register.
6562     if (VA.isMemLoc() && VA.needsCustom())
6563       continue;
6564 
6565     if (VA.isRegLoc()) {
6566       if (VA.getValVT().isScalarInteger())
6567         FuncInfo->appendParameterType(PPCFunctionInfo::FixedType);
6568       else if (VA.getValVT().isFloatingPoint() && !VA.getValVT().isVector())
6569         FuncInfo->appendParameterType(VA.getValVT().SimpleTy == MVT::f32
6570                                           ? PPCFunctionInfo::ShortFloatPoint
6571                                           : PPCFunctionInfo::LongFloatPoint);
6572     }
6573 
6574     if (Flags.isByVal() && VA.isMemLoc()) {
6575       const unsigned Size =
6576           alignTo(Flags.getByValSize() ? Flags.getByValSize() : PtrByteSize,
6577                   PtrByteSize);
6578       const int FI = MF.getFrameInfo().CreateFixedObject(
6579           Size, VA.getLocMemOffset(), /* IsImmutable */ false,
6580           /* IsAliased */ true);
6581       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
6582       InVals.push_back(FIN);
6583 
6584       continue;
6585     }
6586 
6587     if (Flags.isByVal()) {
6588       assert(VA.isRegLoc() && "MemLocs should already be handled.");
6589 
6590       const MCPhysReg ArgReg = VA.getLocReg();
6591       const PPCFrameLowering *FL = Subtarget.getFrameLowering();
6592 
6593       if (Flags.getNonZeroByValAlign() > PtrByteSize)
6594         report_fatal_error("Over aligned byvals not supported yet.");
6595 
6596       const unsigned StackSize = alignTo(Flags.getByValSize(), PtrByteSize);
6597       const int FI = MF.getFrameInfo().CreateFixedObject(
6598           StackSize, mapArgRegToOffsetAIX(ArgReg, FL), /* IsImmutable */ false,
6599           /* IsAliased */ true);
6600       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
6601       InVals.push_back(FIN);
6602 
6603       // Add live ins for all the RegLocs for the same ByVal.
6604       const TargetRegisterClass *RegClass =
6605           IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
6606 
6607       auto HandleRegLoc = [&, RegClass, LocVT](const MCPhysReg PhysReg,
6608                                                unsigned Offset) {
6609         const unsigned VReg = MF.addLiveIn(PhysReg, RegClass);
6610         // Since the callers side has left justified the aggregate in the
6611         // register, we can simply store the entire register into the stack
6612         // slot.
6613         SDValue CopyFrom = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
6614         // The store to the fixedstack object is needed becuase accessing a
6615         // field of the ByVal will use a gep and load. Ideally we will optimize
6616         // to extracting the value from the register directly, and elide the
6617         // stores when the arguments address is not taken, but that will need to
6618         // be future work.
6619         SDValue Store = DAG.getStore(
6620             CopyFrom.getValue(1), dl, CopyFrom,
6621             DAG.getObjectPtrOffset(dl, FIN, TypeSize::Fixed(Offset)),
6622             MachinePointerInfo::getFixedStack(MF, FI, Offset));
6623 
6624         MemOps.push_back(Store);
6625       };
6626 
6627       unsigned Offset = 0;
6628       HandleRegLoc(VA.getLocReg(), Offset);
6629       Offset += PtrByteSize;
6630       for (; Offset != StackSize && ArgLocs[I].isRegLoc();
6631            Offset += PtrByteSize) {
6632         assert(ArgLocs[I].getValNo() == VA.getValNo() &&
6633                "RegLocs should be for ByVal argument.");
6634 
6635         const CCValAssign RL = ArgLocs[I++];
6636         HandleRegLoc(RL.getLocReg(), Offset);
6637         FuncInfo->appendParameterType(PPCFunctionInfo::FixedType);
6638       }
6639 
6640       if (Offset != StackSize) {
6641         assert(ArgLocs[I].getValNo() == VA.getValNo() &&
6642                "Expected MemLoc for remaining bytes.");
6643         assert(ArgLocs[I].isMemLoc() && "Expected MemLoc for remaining bytes.");
6644         // Consume the MemLoc.The InVal has already been emitted, so nothing
6645         // more needs to be done.
6646         ++I;
6647       }
6648 
6649       continue;
6650     }
6651 
6652     EVT ValVT = VA.getValVT();
6653     if (VA.isRegLoc() && !VA.needsCustom()) {
6654       MVT::SimpleValueType SVT = ValVT.getSimpleVT().SimpleTy;
6655       unsigned VReg =
6656           MF.addLiveIn(VA.getLocReg(), getRegClassForSVT(SVT, IsPPC64));
6657       SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
6658       if (ValVT.isScalarInteger() &&
6659           (ValVT.getFixedSizeInBits() < LocVT.getFixedSizeInBits())) {
6660         ArgValue =
6661             truncateScalarIntegerArg(Flags, ValVT, DAG, ArgValue, LocVT, dl);
6662       }
6663       InVals.push_back(ArgValue);
6664       continue;
6665     }
6666     if (VA.isMemLoc()) {
6667       const unsigned LocSize = LocVT.getStoreSize();
6668       const unsigned ValSize = ValVT.getStoreSize();
6669       assert((ValSize <= LocSize) &&
6670              "Object size is larger than size of MemLoc");
6671       int CurArgOffset = VA.getLocMemOffset();
6672       // Objects are right-justified because AIX is big-endian.
6673       if (LocSize > ValSize)
6674         CurArgOffset += LocSize - ValSize;
6675       // Potential tail calls could cause overwriting of argument stack slots.
6676       const bool IsImmutable =
6677           !(getTargetMachine().Options.GuaranteedTailCallOpt &&
6678             (CallConv == CallingConv::Fast));
6679       int FI = MFI.CreateFixedObject(ValSize, CurArgOffset, IsImmutable);
6680       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
6681       SDValue ArgValue =
6682           DAG.getLoad(ValVT, dl, Chain, FIN, MachinePointerInfo());
6683       InVals.push_back(ArgValue);
6684       continue;
6685     }
6686   }
6687 
6688   // On AIX a minimum of 8 words is saved to the parameter save area.
6689   const unsigned MinParameterSaveArea = 8 * PtrByteSize;
6690   // Area that is at least reserved in the caller of this function.
6691   unsigned CallerReservedArea =
6692       std::max(CCInfo.getNextStackOffset(), LinkageSize + MinParameterSaveArea);
6693 
6694   // Set the size that is at least reserved in caller of this function. Tail
6695   // call optimized function's reserved stack space needs to be aligned so
6696   // that taking the difference between two stack areas will result in an
6697   // aligned stack.
6698   CallerReservedArea =
6699       EnsureStackAlignment(Subtarget.getFrameLowering(), CallerReservedArea);
6700   FuncInfo->setMinReservedArea(CallerReservedArea);
6701 
6702   if (isVarArg) {
6703     FuncInfo->setVarArgsFrameIndex(
6704         MFI.CreateFixedObject(PtrByteSize, CCInfo.getNextStackOffset(), true));
6705     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
6706 
6707     static const MCPhysReg GPR_32[] = {PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6708                                        PPC::R7, PPC::R8, PPC::R9, PPC::R10};
6709 
6710     static const MCPhysReg GPR_64[] = {PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6711                                        PPC::X7, PPC::X8, PPC::X9, PPC::X10};
6712     const unsigned NumGPArgRegs = array_lengthof(IsPPC64 ? GPR_64 : GPR_32);
6713 
6714     // The fixed integer arguments of a variadic function are stored to the
6715     // VarArgsFrameIndex on the stack so that they may be loaded by
6716     // dereferencing the result of va_next.
6717     for (unsigned GPRIndex =
6718              (CCInfo.getNextStackOffset() - LinkageSize) / PtrByteSize;
6719          GPRIndex < NumGPArgRegs; ++GPRIndex) {
6720 
6721       const unsigned VReg =
6722           IsPPC64 ? MF.addLiveIn(GPR_64[GPRIndex], &PPC::G8RCRegClass)
6723                   : MF.addLiveIn(GPR_32[GPRIndex], &PPC::GPRCRegClass);
6724 
6725       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
6726       SDValue Store =
6727           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
6728       MemOps.push_back(Store);
6729       // Increment the address for the next argument to store.
6730       SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
6731       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
6732     }
6733   }
6734 
6735   if (!MemOps.empty())
6736     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
6737 
6738   return Chain;
6739 }
6740 
6741 SDValue PPCTargetLowering::LowerCall_AIX(
6742     SDValue Chain, SDValue Callee, CallFlags CFlags,
6743     const SmallVectorImpl<ISD::OutputArg> &Outs,
6744     const SmallVectorImpl<SDValue> &OutVals,
6745     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6746     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
6747     const CallBase *CB) const {
6748   // See PPCTargetLowering::LowerFormalArguments_AIX() for a description of the
6749   // AIX ABI stack frame layout.
6750 
6751   assert((CFlags.CallConv == CallingConv::C ||
6752           CFlags.CallConv == CallingConv::Cold ||
6753           CFlags.CallConv == CallingConv::Fast) &&
6754          "Unexpected calling convention!");
6755 
6756   if (CFlags.IsPatchPoint)
6757     report_fatal_error("This call type is unimplemented on AIX.");
6758 
6759   const PPCSubtarget& Subtarget =
6760       static_cast<const PPCSubtarget&>(DAG.getSubtarget());
6761 
6762   MachineFunction &MF = DAG.getMachineFunction();
6763   SmallVector<CCValAssign, 16> ArgLocs;
6764   CCState CCInfo(CFlags.CallConv, CFlags.IsVarArg, MF, ArgLocs,
6765                  *DAG.getContext());
6766 
6767   // Reserve space for the linkage save area (LSA) on the stack.
6768   // In both PPC32 and PPC64 there are 6 reserved slots in the LSA:
6769   //   [SP][CR][LR][2 x reserved][TOC].
6770   // The LSA is 24 bytes (6x4) in PPC32 and 48 bytes (6x8) in PPC64.
6771   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6772   const bool IsPPC64 = Subtarget.isPPC64();
6773   const EVT PtrVT = getPointerTy(DAG.getDataLayout());
6774   const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
6775   CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
6776   CCInfo.AnalyzeCallOperands(Outs, CC_AIX);
6777 
6778   // The prolog code of the callee may store up to 8 GPR argument registers to
6779   // the stack, allowing va_start to index over them in memory if the callee
6780   // is variadic.
6781   // Because we cannot tell if this is needed on the caller side, we have to
6782   // conservatively assume that it is needed.  As such, make sure we have at
6783   // least enough stack space for the caller to store the 8 GPRs.
6784   const unsigned MinParameterSaveAreaSize = 8 * PtrByteSize;
6785   const unsigned NumBytes = std::max(LinkageSize + MinParameterSaveAreaSize,
6786                                      CCInfo.getNextStackOffset());
6787 
6788   // Adjust the stack pointer for the new arguments...
6789   // These operations are automatically eliminated by the prolog/epilog pass.
6790   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6791   SDValue CallSeqStart = Chain;
6792 
6793   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6794   SmallVector<SDValue, 8> MemOpChains;
6795 
6796   // Set up a copy of the stack pointer for loading and storing any
6797   // arguments that may not fit in the registers available for argument
6798   // passing.
6799   const SDValue StackPtr = IsPPC64 ? DAG.getRegister(PPC::X1, MVT::i64)
6800                                    : DAG.getRegister(PPC::R1, MVT::i32);
6801 
6802   for (unsigned I = 0, E = ArgLocs.size(); I != E;) {
6803     const unsigned ValNo = ArgLocs[I].getValNo();
6804     SDValue Arg = OutVals[ValNo];
6805     ISD::ArgFlagsTy Flags = Outs[ValNo].Flags;
6806 
6807     if (Flags.isByVal()) {
6808       const unsigned ByValSize = Flags.getByValSize();
6809 
6810       // Nothing to do for zero-sized ByVals on the caller side.
6811       if (!ByValSize) {
6812         ++I;
6813         continue;
6814       }
6815 
6816       auto GetLoad = [&](EVT VT, unsigned LoadOffset) {
6817         return DAG.getExtLoad(
6818             ISD::ZEXTLOAD, dl, PtrVT, Chain,
6819             (LoadOffset != 0)
6820                 ? DAG.getObjectPtrOffset(dl, Arg, TypeSize::Fixed(LoadOffset))
6821                 : Arg,
6822             MachinePointerInfo(), VT);
6823       };
6824 
6825       unsigned LoadOffset = 0;
6826 
6827       // Initialize registers, which are fully occupied by the by-val argument.
6828       while (LoadOffset + PtrByteSize <= ByValSize && ArgLocs[I].isRegLoc()) {
6829         SDValue Load = GetLoad(PtrVT, LoadOffset);
6830         MemOpChains.push_back(Load.getValue(1));
6831         LoadOffset += PtrByteSize;
6832         const CCValAssign &ByValVA = ArgLocs[I++];
6833         assert(ByValVA.getValNo() == ValNo &&
6834                "Unexpected location for pass-by-value argument.");
6835         RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), Load));
6836       }
6837 
6838       if (LoadOffset == ByValSize)
6839         continue;
6840 
6841       // There must be one more loc to handle the remainder.
6842       assert(ArgLocs[I].getValNo() == ValNo &&
6843              "Expected additional location for by-value argument.");
6844 
6845       if (ArgLocs[I].isMemLoc()) {
6846         assert(LoadOffset < ByValSize && "Unexpected memloc for by-val arg.");
6847         const CCValAssign &ByValVA = ArgLocs[I++];
6848         ISD::ArgFlagsTy MemcpyFlags = Flags;
6849         // Only memcpy the bytes that don't pass in register.
6850         MemcpyFlags.setByValSize(ByValSize - LoadOffset);
6851         Chain = CallSeqStart = createMemcpyOutsideCallSeq(
6852             (LoadOffset != 0)
6853                 ? DAG.getObjectPtrOffset(dl, Arg, TypeSize::Fixed(LoadOffset))
6854                 : Arg,
6855             DAG.getObjectPtrOffset(dl, StackPtr,
6856                                    TypeSize::Fixed(ByValVA.getLocMemOffset())),
6857             CallSeqStart, MemcpyFlags, DAG, dl);
6858         continue;
6859       }
6860 
6861       // Initialize the final register residue.
6862       // Any residue that occupies the final by-val arg register must be
6863       // left-justified on AIX. Loads must be a power-of-2 size and cannot be
6864       // larger than the ByValSize. For example: a 7 byte by-val arg requires 4,
6865       // 2 and 1 byte loads.
6866       const unsigned ResidueBytes = ByValSize % PtrByteSize;
6867       assert(ResidueBytes != 0 && LoadOffset + PtrByteSize > ByValSize &&
6868              "Unexpected register residue for by-value argument.");
6869       SDValue ResidueVal;
6870       for (unsigned Bytes = 0; Bytes != ResidueBytes;) {
6871         const unsigned N = PowerOf2Floor(ResidueBytes - Bytes);
6872         const MVT VT =
6873             N == 1 ? MVT::i8
6874                    : ((N == 2) ? MVT::i16 : (N == 4 ? MVT::i32 : MVT::i64));
6875         SDValue Load = GetLoad(VT, LoadOffset);
6876         MemOpChains.push_back(Load.getValue(1));
6877         LoadOffset += N;
6878         Bytes += N;
6879 
6880         // By-val arguments are passed left-justfied in register.
6881         // Every load here needs to be shifted, otherwise a full register load
6882         // should have been used.
6883         assert(PtrVT.getSimpleVT().getSizeInBits() > (Bytes * 8) &&
6884                "Unexpected load emitted during handling of pass-by-value "
6885                "argument.");
6886         unsigned NumSHLBits = PtrVT.getSimpleVT().getSizeInBits() - (Bytes * 8);
6887         EVT ShiftAmountTy =
6888             getShiftAmountTy(Load->getValueType(0), DAG.getDataLayout());
6889         SDValue SHLAmt = DAG.getConstant(NumSHLBits, dl, ShiftAmountTy);
6890         SDValue ShiftedLoad =
6891             DAG.getNode(ISD::SHL, dl, Load.getValueType(), Load, SHLAmt);
6892         ResidueVal = ResidueVal ? DAG.getNode(ISD::OR, dl, PtrVT, ResidueVal,
6893                                               ShiftedLoad)
6894                                 : ShiftedLoad;
6895       }
6896 
6897       const CCValAssign &ByValVA = ArgLocs[I++];
6898       RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), ResidueVal));
6899       continue;
6900     }
6901 
6902     CCValAssign &VA = ArgLocs[I++];
6903     const MVT LocVT = VA.getLocVT();
6904     const MVT ValVT = VA.getValVT();
6905 
6906     if (VA.isMemLoc() && VA.getValVT().isVector())
6907       report_fatal_error(
6908           "passing vector parameters to the stack is unimplemented for AIX");
6909 
6910     switch (VA.getLocInfo()) {
6911     default:
6912       report_fatal_error("Unexpected argument extension type.");
6913     case CCValAssign::Full:
6914       break;
6915     case CCValAssign::ZExt:
6916       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
6917       break;
6918     case CCValAssign::SExt:
6919       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
6920       break;
6921     }
6922 
6923     if (VA.isRegLoc() && !VA.needsCustom()) {
6924       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
6925       continue;
6926     }
6927 
6928     if (VA.isMemLoc()) {
6929       SDValue PtrOff =
6930           DAG.getConstant(VA.getLocMemOffset(), dl, StackPtr.getValueType());
6931       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6932       MemOpChains.push_back(
6933           DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
6934 
6935       continue;
6936     }
6937 
6938     // Custom handling is used for GPR initializations for vararg float
6939     // arguments.
6940     assert(VA.isRegLoc() && VA.needsCustom() && CFlags.IsVarArg &&
6941            ValVT.isFloatingPoint() && LocVT.isInteger() &&
6942            "Unexpected register handling for calling convention.");
6943 
6944     SDValue ArgAsInt =
6945         DAG.getBitcast(MVT::getIntegerVT(ValVT.getSizeInBits()), Arg);
6946 
6947     if (Arg.getValueType().getStoreSize() == LocVT.getStoreSize())
6948       // f32 in 32-bit GPR
6949       // f64 in 64-bit GPR
6950       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgAsInt));
6951     else if (Arg.getValueType().getFixedSizeInBits() <
6952              LocVT.getFixedSizeInBits())
6953       // f32 in 64-bit GPR.
6954       RegsToPass.push_back(std::make_pair(
6955           VA.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, LocVT)));
6956     else {
6957       // f64 in two 32-bit GPRs
6958       // The 2 GPRs are marked custom and expected to be adjacent in ArgLocs.
6959       assert(Arg.getValueType() == MVT::f64 && CFlags.IsVarArg && !IsPPC64 &&
6960              "Unexpected custom register for argument!");
6961       CCValAssign &GPR1 = VA;
6962       SDValue MSWAsI64 = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgAsInt,
6963                                      DAG.getConstant(32, dl, MVT::i8));
6964       RegsToPass.push_back(std::make_pair(
6965           GPR1.getLocReg(), DAG.getZExtOrTrunc(MSWAsI64, dl, MVT::i32)));
6966 
6967       if (I != E) {
6968         // If only 1 GPR was available, there will only be one custom GPR and
6969         // the argument will also pass in memory.
6970         CCValAssign &PeekArg = ArgLocs[I];
6971         if (PeekArg.isRegLoc() && PeekArg.getValNo() == PeekArg.getValNo()) {
6972           assert(PeekArg.needsCustom() && "A second custom GPR is expected.");
6973           CCValAssign &GPR2 = ArgLocs[I++];
6974           RegsToPass.push_back(std::make_pair(
6975               GPR2.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, MVT::i32)));
6976         }
6977       }
6978     }
6979   }
6980 
6981   if (!MemOpChains.empty())
6982     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6983 
6984   // For indirect calls, we need to save the TOC base to the stack for
6985   // restoration after the call.
6986   if (CFlags.IsIndirect) {
6987     assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
6988     const MCRegister TOCBaseReg = Subtarget.getTOCPointerRegister();
6989     const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
6990     const MVT PtrVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
6991     const unsigned TOCSaveOffset =
6992         Subtarget.getFrameLowering()->getTOCSaveOffset();
6993 
6994     setUsesTOCBasePtr(DAG);
6995     SDValue Val = DAG.getCopyFromReg(Chain, dl, TOCBaseReg, PtrVT);
6996     SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
6997     SDValue StackPtr = DAG.getRegister(StackPtrReg, PtrVT);
6998     SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6999     Chain = DAG.getStore(
7000         Val.getValue(1), dl, Val, AddPtr,
7001         MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
7002   }
7003 
7004   // Build a sequence of copy-to-reg nodes chained together with token chain
7005   // and flag operands which copy the outgoing args into the appropriate regs.
7006   SDValue InFlag;
7007   for (auto Reg : RegsToPass) {
7008     Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, InFlag);
7009     InFlag = Chain.getValue(1);
7010   }
7011 
7012   const int SPDiff = 0;
7013   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
7014                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
7015 }
7016 
7017 bool
7018 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
7019                                   MachineFunction &MF, bool isVarArg,
7020                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
7021                                   LLVMContext &Context) const {
7022   SmallVector<CCValAssign, 16> RVLocs;
7023   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
7024   return CCInfo.CheckReturn(
7025       Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7026                 ? RetCC_PPC_Cold
7027                 : RetCC_PPC);
7028 }
7029 
7030 SDValue
7031 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
7032                                bool isVarArg,
7033                                const SmallVectorImpl<ISD::OutputArg> &Outs,
7034                                const SmallVectorImpl<SDValue> &OutVals,
7035                                const SDLoc &dl, SelectionDAG &DAG) const {
7036   SmallVector<CCValAssign, 16> RVLocs;
7037   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
7038                  *DAG.getContext());
7039   CCInfo.AnalyzeReturn(Outs,
7040                        (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7041                            ? RetCC_PPC_Cold
7042                            : RetCC_PPC);
7043 
7044   SDValue Flag;
7045   SmallVector<SDValue, 4> RetOps(1, Chain);
7046 
7047   // Copy the result values into the output registers.
7048   for (unsigned i = 0, RealResIdx = 0; i != RVLocs.size(); ++i, ++RealResIdx) {
7049     CCValAssign &VA = RVLocs[i];
7050     assert(VA.isRegLoc() && "Can only return in registers!");
7051 
7052     SDValue Arg = OutVals[RealResIdx];
7053 
7054     switch (VA.getLocInfo()) {
7055     default: llvm_unreachable("Unknown loc info!");
7056     case CCValAssign::Full: break;
7057     case CCValAssign::AExt:
7058       Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
7059       break;
7060     case CCValAssign::ZExt:
7061       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
7062       break;
7063     case CCValAssign::SExt:
7064       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
7065       break;
7066     }
7067     if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
7068       bool isLittleEndian = Subtarget.isLittleEndian();
7069       // Legalize ret f64 -> ret 2 x i32.
7070       SDValue SVal =
7071           DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7072                       DAG.getIntPtrConstant(isLittleEndian ? 0 : 1, dl));
7073       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7074       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7075       SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7076                          DAG.getIntPtrConstant(isLittleEndian ? 1 : 0, dl));
7077       Flag = Chain.getValue(1);
7078       VA = RVLocs[++i]; // skip ahead to next loc
7079       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7080     } else
7081       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
7082     Flag = Chain.getValue(1);
7083     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7084   }
7085 
7086   RetOps[0] = Chain;  // Update chain.
7087 
7088   // Add the flag if we have it.
7089   if (Flag.getNode())
7090     RetOps.push_back(Flag);
7091 
7092   return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
7093 }
7094 
7095 SDValue
7096 PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
7097                                                 SelectionDAG &DAG) const {
7098   SDLoc dl(Op);
7099 
7100   // Get the correct type for integers.
7101   EVT IntVT = Op.getValueType();
7102 
7103   // Get the inputs.
7104   SDValue Chain = Op.getOperand(0);
7105   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7106   // Build a DYNAREAOFFSET node.
7107   SDValue Ops[2] = {Chain, FPSIdx};
7108   SDVTList VTs = DAG.getVTList(IntVT);
7109   return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
7110 }
7111 
7112 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
7113                                              SelectionDAG &DAG) const {
7114   // When we pop the dynamic allocation we need to restore the SP link.
7115   SDLoc dl(Op);
7116 
7117   // Get the correct type for pointers.
7118   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7119 
7120   // Construct the stack pointer operand.
7121   bool isPPC64 = Subtarget.isPPC64();
7122   unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
7123   SDValue StackPtr = DAG.getRegister(SP, PtrVT);
7124 
7125   // Get the operands for the STACKRESTORE.
7126   SDValue Chain = Op.getOperand(0);
7127   SDValue SaveSP = Op.getOperand(1);
7128 
7129   // Load the old link SP.
7130   SDValue LoadLinkSP =
7131       DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());
7132 
7133   // Restore the stack pointer.
7134   Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
7135 
7136   // Store the old link SP.
7137   return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
7138 }
7139 
7140 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
7141   MachineFunction &MF = DAG.getMachineFunction();
7142   bool isPPC64 = Subtarget.isPPC64();
7143   EVT PtrVT = getPointerTy(MF.getDataLayout());
7144 
7145   // Get current frame pointer save index.  The users of this index will be
7146   // primarily DYNALLOC instructions.
7147   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7148   int RASI = FI->getReturnAddrSaveIndex();
7149 
7150   // If the frame pointer save index hasn't been defined yet.
7151   if (!RASI) {
7152     // Find out what the fix offset of the frame pointer save area.
7153     int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
7154     // Allocate the frame index for frame pointer save area.
7155     RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
7156     // Save the result.
7157     FI->setReturnAddrSaveIndex(RASI);
7158   }
7159   return DAG.getFrameIndex(RASI, PtrVT);
7160 }
7161 
7162 SDValue
7163 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
7164   MachineFunction &MF = DAG.getMachineFunction();
7165   bool isPPC64 = Subtarget.isPPC64();
7166   EVT PtrVT = getPointerTy(MF.getDataLayout());
7167 
7168   // Get current frame pointer save index.  The users of this index will be
7169   // primarily DYNALLOC instructions.
7170   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7171   int FPSI = FI->getFramePointerSaveIndex();
7172 
7173   // If the frame pointer save index hasn't been defined yet.
7174   if (!FPSI) {
7175     // Find out what the fix offset of the frame pointer save area.
7176     int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
7177     // Allocate the frame index for frame pointer save area.
7178     FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
7179     // Save the result.
7180     FI->setFramePointerSaveIndex(FPSI);
7181   }
7182   return DAG.getFrameIndex(FPSI, PtrVT);
7183 }
7184 
7185 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
7186                                                    SelectionDAG &DAG) const {
7187   MachineFunction &MF = DAG.getMachineFunction();
7188   // Get the inputs.
7189   SDValue Chain = Op.getOperand(0);
7190   SDValue Size  = Op.getOperand(1);
7191   SDLoc dl(Op);
7192 
7193   // Get the correct type for pointers.
7194   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7195   // Negate the size.
7196   SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
7197                                 DAG.getConstant(0, dl, PtrVT), Size);
7198   // Construct a node for the frame pointer save index.
7199   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7200   SDValue Ops[3] = { Chain, NegSize, FPSIdx };
7201   SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
7202   if (hasInlineStackProbe(MF))
7203     return DAG.getNode(PPCISD::PROBED_ALLOCA, dl, VTs, Ops);
7204   return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
7205 }
7206 
7207 SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
7208                                                      SelectionDAG &DAG) const {
7209   MachineFunction &MF = DAG.getMachineFunction();
7210 
7211   bool isPPC64 = Subtarget.isPPC64();
7212   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7213 
7214   int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false);
7215   return DAG.getFrameIndex(FI, PtrVT);
7216 }
7217 
7218 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
7219                                                SelectionDAG &DAG) const {
7220   SDLoc DL(Op);
7221   return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
7222                      DAG.getVTList(MVT::i32, MVT::Other),
7223                      Op.getOperand(0), Op.getOperand(1));
7224 }
7225 
7226 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
7227                                                 SelectionDAG &DAG) const {
7228   SDLoc DL(Op);
7229   return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
7230                      Op.getOperand(0), Op.getOperand(1));
7231 }
7232 
7233 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
7234   if (Op.getValueType().isVector())
7235     return LowerVectorLoad(Op, DAG);
7236 
7237   assert(Op.getValueType() == MVT::i1 &&
7238          "Custom lowering only for i1 loads");
7239 
7240   // First, load 8 bits into 32 bits, then truncate to 1 bit.
7241 
7242   SDLoc dl(Op);
7243   LoadSDNode *LD = cast<LoadSDNode>(Op);
7244 
7245   SDValue Chain = LD->getChain();
7246   SDValue BasePtr = LD->getBasePtr();
7247   MachineMemOperand *MMO = LD->getMemOperand();
7248 
7249   SDValue NewLD =
7250       DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain,
7251                      BasePtr, MVT::i8, MMO);
7252   SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);
7253 
7254   SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
7255   return DAG.getMergeValues(Ops, dl);
7256 }
7257 
7258 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
7259   if (Op.getOperand(1).getValueType().isVector())
7260     return LowerVectorStore(Op, DAG);
7261 
7262   assert(Op.getOperand(1).getValueType() == MVT::i1 &&
7263          "Custom lowering only for i1 stores");
7264 
7265   // First, zero extend to 32 bits, then use a truncating store to 8 bits.
7266 
7267   SDLoc dl(Op);
7268   StoreSDNode *ST = cast<StoreSDNode>(Op);
7269 
7270   SDValue Chain = ST->getChain();
7271   SDValue BasePtr = ST->getBasePtr();
7272   SDValue Value = ST->getValue();
7273   MachineMemOperand *MMO = ST->getMemOperand();
7274 
7275   Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()),
7276                       Value);
7277   return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
7278 }
7279 
7280 // FIXME: Remove this once the ANDI glue bug is fixed:
7281 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
7282   assert(Op.getValueType() == MVT::i1 &&
7283          "Custom lowering only for i1 results");
7284 
7285   SDLoc DL(Op);
7286   return DAG.getNode(PPCISD::ANDI_rec_1_GT_BIT, DL, MVT::i1, Op.getOperand(0));
7287 }
7288 
7289 SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op,
7290                                                SelectionDAG &DAG) const {
7291 
7292   // Implements a vector truncate that fits in a vector register as a shuffle.
7293   // We want to legalize vector truncates down to where the source fits in
7294   // a vector register (and target is therefore smaller than vector register
7295   // size).  At that point legalization will try to custom lower the sub-legal
7296   // result and get here - where we can contain the truncate as a single target
7297   // operation.
7298 
7299   // For example a trunc <2 x i16> to <2 x i8> could be visualized as follows:
7300   //   <MSB1|LSB1, MSB2|LSB2> to <LSB1, LSB2>
7301   //
7302   // We will implement it for big-endian ordering as this (where x denotes
7303   // undefined):
7304   //   < MSB1|LSB1, MSB2|LSB2, uu, uu, uu, uu, uu, uu> to
7305   //   < LSB1, LSB2, u, u, u, u, u, u, u, u, u, u, u, u, u, u>
7306   //
7307   // The same operation in little-endian ordering will be:
7308   //   <uu, uu, uu, uu, uu, uu, LSB2|MSB2, LSB1|MSB1> to
7309   //   <u, u, u, u, u, u, u, u, u, u, u, u, u, u, LSB2, LSB1>
7310 
7311   EVT TrgVT = Op.getValueType();
7312   assert(TrgVT.isVector() && "Vector type expected.");
7313   unsigned TrgNumElts = TrgVT.getVectorNumElements();
7314   EVT EltVT = TrgVT.getVectorElementType();
7315   if (!isOperationCustom(Op.getOpcode(), TrgVT) ||
7316       TrgVT.getSizeInBits() > 128 || !isPowerOf2_32(TrgNumElts) ||
7317       !isPowerOf2_32(EltVT.getSizeInBits()))
7318     return SDValue();
7319 
7320   SDValue N1 = Op.getOperand(0);
7321   EVT SrcVT = N1.getValueType();
7322   unsigned SrcSize = SrcVT.getSizeInBits();
7323   if (SrcSize > 256 ||
7324       !isPowerOf2_32(SrcVT.getVectorNumElements()) ||
7325       !isPowerOf2_32(SrcVT.getVectorElementType().getSizeInBits()))
7326     return SDValue();
7327   if (SrcSize == 256 && SrcVT.getVectorNumElements() < 2)
7328     return SDValue();
7329 
7330   unsigned WideNumElts = 128 / EltVT.getSizeInBits();
7331   EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
7332 
7333   SDLoc DL(Op);
7334   SDValue Op1, Op2;
7335   if (SrcSize == 256) {
7336     EVT VecIdxTy = getVectorIdxTy(DAG.getDataLayout());
7337     EVT SplitVT =
7338         N1.getValueType().getHalfNumVectorElementsVT(*DAG.getContext());
7339     unsigned SplitNumElts = SplitVT.getVectorNumElements();
7340     Op1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, N1,
7341                       DAG.getConstant(0, DL, VecIdxTy));
7342     Op2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, N1,
7343                       DAG.getConstant(SplitNumElts, DL, VecIdxTy));
7344   }
7345   else {
7346     Op1 = SrcSize == 128 ? N1 : widenVec(DAG, N1, DL);
7347     Op2 = DAG.getUNDEF(WideVT);
7348   }
7349 
7350   // First list the elements we want to keep.
7351   unsigned SizeMult = SrcSize / TrgVT.getSizeInBits();
7352   SmallVector<int, 16> ShuffV;
7353   if (Subtarget.isLittleEndian())
7354     for (unsigned i = 0; i < TrgNumElts; ++i)
7355       ShuffV.push_back(i * SizeMult);
7356   else
7357     for (unsigned i = 1; i <= TrgNumElts; ++i)
7358       ShuffV.push_back(i * SizeMult - 1);
7359 
7360   // Populate the remaining elements with undefs.
7361   for (unsigned i = TrgNumElts; i < WideNumElts; ++i)
7362     // ShuffV.push_back(i + WideNumElts);
7363     ShuffV.push_back(WideNumElts + 1);
7364 
7365   Op1 = DAG.getNode(ISD::BITCAST, DL, WideVT, Op1);
7366   Op2 = DAG.getNode(ISD::BITCAST, DL, WideVT, Op2);
7367   return DAG.getVectorShuffle(WideVT, DL, Op1, Op2, ShuffV);
7368 }
7369 
7370 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when
7371 /// possible.
7372 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
7373   // Not FP, or using SPE? Not a fsel.
7374   if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
7375       !Op.getOperand(2).getValueType().isFloatingPoint() || Subtarget.hasSPE())
7376     return Op;
7377 
7378   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
7379 
7380   EVT ResVT = Op.getValueType();
7381   EVT CmpVT = Op.getOperand(0).getValueType();
7382   SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
7383   SDValue TV  = Op.getOperand(2), FV  = Op.getOperand(3);
7384   SDLoc dl(Op);
7385   SDNodeFlags Flags = Op.getNode()->getFlags();
7386 
7387   // We have xsmaxcdp/xsmincdp which are OK to emit even in the
7388   // presence of infinities.
7389   if (Subtarget.hasP9Vector() && LHS == TV && RHS == FV) {
7390     switch (CC) {
7391     default:
7392       break;
7393     case ISD::SETOGT:
7394     case ISD::SETGT:
7395       return DAG.getNode(PPCISD::XSMAXCDP, dl, Op.getValueType(), LHS, RHS);
7396     case ISD::SETOLT:
7397     case ISD::SETLT:
7398       return DAG.getNode(PPCISD::XSMINCDP, dl, Op.getValueType(), LHS, RHS);
7399     }
7400   }
7401 
7402   // We might be able to do better than this under some circumstances, but in
7403   // general, fsel-based lowering of select is a finite-math-only optimization.
7404   // For more information, see section F.3 of the 2.06 ISA specification.
7405   // With ISA 3.0
7406   if ((!DAG.getTarget().Options.NoInfsFPMath && !Flags.hasNoInfs()) ||
7407       (!DAG.getTarget().Options.NoNaNsFPMath && !Flags.hasNoNaNs()))
7408     return Op;
7409 
7410   // If the RHS of the comparison is a 0.0, we don't need to do the
7411   // subtraction at all.
7412   SDValue Sel1;
7413   if (isFloatingPointZero(RHS))
7414     switch (CC) {
7415     default: break;       // SETUO etc aren't handled by fsel.
7416     case ISD::SETNE:
7417       std::swap(TV, FV);
7418       LLVM_FALLTHROUGH;
7419     case ISD::SETEQ:
7420       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
7421         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7422       Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
7423       if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
7424         Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
7425       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7426                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
7427     case ISD::SETULT:
7428     case ISD::SETLT:
7429       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
7430       LLVM_FALLTHROUGH;
7431     case ISD::SETOGE:
7432     case ISD::SETGE:
7433       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
7434         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7435       return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
7436     case ISD::SETUGT:
7437     case ISD::SETGT:
7438       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
7439       LLVM_FALLTHROUGH;
7440     case ISD::SETOLE:
7441     case ISD::SETLE:
7442       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
7443         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7444       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7445                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
7446     }
7447 
7448   SDValue Cmp;
7449   switch (CC) {
7450   default: break;       // SETUO etc aren't handled by fsel.
7451   case ISD::SETNE:
7452     std::swap(TV, FV);
7453     LLVM_FALLTHROUGH;
7454   case ISD::SETEQ:
7455     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
7456     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7457       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7458     Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
7459     if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
7460       Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
7461     return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7462                        DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
7463   case ISD::SETULT:
7464   case ISD::SETLT:
7465     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
7466     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7467       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7468     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
7469   case ISD::SETOGE:
7470   case ISD::SETGE:
7471     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
7472     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7473       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7474     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
7475   case ISD::SETUGT:
7476   case ISD::SETGT:
7477     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
7478     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7479       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7480     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
7481   case ISD::SETOLE:
7482   case ISD::SETLE:
7483     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
7484     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7485       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7486     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
7487   }
7488   return Op;
7489 }
7490 
7491 static unsigned getPPCStrictOpcode(unsigned Opc) {
7492   switch (Opc) {
7493   default:
7494     llvm_unreachable("No strict version of this opcode!");
7495   case PPCISD::FCTIDZ:
7496     return PPCISD::STRICT_FCTIDZ;
7497   case PPCISD::FCTIWZ:
7498     return PPCISD::STRICT_FCTIWZ;
7499   case PPCISD::FCTIDUZ:
7500     return PPCISD::STRICT_FCTIDUZ;
7501   case PPCISD::FCTIWUZ:
7502     return PPCISD::STRICT_FCTIWUZ;
7503   case PPCISD::FCFID:
7504     return PPCISD::STRICT_FCFID;
7505   case PPCISD::FCFIDU:
7506     return PPCISD::STRICT_FCFIDU;
7507   case PPCISD::FCFIDS:
7508     return PPCISD::STRICT_FCFIDS;
7509   case PPCISD::FCFIDUS:
7510     return PPCISD::STRICT_FCFIDUS;
7511   }
7512 }
7513 
7514 static SDValue convertFPToInt(SDValue Op, SelectionDAG &DAG,
7515                               const PPCSubtarget &Subtarget) {
7516   SDLoc dl(Op);
7517   bool IsStrict = Op->isStrictFPOpcode();
7518   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
7519                   Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
7520 
7521   // TODO: Any other flags to propagate?
7522   SDNodeFlags Flags;
7523   Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
7524 
7525   // For strict nodes, source is the second operand.
7526   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
7527   SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
7528   assert(Src.getValueType().isFloatingPoint());
7529   if (Src.getValueType() == MVT::f32) {
7530     if (IsStrict) {
7531       Src =
7532           DAG.getNode(ISD::STRICT_FP_EXTEND, dl,
7533                       DAG.getVTList(MVT::f64, MVT::Other), {Chain, Src}, Flags);
7534       Chain = Src.getValue(1);
7535     } else
7536       Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
7537   }
7538   SDValue Conv;
7539   unsigned Opc = ISD::DELETED_NODE;
7540   switch (Op.getSimpleValueType().SimpleTy) {
7541   default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
7542   case MVT::i32:
7543     Opc = IsSigned ? PPCISD::FCTIWZ
7544                    : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ);
7545     break;
7546   case MVT::i64:
7547     assert((IsSigned || Subtarget.hasFPCVT()) &&
7548            "i64 FP_TO_UINT is supported only with FPCVT");
7549     Opc = IsSigned ? PPCISD::FCTIDZ : PPCISD::FCTIDUZ;
7550   }
7551   if (IsStrict) {
7552     Opc = getPPCStrictOpcode(Opc);
7553     Conv = DAG.getNode(Opc, dl, DAG.getVTList(MVT::f64, MVT::Other),
7554                        {Chain, Src}, Flags);
7555   } else {
7556     Conv = DAG.getNode(Opc, dl, MVT::f64, Src);
7557   }
7558   return Conv;
7559 }
7560 
7561 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
7562                                                SelectionDAG &DAG,
7563                                                const SDLoc &dl) const {
7564   SDValue Tmp = convertFPToInt(Op, DAG, Subtarget);
7565   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
7566                   Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
7567   bool IsStrict = Op->isStrictFPOpcode();
7568 
7569   // Convert the FP value to an int value through memory.
7570   bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() &&
7571                   (IsSigned || Subtarget.hasFPCVT());
7572   SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64);
7573   int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
7574   MachinePointerInfo MPI =
7575       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
7576 
7577   // Emit a store to the stack slot.
7578   SDValue Chain = IsStrict ? Tmp.getValue(1) : DAG.getEntryNode();
7579   Align Alignment(DAG.getEVTAlign(Tmp.getValueType()));
7580   if (i32Stack) {
7581     MachineFunction &MF = DAG.getMachineFunction();
7582     Alignment = Align(4);
7583     MachineMemOperand *MMO =
7584         MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Alignment);
7585     SDValue Ops[] = { Chain, Tmp, FIPtr };
7586     Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
7587               DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
7588   } else
7589     Chain = DAG.getStore(Chain, dl, Tmp, FIPtr, MPI, Alignment);
7590 
7591   // Result is a load from the stack slot.  If loading 4 bytes, make sure to
7592   // add in a bias on big endian.
7593   if (Op.getValueType() == MVT::i32 && !i32Stack) {
7594     FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
7595                         DAG.getConstant(4, dl, FIPtr.getValueType()));
7596     MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4);
7597   }
7598 
7599   RLI.Chain = Chain;
7600   RLI.Ptr = FIPtr;
7601   RLI.MPI = MPI;
7602   RLI.Alignment = Alignment;
7603 }
7604 
7605 /// Custom lowers floating point to integer conversions to use
7606 /// the direct move instructions available in ISA 2.07 to avoid the
7607 /// need for load/store combinations.
7608 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op,
7609                                                     SelectionDAG &DAG,
7610                                                     const SDLoc &dl) const {
7611   SDValue Conv = convertFPToInt(Op, DAG, Subtarget);
7612   SDValue Mov = DAG.getNode(PPCISD::MFVSR, dl, Op.getValueType(), Conv);
7613   if (Op->isStrictFPOpcode())
7614     return DAG.getMergeValues({Mov, Conv.getValue(1)}, dl);
7615   else
7616     return Mov;
7617 }
7618 
7619 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
7620                                           const SDLoc &dl) const {
7621   bool IsStrict = Op->isStrictFPOpcode();
7622   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
7623                   Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
7624   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
7625   EVT SrcVT = Src.getValueType();
7626   EVT DstVT = Op.getValueType();
7627 
7628   // FP to INT conversions are legal for f128.
7629   if (SrcVT == MVT::f128)
7630     return Subtarget.hasP9Vector() ? Op : SDValue();
7631 
7632   // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
7633   // PPC (the libcall is not available).
7634   if (SrcVT == MVT::ppcf128) {
7635     if (DstVT == MVT::i32) {
7636       // TODO: Conservatively pass only nofpexcept flag here. Need to check and
7637       // set other fast-math flags to FP operations in both strict and
7638       // non-strict cases. (FP_TO_SINT, FSUB)
7639       SDNodeFlags Flags;
7640       Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
7641 
7642       if (IsSigned) {
7643         SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Src,
7644                                  DAG.getIntPtrConstant(0, dl));
7645         SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Src,
7646                                  DAG.getIntPtrConstant(1, dl));
7647 
7648         // Add the two halves of the long double in round-to-zero mode, and use
7649         // a smaller FP_TO_SINT.
7650         if (IsStrict) {
7651           SDValue Res = DAG.getNode(PPCISD::STRICT_FADDRTZ, dl,
7652                                     DAG.getVTList(MVT::f64, MVT::Other),
7653                                     {Op.getOperand(0), Lo, Hi}, Flags);
7654           return DAG.getNode(ISD::STRICT_FP_TO_SINT, dl,
7655                              DAG.getVTList(MVT::i32, MVT::Other),
7656                              {Res.getValue(1), Res}, Flags);
7657         } else {
7658           SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi);
7659           return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res);
7660         }
7661       } else {
7662         const uint64_t TwoE31[] = {0x41e0000000000000LL, 0};
7663         APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31));
7664         SDValue Cst = DAG.getConstantFP(APF, dl, SrcVT);
7665         SDValue SignMask = DAG.getConstant(0x80000000, dl, DstVT);
7666         if (IsStrict) {
7667           // Sel = Src < 0x80000000
7668           // FltOfs = select Sel, 0.0, 0x80000000
7669           // IntOfs = select Sel, 0, 0x80000000
7670           // Result = fp_to_sint(Src - FltOfs) ^ IntOfs
7671           SDValue Chain = Op.getOperand(0);
7672           EVT SetCCVT =
7673               getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), SrcVT);
7674           EVT DstSetCCVT =
7675               getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), DstVT);
7676           SDValue Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT,
7677                                      Chain, true);
7678           Chain = Sel.getValue(1);
7679 
7680           SDValue FltOfs = DAG.getSelect(
7681               dl, SrcVT, Sel, DAG.getConstantFP(0.0, dl, SrcVT), Cst);
7682           Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT);
7683 
7684           SDValue Val = DAG.getNode(ISD::STRICT_FSUB, dl,
7685                                     DAG.getVTList(SrcVT, MVT::Other),
7686                                     {Chain, Src, FltOfs}, Flags);
7687           Chain = Val.getValue(1);
7688           SDValue SInt = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl,
7689                                      DAG.getVTList(DstVT, MVT::Other),
7690                                      {Chain, Val}, Flags);
7691           Chain = SInt.getValue(1);
7692           SDValue IntOfs = DAG.getSelect(
7693               dl, DstVT, Sel, DAG.getConstant(0, dl, DstVT), SignMask);
7694           SDValue Result = DAG.getNode(ISD::XOR, dl, DstVT, SInt, IntOfs);
7695           return DAG.getMergeValues({Result, Chain}, dl);
7696         } else {
7697           // X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X
7698           // FIXME: generated code sucks.
7699           SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128, Src, Cst);
7700           True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True);
7701           True = DAG.getNode(ISD::ADD, dl, MVT::i32, True, SignMask);
7702           SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
7703           return DAG.getSelectCC(dl, Src, Cst, True, False, ISD::SETGE);
7704         }
7705       }
7706     }
7707 
7708     return SDValue();
7709   }
7710 
7711   if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
7712     return LowerFP_TO_INTDirectMove(Op, DAG, dl);
7713 
7714   ReuseLoadInfo RLI;
7715   LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
7716 
7717   return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI,
7718                      RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
7719 }
7720 
7721 // We're trying to insert a regular store, S, and then a load, L. If the
7722 // incoming value, O, is a load, we might just be able to have our load use the
7723 // address used by O. However, we don't know if anything else will store to
7724 // that address before we can load from it. To prevent this situation, we need
7725 // to insert our load, L, into the chain as a peer of O. To do this, we give L
7726 // the same chain operand as O, we create a token factor from the chain results
7727 // of O and L, and we replace all uses of O's chain result with that token
7728 // factor (see spliceIntoChain below for this last part).
7729 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
7730                                             ReuseLoadInfo &RLI,
7731                                             SelectionDAG &DAG,
7732                                             ISD::LoadExtType ET) const {
7733   // Conservatively skip reusing for constrained FP nodes.
7734   if (Op->isStrictFPOpcode())
7735     return false;
7736 
7737   SDLoc dl(Op);
7738   bool ValidFPToUint = Op.getOpcode() == ISD::FP_TO_UINT &&
7739                        (Subtarget.hasFPCVT() || Op.getValueType() == MVT::i32);
7740   if (ET == ISD::NON_EXTLOAD &&
7741       (ValidFPToUint || Op.getOpcode() == ISD::FP_TO_SINT) &&
7742       isOperationLegalOrCustom(Op.getOpcode(),
7743                                Op.getOperand(0).getValueType())) {
7744 
7745     LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
7746     return true;
7747   }
7748 
7749   LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
7750   if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
7751       LD->isNonTemporal())
7752     return false;
7753   if (LD->getMemoryVT() != MemVT)
7754     return false;
7755 
7756   // If the result of the load is an illegal type, then we can't build a
7757   // valid chain for reuse since the legalised loads and token factor node that
7758   // ties the legalised loads together uses a different output chain then the
7759   // illegal load.
7760   if (!isTypeLegal(LD->getValueType(0)))
7761     return false;
7762 
7763   RLI.Ptr = LD->getBasePtr();
7764   if (LD->isIndexed() && !LD->getOffset().isUndef()) {
7765     assert(LD->getAddressingMode() == ISD::PRE_INC &&
7766            "Non-pre-inc AM on PPC?");
7767     RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
7768                           LD->getOffset());
7769   }
7770 
7771   RLI.Chain = LD->getChain();
7772   RLI.MPI = LD->getPointerInfo();
7773   RLI.IsDereferenceable = LD->isDereferenceable();
7774   RLI.IsInvariant = LD->isInvariant();
7775   RLI.Alignment = LD->getAlign();
7776   RLI.AAInfo = LD->getAAInfo();
7777   RLI.Ranges = LD->getRanges();
7778 
7779   RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
7780   return true;
7781 }
7782 
7783 // Given the head of the old chain, ResChain, insert a token factor containing
7784 // it and NewResChain, and make users of ResChain now be users of that token
7785 // factor.
7786 // TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead.
7787 void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
7788                                         SDValue NewResChain,
7789                                         SelectionDAG &DAG) const {
7790   if (!ResChain)
7791     return;
7792 
7793   SDLoc dl(NewResChain);
7794 
7795   SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
7796                            NewResChain, DAG.getUNDEF(MVT::Other));
7797   assert(TF.getNode() != NewResChain.getNode() &&
7798          "A new TF really is required here");
7799 
7800   DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
7801   DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
7802 }
7803 
7804 /// Analyze profitability of direct move
7805 /// prefer float load to int load plus direct move
7806 /// when there is no integer use of int load
7807 bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
7808   SDNode *Origin = Op.getOperand(0).getNode();
7809   if (Origin->getOpcode() != ISD::LOAD)
7810     return true;
7811 
7812   // If there is no LXSIBZX/LXSIHZX, like Power8,
7813   // prefer direct move if the memory size is 1 or 2 bytes.
7814   MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand();
7815   if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2)
7816     return true;
7817 
7818   for (SDNode::use_iterator UI = Origin->use_begin(),
7819                             UE = Origin->use_end();
7820        UI != UE; ++UI) {
7821 
7822     // Only look at the users of the loaded value.
7823     if (UI.getUse().get().getResNo() != 0)
7824       continue;
7825 
7826     if (UI->getOpcode() != ISD::SINT_TO_FP &&
7827         UI->getOpcode() != ISD::UINT_TO_FP &&
7828         UI->getOpcode() != ISD::STRICT_SINT_TO_FP &&
7829         UI->getOpcode() != ISD::STRICT_UINT_TO_FP)
7830       return true;
7831   }
7832 
7833   return false;
7834 }
7835 
7836 static SDValue convertIntToFP(SDValue Op, SDValue Src, SelectionDAG &DAG,
7837                               const PPCSubtarget &Subtarget,
7838                               SDValue Chain = SDValue()) {
7839   bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP ||
7840                   Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
7841   SDLoc dl(Op);
7842 
7843   // TODO: Any other flags to propagate?
7844   SDNodeFlags Flags;
7845   Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
7846 
7847   // If we have FCFIDS, then use it when converting to single-precision.
7848   // Otherwise, convert to double-precision and then round.
7849   bool IsSingle = Op.getValueType() == MVT::f32 && Subtarget.hasFPCVT();
7850   unsigned ConvOpc = IsSingle ? (IsSigned ? PPCISD::FCFIDS : PPCISD::FCFIDUS)
7851                               : (IsSigned ? PPCISD::FCFID : PPCISD::FCFIDU);
7852   EVT ConvTy = IsSingle ? MVT::f32 : MVT::f64;
7853   if (Op->isStrictFPOpcode()) {
7854     if (!Chain)
7855       Chain = Op.getOperand(0);
7856     return DAG.getNode(getPPCStrictOpcode(ConvOpc), dl,
7857                        DAG.getVTList(ConvTy, MVT::Other), {Chain, Src}, Flags);
7858   } else
7859     return DAG.getNode(ConvOpc, dl, ConvTy, Src);
7860 }
7861 
7862 /// Custom lowers integer to floating point conversions to use
7863 /// the direct move instructions available in ISA 2.07 to avoid the
7864 /// need for load/store combinations.
7865 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
7866                                                     SelectionDAG &DAG,
7867                                                     const SDLoc &dl) const {
7868   assert((Op.getValueType() == MVT::f32 ||
7869           Op.getValueType() == MVT::f64) &&
7870          "Invalid floating point type as target of conversion");
7871   assert(Subtarget.hasFPCVT() &&
7872          "Int to FP conversions with direct moves require FPCVT");
7873   SDValue Src = Op.getOperand(Op->isStrictFPOpcode() ? 1 : 0);
7874   bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
7875   bool Signed = Op.getOpcode() == ISD::SINT_TO_FP ||
7876                 Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
7877   unsigned MovOpc = (WordInt && !Signed) ? PPCISD::MTVSRZ : PPCISD::MTVSRA;
7878   SDValue Mov = DAG.getNode(MovOpc, dl, MVT::f64, Src);
7879   return convertIntToFP(Op, Mov, DAG, Subtarget);
7880 }
7881 
7882 static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl) {
7883 
7884   EVT VecVT = Vec.getValueType();
7885   assert(VecVT.isVector() && "Expected a vector type.");
7886   assert(VecVT.getSizeInBits() < 128 && "Vector is already full width.");
7887 
7888   EVT EltVT = VecVT.getVectorElementType();
7889   unsigned WideNumElts = 128 / EltVT.getSizeInBits();
7890   EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
7891 
7892   unsigned NumConcat = WideNumElts / VecVT.getVectorNumElements();
7893   SmallVector<SDValue, 16> Ops(NumConcat);
7894   Ops[0] = Vec;
7895   SDValue UndefVec = DAG.getUNDEF(VecVT);
7896   for (unsigned i = 1; i < NumConcat; ++i)
7897     Ops[i] = UndefVec;
7898 
7899   return DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Ops);
7900 }
7901 
7902 SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
7903                                                 const SDLoc &dl) const {
7904   bool IsStrict = Op->isStrictFPOpcode();
7905   unsigned Opc = Op.getOpcode();
7906   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
7907   assert((Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP ||
7908           Opc == ISD::STRICT_UINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP) &&
7909          "Unexpected conversion type");
7910   assert((Op.getValueType() == MVT::v2f64 || Op.getValueType() == MVT::v4f32) &&
7911          "Supports conversions to v2f64/v4f32 only.");
7912 
7913   // TODO: Any other flags to propagate?
7914   SDNodeFlags Flags;
7915   Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
7916 
7917   bool SignedConv = Opc == ISD::SINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP;
7918   bool FourEltRes = Op.getValueType() == MVT::v4f32;
7919 
7920   SDValue Wide = widenVec(DAG, Src, dl);
7921   EVT WideVT = Wide.getValueType();
7922   unsigned WideNumElts = WideVT.getVectorNumElements();
7923   MVT IntermediateVT = FourEltRes ? MVT::v4i32 : MVT::v2i64;
7924 
7925   SmallVector<int, 16> ShuffV;
7926   for (unsigned i = 0; i < WideNumElts; ++i)
7927     ShuffV.push_back(i + WideNumElts);
7928 
7929   int Stride = FourEltRes ? WideNumElts / 4 : WideNumElts / 2;
7930   int SaveElts = FourEltRes ? 4 : 2;
7931   if (Subtarget.isLittleEndian())
7932     for (int i = 0; i < SaveElts; i++)
7933       ShuffV[i * Stride] = i;
7934   else
7935     for (int i = 1; i <= SaveElts; i++)
7936       ShuffV[i * Stride - 1] = i - 1;
7937 
7938   SDValue ShuffleSrc2 =
7939       SignedConv ? DAG.getUNDEF(WideVT) : DAG.getConstant(0, dl, WideVT);
7940   SDValue Arrange = DAG.getVectorShuffle(WideVT, dl, Wide, ShuffleSrc2, ShuffV);
7941 
7942   SDValue Extend;
7943   if (SignedConv) {
7944     Arrange = DAG.getBitcast(IntermediateVT, Arrange);
7945     EVT ExtVT = Src.getValueType();
7946     if (Subtarget.hasP9Altivec())
7947       ExtVT = EVT::getVectorVT(*DAG.getContext(), WideVT.getVectorElementType(),
7948                                IntermediateVT.getVectorNumElements());
7949 
7950     Extend = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, IntermediateVT, Arrange,
7951                          DAG.getValueType(ExtVT));
7952   } else
7953     Extend = DAG.getNode(ISD::BITCAST, dl, IntermediateVT, Arrange);
7954 
7955   if (IsStrict)
7956     return DAG.getNode(Opc, dl, DAG.getVTList(Op.getValueType(), MVT::Other),
7957                        {Op.getOperand(0), Extend}, Flags);
7958 
7959   return DAG.getNode(Opc, dl, Op.getValueType(), Extend);
7960 }
7961 
7962 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
7963                                           SelectionDAG &DAG) const {
7964   SDLoc dl(Op);
7965   bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP ||
7966                   Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
7967   bool IsStrict = Op->isStrictFPOpcode();
7968   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
7969   SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
7970 
7971   // TODO: Any other flags to propagate?
7972   SDNodeFlags Flags;
7973   Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
7974 
7975   EVT InVT = Src.getValueType();
7976   EVT OutVT = Op.getValueType();
7977   if (OutVT.isVector() && OutVT.isFloatingPoint() &&
7978       isOperationCustom(Op.getOpcode(), InVT))
7979     return LowerINT_TO_FPVector(Op, DAG, dl);
7980 
7981   // Conversions to f128 are legal.
7982   if (Op.getValueType() == MVT::f128)
7983     return Subtarget.hasP9Vector() ? Op : SDValue();
7984 
7985   // Don't handle ppc_fp128 here; let it be lowered to a libcall.
7986   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
7987     return SDValue();
7988 
7989   if (Src.getValueType() == MVT::i1) {
7990     SDValue Sel = DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Src,
7991                               DAG.getConstantFP(1.0, dl, Op.getValueType()),
7992                               DAG.getConstantFP(0.0, dl, Op.getValueType()));
7993     if (IsStrict)
7994       return DAG.getMergeValues({Sel, Chain}, dl);
7995     else
7996       return Sel;
7997   }
7998 
7999   // If we have direct moves, we can do all the conversion, skip the store/load
8000   // however, without FPCVT we can't do most conversions.
8001   if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
8002       Subtarget.isPPC64() && Subtarget.hasFPCVT())
8003     return LowerINT_TO_FPDirectMove(Op, DAG, dl);
8004 
8005   assert((IsSigned || Subtarget.hasFPCVT()) &&
8006          "UINT_TO_FP is supported only with FPCVT");
8007 
8008   if (Src.getValueType() == MVT::i64) {
8009     SDValue SINT = Src;
8010     // When converting to single-precision, we actually need to convert
8011     // to double-precision first and then round to single-precision.
8012     // To avoid double-rounding effects during that operation, we have
8013     // to prepare the input operand.  Bits that might be truncated when
8014     // converting to double-precision are replaced by a bit that won't
8015     // be lost at this stage, but is below the single-precision rounding
8016     // position.
8017     //
8018     // However, if -enable-unsafe-fp-math is in effect, accept double
8019     // rounding to avoid the extra overhead.
8020     if (Op.getValueType() == MVT::f32 &&
8021         !Subtarget.hasFPCVT() &&
8022         !DAG.getTarget().Options.UnsafeFPMath) {
8023 
8024       // Twiddle input to make sure the low 11 bits are zero.  (If this
8025       // is the case, we are guaranteed the value will fit into the 53 bit
8026       // mantissa of an IEEE double-precision value without rounding.)
8027       // If any of those low 11 bits were not zero originally, make sure
8028       // bit 12 (value 2048) is set instead, so that the final rounding
8029       // to single-precision gets the correct result.
8030       SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64,
8031                                   SINT, DAG.getConstant(2047, dl, MVT::i64));
8032       Round = DAG.getNode(ISD::ADD, dl, MVT::i64,
8033                           Round, DAG.getConstant(2047, dl, MVT::i64));
8034       Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT);
8035       Round = DAG.getNode(ISD::AND, dl, MVT::i64,
8036                           Round, DAG.getConstant(-2048, dl, MVT::i64));
8037 
8038       // However, we cannot use that value unconditionally: if the magnitude
8039       // of the input value is small, the bit-twiddling we did above might
8040       // end up visibly changing the output.  Fortunately, in that case, we
8041       // don't need to twiddle bits since the original input will convert
8042       // exactly to double-precision floating-point already.  Therefore,
8043       // construct a conditional to use the original value if the top 11
8044       // bits are all sign-bit copies, and use the rounded value computed
8045       // above otherwise.
8046       SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64,
8047                                  SINT, DAG.getConstant(53, dl, MVT::i32));
8048       Cond = DAG.getNode(ISD::ADD, dl, MVT::i64,
8049                          Cond, DAG.getConstant(1, dl, MVT::i64));
8050       Cond = DAG.getSetCC(
8051           dl,
8052           getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
8053           Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT);
8054 
8055       SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT);
8056     }
8057 
8058     ReuseLoadInfo RLI;
8059     SDValue Bits;
8060 
8061     MachineFunction &MF = DAG.getMachineFunction();
8062     if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
8063       Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI,
8064                          RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
8065       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8066     } else if (Subtarget.hasLFIWAX() &&
8067                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) {
8068       MachineMemOperand *MMO =
8069         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8070                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8071       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8072       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl,
8073                                      DAG.getVTList(MVT::f64, MVT::Other),
8074                                      Ops, MVT::i32, MMO);
8075       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8076     } else if (Subtarget.hasFPCVT() &&
8077                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) {
8078       MachineMemOperand *MMO =
8079         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8080                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8081       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8082       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl,
8083                                      DAG.getVTList(MVT::f64, MVT::Other),
8084                                      Ops, MVT::i32, MMO);
8085       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8086     } else if (((Subtarget.hasLFIWAX() &&
8087                  SINT.getOpcode() == ISD::SIGN_EXTEND) ||
8088                 (Subtarget.hasFPCVT() &&
8089                  SINT.getOpcode() == ISD::ZERO_EXTEND)) &&
8090                SINT.getOperand(0).getValueType() == MVT::i32) {
8091       MachineFrameInfo &MFI = MF.getFrameInfo();
8092       EVT PtrVT = getPointerTy(DAG.getDataLayout());
8093 
8094       int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
8095       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8096 
8097       SDValue Store = DAG.getStore(Chain, dl, SINT.getOperand(0), FIdx,
8098                                    MachinePointerInfo::getFixedStack(
8099                                        DAG.getMachineFunction(), FrameIdx));
8100       Chain = Store;
8101 
8102       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8103              "Expected an i32 store");
8104 
8105       RLI.Ptr = FIdx;
8106       RLI.Chain = Chain;
8107       RLI.MPI =
8108           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8109       RLI.Alignment = Align(4);
8110 
8111       MachineMemOperand *MMO =
8112         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8113                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8114       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8115       Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ?
8116                                      PPCISD::LFIWZX : PPCISD::LFIWAX,
8117                                      dl, DAG.getVTList(MVT::f64, MVT::Other),
8118                                      Ops, MVT::i32, MMO);
8119       Chain = Bits.getValue(1);
8120     } else
8121       Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);
8122 
8123     SDValue FP = convertIntToFP(Op, Bits, DAG, Subtarget, Chain);
8124     if (IsStrict)
8125       Chain = FP.getValue(1);
8126 
8127     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
8128       if (IsStrict)
8129         FP = DAG.getNode(ISD::STRICT_FP_ROUND, dl,
8130                          DAG.getVTList(MVT::f32, MVT::Other),
8131                          {Chain, FP, DAG.getIntPtrConstant(0, dl)}, Flags);
8132       else
8133         FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
8134                          DAG.getIntPtrConstant(0, dl));
8135     }
8136     return FP;
8137   }
8138 
8139   assert(Src.getValueType() == MVT::i32 &&
8140          "Unhandled INT_TO_FP type in custom expander!");
8141   // Since we only generate this in 64-bit mode, we can take advantage of
8142   // 64-bit registers.  In particular, sign extend the input value into the
8143   // 64-bit register with extsw, store the WHOLE 64-bit value into the stack
8144   // then lfd it and fcfid it.
8145   MachineFunction &MF = DAG.getMachineFunction();
8146   MachineFrameInfo &MFI = MF.getFrameInfo();
8147   EVT PtrVT = getPointerTy(MF.getDataLayout());
8148 
8149   SDValue Ld;
8150   if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
8151     ReuseLoadInfo RLI;
8152     bool ReusingLoad;
8153     if (!(ReusingLoad = canReuseLoadAddress(Src, MVT::i32, RLI, DAG))) {
8154       int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
8155       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8156 
8157       SDValue Store = DAG.getStore(Chain, dl, Src, FIdx,
8158                                    MachinePointerInfo::getFixedStack(
8159                                        DAG.getMachineFunction(), FrameIdx));
8160       Chain = Store;
8161 
8162       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8163              "Expected an i32 store");
8164 
8165       RLI.Ptr = FIdx;
8166       RLI.Chain = Chain;
8167       RLI.MPI =
8168           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8169       RLI.Alignment = Align(4);
8170     }
8171 
8172     MachineMemOperand *MMO =
8173       MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8174                               RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8175     SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8176     Ld = DAG.getMemIntrinsicNode(IsSigned ? PPCISD::LFIWAX : PPCISD::LFIWZX, dl,
8177                                  DAG.getVTList(MVT::f64, MVT::Other), Ops,
8178                                  MVT::i32, MMO);
8179     Chain = Ld.getValue(1);
8180     if (ReusingLoad)
8181       spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
8182   } else {
8183     assert(Subtarget.isPPC64() &&
8184            "i32->FP without LFIWAX supported only on PPC64");
8185 
8186     int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
8187     SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8188 
8189     SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, Src);
8190 
8191     // STD the extended value into the stack slot.
8192     SDValue Store = DAG.getStore(
8193         Chain, dl, Ext64, FIdx,
8194         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8195     Chain = Store;
8196 
8197     // Load the value as a double.
8198     Ld = DAG.getLoad(
8199         MVT::f64, dl, Chain, FIdx,
8200         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8201     Chain = Ld.getValue(1);
8202   }
8203 
8204   // FCFID it and return it.
8205   SDValue FP = convertIntToFP(Op, Ld, DAG, Subtarget, Chain);
8206   if (IsStrict)
8207     Chain = FP.getValue(1);
8208   if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
8209     if (IsStrict)
8210       FP = DAG.getNode(ISD::STRICT_FP_ROUND, dl,
8211                        DAG.getVTList(MVT::f32, MVT::Other),
8212                        {Chain, FP, DAG.getIntPtrConstant(0, dl)}, Flags);
8213     else
8214       FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
8215                        DAG.getIntPtrConstant(0, dl));
8216   }
8217   return FP;
8218 }
8219 
8220 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
8221                                             SelectionDAG &DAG) const {
8222   SDLoc dl(Op);
8223   /*
8224    The rounding mode is in bits 30:31 of FPSR, and has the following
8225    settings:
8226      00 Round to nearest
8227      01 Round to 0
8228      10 Round to +inf
8229      11 Round to -inf
8230 
8231   FLT_ROUNDS, on the other hand, expects the following:
8232     -1 Undefined
8233      0 Round to 0
8234      1 Round to nearest
8235      2 Round to +inf
8236      3 Round to -inf
8237 
8238   To perform the conversion, we do:
8239     ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
8240   */
8241 
8242   MachineFunction &MF = DAG.getMachineFunction();
8243   EVT VT = Op.getValueType();
8244   EVT PtrVT = getPointerTy(MF.getDataLayout());
8245 
8246   // Save FP Control Word to register
8247   SDValue Chain = Op.getOperand(0);
8248   SDValue MFFS = DAG.getNode(PPCISD::MFFS, dl, {MVT::f64, MVT::Other}, Chain);
8249   Chain = MFFS.getValue(1);
8250 
8251   SDValue CWD;
8252   if (isTypeLegal(MVT::i64)) {
8253     CWD = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
8254                       DAG.getNode(ISD::BITCAST, dl, MVT::i64, MFFS));
8255   } else {
8256     // Save FP register to stack slot
8257     int SSFI = MF.getFrameInfo().CreateStackObject(8, Align(8), false);
8258     SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
8259     Chain = DAG.getStore(Chain, dl, MFFS, StackSlot, MachinePointerInfo());
8260 
8261     // Load FP Control Word from low 32 bits of stack slot.
8262     assert(hasBigEndianPartOrdering(MVT::i64, MF.getDataLayout()) &&
8263            "Stack slot adjustment is valid only on big endian subtargets!");
8264     SDValue Four = DAG.getConstant(4, dl, PtrVT);
8265     SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
8266     CWD = DAG.getLoad(MVT::i32, dl, Chain, Addr, MachinePointerInfo());
8267     Chain = CWD.getValue(1);
8268   }
8269 
8270   // Transform as necessary
8271   SDValue CWD1 =
8272     DAG.getNode(ISD::AND, dl, MVT::i32,
8273                 CWD, DAG.getConstant(3, dl, MVT::i32));
8274   SDValue CWD2 =
8275     DAG.getNode(ISD::SRL, dl, MVT::i32,
8276                 DAG.getNode(ISD::AND, dl, MVT::i32,
8277                             DAG.getNode(ISD::XOR, dl, MVT::i32,
8278                                         CWD, DAG.getConstant(3, dl, MVT::i32)),
8279                             DAG.getConstant(3, dl, MVT::i32)),
8280                 DAG.getConstant(1, dl, MVT::i32));
8281 
8282   SDValue RetVal =
8283     DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
8284 
8285   RetVal =
8286       DAG.getNode((VT.getSizeInBits() < 16 ? ISD::TRUNCATE : ISD::ZERO_EXTEND),
8287                   dl, VT, RetVal);
8288 
8289   return DAG.getMergeValues({RetVal, Chain}, dl);
8290 }
8291 
8292 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
8293   EVT VT = Op.getValueType();
8294   unsigned BitWidth = VT.getSizeInBits();
8295   SDLoc dl(Op);
8296   assert(Op.getNumOperands() == 3 &&
8297          VT == Op.getOperand(1).getValueType() &&
8298          "Unexpected SHL!");
8299 
8300   // Expand into a bunch of logical ops.  Note that these ops
8301   // depend on the PPC behavior for oversized shift amounts.
8302   SDValue Lo = Op.getOperand(0);
8303   SDValue Hi = Op.getOperand(1);
8304   SDValue Amt = Op.getOperand(2);
8305   EVT AmtVT = Amt.getValueType();
8306 
8307   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8308                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8309   SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
8310   SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
8311   SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3);
8312   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8313                              DAG.getConstant(-BitWidth, dl, AmtVT));
8314   SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
8315   SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
8316   SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
8317   SDValue OutOps[] = { OutLo, OutHi };
8318   return DAG.getMergeValues(OutOps, dl);
8319 }
8320 
8321 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
8322   EVT VT = Op.getValueType();
8323   SDLoc dl(Op);
8324   unsigned BitWidth = VT.getSizeInBits();
8325   assert(Op.getNumOperands() == 3 &&
8326          VT == Op.getOperand(1).getValueType() &&
8327          "Unexpected SRL!");
8328 
8329   // Expand into a bunch of logical ops.  Note that these ops
8330   // depend on the PPC behavior for oversized shift amounts.
8331   SDValue Lo = Op.getOperand(0);
8332   SDValue Hi = Op.getOperand(1);
8333   SDValue Amt = Op.getOperand(2);
8334   EVT AmtVT = Amt.getValueType();
8335 
8336   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8337                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8338   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
8339   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
8340   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
8341   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8342                              DAG.getConstant(-BitWidth, dl, AmtVT));
8343   SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
8344   SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
8345   SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
8346   SDValue OutOps[] = { OutLo, OutHi };
8347   return DAG.getMergeValues(OutOps, dl);
8348 }
8349 
8350 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
8351   SDLoc dl(Op);
8352   EVT VT = Op.getValueType();
8353   unsigned BitWidth = VT.getSizeInBits();
8354   assert(Op.getNumOperands() == 3 &&
8355          VT == Op.getOperand(1).getValueType() &&
8356          "Unexpected SRA!");
8357 
8358   // Expand into a bunch of logical ops, followed by a select_cc.
8359   SDValue Lo = Op.getOperand(0);
8360   SDValue Hi = Op.getOperand(1);
8361   SDValue Amt = Op.getOperand(2);
8362   EVT AmtVT = Amt.getValueType();
8363 
8364   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8365                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8366   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
8367   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
8368   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
8369   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8370                              DAG.getConstant(-BitWidth, dl, AmtVT));
8371   SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
8372   SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
8373   SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT),
8374                                   Tmp4, Tmp6, ISD::SETLE);
8375   SDValue OutOps[] = { OutLo, OutHi };
8376   return DAG.getMergeValues(OutOps, dl);
8377 }
8378 
8379 SDValue PPCTargetLowering::LowerFunnelShift(SDValue Op,
8380                                             SelectionDAG &DAG) const {
8381   SDLoc dl(Op);
8382   EVT VT = Op.getValueType();
8383   unsigned BitWidth = VT.getSizeInBits();
8384 
8385   bool IsFSHL = Op.getOpcode() == ISD::FSHL;
8386   SDValue X = Op.getOperand(0);
8387   SDValue Y = Op.getOperand(1);
8388   SDValue Z = Op.getOperand(2);
8389   EVT AmtVT = Z.getValueType();
8390 
8391   // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
8392   // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
8393   // This is simpler than TargetLowering::expandFunnelShift because we can rely
8394   // on PowerPC shift by BW being well defined.
8395   Z = DAG.getNode(ISD::AND, dl, AmtVT, Z,
8396                   DAG.getConstant(BitWidth - 1, dl, AmtVT));
8397   SDValue SubZ =
8398       DAG.getNode(ISD::SUB, dl, AmtVT, DAG.getConstant(BitWidth, dl, AmtVT), Z);
8399   X = DAG.getNode(PPCISD::SHL, dl, VT, X, IsFSHL ? Z : SubZ);
8400   Y = DAG.getNode(PPCISD::SRL, dl, VT, Y, IsFSHL ? SubZ : Z);
8401   return DAG.getNode(ISD::OR, dl, VT, X, Y);
8402 }
8403 
8404 //===----------------------------------------------------------------------===//
8405 // Vector related lowering.
8406 //
8407 
8408 /// getCanonicalConstSplat - Build a canonical splat immediate of Val with an
8409 /// element size of SplatSize. Cast the result to VT.
8410 static SDValue getCanonicalConstSplat(uint64_t Val, unsigned SplatSize, EVT VT,
8411                                       SelectionDAG &DAG, const SDLoc &dl) {
8412   static const MVT VTys[] = { // canonical VT to use for each size.
8413     MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
8414   };
8415 
8416   EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
8417 
8418   // For a splat with all ones, turn it to vspltisb 0xFF to canonicalize.
8419   if (Val == ((1LLU << (SplatSize * 8)) - 1)) {
8420     SplatSize = 1;
8421     Val = 0xFF;
8422   }
8423 
8424   EVT CanonicalVT = VTys[SplatSize-1];
8425 
8426   // Build a canonical splat for this value.
8427   return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT));
8428 }
8429 
8430 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the
8431 /// specified intrinsic ID.
8432 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG,
8433                                 const SDLoc &dl, EVT DestVT = MVT::Other) {
8434   if (DestVT == MVT::Other) DestVT = Op.getValueType();
8435   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8436                      DAG.getConstant(IID, dl, MVT::i32), Op);
8437 }
8438 
8439 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the
8440 /// specified intrinsic ID.
8441 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
8442                                 SelectionDAG &DAG, const SDLoc &dl,
8443                                 EVT DestVT = MVT::Other) {
8444   if (DestVT == MVT::Other) DestVT = LHS.getValueType();
8445   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8446                      DAG.getConstant(IID, dl, MVT::i32), LHS, RHS);
8447 }
8448 
8449 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
8450 /// specified intrinsic ID.
8451 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
8452                                 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl,
8453                                 EVT DestVT = MVT::Other) {
8454   if (DestVT == MVT::Other) DestVT = Op0.getValueType();
8455   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8456                      DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
8457 }
8458 
8459 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
8460 /// amount.  The result has the specified value type.
8461 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT,
8462                            SelectionDAG &DAG, const SDLoc &dl) {
8463   // Force LHS/RHS to be the right type.
8464   LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
8465   RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);
8466 
8467   int Ops[16];
8468   for (unsigned i = 0; i != 16; ++i)
8469     Ops[i] = i + Amt;
8470   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
8471   return DAG.getNode(ISD::BITCAST, dl, VT, T);
8472 }
8473 
8474 /// Do we have an efficient pattern in a .td file for this node?
8475 ///
8476 /// \param V - pointer to the BuildVectorSDNode being matched
8477 /// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves?
8478 ///
8479 /// There are some patterns where it is beneficial to keep a BUILD_VECTOR
8480 /// node as a BUILD_VECTOR node rather than expanding it. The patterns where
8481 /// the opposite is true (expansion is beneficial) are:
8482 /// - The node builds a vector out of integers that are not 32 or 64-bits
8483 /// - The node builds a vector out of constants
8484 /// - The node is a "load-and-splat"
8485 /// In all other cases, we will choose to keep the BUILD_VECTOR.
8486 static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V,
8487                                             bool HasDirectMove,
8488                                             bool HasP8Vector) {
8489   EVT VecVT = V->getValueType(0);
8490   bool RightType = VecVT == MVT::v2f64 ||
8491     (HasP8Vector && VecVT == MVT::v4f32) ||
8492     (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32));
8493   if (!RightType)
8494     return false;
8495 
8496   bool IsSplat = true;
8497   bool IsLoad = false;
8498   SDValue Op0 = V->getOperand(0);
8499 
8500   // This function is called in a block that confirms the node is not a constant
8501   // splat. So a constant BUILD_VECTOR here means the vector is built out of
8502   // different constants.
8503   if (V->isConstant())
8504     return false;
8505   for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
8506     if (V->getOperand(i).isUndef())
8507       return false;
8508     // We want to expand nodes that represent load-and-splat even if the
8509     // loaded value is a floating point truncation or conversion to int.
8510     if (V->getOperand(i).getOpcode() == ISD::LOAD ||
8511         (V->getOperand(i).getOpcode() == ISD::FP_ROUND &&
8512          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
8513         (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT &&
8514          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
8515         (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT &&
8516          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD))
8517       IsLoad = true;
8518     // If the operands are different or the input is not a load and has more
8519     // uses than just this BV node, then it isn't a splat.
8520     if (V->getOperand(i) != Op0 ||
8521         (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode())))
8522       IsSplat = false;
8523   }
8524   return !(IsSplat && IsLoad);
8525 }
8526 
8527 // Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128.
8528 SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
8529 
8530   SDLoc dl(Op);
8531   SDValue Op0 = Op->getOperand(0);
8532 
8533   if ((Op.getValueType() != MVT::f128) ||
8534       (Op0.getOpcode() != ISD::BUILD_PAIR) ||
8535       (Op0.getOperand(0).getValueType() != MVT::i64) ||
8536       (Op0.getOperand(1).getValueType() != MVT::i64))
8537     return SDValue();
8538 
8539   return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0),
8540                      Op0.getOperand(1));
8541 }
8542 
8543 static const SDValue *getNormalLoadInput(const SDValue &Op, bool &IsPermuted) {
8544   const SDValue *InputLoad = &Op;
8545   if (InputLoad->getOpcode() == ISD::BITCAST)
8546     InputLoad = &InputLoad->getOperand(0);
8547   if (InputLoad->getOpcode() == ISD::SCALAR_TO_VECTOR ||
8548       InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED) {
8549     IsPermuted = InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED;
8550     InputLoad = &InputLoad->getOperand(0);
8551   }
8552   if (InputLoad->getOpcode() != ISD::LOAD)
8553     return nullptr;
8554   LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
8555   return ISD::isNormalLoad(LD) ? InputLoad : nullptr;
8556 }
8557 
8558 // Convert the argument APFloat to a single precision APFloat if there is no
8559 // loss in information during the conversion to single precision APFloat and the
8560 // resulting number is not a denormal number. Return true if successful.
8561 bool llvm::convertToNonDenormSingle(APFloat &ArgAPFloat) {
8562   APFloat APFloatToConvert = ArgAPFloat;
8563   bool LosesInfo = true;
8564   APFloatToConvert.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
8565                            &LosesInfo);
8566   bool Success = (!LosesInfo && !APFloatToConvert.isDenormal());
8567   if (Success)
8568     ArgAPFloat = APFloatToConvert;
8569   return Success;
8570 }
8571 
8572 // Bitcast the argument APInt to a double and convert it to a single precision
8573 // APFloat, bitcast the APFloat to an APInt and assign it to the original
8574 // argument if there is no loss in information during the conversion from
8575 // double to single precision APFloat and the resulting number is not a denormal
8576 // number. Return true if successful.
8577 bool llvm::convertToNonDenormSingle(APInt &ArgAPInt) {
8578   double DpValue = ArgAPInt.bitsToDouble();
8579   APFloat APFloatDp(DpValue);
8580   bool Success = convertToNonDenormSingle(APFloatDp);
8581   if (Success)
8582     ArgAPInt = APFloatDp.bitcastToAPInt();
8583   return Success;
8584 }
8585 
8586 // If this is a case we can't handle, return null and let the default
8587 // expansion code take care of it.  If we CAN select this case, and if it
8588 // selects to a single instruction, return Op.  Otherwise, if we can codegen
8589 // this case more efficiently than a constant pool load, lower it to the
8590 // sequence of ops that should be used.
8591 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
8592                                              SelectionDAG &DAG) const {
8593   SDLoc dl(Op);
8594   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
8595   assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
8596 
8597   // Check if this is a splat of a constant value.
8598   APInt APSplatBits, APSplatUndef;
8599   unsigned SplatBitSize;
8600   bool HasAnyUndefs;
8601   bool BVNIsConstantSplat =
8602       BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
8603                            HasAnyUndefs, 0, !Subtarget.isLittleEndian());
8604 
8605   // If it is a splat of a double, check if we can shrink it to a 32 bit
8606   // non-denormal float which when converted back to double gives us the same
8607   // double. This is to exploit the XXSPLTIDP instruction.
8608   if (BVNIsConstantSplat && Subtarget.hasPrefixInstrs() &&
8609       (SplatBitSize == 64) && (Op->getValueType(0) == MVT::v2f64) &&
8610       convertToNonDenormSingle(APSplatBits)) {
8611     SDValue SplatNode = DAG.getNode(
8612         PPCISD::XXSPLTI_SP_TO_DP, dl, MVT::v2f64,
8613         DAG.getTargetConstant(APSplatBits.getZExtValue(), dl, MVT::i32));
8614     return DAG.getBitcast(Op.getValueType(), SplatNode);
8615   }
8616 
8617   if (!BVNIsConstantSplat || SplatBitSize > 32) {
8618 
8619     bool IsPermutedLoad = false;
8620     const SDValue *InputLoad =
8621         getNormalLoadInput(Op.getOperand(0), IsPermutedLoad);
8622     // Handle load-and-splat patterns as we have instructions that will do this
8623     // in one go.
8624     if (InputLoad && DAG.isSplatValue(Op, true)) {
8625       LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
8626 
8627       // We have handling for 4 and 8 byte elements.
8628       unsigned ElementSize = LD->getMemoryVT().getScalarSizeInBits();
8629 
8630       // Checking for a single use of this load, we have to check for vector
8631       // width (128 bits) / ElementSize uses (since each operand of the
8632       // BUILD_VECTOR is a separate use of the value.
8633       unsigned NumUsesOfInputLD = 128 / ElementSize;
8634       for (SDValue BVInOp : Op->ops())
8635         if (BVInOp.isUndef())
8636           NumUsesOfInputLD--;
8637       assert(NumUsesOfInputLD > 0 && "No uses of input LD of a build_vector?");
8638       if (InputLoad->getNode()->hasNUsesOfValue(NumUsesOfInputLD, 0) &&
8639           ((Subtarget.hasVSX() && ElementSize == 64) ||
8640            (Subtarget.hasP9Vector() && ElementSize == 32))) {
8641         SDValue Ops[] = {
8642           LD->getChain(),    // Chain
8643           LD->getBasePtr(),  // Ptr
8644           DAG.getValueType(Op.getValueType()) // VT
8645         };
8646         SDValue LdSplt = DAG.getMemIntrinsicNode(
8647             PPCISD::LD_SPLAT, dl, DAG.getVTList(Op.getValueType(), MVT::Other),
8648             Ops, LD->getMemoryVT(), LD->getMemOperand());
8649         // Replace all uses of the output chain of the original load with the
8650         // output chain of the new load.
8651         DAG.ReplaceAllUsesOfValueWith(InputLoad->getValue(1),
8652                                       LdSplt.getValue(1));
8653         return LdSplt;
8654       }
8655     }
8656 
8657     // In 64BIT mode BUILD_VECTOR nodes that are not constant splats of up to
8658     // 32-bits can be lowered to VSX instructions under certain conditions.
8659     // Without VSX, there is no pattern more efficient than expanding the node.
8660     if (Subtarget.hasVSX() && Subtarget.isPPC64() &&
8661         haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
8662                                         Subtarget.hasP8Vector()))
8663       return Op;
8664     return SDValue();
8665   }
8666 
8667   uint64_t SplatBits = APSplatBits.getZExtValue();
8668   uint64_t SplatUndef = APSplatUndef.getZExtValue();
8669   unsigned SplatSize = SplatBitSize / 8;
8670 
8671   // First, handle single instruction cases.
8672 
8673   // All zeros?
8674   if (SplatBits == 0) {
8675     // Canonicalize all zero vectors to be v4i32.
8676     if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
8677       SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
8678       Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
8679     }
8680     return Op;
8681   }
8682 
8683   // We have XXSPLTIW for constant splats four bytes wide.
8684   // Given vector length is a multiple of 4, 2-byte splats can be replaced
8685   // with 4-byte splats. We replicate the SplatBits in case of 2-byte splat to
8686   // make a 4-byte splat element. For example: 2-byte splat of 0xABAB can be
8687   // turned into a 4-byte splat of 0xABABABAB.
8688   if (Subtarget.hasPrefixInstrs() && SplatSize == 2)
8689     return getCanonicalConstSplat(SplatBits | (SplatBits << 16), SplatSize * 2,
8690                                   Op.getValueType(), DAG, dl);
8691 
8692   if (Subtarget.hasPrefixInstrs() && SplatSize == 4)
8693     return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
8694                                   dl);
8695 
8696   // We have XXSPLTIB for constant splats one byte wide.
8697   if (Subtarget.hasP9Vector() && SplatSize == 1)
8698     return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
8699                                   dl);
8700 
8701   // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
8702   int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >>
8703                     (32-SplatBitSize));
8704   if (SextVal >= -16 && SextVal <= 15)
8705     return getCanonicalConstSplat(SextVal, SplatSize, Op.getValueType(), DAG,
8706                                   dl);
8707 
8708   // Two instruction sequences.
8709 
8710   // If this value is in the range [-32,30] and is even, use:
8711   //     VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
8712   // If this value is in the range [17,31] and is odd, use:
8713   //     VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
8714   // If this value is in the range [-31,-17] and is odd, use:
8715   //     VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
8716   // Note the last two are three-instruction sequences.
8717   if (SextVal >= -32 && SextVal <= 31) {
8718     // To avoid having these optimizations undone by constant folding,
8719     // we convert to a pseudo that will be expanded later into one of
8720     // the above forms.
8721     SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
8722     EVT VT = (SplatSize == 1 ? MVT::v16i8 :
8723               (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
8724     SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
8725     SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
8726     if (VT == Op.getValueType())
8727       return RetVal;
8728     else
8729       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
8730   }
8731 
8732   // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
8733   // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
8734   // for fneg/fabs.
8735   if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
8736     // Make -1 and vspltisw -1:
8737     SDValue OnesV = getCanonicalConstSplat(-1, 4, MVT::v4i32, DAG, dl);
8738 
8739     // Make the VSLW intrinsic, computing 0x8000_0000.
8740     SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
8741                                    OnesV, DAG, dl);
8742 
8743     // xor by OnesV to invert it.
8744     Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
8745     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8746   }
8747 
8748   // Check to see if this is a wide variety of vsplti*, binop self cases.
8749   static const signed char SplatCsts[] = {
8750     -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
8751     -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
8752   };
8753 
8754   for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
8755     // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
8756     // cases which are ambiguous (e.g. formation of 0x8000_0000).  'vsplti -1'
8757     int i = SplatCsts[idx];
8758 
8759     // Figure out what shift amount will be used by altivec if shifted by i in
8760     // this splat size.
8761     unsigned TypeShiftAmt = i & (SplatBitSize-1);
8762 
8763     // vsplti + shl self.
8764     if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
8765       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
8766       static const unsigned IIDs[] = { // Intrinsic to use for each size.
8767         Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
8768         Intrinsic::ppc_altivec_vslw
8769       };
8770       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
8771       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8772     }
8773 
8774     // vsplti + srl self.
8775     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
8776       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
8777       static const unsigned IIDs[] = { // Intrinsic to use for each size.
8778         Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
8779         Intrinsic::ppc_altivec_vsrw
8780       };
8781       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
8782       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8783     }
8784 
8785     // vsplti + rol self.
8786     if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
8787                          ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
8788       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
8789       static const unsigned IIDs[] = { // Intrinsic to use for each size.
8790         Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
8791         Intrinsic::ppc_altivec_vrlw
8792       };
8793       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
8794       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8795     }
8796 
8797     // t = vsplti c, result = vsldoi t, t, 1
8798     if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
8799       SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
8800       unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
8801       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
8802     }
8803     // t = vsplti c, result = vsldoi t, t, 2
8804     if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
8805       SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
8806       unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
8807       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
8808     }
8809     // t = vsplti c, result = vsldoi t, t, 3
8810     if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
8811       SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
8812       unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
8813       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
8814     }
8815   }
8816 
8817   return SDValue();
8818 }
8819 
8820 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
8821 /// the specified operations to build the shuffle.
8822 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
8823                                       SDValue RHS, SelectionDAG &DAG,
8824                                       const SDLoc &dl) {
8825   unsigned OpNum = (PFEntry >> 26) & 0x0F;
8826   unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
8827   unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
8828 
8829   enum {
8830     OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
8831     OP_VMRGHW,
8832     OP_VMRGLW,
8833     OP_VSPLTISW0,
8834     OP_VSPLTISW1,
8835     OP_VSPLTISW2,
8836     OP_VSPLTISW3,
8837     OP_VSLDOI4,
8838     OP_VSLDOI8,
8839     OP_VSLDOI12
8840   };
8841 
8842   if (OpNum == OP_COPY) {
8843     if (LHSID == (1*9+2)*9+3) return LHS;
8844     assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
8845     return RHS;
8846   }
8847 
8848   SDValue OpLHS, OpRHS;
8849   OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
8850   OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
8851 
8852   int ShufIdxs[16];
8853   switch (OpNum) {
8854   default: llvm_unreachable("Unknown i32 permute!");
8855   case OP_VMRGHW:
8856     ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
8857     ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
8858     ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
8859     ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
8860     break;
8861   case OP_VMRGLW:
8862     ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
8863     ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
8864     ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
8865     ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
8866     break;
8867   case OP_VSPLTISW0:
8868     for (unsigned i = 0; i != 16; ++i)
8869       ShufIdxs[i] = (i&3)+0;
8870     break;
8871   case OP_VSPLTISW1:
8872     for (unsigned i = 0; i != 16; ++i)
8873       ShufIdxs[i] = (i&3)+4;
8874     break;
8875   case OP_VSPLTISW2:
8876     for (unsigned i = 0; i != 16; ++i)
8877       ShufIdxs[i] = (i&3)+8;
8878     break;
8879   case OP_VSPLTISW3:
8880     for (unsigned i = 0; i != 16; ++i)
8881       ShufIdxs[i] = (i&3)+12;
8882     break;
8883   case OP_VSLDOI4:
8884     return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
8885   case OP_VSLDOI8:
8886     return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
8887   case OP_VSLDOI12:
8888     return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
8889   }
8890   EVT VT = OpLHS.getValueType();
8891   OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
8892   OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
8893   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
8894   return DAG.getNode(ISD::BITCAST, dl, VT, T);
8895 }
8896 
8897 /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
8898 /// by the VINSERTB instruction introduced in ISA 3.0, else just return default
8899 /// SDValue.
8900 SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
8901                                            SelectionDAG &DAG) const {
8902   const unsigned BytesInVector = 16;
8903   bool IsLE = Subtarget.isLittleEndian();
8904   SDLoc dl(N);
8905   SDValue V1 = N->getOperand(0);
8906   SDValue V2 = N->getOperand(1);
8907   unsigned ShiftElts = 0, InsertAtByte = 0;
8908   bool Swap = false;
8909 
8910   // Shifts required to get the byte we want at element 7.
8911   unsigned LittleEndianShifts[] = {8, 7,  6,  5,  4,  3,  2,  1,
8912                                    0, 15, 14, 13, 12, 11, 10, 9};
8913   unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
8914                                 1, 2,  3,  4,  5,  6,  7,  8};
8915 
8916   ArrayRef<int> Mask = N->getMask();
8917   int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
8918 
8919   // For each mask element, find out if we're just inserting something
8920   // from V2 into V1 or vice versa.
8921   // Possible permutations inserting an element from V2 into V1:
8922   //   X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
8923   //   0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
8924   //   ...
8925   //   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
8926   // Inserting from V1 into V2 will be similar, except mask range will be
8927   // [16,31].
8928 
8929   bool FoundCandidate = false;
8930   // If both vector operands for the shuffle are the same vector, the mask
8931   // will contain only elements from the first one and the second one will be
8932   // undef.
8933   unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
8934   // Go through the mask of half-words to find an element that's being moved
8935   // from one vector to the other.
8936   for (unsigned i = 0; i < BytesInVector; ++i) {
8937     unsigned CurrentElement = Mask[i];
8938     // If 2nd operand is undefined, we should only look for element 7 in the
8939     // Mask.
8940     if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
8941       continue;
8942 
8943     bool OtherElementsInOrder = true;
8944     // Examine the other elements in the Mask to see if they're in original
8945     // order.
8946     for (unsigned j = 0; j < BytesInVector; ++j) {
8947       if (j == i)
8948         continue;
8949       // If CurrentElement is from V1 [0,15], then we the rest of the Mask to be
8950       // from V2 [16,31] and vice versa.  Unless the 2nd operand is undefined,
8951       // in which we always assume we're always picking from the 1st operand.
8952       int MaskOffset =
8953           (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
8954       if (Mask[j] != OriginalOrder[j] + MaskOffset) {
8955         OtherElementsInOrder = false;
8956         break;
8957       }
8958     }
8959     // If other elements are in original order, we record the number of shifts
8960     // we need to get the element we want into element 7. Also record which byte
8961     // in the vector we should insert into.
8962     if (OtherElementsInOrder) {
8963       // If 2nd operand is undefined, we assume no shifts and no swapping.
8964       if (V2.isUndef()) {
8965         ShiftElts = 0;
8966         Swap = false;
8967       } else {
8968         // Only need the last 4-bits for shifts because operands will be swapped if CurrentElement is >= 2^4.
8969         ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
8970                          : BigEndianShifts[CurrentElement & 0xF];
8971         Swap = CurrentElement < BytesInVector;
8972       }
8973       InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
8974       FoundCandidate = true;
8975       break;
8976     }
8977   }
8978 
8979   if (!FoundCandidate)
8980     return SDValue();
8981 
8982   // Candidate found, construct the proper SDAG sequence with VINSERTB,
8983   // optionally with VECSHL if shift is required.
8984   if (Swap)
8985     std::swap(V1, V2);
8986   if (V2.isUndef())
8987     V2 = V1;
8988   if (ShiftElts) {
8989     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
8990                               DAG.getConstant(ShiftElts, dl, MVT::i32));
8991     return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl,
8992                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
8993   }
8994   return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2,
8995                      DAG.getConstant(InsertAtByte, dl, MVT::i32));
8996 }
8997 
8998 /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled
8999 /// by the VINSERTH instruction introduced in ISA 3.0, else just return default
9000 /// SDValue.
9001 SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
9002                                            SelectionDAG &DAG) const {
9003   const unsigned NumHalfWords = 8;
9004   const unsigned BytesInVector = NumHalfWords * 2;
9005   // Check that the shuffle is on half-words.
9006   if (!isNByteElemShuffleMask(N, 2, 1))
9007     return SDValue();
9008 
9009   bool IsLE = Subtarget.isLittleEndian();
9010   SDLoc dl(N);
9011   SDValue V1 = N->getOperand(0);
9012   SDValue V2 = N->getOperand(1);
9013   unsigned ShiftElts = 0, InsertAtByte = 0;
9014   bool Swap = false;
9015 
9016   // Shifts required to get the half-word we want at element 3.
9017   unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
9018   unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};
9019 
9020   uint32_t Mask = 0;
9021   uint32_t OriginalOrderLow = 0x1234567;
9022   uint32_t OriginalOrderHigh = 0x89ABCDEF;
9023   // Now we look at mask elements 0,2,4,6,8,10,12,14.  Pack the mask into a
9024   // 32-bit space, only need 4-bit nibbles per element.
9025   for (unsigned i = 0; i < NumHalfWords; ++i) {
9026     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9027     Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift);
9028   }
9029 
9030   // For each mask element, find out if we're just inserting something
9031   // from V2 into V1 or vice versa.  Possible permutations inserting an element
9032   // from V2 into V1:
9033   //   X, 1, 2, 3, 4, 5, 6, 7
9034   //   0, X, 2, 3, 4, 5, 6, 7
9035   //   0, 1, X, 3, 4, 5, 6, 7
9036   //   0, 1, 2, X, 4, 5, 6, 7
9037   //   0, 1, 2, 3, X, 5, 6, 7
9038   //   0, 1, 2, 3, 4, X, 6, 7
9039   //   0, 1, 2, 3, 4, 5, X, 7
9040   //   0, 1, 2, 3, 4, 5, 6, X
9041   // Inserting from V1 into V2 will be similar, except mask range will be [8,15].
9042 
9043   bool FoundCandidate = false;
9044   // Go through the mask of half-words to find an element that's being moved
9045   // from one vector to the other.
9046   for (unsigned i = 0; i < NumHalfWords; ++i) {
9047     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9048     uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF;
9049     uint32_t MaskOtherElts = ~(0xF << MaskShift);
9050     uint32_t TargetOrder = 0x0;
9051 
9052     // If both vector operands for the shuffle are the same vector, the mask
9053     // will contain only elements from the first one and the second one will be
9054     // undef.
9055     if (V2.isUndef()) {
9056       ShiftElts = 0;
9057       unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
9058       TargetOrder = OriginalOrderLow;
9059       Swap = false;
9060       // Skip if not the correct element or mask of other elements don't equal
9061       // to our expected order.
9062       if (MaskOneElt == VINSERTHSrcElem &&
9063           (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9064         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9065         FoundCandidate = true;
9066         break;
9067       }
9068     } else { // If both operands are defined.
9069       // Target order is [8,15] if the current mask is between [0,7].
9070       TargetOrder =
9071           (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
9072       // Skip if mask of other elements don't equal our expected order.
9073       if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9074         // We only need the last 3 bits for the number of shifts.
9075         ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
9076                          : BigEndianShifts[MaskOneElt & 0x7];
9077         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9078         Swap = MaskOneElt < NumHalfWords;
9079         FoundCandidate = true;
9080         break;
9081       }
9082     }
9083   }
9084 
9085   if (!FoundCandidate)
9086     return SDValue();
9087 
9088   // Candidate found, construct the proper SDAG sequence with VINSERTH,
9089   // optionally with VECSHL if shift is required.
9090   if (Swap)
9091     std::swap(V1, V2);
9092   if (V2.isUndef())
9093     V2 = V1;
9094   SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
9095   if (ShiftElts) {
9096     // Double ShiftElts because we're left shifting on v16i8 type.
9097     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9098                               DAG.getConstant(2 * ShiftElts, dl, MVT::i32));
9099     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl);
9100     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9101                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
9102     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9103   }
9104   SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
9105   SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9106                             DAG.getConstant(InsertAtByte, dl, MVT::i32));
9107   return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9108 }
9109 
9110 /// lowerToXXSPLTI32DX - Return the SDValue if this VECTOR_SHUFFLE can be
9111 /// handled by the XXSPLTI32DX instruction introduced in ISA 3.1, otherwise
9112 /// return the default SDValue.
9113 SDValue PPCTargetLowering::lowerToXXSPLTI32DX(ShuffleVectorSDNode *SVN,
9114                                               SelectionDAG &DAG) const {
9115   // The LHS and RHS may be bitcasts to v16i8 as we canonicalize shuffles
9116   // to v16i8. Peek through the bitcasts to get the actual operands.
9117   SDValue LHS = peekThroughBitcasts(SVN->getOperand(0));
9118   SDValue RHS = peekThroughBitcasts(SVN->getOperand(1));
9119 
9120   auto ShuffleMask = SVN->getMask();
9121   SDValue VecShuffle(SVN, 0);
9122   SDLoc DL(SVN);
9123 
9124   // Check that we have a four byte shuffle.
9125   if (!isNByteElemShuffleMask(SVN, 4, 1))
9126     return SDValue();
9127 
9128   // Canonicalize the RHS being a BUILD_VECTOR when lowering to xxsplti32dx.
9129   if (RHS->getOpcode() != ISD::BUILD_VECTOR) {
9130     std::swap(LHS, RHS);
9131     VecShuffle = DAG.getCommutedVectorShuffle(*SVN);
9132     ShuffleMask = cast<ShuffleVectorSDNode>(VecShuffle)->getMask();
9133   }
9134 
9135   // Ensure that the RHS is a vector of constants.
9136   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
9137   if (!BVN)
9138     return SDValue();
9139 
9140   // Check if RHS is a splat of 4-bytes (or smaller).
9141   APInt APSplatValue, APSplatUndef;
9142   unsigned SplatBitSize;
9143   bool HasAnyUndefs;
9144   if (!BVN->isConstantSplat(APSplatValue, APSplatUndef, SplatBitSize,
9145                             HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
9146       SplatBitSize > 32)
9147     return SDValue();
9148 
9149   // Check that the shuffle mask matches the semantics of XXSPLTI32DX.
9150   // The instruction splats a constant C into two words of the source vector
9151   // producing { C, Unchanged, C, Unchanged } or { Unchanged, C, Unchanged, C }.
9152   // Thus we check that the shuffle mask is the equivalent  of
9153   // <0, [4-7], 2, [4-7]> or <[4-7], 1, [4-7], 3> respectively.
9154   // Note: the check above of isNByteElemShuffleMask() ensures that the bytes
9155   // within each word are consecutive, so we only need to check the first byte.
9156   SDValue Index;
9157   bool IsLE = Subtarget.isLittleEndian();
9158   if ((ShuffleMask[0] == 0 && ShuffleMask[8] == 8) &&
9159       (ShuffleMask[4] % 4 == 0 && ShuffleMask[12] % 4 == 0 &&
9160        ShuffleMask[4] > 15 && ShuffleMask[12] > 15))
9161     Index = DAG.getTargetConstant(IsLE ? 0 : 1, DL, MVT::i32);
9162   else if ((ShuffleMask[4] == 4 && ShuffleMask[12] == 12) &&
9163            (ShuffleMask[0] % 4 == 0 && ShuffleMask[8] % 4 == 0 &&
9164             ShuffleMask[0] > 15 && ShuffleMask[8] > 15))
9165     Index = DAG.getTargetConstant(IsLE ? 1 : 0, DL, MVT::i32);
9166   else
9167     return SDValue();
9168 
9169   // If the splat is narrower than 32-bits, we need to get the 32-bit value
9170   // for XXSPLTI32DX.
9171   unsigned SplatVal = APSplatValue.getZExtValue();
9172   for (; SplatBitSize < 32; SplatBitSize <<= 1)
9173     SplatVal |= (SplatVal << SplatBitSize);
9174 
9175   SDValue SplatNode = DAG.getNode(
9176       PPCISD::XXSPLTI32DX, DL, MVT::v2i64, DAG.getBitcast(MVT::v2i64, LHS),
9177       Index, DAG.getTargetConstant(SplatVal, DL, MVT::i32));
9178   return DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, SplatNode);
9179 }
9180 
9181 /// LowerROTL - Custom lowering for ROTL(v1i128) to vector_shuffle(v16i8).
9182 /// We lower ROTL(v1i128) to vector_shuffle(v16i8) only if shift amount is
9183 /// a multiple of 8. Otherwise convert it to a scalar rotation(i128)
9184 /// i.e (or (shl x, C1), (srl x, 128-C1)).
9185 SDValue PPCTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const {
9186   assert(Op.getOpcode() == ISD::ROTL && "Should only be called for ISD::ROTL");
9187   assert(Op.getValueType() == MVT::v1i128 &&
9188          "Only set v1i128 as custom, other type shouldn't reach here!");
9189   SDLoc dl(Op);
9190   SDValue N0 = peekThroughBitcasts(Op.getOperand(0));
9191   SDValue N1 = peekThroughBitcasts(Op.getOperand(1));
9192   unsigned SHLAmt = N1.getConstantOperandVal(0);
9193   if (SHLAmt % 8 == 0) {
9194     SmallVector<int, 16> Mask(16, 0);
9195     std::iota(Mask.begin(), Mask.end(), 0);
9196     std::rotate(Mask.begin(), Mask.begin() + SHLAmt / 8, Mask.end());
9197     if (SDValue Shuffle =
9198             DAG.getVectorShuffle(MVT::v16i8, dl, DAG.getBitcast(MVT::v16i8, N0),
9199                                  DAG.getUNDEF(MVT::v16i8), Mask))
9200       return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, Shuffle);
9201   }
9202   SDValue ArgVal = DAG.getBitcast(MVT::i128, N0);
9203   SDValue SHLOp = DAG.getNode(ISD::SHL, dl, MVT::i128, ArgVal,
9204                               DAG.getConstant(SHLAmt, dl, MVT::i32));
9205   SDValue SRLOp = DAG.getNode(ISD::SRL, dl, MVT::i128, ArgVal,
9206                               DAG.getConstant(128 - SHLAmt, dl, MVT::i32));
9207   SDValue OROp = DAG.getNode(ISD::OR, dl, MVT::i128, SHLOp, SRLOp);
9208   return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, OROp);
9209 }
9210 
9211 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
9212 /// is a shuffle we can handle in a single instruction, return it.  Otherwise,
9213 /// return the code it can be lowered into.  Worst case, it can always be
9214 /// lowered into a vperm.
9215 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
9216                                                SelectionDAG &DAG) const {
9217   SDLoc dl(Op);
9218   SDValue V1 = Op.getOperand(0);
9219   SDValue V2 = Op.getOperand(1);
9220   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9221 
9222   // Any nodes that were combined in the target-independent combiner prior
9223   // to vector legalization will not be sent to the target combine. Try to
9224   // combine it here.
9225   if (SDValue NewShuffle = combineVectorShuffle(SVOp, DAG)) {
9226     if (!isa<ShuffleVectorSDNode>(NewShuffle))
9227       return NewShuffle;
9228     Op = NewShuffle;
9229     SVOp = cast<ShuffleVectorSDNode>(Op);
9230     V1 = Op.getOperand(0);
9231     V2 = Op.getOperand(1);
9232   }
9233   EVT VT = Op.getValueType();
9234   bool isLittleEndian = Subtarget.isLittleEndian();
9235 
9236   unsigned ShiftElts, InsertAtByte;
9237   bool Swap = false;
9238 
9239   // If this is a load-and-splat, we can do that with a single instruction
9240   // in some cases. However if the load has multiple uses, we don't want to
9241   // combine it because that will just produce multiple loads.
9242   bool IsPermutedLoad = false;
9243   const SDValue *InputLoad = getNormalLoadInput(V1, IsPermutedLoad);
9244   if (InputLoad && Subtarget.hasVSX() && V2.isUndef() &&
9245       (PPC::isSplatShuffleMask(SVOp, 4) || PPC::isSplatShuffleMask(SVOp, 8)) &&
9246       InputLoad->hasOneUse()) {
9247     bool IsFourByte = PPC::isSplatShuffleMask(SVOp, 4);
9248     int SplatIdx =
9249       PPC::getSplatIdxForPPCMnemonics(SVOp, IsFourByte ? 4 : 8, DAG);
9250 
9251     // The splat index for permuted loads will be in the left half of the vector
9252     // which is strictly wider than the loaded value by 8 bytes. So we need to
9253     // adjust the splat index to point to the correct address in memory.
9254     if (IsPermutedLoad) {
9255       assert(isLittleEndian && "Unexpected permuted load on big endian target");
9256       SplatIdx += IsFourByte ? 2 : 1;
9257       assert((SplatIdx < (IsFourByte ? 4 : 2)) &&
9258              "Splat of a value outside of the loaded memory");
9259     }
9260 
9261     LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9262     // For 4-byte load-and-splat, we need Power9.
9263     if ((IsFourByte && Subtarget.hasP9Vector()) || !IsFourByte) {
9264       uint64_t Offset = 0;
9265       if (IsFourByte)
9266         Offset = isLittleEndian ? (3 - SplatIdx) * 4 : SplatIdx * 4;
9267       else
9268         Offset = isLittleEndian ? (1 - SplatIdx) * 8 : SplatIdx * 8;
9269 
9270       SDValue BasePtr = LD->getBasePtr();
9271       if (Offset != 0)
9272         BasePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
9273                               BasePtr, DAG.getIntPtrConstant(Offset, dl));
9274       SDValue Ops[] = {
9275         LD->getChain(),    // Chain
9276         BasePtr,           // BasePtr
9277         DAG.getValueType(Op.getValueType()) // VT
9278       };
9279       SDVTList VTL =
9280         DAG.getVTList(IsFourByte ? MVT::v4i32 : MVT::v2i64, MVT::Other);
9281       SDValue LdSplt =
9282         DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl, VTL,
9283                                 Ops, LD->getMemoryVT(), LD->getMemOperand());
9284       DAG.ReplaceAllUsesOfValueWith(InputLoad->getValue(1), LdSplt.getValue(1));
9285       if (LdSplt.getValueType() != SVOp->getValueType(0))
9286         LdSplt = DAG.getBitcast(SVOp->getValueType(0), LdSplt);
9287       return LdSplt;
9288     }
9289   }
9290   if (Subtarget.hasP9Vector() &&
9291       PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
9292                            isLittleEndian)) {
9293     if (Swap)
9294       std::swap(V1, V2);
9295     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9296     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2);
9297     if (ShiftElts) {
9298       SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2,
9299                                 DAG.getConstant(ShiftElts, dl, MVT::i32));
9300       SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl,
9301                                 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9302       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9303     }
9304     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2,
9305                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
9306     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9307   }
9308 
9309   if (Subtarget.hasPrefixInstrs()) {
9310     SDValue SplatInsertNode;
9311     if ((SplatInsertNode = lowerToXXSPLTI32DX(SVOp, DAG)))
9312       return SplatInsertNode;
9313   }
9314 
9315   if (Subtarget.hasP9Altivec()) {
9316     SDValue NewISDNode;
9317     if ((NewISDNode = lowerToVINSERTH(SVOp, DAG)))
9318       return NewISDNode;
9319 
9320     if ((NewISDNode = lowerToVINSERTB(SVOp, DAG)))
9321       return NewISDNode;
9322   }
9323 
9324   if (Subtarget.hasVSX() &&
9325       PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
9326     if (Swap)
9327       std::swap(V1, V2);
9328     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9329     SDValue Conv2 =
9330         DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2);
9331 
9332     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2,
9333                               DAG.getConstant(ShiftElts, dl, MVT::i32));
9334     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl);
9335   }
9336 
9337   if (Subtarget.hasVSX() &&
9338     PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
9339     if (Swap)
9340       std::swap(V1, V2);
9341     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
9342     SDValue Conv2 =
9343         DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2);
9344 
9345     SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2,
9346                               DAG.getConstant(ShiftElts, dl, MVT::i32));
9347     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI);
9348   }
9349 
9350   if (Subtarget.hasP9Vector()) {
9351      if (PPC::isXXBRHShuffleMask(SVOp)) {
9352       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
9353       SDValue ReveHWord = DAG.getNode(ISD::BSWAP, dl, MVT::v8i16, Conv);
9354       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord);
9355     } else if (PPC::isXXBRWShuffleMask(SVOp)) {
9356       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9357       SDValue ReveWord = DAG.getNode(ISD::BSWAP, dl, MVT::v4i32, Conv);
9358       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord);
9359     } else if (PPC::isXXBRDShuffleMask(SVOp)) {
9360       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
9361       SDValue ReveDWord = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Conv);
9362       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord);
9363     } else if (PPC::isXXBRQShuffleMask(SVOp)) {
9364       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1);
9365       SDValue ReveQWord = DAG.getNode(ISD::BSWAP, dl, MVT::v1i128, Conv);
9366       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord);
9367     }
9368   }
9369 
9370   if (Subtarget.hasVSX()) {
9371     if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
9372       int SplatIdx = PPC::getSplatIdxForPPCMnemonics(SVOp, 4, DAG);
9373 
9374       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9375       SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
9376                                   DAG.getConstant(SplatIdx, dl, MVT::i32));
9377       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
9378     }
9379 
9380     // Left shifts of 8 bytes are actually swaps. Convert accordingly.
9381     if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
9382       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
9383       SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
9384       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
9385     }
9386   }
9387 
9388   // Cases that are handled by instructions that take permute immediates
9389   // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
9390   // selected by the instruction selector.
9391   if (V2.isUndef()) {
9392     if (PPC::isSplatShuffleMask(SVOp, 1) ||
9393         PPC::isSplatShuffleMask(SVOp, 2) ||
9394         PPC::isSplatShuffleMask(SVOp, 4) ||
9395         PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
9396         PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
9397         PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
9398         PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
9399         PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
9400         PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
9401         PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
9402         PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
9403         PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) ||
9404         (Subtarget.hasP8Altivec() && (
9405          PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) ||
9406          PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) ||
9407          PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) {
9408       return Op;
9409     }
9410   }
9411 
9412   // Altivec has a variety of "shuffle immediates" that take two vector inputs
9413   // and produce a fixed permutation.  If any of these match, do not lower to
9414   // VPERM.
9415   unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
9416   if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9417       PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9418       PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
9419       PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
9420       PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
9421       PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
9422       PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
9423       PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
9424       PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
9425       (Subtarget.hasP8Altivec() && (
9426        PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9427        PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) ||
9428        PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG))))
9429     return Op;
9430 
9431   // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
9432   // perfect shuffle table to emit an optimal matching sequence.
9433   ArrayRef<int> PermMask = SVOp->getMask();
9434 
9435   unsigned PFIndexes[4];
9436   bool isFourElementShuffle = true;
9437   for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
9438     unsigned EltNo = 8;   // Start out undef.
9439     for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
9440       if (PermMask[i*4+j] < 0)
9441         continue;   // Undef, ignore it.
9442 
9443       unsigned ByteSource = PermMask[i*4+j];
9444       if ((ByteSource & 3) != j) {
9445         isFourElementShuffle = false;
9446         break;
9447       }
9448 
9449       if (EltNo == 8) {
9450         EltNo = ByteSource/4;
9451       } else if (EltNo != ByteSource/4) {
9452         isFourElementShuffle = false;
9453         break;
9454       }
9455     }
9456     PFIndexes[i] = EltNo;
9457   }
9458 
9459   // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
9460   // perfect shuffle vector to determine if it is cost effective to do this as
9461   // discrete instructions, or whether we should use a vperm.
9462   // For now, we skip this for little endian until such time as we have a
9463   // little-endian perfect shuffle table.
9464   if (isFourElementShuffle && !isLittleEndian) {
9465     // Compute the index in the perfect shuffle table.
9466     unsigned PFTableIndex =
9467       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
9468 
9469     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
9470     unsigned Cost  = (PFEntry >> 30);
9471 
9472     // Determining when to avoid vperm is tricky.  Many things affect the cost
9473     // of vperm, particularly how many times the perm mask needs to be computed.
9474     // For example, if the perm mask can be hoisted out of a loop or is already
9475     // used (perhaps because there are multiple permutes with the same shuffle
9476     // mask?) the vperm has a cost of 1.  OTOH, hoisting the permute mask out of
9477     // the loop requires an extra register.
9478     //
9479     // As a compromise, we only emit discrete instructions if the shuffle can be
9480     // generated in 3 or fewer operations.  When we have loop information
9481     // available, if this block is within a loop, we should avoid using vperm
9482     // for 3-operation perms and use a constant pool load instead.
9483     if (Cost < 3)
9484       return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
9485   }
9486 
9487   // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
9488   // vector that will get spilled to the constant pool.
9489   if (V2.isUndef()) V2 = V1;
9490 
9491   // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
9492   // that it is in input element units, not in bytes.  Convert now.
9493 
9494   // For little endian, the order of the input vectors is reversed, and
9495   // the permutation mask is complemented with respect to 31.  This is
9496   // necessary to produce proper semantics with the big-endian-biased vperm
9497   // instruction.
9498   EVT EltVT = V1.getValueType().getVectorElementType();
9499   unsigned BytesPerElement = EltVT.getSizeInBits()/8;
9500 
9501   SmallVector<SDValue, 16> ResultMask;
9502   for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
9503     unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
9504 
9505     for (unsigned j = 0; j != BytesPerElement; ++j)
9506       if (isLittleEndian)
9507         ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
9508                                              dl, MVT::i32));
9509       else
9510         ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
9511                                              MVT::i32));
9512   }
9513 
9514   ShufflesHandledWithVPERM++;
9515   SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
9516   LLVM_DEBUG(dbgs() << "Emitting a VPERM for the following shuffle:\n");
9517   LLVM_DEBUG(SVOp->dump());
9518   LLVM_DEBUG(dbgs() << "With the following permute control vector:\n");
9519   LLVM_DEBUG(VPermMask.dump());
9520 
9521   if (isLittleEndian)
9522     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
9523                        V2, V1, VPermMask);
9524   else
9525     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
9526                        V1, V2, VPermMask);
9527 }
9528 
9529 /// getVectorCompareInfo - Given an intrinsic, return false if it is not a
9530 /// vector comparison.  If it is, return true and fill in Opc/isDot with
9531 /// information about the intrinsic.
9532 static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
9533                                  bool &isDot, const PPCSubtarget &Subtarget) {
9534   unsigned IntrinsicID =
9535       cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
9536   CompareOpc = -1;
9537   isDot = false;
9538   switch (IntrinsicID) {
9539   default:
9540     return false;
9541   // Comparison predicates.
9542   case Intrinsic::ppc_altivec_vcmpbfp_p:
9543     CompareOpc = 966;
9544     isDot = true;
9545     break;
9546   case Intrinsic::ppc_altivec_vcmpeqfp_p:
9547     CompareOpc = 198;
9548     isDot = true;
9549     break;
9550   case Intrinsic::ppc_altivec_vcmpequb_p:
9551     CompareOpc = 6;
9552     isDot = true;
9553     break;
9554   case Intrinsic::ppc_altivec_vcmpequh_p:
9555     CompareOpc = 70;
9556     isDot = true;
9557     break;
9558   case Intrinsic::ppc_altivec_vcmpequw_p:
9559     CompareOpc = 134;
9560     isDot = true;
9561     break;
9562   case Intrinsic::ppc_altivec_vcmpequd_p:
9563     if (Subtarget.hasP8Altivec()) {
9564       CompareOpc = 199;
9565       isDot = true;
9566     } else
9567       return false;
9568     break;
9569   case Intrinsic::ppc_altivec_vcmpneb_p:
9570   case Intrinsic::ppc_altivec_vcmpneh_p:
9571   case Intrinsic::ppc_altivec_vcmpnew_p:
9572   case Intrinsic::ppc_altivec_vcmpnezb_p:
9573   case Intrinsic::ppc_altivec_vcmpnezh_p:
9574   case Intrinsic::ppc_altivec_vcmpnezw_p:
9575     if (Subtarget.hasP9Altivec()) {
9576       switch (IntrinsicID) {
9577       default:
9578         llvm_unreachable("Unknown comparison intrinsic.");
9579       case Intrinsic::ppc_altivec_vcmpneb_p:
9580         CompareOpc = 7;
9581         break;
9582       case Intrinsic::ppc_altivec_vcmpneh_p:
9583         CompareOpc = 71;
9584         break;
9585       case Intrinsic::ppc_altivec_vcmpnew_p:
9586         CompareOpc = 135;
9587         break;
9588       case Intrinsic::ppc_altivec_vcmpnezb_p:
9589         CompareOpc = 263;
9590         break;
9591       case Intrinsic::ppc_altivec_vcmpnezh_p:
9592         CompareOpc = 327;
9593         break;
9594       case Intrinsic::ppc_altivec_vcmpnezw_p:
9595         CompareOpc = 391;
9596         break;
9597       }
9598       isDot = true;
9599     } else
9600       return false;
9601     break;
9602   case Intrinsic::ppc_altivec_vcmpgefp_p:
9603     CompareOpc = 454;
9604     isDot = true;
9605     break;
9606   case Intrinsic::ppc_altivec_vcmpgtfp_p:
9607     CompareOpc = 710;
9608     isDot = true;
9609     break;
9610   case Intrinsic::ppc_altivec_vcmpgtsb_p:
9611     CompareOpc = 774;
9612     isDot = true;
9613     break;
9614   case Intrinsic::ppc_altivec_vcmpgtsh_p:
9615     CompareOpc = 838;
9616     isDot = true;
9617     break;
9618   case Intrinsic::ppc_altivec_vcmpgtsw_p:
9619     CompareOpc = 902;
9620     isDot = true;
9621     break;
9622   case Intrinsic::ppc_altivec_vcmpgtsd_p:
9623     if (Subtarget.hasP8Altivec()) {
9624       CompareOpc = 967;
9625       isDot = true;
9626     } else
9627       return false;
9628     break;
9629   case Intrinsic::ppc_altivec_vcmpgtub_p:
9630     CompareOpc = 518;
9631     isDot = true;
9632     break;
9633   case Intrinsic::ppc_altivec_vcmpgtuh_p:
9634     CompareOpc = 582;
9635     isDot = true;
9636     break;
9637   case Intrinsic::ppc_altivec_vcmpgtuw_p:
9638     CompareOpc = 646;
9639     isDot = true;
9640     break;
9641   case Intrinsic::ppc_altivec_vcmpgtud_p:
9642     if (Subtarget.hasP8Altivec()) {
9643       CompareOpc = 711;
9644       isDot = true;
9645     } else
9646       return false;
9647     break;
9648 
9649   case Intrinsic::ppc_altivec_vcmpequq:
9650   case Intrinsic::ppc_altivec_vcmpgtsq:
9651   case Intrinsic::ppc_altivec_vcmpgtuq:
9652     if (!Subtarget.isISA3_1())
9653       return false;
9654     switch (IntrinsicID) {
9655     default:
9656       llvm_unreachable("Unknown comparison intrinsic.");
9657     case Intrinsic::ppc_altivec_vcmpequq:
9658       CompareOpc = 455;
9659       break;
9660     case Intrinsic::ppc_altivec_vcmpgtsq:
9661       CompareOpc = 903;
9662       break;
9663     case Intrinsic::ppc_altivec_vcmpgtuq:
9664       CompareOpc = 647;
9665       break;
9666     }
9667     break;
9668 
9669   // VSX predicate comparisons use the same infrastructure
9670   case Intrinsic::ppc_vsx_xvcmpeqdp_p:
9671   case Intrinsic::ppc_vsx_xvcmpgedp_p:
9672   case Intrinsic::ppc_vsx_xvcmpgtdp_p:
9673   case Intrinsic::ppc_vsx_xvcmpeqsp_p:
9674   case Intrinsic::ppc_vsx_xvcmpgesp_p:
9675   case Intrinsic::ppc_vsx_xvcmpgtsp_p:
9676     if (Subtarget.hasVSX()) {
9677       switch (IntrinsicID) {
9678       case Intrinsic::ppc_vsx_xvcmpeqdp_p:
9679         CompareOpc = 99;
9680         break;
9681       case Intrinsic::ppc_vsx_xvcmpgedp_p:
9682         CompareOpc = 115;
9683         break;
9684       case Intrinsic::ppc_vsx_xvcmpgtdp_p:
9685         CompareOpc = 107;
9686         break;
9687       case Intrinsic::ppc_vsx_xvcmpeqsp_p:
9688         CompareOpc = 67;
9689         break;
9690       case Intrinsic::ppc_vsx_xvcmpgesp_p:
9691         CompareOpc = 83;
9692         break;
9693       case Intrinsic::ppc_vsx_xvcmpgtsp_p:
9694         CompareOpc = 75;
9695         break;
9696       }
9697       isDot = true;
9698     } else
9699       return false;
9700     break;
9701 
9702   // Normal Comparisons.
9703   case Intrinsic::ppc_altivec_vcmpbfp:
9704     CompareOpc = 966;
9705     break;
9706   case Intrinsic::ppc_altivec_vcmpeqfp:
9707     CompareOpc = 198;
9708     break;
9709   case Intrinsic::ppc_altivec_vcmpequb:
9710     CompareOpc = 6;
9711     break;
9712   case Intrinsic::ppc_altivec_vcmpequh:
9713     CompareOpc = 70;
9714     break;
9715   case Intrinsic::ppc_altivec_vcmpequw:
9716     CompareOpc = 134;
9717     break;
9718   case Intrinsic::ppc_altivec_vcmpequd:
9719     if (Subtarget.hasP8Altivec())
9720       CompareOpc = 199;
9721     else
9722       return false;
9723     break;
9724   case Intrinsic::ppc_altivec_vcmpneb:
9725   case Intrinsic::ppc_altivec_vcmpneh:
9726   case Intrinsic::ppc_altivec_vcmpnew:
9727   case Intrinsic::ppc_altivec_vcmpnezb:
9728   case Intrinsic::ppc_altivec_vcmpnezh:
9729   case Intrinsic::ppc_altivec_vcmpnezw:
9730     if (Subtarget.hasP9Altivec())
9731       switch (IntrinsicID) {
9732       default:
9733         llvm_unreachable("Unknown comparison intrinsic.");
9734       case Intrinsic::ppc_altivec_vcmpneb:
9735         CompareOpc = 7;
9736         break;
9737       case Intrinsic::ppc_altivec_vcmpneh:
9738         CompareOpc = 71;
9739         break;
9740       case Intrinsic::ppc_altivec_vcmpnew:
9741         CompareOpc = 135;
9742         break;
9743       case Intrinsic::ppc_altivec_vcmpnezb:
9744         CompareOpc = 263;
9745         break;
9746       case Intrinsic::ppc_altivec_vcmpnezh:
9747         CompareOpc = 327;
9748         break;
9749       case Intrinsic::ppc_altivec_vcmpnezw:
9750         CompareOpc = 391;
9751         break;
9752       }
9753     else
9754       return false;
9755     break;
9756   case Intrinsic::ppc_altivec_vcmpgefp:
9757     CompareOpc = 454;
9758     break;
9759   case Intrinsic::ppc_altivec_vcmpgtfp:
9760     CompareOpc = 710;
9761     break;
9762   case Intrinsic::ppc_altivec_vcmpgtsb:
9763     CompareOpc = 774;
9764     break;
9765   case Intrinsic::ppc_altivec_vcmpgtsh:
9766     CompareOpc = 838;
9767     break;
9768   case Intrinsic::ppc_altivec_vcmpgtsw:
9769     CompareOpc = 902;
9770     break;
9771   case Intrinsic::ppc_altivec_vcmpgtsd:
9772     if (Subtarget.hasP8Altivec())
9773       CompareOpc = 967;
9774     else
9775       return false;
9776     break;
9777   case Intrinsic::ppc_altivec_vcmpgtub:
9778     CompareOpc = 518;
9779     break;
9780   case Intrinsic::ppc_altivec_vcmpgtuh:
9781     CompareOpc = 582;
9782     break;
9783   case Intrinsic::ppc_altivec_vcmpgtuw:
9784     CompareOpc = 646;
9785     break;
9786   case Intrinsic::ppc_altivec_vcmpgtud:
9787     if (Subtarget.hasP8Altivec())
9788       CompareOpc = 711;
9789     else
9790       return false;
9791     break;
9792   case Intrinsic::ppc_altivec_vcmpequq_p:
9793   case Intrinsic::ppc_altivec_vcmpgtsq_p:
9794   case Intrinsic::ppc_altivec_vcmpgtuq_p:
9795     if (!Subtarget.isISA3_1())
9796       return false;
9797     switch (IntrinsicID) {
9798     default:
9799       llvm_unreachable("Unknown comparison intrinsic.");
9800     case Intrinsic::ppc_altivec_vcmpequq_p:
9801       CompareOpc = 455;
9802       break;
9803     case Intrinsic::ppc_altivec_vcmpgtsq_p:
9804       CompareOpc = 903;
9805       break;
9806     case Intrinsic::ppc_altivec_vcmpgtuq_p:
9807       CompareOpc = 647;
9808       break;
9809     }
9810     isDot = true;
9811     break;
9812   }
9813   return true;
9814 }
9815 
9816 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
9817 /// lower, do it, otherwise return null.
9818 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
9819                                                    SelectionDAG &DAG) const {
9820   unsigned IntrinsicID =
9821     cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
9822 
9823   SDLoc dl(Op);
9824 
9825   switch (IntrinsicID) {
9826   case Intrinsic::thread_pointer:
9827     // Reads the thread pointer register, used for __builtin_thread_pointer.
9828     if (Subtarget.isPPC64())
9829       return DAG.getRegister(PPC::X13, MVT::i64);
9830     return DAG.getRegister(PPC::R2, MVT::i32);
9831 
9832   case Intrinsic::ppc_mma_disassemble_acc:
9833   case Intrinsic::ppc_vsx_disassemble_pair: {
9834     int NumVecs = 2;
9835     SDValue WideVec = Op.getOperand(1);
9836     if (IntrinsicID == Intrinsic::ppc_mma_disassemble_acc) {
9837       NumVecs = 4;
9838       WideVec = DAG.getNode(PPCISD::XXMFACC, dl, MVT::v512i1, WideVec);
9839     }
9840     SmallVector<SDValue, 4> RetOps;
9841     for (int VecNo = 0; VecNo < NumVecs; VecNo++) {
9842       SDValue Extract = DAG.getNode(
9843           PPCISD::EXTRACT_VSX_REG, dl, MVT::v16i8, WideVec,
9844           DAG.getConstant(Subtarget.isLittleEndian() ? NumVecs - 1 - VecNo
9845                                                      : VecNo,
9846                           dl, MVT::i64));
9847       RetOps.push_back(Extract);
9848     }
9849     return DAG.getMergeValues(RetOps, dl);
9850   }
9851   }
9852 
9853   // If this is a lowered altivec predicate compare, CompareOpc is set to the
9854   // opcode number of the comparison.
9855   int CompareOpc;
9856   bool isDot;
9857   if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget))
9858     return SDValue();    // Don't custom lower most intrinsics.
9859 
9860   // If this is a non-dot comparison, make the VCMP node and we are done.
9861   if (!isDot) {
9862     SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
9863                               Op.getOperand(1), Op.getOperand(2),
9864                               DAG.getConstant(CompareOpc, dl, MVT::i32));
9865     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
9866   }
9867 
9868   // Create the PPCISD altivec 'dot' comparison node.
9869   SDValue Ops[] = {
9870     Op.getOperand(2),  // LHS
9871     Op.getOperand(3),  // RHS
9872     DAG.getConstant(CompareOpc, dl, MVT::i32)
9873   };
9874   EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
9875   SDValue CompNode = DAG.getNode(PPCISD::VCMP_rec, dl, VTs, Ops);
9876 
9877   // Now that we have the comparison, emit a copy from the CR to a GPR.
9878   // This is flagged to the above dot comparison.
9879   SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
9880                                 DAG.getRegister(PPC::CR6, MVT::i32),
9881                                 CompNode.getValue(1));
9882 
9883   // Unpack the result based on how the target uses it.
9884   unsigned BitNo;   // Bit # of CR6.
9885   bool InvertBit;   // Invert result?
9886   switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
9887   default:  // Can't happen, don't crash on invalid number though.
9888   case 0:   // Return the value of the EQ bit of CR6.
9889     BitNo = 0; InvertBit = false;
9890     break;
9891   case 1:   // Return the inverted value of the EQ bit of CR6.
9892     BitNo = 0; InvertBit = true;
9893     break;
9894   case 2:   // Return the value of the LT bit of CR6.
9895     BitNo = 2; InvertBit = false;
9896     break;
9897   case 3:   // Return the inverted value of the LT bit of CR6.
9898     BitNo = 2; InvertBit = true;
9899     break;
9900   }
9901 
9902   // Shift the bit into the low position.
9903   Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
9904                       DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
9905   // Isolate the bit.
9906   Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
9907                       DAG.getConstant(1, dl, MVT::i32));
9908 
9909   // If we are supposed to, toggle the bit.
9910   if (InvertBit)
9911     Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
9912                         DAG.getConstant(1, dl, MVT::i32));
9913   return Flags;
9914 }
9915 
9916 SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
9917                                                SelectionDAG &DAG) const {
9918   // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to
9919   // the beginning of the argument list.
9920   int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
9921   SDLoc DL(Op);
9922   switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) {
9923   case Intrinsic::ppc_cfence: {
9924     assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
9925     assert(Subtarget.isPPC64() && "Only 64-bit is supported for now.");
9926     return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other,
9927                                       DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
9928                                                   Op.getOperand(ArgStart + 1)),
9929                                       Op.getOperand(0)),
9930                    0);
9931   }
9932   default:
9933     break;
9934   }
9935   return SDValue();
9936 }
9937 
9938 // Lower scalar BSWAP64 to xxbrd.
9939 SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const {
9940   SDLoc dl(Op);
9941   // MTVSRDD
9942   Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0),
9943                    Op.getOperand(0));
9944   // XXBRD
9945   Op = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Op);
9946   // MFVSRD
9947   int VectorIndex = 0;
9948   if (Subtarget.isLittleEndian())
9949     VectorIndex = 1;
9950   Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op,
9951                    DAG.getTargetConstant(VectorIndex, dl, MVT::i32));
9952   return Op;
9953 }
9954 
9955 // ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be
9956 // compared to a value that is atomically loaded (atomic loads zero-extend).
9957 SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op,
9958                                                 SelectionDAG &DAG) const {
9959   assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP &&
9960          "Expecting an atomic compare-and-swap here.");
9961   SDLoc dl(Op);
9962   auto *AtomicNode = cast<AtomicSDNode>(Op.getNode());
9963   EVT MemVT = AtomicNode->getMemoryVT();
9964   if (MemVT.getSizeInBits() >= 32)
9965     return Op;
9966 
9967   SDValue CmpOp = Op.getOperand(2);
9968   // If this is already correctly zero-extended, leave it alone.
9969   auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits());
9970   if (DAG.MaskedValueIsZero(CmpOp, HighBits))
9971     return Op;
9972 
9973   // Clear the high bits of the compare operand.
9974   unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1;
9975   SDValue NewCmpOp =
9976     DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp,
9977                 DAG.getConstant(MaskVal, dl, MVT::i32));
9978 
9979   // Replace the existing compare operand with the properly zero-extended one.
9980   SmallVector<SDValue, 4> Ops;
9981   for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
9982     Ops.push_back(AtomicNode->getOperand(i));
9983   Ops[2] = NewCmpOp;
9984   MachineMemOperand *MMO = AtomicNode->getMemOperand();
9985   SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other);
9986   auto NodeTy =
9987     (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16;
9988   return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO);
9989 }
9990 
9991 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
9992                                                  SelectionDAG &DAG) const {
9993   SDLoc dl(Op);
9994   // Create a stack slot that is 16-byte aligned.
9995   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
9996   int FrameIdx = MFI.CreateStackObject(16, Align(16), false);
9997   EVT PtrVT = getPointerTy(DAG.getDataLayout());
9998   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
9999 
10000   // Store the input value into Value#0 of the stack slot.
10001   SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
10002                                MachinePointerInfo());
10003   // Load it out.
10004   return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
10005 }
10006 
10007 SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
10008                                                   SelectionDAG &DAG) const {
10009   assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
10010          "Should only be called for ISD::INSERT_VECTOR_ELT");
10011 
10012   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));
10013   // We have legal lowering for constant indices but not for variable ones.
10014   if (!C)
10015     return SDValue();
10016 
10017   EVT VT = Op.getValueType();
10018   SDLoc dl(Op);
10019   SDValue V1 = Op.getOperand(0);
10020   SDValue V2 = Op.getOperand(1);
10021   // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types.
10022   if (VT == MVT::v8i16 || VT == MVT::v16i8) {
10023     SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2);
10024     unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8;
10025     unsigned InsertAtElement = C->getZExtValue();
10026     unsigned InsertAtByte = InsertAtElement * BytesInEachElement;
10027     if (Subtarget.isLittleEndian()) {
10028       InsertAtByte = (16 - BytesInEachElement) - InsertAtByte;
10029     }
10030     return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz,
10031                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
10032   }
10033   return Op;
10034 }
10035 
10036 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
10037                                            SelectionDAG &DAG) const {
10038   SDLoc dl(Op);
10039   LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
10040   SDValue LoadChain = LN->getChain();
10041   SDValue BasePtr = LN->getBasePtr();
10042   EVT VT = Op.getValueType();
10043 
10044   if (VT != MVT::v256i1 && VT != MVT::v512i1)
10045     return Op;
10046 
10047   // Type v256i1 is used for pairs and v512i1 is used for accumulators.
10048   // Here we create 2 or 4 v16i8 loads to load the pair or accumulator value in
10049   // 2 or 4 vsx registers.
10050   assert((VT != MVT::v512i1 || Subtarget.hasMMA()) &&
10051          "Type unsupported without MMA");
10052   assert((VT != MVT::v256i1 || Subtarget.pairedVectorMemops()) &&
10053          "Type unsupported without paired vector support");
10054   Align Alignment = LN->getAlign();
10055   SmallVector<SDValue, 4> Loads;
10056   SmallVector<SDValue, 4> LoadChains;
10057   unsigned NumVecs = VT.getSizeInBits() / 128;
10058   for (unsigned Idx = 0; Idx < NumVecs; ++Idx) {
10059     SDValue Load =
10060         DAG.getLoad(MVT::v16i8, dl, LoadChain, BasePtr,
10061                     LN->getPointerInfo().getWithOffset(Idx * 16),
10062                     commonAlignment(Alignment, Idx * 16),
10063                     LN->getMemOperand()->getFlags(), LN->getAAInfo());
10064     BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
10065                           DAG.getConstant(16, dl, BasePtr.getValueType()));
10066     Loads.push_back(Load);
10067     LoadChains.push_back(Load.getValue(1));
10068   }
10069   if (Subtarget.isLittleEndian()) {
10070     std::reverse(Loads.begin(), Loads.end());
10071     std::reverse(LoadChains.begin(), LoadChains.end());
10072   }
10073   SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
10074   SDValue Value =
10075       DAG.getNode(VT == MVT::v512i1 ? PPCISD::ACC_BUILD : PPCISD::PAIR_BUILD,
10076                   dl, VT, Loads);
10077   SDValue RetOps[] = {Value, TF};
10078   return DAG.getMergeValues(RetOps, dl);
10079 }
10080 
10081 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
10082                                             SelectionDAG &DAG) const {
10083   SDLoc dl(Op);
10084   StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
10085   SDValue StoreChain = SN->getChain();
10086   SDValue BasePtr = SN->getBasePtr();
10087   SDValue Value = SN->getValue();
10088   EVT StoreVT = Value.getValueType();
10089 
10090   if (StoreVT != MVT::v256i1 && StoreVT != MVT::v512i1)
10091     return Op;
10092 
10093   // Type v256i1 is used for pairs and v512i1 is used for accumulators.
10094   // Here we create 2 or 4 v16i8 stores to store the pair or accumulator
10095   // underlying registers individually.
10096   assert((StoreVT != MVT::v512i1 || Subtarget.hasMMA()) &&
10097          "Type unsupported without MMA");
10098   assert((StoreVT != MVT::v256i1 || Subtarget.pairedVectorMemops()) &&
10099          "Type unsupported without paired vector support");
10100   Align Alignment = SN->getAlign();
10101   SmallVector<SDValue, 4> Stores;
10102   unsigned NumVecs = 2;
10103   if (StoreVT == MVT::v512i1) {
10104     Value = DAG.getNode(PPCISD::XXMFACC, dl, MVT::v512i1, Value);
10105     NumVecs = 4;
10106   }
10107   for (unsigned Idx = 0; Idx < NumVecs; ++Idx) {
10108     unsigned VecNum = Subtarget.isLittleEndian() ? NumVecs - 1 - Idx : Idx;
10109     SDValue Elt = DAG.getNode(PPCISD::EXTRACT_VSX_REG, dl, MVT::v16i8, Value,
10110                               DAG.getConstant(VecNum, dl, MVT::i64));
10111     SDValue Store =
10112         DAG.getStore(StoreChain, dl, Elt, BasePtr,
10113                      SN->getPointerInfo().getWithOffset(Idx * 16),
10114                      commonAlignment(Alignment, Idx * 16),
10115                      SN->getMemOperand()->getFlags(), SN->getAAInfo());
10116     BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
10117                           DAG.getConstant(16, dl, BasePtr.getValueType()));
10118     Stores.push_back(Store);
10119   }
10120   SDValue TF = DAG.getTokenFactor(dl, Stores);
10121   return TF;
10122 }
10123 
10124 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
10125   SDLoc dl(Op);
10126   if (Op.getValueType() == MVT::v4i32) {
10127     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
10128 
10129     SDValue Zero = getCanonicalConstSplat(0, 1, MVT::v4i32, DAG, dl);
10130     // +16 as shift amt.
10131     SDValue Neg16 = getCanonicalConstSplat(-16, 4, MVT::v4i32, DAG, dl);
10132     SDValue RHSSwap =   // = vrlw RHS, 16
10133       BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
10134 
10135     // Shrinkify inputs to v8i16.
10136     LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
10137     RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
10138     RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);
10139 
10140     // Low parts multiplied together, generating 32-bit results (we ignore the
10141     // top parts).
10142     SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
10143                                         LHS, RHS, DAG, dl, MVT::v4i32);
10144 
10145     SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
10146                                       LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
10147     // Shift the high parts up 16 bits.
10148     HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
10149                               Neg16, DAG, dl);
10150     return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
10151   } else if (Op.getValueType() == MVT::v16i8) {
10152     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
10153     bool isLittleEndian = Subtarget.isLittleEndian();
10154 
10155     // Multiply the even 8-bit parts, producing 16-bit sums.
10156     SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
10157                                            LHS, RHS, DAG, dl, MVT::v8i16);
10158     EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);
10159 
10160     // Multiply the odd 8-bit parts, producing 16-bit sums.
10161     SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
10162                                           LHS, RHS, DAG, dl, MVT::v8i16);
10163     OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);
10164 
10165     // Merge the results together.  Because vmuleub and vmuloub are
10166     // instructions with a big-endian bias, we must reverse the
10167     // element numbering and reverse the meaning of "odd" and "even"
10168     // when generating little endian code.
10169     int Ops[16];
10170     for (unsigned i = 0; i != 8; ++i) {
10171       if (isLittleEndian) {
10172         Ops[i*2  ] = 2*i;
10173         Ops[i*2+1] = 2*i+16;
10174       } else {
10175         Ops[i*2  ] = 2*i+1;
10176         Ops[i*2+1] = 2*i+1+16;
10177       }
10178     }
10179     if (isLittleEndian)
10180       return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
10181     else
10182       return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
10183   } else {
10184     llvm_unreachable("Unknown mul to lower!");
10185   }
10186 }
10187 
10188 SDValue PPCTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
10189   bool IsStrict = Op->isStrictFPOpcode();
10190   if (Op.getOperand(IsStrict ? 1 : 0).getValueType() == MVT::f128 &&
10191       !Subtarget.hasP9Vector())
10192     return SDValue();
10193 
10194   return Op;
10195 }
10196 
10197 // Custom lowering for fpext vf32 to v2f64
10198 SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
10199 
10200   assert(Op.getOpcode() == ISD::FP_EXTEND &&
10201          "Should only be called for ISD::FP_EXTEND");
10202 
10203   // FIXME: handle extends from half precision float vectors on P9.
10204   // We only want to custom lower an extend from v2f32 to v2f64.
10205   if (Op.getValueType() != MVT::v2f64 ||
10206       Op.getOperand(0).getValueType() != MVT::v2f32)
10207     return SDValue();
10208 
10209   SDLoc dl(Op);
10210   SDValue Op0 = Op.getOperand(0);
10211 
10212   switch (Op0.getOpcode()) {
10213   default:
10214     return SDValue();
10215   case ISD::EXTRACT_SUBVECTOR: {
10216     assert(Op0.getNumOperands() == 2 &&
10217            isa<ConstantSDNode>(Op0->getOperand(1)) &&
10218            "Node should have 2 operands with second one being a constant!");
10219 
10220     if (Op0.getOperand(0).getValueType() != MVT::v4f32)
10221       return SDValue();
10222 
10223     // Custom lower is only done for high or low doubleword.
10224     int Idx = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
10225     if (Idx % 2 != 0)
10226       return SDValue();
10227 
10228     // Since input is v4f32, at this point Idx is either 0 or 2.
10229     // Shift to get the doubleword position we want.
10230     int DWord = Idx >> 1;
10231 
10232     // High and low word positions are different on little endian.
10233     if (Subtarget.isLittleEndian())
10234       DWord ^= 0x1;
10235 
10236     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64,
10237                        Op0.getOperand(0), DAG.getConstant(DWord, dl, MVT::i32));
10238   }
10239   case ISD::FADD:
10240   case ISD::FMUL:
10241   case ISD::FSUB: {
10242     SDValue NewLoad[2];
10243     for (unsigned i = 0, ie = Op0.getNumOperands(); i != ie; ++i) {
10244       // Ensure both input are loads.
10245       SDValue LdOp = Op0.getOperand(i);
10246       if (LdOp.getOpcode() != ISD::LOAD)
10247         return SDValue();
10248       // Generate new load node.
10249       LoadSDNode *LD = cast<LoadSDNode>(LdOp);
10250       SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
10251       NewLoad[i] = DAG.getMemIntrinsicNode(
10252           PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
10253           LD->getMemoryVT(), LD->getMemOperand());
10254     }
10255     SDValue NewOp =
10256         DAG.getNode(Op0.getOpcode(), SDLoc(Op0), MVT::v4f32, NewLoad[0],
10257                     NewLoad[1], Op0.getNode()->getFlags());
10258     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewOp,
10259                        DAG.getConstant(0, dl, MVT::i32));
10260   }
10261   case ISD::LOAD: {
10262     LoadSDNode *LD = cast<LoadSDNode>(Op0);
10263     SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
10264     SDValue NewLd = DAG.getMemIntrinsicNode(
10265         PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
10266         LD->getMemoryVT(), LD->getMemOperand());
10267     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewLd,
10268                        DAG.getConstant(0, dl, MVT::i32));
10269   }
10270   }
10271   llvm_unreachable("ERROR:Should return for all cases within swtich.");
10272 }
10273 
10274 /// LowerOperation - Provide custom lowering hooks for some operations.
10275 ///
10276 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
10277   switch (Op.getOpcode()) {
10278   default: llvm_unreachable("Wasn't expecting to be able to lower this!");
10279   case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
10280   case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
10281   case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
10282   case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
10283   case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
10284   case ISD::SETCC:              return LowerSETCC(Op, DAG);
10285   case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
10286   case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
10287 
10288   // Variable argument lowering.
10289   case ISD::VASTART:            return LowerVASTART(Op, DAG);
10290   case ISD::VAARG:              return LowerVAARG(Op, DAG);
10291   case ISD::VACOPY:             return LowerVACOPY(Op, DAG);
10292 
10293   case ISD::STACKRESTORE:       return LowerSTACKRESTORE(Op, DAG);
10294   case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
10295   case ISD::GET_DYNAMIC_AREA_OFFSET:
10296     return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
10297 
10298   // Exception handling lowering.
10299   case ISD::EH_DWARF_CFA:       return LowerEH_DWARF_CFA(Op, DAG);
10300   case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
10301   case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);
10302 
10303   case ISD::LOAD:               return LowerLOAD(Op, DAG);
10304   case ISD::STORE:              return LowerSTORE(Op, DAG);
10305   case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
10306   case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
10307   case ISD::STRICT_FP_TO_UINT:
10308   case ISD::STRICT_FP_TO_SINT:
10309   case ISD::FP_TO_UINT:
10310   case ISD::FP_TO_SINT:         return LowerFP_TO_INT(Op, DAG, SDLoc(Op));
10311   case ISD::STRICT_UINT_TO_FP:
10312   case ISD::STRICT_SINT_TO_FP:
10313   case ISD::UINT_TO_FP:
10314   case ISD::SINT_TO_FP:         return LowerINT_TO_FP(Op, DAG);
10315   case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);
10316 
10317   // Lower 64-bit shifts.
10318   case ISD::SHL_PARTS:          return LowerSHL_PARTS(Op, DAG);
10319   case ISD::SRL_PARTS:          return LowerSRL_PARTS(Op, DAG);
10320   case ISD::SRA_PARTS:          return LowerSRA_PARTS(Op, DAG);
10321 
10322   case ISD::FSHL:               return LowerFunnelShift(Op, DAG);
10323   case ISD::FSHR:               return LowerFunnelShift(Op, DAG);
10324 
10325   // Vector-related lowering.
10326   case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
10327   case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
10328   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
10329   case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
10330   case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
10331   case ISD::MUL:                return LowerMUL(Op, DAG);
10332   case ISD::FP_EXTEND:          return LowerFP_EXTEND(Op, DAG);
10333   case ISD::STRICT_FP_ROUND:
10334   case ISD::FP_ROUND:
10335     return LowerFP_ROUND(Op, DAG);
10336   case ISD::ROTL:               return LowerROTL(Op, DAG);
10337 
10338   // For counter-based loop handling.
10339   case ISD::INTRINSIC_W_CHAIN:  return SDValue();
10340 
10341   case ISD::BITCAST:            return LowerBITCAST(Op, DAG);
10342 
10343   // Frame & Return address.
10344   case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
10345   case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
10346 
10347   case ISD::INTRINSIC_VOID:
10348     return LowerINTRINSIC_VOID(Op, DAG);
10349   case ISD::BSWAP:
10350     return LowerBSWAP(Op, DAG);
10351   case ISD::ATOMIC_CMP_SWAP:
10352     return LowerATOMIC_CMP_SWAP(Op, DAG);
10353   }
10354 }
10355 
10356 void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
10357                                            SmallVectorImpl<SDValue>&Results,
10358                                            SelectionDAG &DAG) const {
10359   SDLoc dl(N);
10360   switch (N->getOpcode()) {
10361   default:
10362     llvm_unreachable("Do not know how to custom type legalize this operation!");
10363   case ISD::READCYCLECOUNTER: {
10364     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
10365     SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0));
10366 
10367     Results.push_back(
10368         DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, RTB, RTB.getValue(1)));
10369     Results.push_back(RTB.getValue(2));
10370     break;
10371   }
10372   case ISD::INTRINSIC_W_CHAIN: {
10373     if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
10374         Intrinsic::loop_decrement)
10375       break;
10376 
10377     assert(N->getValueType(0) == MVT::i1 &&
10378            "Unexpected result type for CTR decrement intrinsic");
10379     EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
10380                                  N->getValueType(0));
10381     SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
10382     SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
10383                                  N->getOperand(1));
10384 
10385     Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt));
10386     Results.push_back(NewInt.getValue(1));
10387     break;
10388   }
10389   case ISD::VAARG: {
10390     if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
10391       return;
10392 
10393     EVT VT = N->getValueType(0);
10394 
10395     if (VT == MVT::i64) {
10396       SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);
10397 
10398       Results.push_back(NewNode);
10399       Results.push_back(NewNode.getValue(1));
10400     }
10401     return;
10402   }
10403   case ISD::STRICT_FP_TO_SINT:
10404   case ISD::STRICT_FP_TO_UINT:
10405   case ISD::FP_TO_SINT:
10406   case ISD::FP_TO_UINT:
10407     // LowerFP_TO_INT() can only handle f32 and f64.
10408     if (N->getOperand(N->isStrictFPOpcode() ? 1 : 0).getValueType() ==
10409         MVT::ppcf128)
10410       return;
10411     Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
10412     return;
10413   case ISD::TRUNCATE: {
10414     if (!N->getValueType(0).isVector())
10415       return;
10416     SDValue Lowered = LowerTRUNCATEVector(SDValue(N, 0), DAG);
10417     if (Lowered)
10418       Results.push_back(Lowered);
10419     return;
10420   }
10421   case ISD::FSHL:
10422   case ISD::FSHR:
10423     // Don't handle funnel shifts here.
10424     return;
10425   case ISD::BITCAST:
10426     // Don't handle bitcast here.
10427     return;
10428   case ISD::FP_EXTEND:
10429     SDValue Lowered = LowerFP_EXTEND(SDValue(N, 0), DAG);
10430     if (Lowered)
10431       Results.push_back(Lowered);
10432     return;
10433   }
10434 }
10435 
10436 //===----------------------------------------------------------------------===//
10437 //  Other Lowering Code
10438 //===----------------------------------------------------------------------===//
10439 
10440 static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
10441   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
10442   Function *Func = Intrinsic::getDeclaration(M, Id);
10443   return Builder.CreateCall(Func, {});
10444 }
10445 
10446 // The mappings for emitLeading/TrailingFence is taken from
10447 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
10448 Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
10449                                                  Instruction *Inst,
10450                                                  AtomicOrdering Ord) const {
10451   if (Ord == AtomicOrdering::SequentiallyConsistent)
10452     return callIntrinsic(Builder, Intrinsic::ppc_sync);
10453   if (isReleaseOrStronger(Ord))
10454     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
10455   return nullptr;
10456 }
10457 
10458 Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
10459                                                   Instruction *Inst,
10460                                                   AtomicOrdering Ord) const {
10461   if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
10462     // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
10463     // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
10464     // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
10465     if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
10466       return Builder.CreateCall(
10467           Intrinsic::getDeclaration(
10468               Builder.GetInsertBlock()->getParent()->getParent(),
10469               Intrinsic::ppc_cfence, {Inst->getType()}),
10470           {Inst});
10471     // FIXME: Can use isync for rmw operation.
10472     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
10473   }
10474   return nullptr;
10475 }
10476 
10477 MachineBasicBlock *
10478 PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
10479                                     unsigned AtomicSize,
10480                                     unsigned BinOpcode,
10481                                     unsigned CmpOpcode,
10482                                     unsigned CmpPred) const {
10483   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
10484   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10485 
10486   auto LoadMnemonic = PPC::LDARX;
10487   auto StoreMnemonic = PPC::STDCX;
10488   switch (AtomicSize) {
10489   default:
10490     llvm_unreachable("Unexpected size of atomic entity");
10491   case 1:
10492     LoadMnemonic = PPC::LBARX;
10493     StoreMnemonic = PPC::STBCX;
10494     assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
10495     break;
10496   case 2:
10497     LoadMnemonic = PPC::LHARX;
10498     StoreMnemonic = PPC::STHCX;
10499     assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
10500     break;
10501   case 4:
10502     LoadMnemonic = PPC::LWARX;
10503     StoreMnemonic = PPC::STWCX;
10504     break;
10505   case 8:
10506     LoadMnemonic = PPC::LDARX;
10507     StoreMnemonic = PPC::STDCX;
10508     break;
10509   }
10510 
10511   const BasicBlock *LLVM_BB = BB->getBasicBlock();
10512   MachineFunction *F = BB->getParent();
10513   MachineFunction::iterator It = ++BB->getIterator();
10514 
10515   Register dest = MI.getOperand(0).getReg();
10516   Register ptrA = MI.getOperand(1).getReg();
10517   Register ptrB = MI.getOperand(2).getReg();
10518   Register incr = MI.getOperand(3).getReg();
10519   DebugLoc dl = MI.getDebugLoc();
10520 
10521   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
10522   MachineBasicBlock *loop2MBB =
10523     CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
10524   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
10525   F->insert(It, loopMBB);
10526   if (CmpOpcode)
10527     F->insert(It, loop2MBB);
10528   F->insert(It, exitMBB);
10529   exitMBB->splice(exitMBB->begin(), BB,
10530                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
10531   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
10532 
10533   MachineRegisterInfo &RegInfo = F->getRegInfo();
10534   Register TmpReg = (!BinOpcode) ? incr :
10535     RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass
10536                                            : &PPC::GPRCRegClass);
10537 
10538   //  thisMBB:
10539   //   ...
10540   //   fallthrough --> loopMBB
10541   BB->addSuccessor(loopMBB);
10542 
10543   //  loopMBB:
10544   //   l[wd]arx dest, ptr
10545   //   add r0, dest, incr
10546   //   st[wd]cx. r0, ptr
10547   //   bne- loopMBB
10548   //   fallthrough --> exitMBB
10549 
10550   // For max/min...
10551   //  loopMBB:
10552   //   l[wd]arx dest, ptr
10553   //   cmpl?[wd] incr, dest
10554   //   bgt exitMBB
10555   //  loop2MBB:
10556   //   st[wd]cx. dest, ptr
10557   //   bne- loopMBB
10558   //   fallthrough --> exitMBB
10559 
10560   BB = loopMBB;
10561   BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
10562     .addReg(ptrA).addReg(ptrB);
10563   if (BinOpcode)
10564     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
10565   if (CmpOpcode) {
10566     // Signed comparisons of byte or halfword values must be sign-extended.
10567     if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
10568       Register ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
10569       BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
10570               ExtReg).addReg(dest);
10571       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
10572         .addReg(incr).addReg(ExtReg);
10573     } else
10574       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
10575         .addReg(incr).addReg(dest);
10576 
10577     BuildMI(BB, dl, TII->get(PPC::BCC))
10578       .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
10579     BB->addSuccessor(loop2MBB);
10580     BB->addSuccessor(exitMBB);
10581     BB = loop2MBB;
10582   }
10583   BuildMI(BB, dl, TII->get(StoreMnemonic))
10584     .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
10585   BuildMI(BB, dl, TII->get(PPC::BCC))
10586     .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
10587   BB->addSuccessor(loopMBB);
10588   BB->addSuccessor(exitMBB);
10589 
10590   //  exitMBB:
10591   //   ...
10592   BB = exitMBB;
10593   return BB;
10594 }
10595 
10596 MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary(
10597     MachineInstr &MI, MachineBasicBlock *BB,
10598     bool is8bit, // operation
10599     unsigned BinOpcode, unsigned CmpOpcode, unsigned CmpPred) const {
10600   // If we support part-word atomic mnemonics, just use them
10601   if (Subtarget.hasPartwordAtomics())
10602     return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, CmpOpcode,
10603                             CmpPred);
10604 
10605   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
10606   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10607   // In 64 bit mode we have to use 64 bits for addresses, even though the
10608   // lwarx/stwcx are 32 bits.  With the 32-bit atomics we can use address
10609   // registers without caring whether they're 32 or 64, but here we're
10610   // doing actual arithmetic on the addresses.
10611   bool is64bit = Subtarget.isPPC64();
10612   bool isLittleEndian = Subtarget.isLittleEndian();
10613   unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
10614 
10615   const BasicBlock *LLVM_BB = BB->getBasicBlock();
10616   MachineFunction *F = BB->getParent();
10617   MachineFunction::iterator It = ++BB->getIterator();
10618 
10619   Register dest = MI.getOperand(0).getReg();
10620   Register ptrA = MI.getOperand(1).getReg();
10621   Register ptrB = MI.getOperand(2).getReg();
10622   Register incr = MI.getOperand(3).getReg();
10623   DebugLoc dl = MI.getDebugLoc();
10624 
10625   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
10626   MachineBasicBlock *loop2MBB =
10627       CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
10628   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
10629   F->insert(It, loopMBB);
10630   if (CmpOpcode)
10631     F->insert(It, loop2MBB);
10632   F->insert(It, exitMBB);
10633   exitMBB->splice(exitMBB->begin(), BB,
10634                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
10635   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
10636 
10637   MachineRegisterInfo &RegInfo = F->getRegInfo();
10638   const TargetRegisterClass *RC =
10639       is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
10640   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
10641 
10642   Register PtrReg = RegInfo.createVirtualRegister(RC);
10643   Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
10644   Register ShiftReg =
10645       isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
10646   Register Incr2Reg = RegInfo.createVirtualRegister(GPRC);
10647   Register MaskReg = RegInfo.createVirtualRegister(GPRC);
10648   Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
10649   Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
10650   Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
10651   Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
10652   Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
10653   Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
10654   Register Ptr1Reg;
10655   Register TmpReg =
10656       (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC);
10657 
10658   //  thisMBB:
10659   //   ...
10660   //   fallthrough --> loopMBB
10661   BB->addSuccessor(loopMBB);
10662 
10663   // The 4-byte load must be aligned, while a char or short may be
10664   // anywhere in the word.  Hence all this nasty bookkeeping code.
10665   //   add ptr1, ptrA, ptrB [copy if ptrA==0]
10666   //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
10667   //   xori shift, shift1, 24 [16]
10668   //   rlwinm ptr, ptr1, 0, 0, 29
10669   //   slw incr2, incr, shift
10670   //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
10671   //   slw mask, mask2, shift
10672   //  loopMBB:
10673   //   lwarx tmpDest, ptr
10674   //   add tmp, tmpDest, incr2
10675   //   andc tmp2, tmpDest, mask
10676   //   and tmp3, tmp, mask
10677   //   or tmp4, tmp3, tmp2
10678   //   stwcx. tmp4, ptr
10679   //   bne- loopMBB
10680   //   fallthrough --> exitMBB
10681   //   srw dest, tmpDest, shift
10682   if (ptrA != ZeroReg) {
10683     Ptr1Reg = RegInfo.createVirtualRegister(RC);
10684     BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
10685         .addReg(ptrA)
10686         .addReg(ptrB);
10687   } else {
10688     Ptr1Reg = ptrB;
10689   }
10690   // We need use 32-bit subregister to avoid mismatch register class in 64-bit
10691   // mode.
10692   BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
10693       .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
10694       .addImm(3)
10695       .addImm(27)
10696       .addImm(is8bit ? 28 : 27);
10697   if (!isLittleEndian)
10698     BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
10699         .addReg(Shift1Reg)
10700         .addImm(is8bit ? 24 : 16);
10701   if (is64bit)
10702     BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
10703         .addReg(Ptr1Reg)
10704         .addImm(0)
10705         .addImm(61);
10706   else
10707     BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
10708         .addReg(Ptr1Reg)
10709         .addImm(0)
10710         .addImm(0)
10711         .addImm(29);
10712   BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg).addReg(incr).addReg(ShiftReg);
10713   if (is8bit)
10714     BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
10715   else {
10716     BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
10717     BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
10718         .addReg(Mask3Reg)
10719         .addImm(65535);
10720   }
10721   BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
10722       .addReg(Mask2Reg)
10723       .addReg(ShiftReg);
10724 
10725   BB = loopMBB;
10726   BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
10727       .addReg(ZeroReg)
10728       .addReg(PtrReg);
10729   if (BinOpcode)
10730     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
10731         .addReg(Incr2Reg)
10732         .addReg(TmpDestReg);
10733   BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
10734       .addReg(TmpDestReg)
10735       .addReg(MaskReg);
10736   BuildMI(BB, dl, TII->get(PPC::AND), Tmp3Reg).addReg(TmpReg).addReg(MaskReg);
10737   if (CmpOpcode) {
10738     // For unsigned comparisons, we can directly compare the shifted values.
10739     // For signed comparisons we shift and sign extend.
10740     Register SReg = RegInfo.createVirtualRegister(GPRC);
10741     BuildMI(BB, dl, TII->get(PPC::AND), SReg)
10742         .addReg(TmpDestReg)
10743         .addReg(MaskReg);
10744     unsigned ValueReg = SReg;
10745     unsigned CmpReg = Incr2Reg;
10746     if (CmpOpcode == PPC::CMPW) {
10747       ValueReg = RegInfo.createVirtualRegister(GPRC);
10748       BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
10749           .addReg(SReg)
10750           .addReg(ShiftReg);
10751       Register ValueSReg = RegInfo.createVirtualRegister(GPRC);
10752       BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
10753           .addReg(ValueReg);
10754       ValueReg = ValueSReg;
10755       CmpReg = incr;
10756     }
10757     BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
10758         .addReg(CmpReg)
10759         .addReg(ValueReg);
10760     BuildMI(BB, dl, TII->get(PPC::BCC))
10761         .addImm(CmpPred)
10762         .addReg(PPC::CR0)
10763         .addMBB(exitMBB);
10764     BB->addSuccessor(loop2MBB);
10765     BB->addSuccessor(exitMBB);
10766     BB = loop2MBB;
10767   }
10768   BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg).addReg(Tmp3Reg).addReg(Tmp2Reg);
10769   BuildMI(BB, dl, TII->get(PPC::STWCX))
10770       .addReg(Tmp4Reg)
10771       .addReg(ZeroReg)
10772       .addReg(PtrReg);
10773   BuildMI(BB, dl, TII->get(PPC::BCC))
10774       .addImm(PPC::PRED_NE)
10775       .addReg(PPC::CR0)
10776       .addMBB(loopMBB);
10777   BB->addSuccessor(loopMBB);
10778   BB->addSuccessor(exitMBB);
10779 
10780   //  exitMBB:
10781   //   ...
10782   BB = exitMBB;
10783   BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
10784       .addReg(TmpDestReg)
10785       .addReg(ShiftReg);
10786   return BB;
10787 }
10788 
10789 llvm::MachineBasicBlock *
10790 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
10791                                     MachineBasicBlock *MBB) const {
10792   DebugLoc DL = MI.getDebugLoc();
10793   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10794   const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
10795 
10796   MachineFunction *MF = MBB->getParent();
10797   MachineRegisterInfo &MRI = MF->getRegInfo();
10798 
10799   const BasicBlock *BB = MBB->getBasicBlock();
10800   MachineFunction::iterator I = ++MBB->getIterator();
10801 
10802   Register DstReg = MI.getOperand(0).getReg();
10803   const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
10804   assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
10805   Register mainDstReg = MRI.createVirtualRegister(RC);
10806   Register restoreDstReg = MRI.createVirtualRegister(RC);
10807 
10808   MVT PVT = getPointerTy(MF->getDataLayout());
10809   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
10810          "Invalid Pointer Size!");
10811   // For v = setjmp(buf), we generate
10812   //
10813   // thisMBB:
10814   //  SjLjSetup mainMBB
10815   //  bl mainMBB
10816   //  v_restore = 1
10817   //  b sinkMBB
10818   //
10819   // mainMBB:
10820   //  buf[LabelOffset] = LR
10821   //  v_main = 0
10822   //
10823   // sinkMBB:
10824   //  v = phi(main, restore)
10825   //
10826 
10827   MachineBasicBlock *thisMBB = MBB;
10828   MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
10829   MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
10830   MF->insert(I, mainMBB);
10831   MF->insert(I, sinkMBB);
10832 
10833   MachineInstrBuilder MIB;
10834 
10835   // Transfer the remainder of BB and its successor edges to sinkMBB.
10836   sinkMBB->splice(sinkMBB->begin(), MBB,
10837                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
10838   sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
10839 
10840   // Note that the structure of the jmp_buf used here is not compatible
10841   // with that used by libc, and is not designed to be. Specifically, it
10842   // stores only those 'reserved' registers that LLVM does not otherwise
10843   // understand how to spill. Also, by convention, by the time this
10844   // intrinsic is called, Clang has already stored the frame address in the
10845   // first slot of the buffer and stack address in the third. Following the
10846   // X86 target code, we'll store the jump address in the second slot. We also
10847   // need to save the TOC pointer (R2) to handle jumps between shared
10848   // libraries, and that will be stored in the fourth slot. The thread
10849   // identifier (R13) is not affected.
10850 
10851   // thisMBB:
10852   const int64_t LabelOffset = 1 * PVT.getStoreSize();
10853   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
10854   const int64_t BPOffset    = 4 * PVT.getStoreSize();
10855 
10856   // Prepare IP either in reg.
10857   const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
10858   Register LabelReg = MRI.createVirtualRegister(PtrRC);
10859   Register BufReg = MI.getOperand(1).getReg();
10860 
10861   if (Subtarget.is64BitELFABI()) {
10862     setUsesTOCBasePtr(*MBB->getParent());
10863     MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
10864               .addReg(PPC::X2)
10865               .addImm(TOCOffset)
10866               .addReg(BufReg)
10867               .cloneMemRefs(MI);
10868   }
10869 
10870   // Naked functions never have a base pointer, and so we use r1. For all
10871   // other functions, this decision must be delayed until during PEI.
10872   unsigned BaseReg;
10873   if (MF->getFunction().hasFnAttribute(Attribute::Naked))
10874     BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
10875   else
10876     BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;
10877 
10878   MIB = BuildMI(*thisMBB, MI, DL,
10879                 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
10880             .addReg(BaseReg)
10881             .addImm(BPOffset)
10882             .addReg(BufReg)
10883             .cloneMemRefs(MI);
10884 
10885   // Setup
10886   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
10887   MIB.addRegMask(TRI->getNoPreservedMask());
10888 
10889   BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);
10890 
10891   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
10892           .addMBB(mainMBB);
10893   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);
10894 
10895   thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
10896   thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());
10897 
10898   // mainMBB:
10899   //  mainDstReg = 0
10900   MIB =
10901       BuildMI(mainMBB, DL,
10902               TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
10903 
10904   // Store IP
10905   if (Subtarget.isPPC64()) {
10906     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
10907             .addReg(LabelReg)
10908             .addImm(LabelOffset)
10909             .addReg(BufReg);
10910   } else {
10911     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
10912             .addReg(LabelReg)
10913             .addImm(LabelOffset)
10914             .addReg(BufReg);
10915   }
10916   MIB.cloneMemRefs(MI);
10917 
10918   BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
10919   mainMBB->addSuccessor(sinkMBB);
10920 
10921   // sinkMBB:
10922   BuildMI(*sinkMBB, sinkMBB->begin(), DL,
10923           TII->get(PPC::PHI), DstReg)
10924     .addReg(mainDstReg).addMBB(mainMBB)
10925     .addReg(restoreDstReg).addMBB(thisMBB);
10926 
10927   MI.eraseFromParent();
10928   return sinkMBB;
10929 }
10930 
10931 MachineBasicBlock *
10932 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
10933                                      MachineBasicBlock *MBB) const {
10934   DebugLoc DL = MI.getDebugLoc();
10935   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10936 
10937   MachineFunction *MF = MBB->getParent();
10938   MachineRegisterInfo &MRI = MF->getRegInfo();
10939 
10940   MVT PVT = getPointerTy(MF->getDataLayout());
10941   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
10942          "Invalid Pointer Size!");
10943 
10944   const TargetRegisterClass *RC =
10945     (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
10946   Register Tmp = MRI.createVirtualRegister(RC);
10947   // Since FP is only updated here but NOT referenced, it's treated as GPR.
10948   unsigned FP  = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
10949   unsigned SP  = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
10950   unsigned BP =
10951       (PVT == MVT::i64)
10952           ? PPC::X30
10953           : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
10954                                                               : PPC::R30);
10955 
10956   MachineInstrBuilder MIB;
10957 
10958   const int64_t LabelOffset = 1 * PVT.getStoreSize();
10959   const int64_t SPOffset    = 2 * PVT.getStoreSize();
10960   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
10961   const int64_t BPOffset    = 4 * PVT.getStoreSize();
10962 
10963   Register BufReg = MI.getOperand(0).getReg();
10964 
10965   // Reload FP (the jumped-to function may not have had a
10966   // frame pointer, and if so, then its r31 will be restored
10967   // as necessary).
10968   if (PVT == MVT::i64) {
10969     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
10970             .addImm(0)
10971             .addReg(BufReg);
10972   } else {
10973     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
10974             .addImm(0)
10975             .addReg(BufReg);
10976   }
10977   MIB.cloneMemRefs(MI);
10978 
10979   // Reload IP
10980   if (PVT == MVT::i64) {
10981     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
10982             .addImm(LabelOffset)
10983             .addReg(BufReg);
10984   } else {
10985     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
10986             .addImm(LabelOffset)
10987             .addReg(BufReg);
10988   }
10989   MIB.cloneMemRefs(MI);
10990 
10991   // Reload SP
10992   if (PVT == MVT::i64) {
10993     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
10994             .addImm(SPOffset)
10995             .addReg(BufReg);
10996   } else {
10997     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
10998             .addImm(SPOffset)
10999             .addReg(BufReg);
11000   }
11001   MIB.cloneMemRefs(MI);
11002 
11003   // Reload BP
11004   if (PVT == MVT::i64) {
11005     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
11006             .addImm(BPOffset)
11007             .addReg(BufReg);
11008   } else {
11009     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
11010             .addImm(BPOffset)
11011             .addReg(BufReg);
11012   }
11013   MIB.cloneMemRefs(MI);
11014 
11015   // Reload TOC
11016   if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
11017     setUsesTOCBasePtr(*MBB->getParent());
11018     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
11019               .addImm(TOCOffset)
11020               .addReg(BufReg)
11021               .cloneMemRefs(MI);
11022   }
11023 
11024   // Jump
11025   BuildMI(*MBB, MI, DL,
11026           TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
11027   BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));
11028 
11029   MI.eraseFromParent();
11030   return MBB;
11031 }
11032 
11033 bool PPCTargetLowering::hasInlineStackProbe(MachineFunction &MF) const {
11034   // If the function specifically requests inline stack probes, emit them.
11035   if (MF.getFunction().hasFnAttribute("probe-stack"))
11036     return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
11037            "inline-asm";
11038   return false;
11039 }
11040 
11041 unsigned PPCTargetLowering::getStackProbeSize(MachineFunction &MF) const {
11042   const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
11043   unsigned StackAlign = TFI->getStackAlignment();
11044   assert(StackAlign >= 1 && isPowerOf2_32(StackAlign) &&
11045          "Unexpected stack alignment");
11046   // The default stack probe size is 4096 if the function has no
11047   // stack-probe-size attribute.
11048   unsigned StackProbeSize = 4096;
11049   const Function &Fn = MF.getFunction();
11050   if (Fn.hasFnAttribute("stack-probe-size"))
11051     Fn.getFnAttribute("stack-probe-size")
11052         .getValueAsString()
11053         .getAsInteger(0, StackProbeSize);
11054   // Round down to the stack alignment.
11055   StackProbeSize &= ~(StackAlign - 1);
11056   return StackProbeSize ? StackProbeSize : StackAlign;
11057 }
11058 
11059 // Lower dynamic stack allocation with probing. `emitProbedAlloca` is splitted
11060 // into three phases. In the first phase, it uses pseudo instruction
11061 // PREPARE_PROBED_ALLOCA to get the future result of actual FramePointer and
11062 // FinalStackPtr. In the second phase, it generates a loop for probing blocks.
11063 // At last, it uses pseudo instruction DYNAREAOFFSET to get the future result of
11064 // MaxCallFrameSize so that it can calculate correct data area pointer.
11065 MachineBasicBlock *
11066 PPCTargetLowering::emitProbedAlloca(MachineInstr &MI,
11067                                     MachineBasicBlock *MBB) const {
11068   const bool isPPC64 = Subtarget.isPPC64();
11069   MachineFunction *MF = MBB->getParent();
11070   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11071   DebugLoc DL = MI.getDebugLoc();
11072   const unsigned ProbeSize = getStackProbeSize(*MF);
11073   const BasicBlock *ProbedBB = MBB->getBasicBlock();
11074   MachineRegisterInfo &MRI = MF->getRegInfo();
11075   // The CFG of probing stack looks as
11076   //         +-----+
11077   //         | MBB |
11078   //         +--+--+
11079   //            |
11080   //       +----v----+
11081   //  +--->+ TestMBB +---+
11082   //  |    +----+----+   |
11083   //  |         |        |
11084   //  |   +-----v----+   |
11085   //  +---+ BlockMBB |   |
11086   //      +----------+   |
11087   //                     |
11088   //       +---------+   |
11089   //       | TailMBB +<--+
11090   //       +---------+
11091   // In MBB, calculate previous frame pointer and final stack pointer.
11092   // In TestMBB, test if sp is equal to final stack pointer, if so, jump to
11093   // TailMBB. In BlockMBB, update the sp atomically and jump back to TestMBB.
11094   // TailMBB is spliced via \p MI.
11095   MachineBasicBlock *TestMBB = MF->CreateMachineBasicBlock(ProbedBB);
11096   MachineBasicBlock *TailMBB = MF->CreateMachineBasicBlock(ProbedBB);
11097   MachineBasicBlock *BlockMBB = MF->CreateMachineBasicBlock(ProbedBB);
11098 
11099   MachineFunction::iterator MBBIter = ++MBB->getIterator();
11100   MF->insert(MBBIter, TestMBB);
11101   MF->insert(MBBIter, BlockMBB);
11102   MF->insert(MBBIter, TailMBB);
11103 
11104   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
11105   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
11106 
11107   Register DstReg = MI.getOperand(0).getReg();
11108   Register NegSizeReg = MI.getOperand(1).getReg();
11109   Register SPReg = isPPC64 ? PPC::X1 : PPC::R1;
11110   Register FinalStackPtr = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11111   Register FramePointer = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11112   Register ActualNegSizeReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11113 
11114   // Since value of NegSizeReg might be realigned in prologepilog, insert a
11115   // PREPARE_PROBED_ALLOCA pseudo instruction to get actual FramePointer and
11116   // NegSize.
11117   unsigned ProbeOpc;
11118   if (!MRI.hasOneNonDBGUse(NegSizeReg))
11119     ProbeOpc =
11120         isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_64 : PPC::PREPARE_PROBED_ALLOCA_32;
11121   else
11122     // By introducing PREPARE_PROBED_ALLOCA_NEGSIZE_OPT, ActualNegSizeReg
11123     // and NegSizeReg will be allocated in the same phyreg to avoid
11124     // redundant copy when NegSizeReg has only one use which is current MI and
11125     // will be replaced by PREPARE_PROBED_ALLOCA then.
11126     ProbeOpc = isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_64
11127                        : PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_32;
11128   BuildMI(*MBB, {MI}, DL, TII->get(ProbeOpc), FramePointer)
11129       .addDef(ActualNegSizeReg)
11130       .addReg(NegSizeReg)
11131       .add(MI.getOperand(2))
11132       .add(MI.getOperand(3));
11133 
11134   // Calculate final stack pointer, which equals to SP + ActualNegSize.
11135   BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4),
11136           FinalStackPtr)
11137       .addReg(SPReg)
11138       .addReg(ActualNegSizeReg);
11139 
11140   // Materialize a scratch register for update.
11141   int64_t NegProbeSize = -(int64_t)ProbeSize;
11142   assert(isInt<32>(NegProbeSize) && "Unhandled probe size!");
11143   Register ScratchReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11144   if (!isInt<16>(NegProbeSize)) {
11145     Register TempReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11146     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LIS8 : PPC::LIS), TempReg)
11147         .addImm(NegProbeSize >> 16);
11148     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ORI8 : PPC::ORI),
11149             ScratchReg)
11150         .addReg(TempReg)
11151         .addImm(NegProbeSize & 0xFFFF);
11152   } else
11153     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LI8 : PPC::LI), ScratchReg)
11154         .addImm(NegProbeSize);
11155 
11156   {
11157     // Probing leading residual part.
11158     Register Div = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11159     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::DIVD : PPC::DIVW), Div)
11160         .addReg(ActualNegSizeReg)
11161         .addReg(ScratchReg);
11162     Register Mul = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11163     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::MULLD : PPC::MULLW), Mul)
11164         .addReg(Div)
11165         .addReg(ScratchReg);
11166     Register NegMod = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11167     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::SUBF8 : PPC::SUBF), NegMod)
11168         .addReg(Mul)
11169         .addReg(ActualNegSizeReg);
11170     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
11171         .addReg(FramePointer)
11172         .addReg(SPReg)
11173         .addReg(NegMod);
11174   }
11175 
11176   {
11177     // Remaining part should be multiple of ProbeSize.
11178     Register CmpResult = MRI.createVirtualRegister(&PPC::CRRCRegClass);
11179     BuildMI(TestMBB, DL, TII->get(isPPC64 ? PPC::CMPD : PPC::CMPW), CmpResult)
11180         .addReg(SPReg)
11181         .addReg(FinalStackPtr);
11182     BuildMI(TestMBB, DL, TII->get(PPC::BCC))
11183         .addImm(PPC::PRED_EQ)
11184         .addReg(CmpResult)
11185         .addMBB(TailMBB);
11186     TestMBB->addSuccessor(BlockMBB);
11187     TestMBB->addSuccessor(TailMBB);
11188   }
11189 
11190   {
11191     // Touch the block.
11192     // |P...|P...|P...
11193     BuildMI(BlockMBB, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
11194         .addReg(FramePointer)
11195         .addReg(SPReg)
11196         .addReg(ScratchReg);
11197     BuildMI(BlockMBB, DL, TII->get(PPC::B)).addMBB(TestMBB);
11198     BlockMBB->addSuccessor(TestMBB);
11199   }
11200 
11201   // Calculation of MaxCallFrameSize is deferred to prologepilog, use
11202   // DYNAREAOFFSET pseudo instruction to get the future result.
11203   Register MaxCallFrameSizeReg =
11204       MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11205   BuildMI(TailMBB, DL,
11206           TII->get(isPPC64 ? PPC::DYNAREAOFFSET8 : PPC::DYNAREAOFFSET),
11207           MaxCallFrameSizeReg)
11208       .add(MI.getOperand(2))
11209       .add(MI.getOperand(3));
11210   BuildMI(TailMBB, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4), DstReg)
11211       .addReg(SPReg)
11212       .addReg(MaxCallFrameSizeReg);
11213 
11214   // Splice instructions after MI to TailMBB.
11215   TailMBB->splice(TailMBB->end(), MBB,
11216                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
11217   TailMBB->transferSuccessorsAndUpdatePHIs(MBB);
11218   MBB->addSuccessor(TestMBB);
11219 
11220   // Delete the pseudo instruction.
11221   MI.eraseFromParent();
11222 
11223   ++NumDynamicAllocaProbed;
11224   return TailMBB;
11225 }
11226 
11227 MachineBasicBlock *
11228 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
11229                                                MachineBasicBlock *BB) const {
11230   if (MI.getOpcode() == TargetOpcode::STACKMAP ||
11231       MI.getOpcode() == TargetOpcode::PATCHPOINT) {
11232     if (Subtarget.is64BitELFABI() &&
11233         MI.getOpcode() == TargetOpcode::PATCHPOINT &&
11234         !Subtarget.isUsingPCRelativeCalls()) {
11235       // Call lowering should have added an r2 operand to indicate a dependence
11236       // on the TOC base pointer value. It can't however, because there is no
11237       // way to mark the dependence as implicit there, and so the stackmap code
11238       // will confuse it with a regular operand. Instead, add the dependence
11239       // here.
11240       MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
11241     }
11242 
11243     return emitPatchPoint(MI, BB);
11244   }
11245 
11246   if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
11247       MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
11248     return emitEHSjLjSetJmp(MI, BB);
11249   } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
11250              MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
11251     return emitEHSjLjLongJmp(MI, BB);
11252   }
11253 
11254   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11255 
11256   // To "insert" these instructions we actually have to insert their
11257   // control-flow patterns.
11258   const BasicBlock *LLVM_BB = BB->getBasicBlock();
11259   MachineFunction::iterator It = ++BB->getIterator();
11260 
11261   MachineFunction *F = BB->getParent();
11262 
11263   if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
11264       MI.getOpcode() == PPC::SELECT_CC_I8 || MI.getOpcode() == PPC::SELECT_I4 ||
11265       MI.getOpcode() == PPC::SELECT_I8) {
11266     SmallVector<MachineOperand, 2> Cond;
11267     if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
11268         MI.getOpcode() == PPC::SELECT_CC_I8)
11269       Cond.push_back(MI.getOperand(4));
11270     else
11271       Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
11272     Cond.push_back(MI.getOperand(1));
11273 
11274     DebugLoc dl = MI.getDebugLoc();
11275     TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
11276                       MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
11277   } else if (MI.getOpcode() == PPC::SELECT_CC_F4 ||
11278              MI.getOpcode() == PPC::SELECT_CC_F8 ||
11279              MI.getOpcode() == PPC::SELECT_CC_F16 ||
11280              MI.getOpcode() == PPC::SELECT_CC_VRRC ||
11281              MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
11282              MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
11283              MI.getOpcode() == PPC::SELECT_CC_VSRC ||
11284              MI.getOpcode() == PPC::SELECT_CC_SPE4 ||
11285              MI.getOpcode() == PPC::SELECT_CC_SPE ||
11286              MI.getOpcode() == PPC::SELECT_F4 ||
11287              MI.getOpcode() == PPC::SELECT_F8 ||
11288              MI.getOpcode() == PPC::SELECT_F16 ||
11289              MI.getOpcode() == PPC::SELECT_SPE ||
11290              MI.getOpcode() == PPC::SELECT_SPE4 ||
11291              MI.getOpcode() == PPC::SELECT_VRRC ||
11292              MI.getOpcode() == PPC::SELECT_VSFRC ||
11293              MI.getOpcode() == PPC::SELECT_VSSRC ||
11294              MI.getOpcode() == PPC::SELECT_VSRC) {
11295     // The incoming instruction knows the destination vreg to set, the
11296     // condition code register to branch on, the true/false values to
11297     // select between, and a branch opcode to use.
11298 
11299     //  thisMBB:
11300     //  ...
11301     //   TrueVal = ...
11302     //   cmpTY ccX, r1, r2
11303     //   bCC copy1MBB
11304     //   fallthrough --> copy0MBB
11305     MachineBasicBlock *thisMBB = BB;
11306     MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
11307     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
11308     DebugLoc dl = MI.getDebugLoc();
11309     F->insert(It, copy0MBB);
11310     F->insert(It, sinkMBB);
11311 
11312     // Transfer the remainder of BB and its successor edges to sinkMBB.
11313     sinkMBB->splice(sinkMBB->begin(), BB,
11314                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11315     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
11316 
11317     // Next, add the true and fallthrough blocks as its successors.
11318     BB->addSuccessor(copy0MBB);
11319     BB->addSuccessor(sinkMBB);
11320 
11321     if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 ||
11322         MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 ||
11323         MI.getOpcode() == PPC::SELECT_F16 ||
11324         MI.getOpcode() == PPC::SELECT_SPE4 ||
11325         MI.getOpcode() == PPC::SELECT_SPE ||
11326         MI.getOpcode() == PPC::SELECT_VRRC ||
11327         MI.getOpcode() == PPC::SELECT_VSFRC ||
11328         MI.getOpcode() == PPC::SELECT_VSSRC ||
11329         MI.getOpcode() == PPC::SELECT_VSRC) {
11330       BuildMI(BB, dl, TII->get(PPC::BC))
11331           .addReg(MI.getOperand(1).getReg())
11332           .addMBB(sinkMBB);
11333     } else {
11334       unsigned SelectPred = MI.getOperand(4).getImm();
11335       BuildMI(BB, dl, TII->get(PPC::BCC))
11336           .addImm(SelectPred)
11337           .addReg(MI.getOperand(1).getReg())
11338           .addMBB(sinkMBB);
11339     }
11340 
11341     //  copy0MBB:
11342     //   %FalseValue = ...
11343     //   # fallthrough to sinkMBB
11344     BB = copy0MBB;
11345 
11346     // Update machine-CFG edges
11347     BB->addSuccessor(sinkMBB);
11348 
11349     //  sinkMBB:
11350     //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
11351     //  ...
11352     BB = sinkMBB;
11353     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg())
11354         .addReg(MI.getOperand(3).getReg())
11355         .addMBB(copy0MBB)
11356         .addReg(MI.getOperand(2).getReg())
11357         .addMBB(thisMBB);
11358   } else if (MI.getOpcode() == PPC::ReadTB) {
11359     // To read the 64-bit time-base register on a 32-bit target, we read the
11360     // two halves. Should the counter have wrapped while it was being read, we
11361     // need to try again.
11362     // ...
11363     // readLoop:
11364     // mfspr Rx,TBU # load from TBU
11365     // mfspr Ry,TB  # load from TB
11366     // mfspr Rz,TBU # load from TBU
11367     // cmpw crX,Rx,Rz # check if 'old'='new'
11368     // bne readLoop   # branch if they're not equal
11369     // ...
11370 
11371     MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
11372     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
11373     DebugLoc dl = MI.getDebugLoc();
11374     F->insert(It, readMBB);
11375     F->insert(It, sinkMBB);
11376 
11377     // Transfer the remainder of BB and its successor edges to sinkMBB.
11378     sinkMBB->splice(sinkMBB->begin(), BB,
11379                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11380     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
11381 
11382     BB->addSuccessor(readMBB);
11383     BB = readMBB;
11384 
11385     MachineRegisterInfo &RegInfo = F->getRegInfo();
11386     Register ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
11387     Register LoReg = MI.getOperand(0).getReg();
11388     Register HiReg = MI.getOperand(1).getReg();
11389 
11390     BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
11391     BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
11392     BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269);
11393 
11394     Register CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
11395 
11396     BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
11397         .addReg(HiReg)
11398         .addReg(ReadAgainReg);
11399     BuildMI(BB, dl, TII->get(PPC::BCC))
11400         .addImm(PPC::PRED_NE)
11401         .addReg(CmpReg)
11402         .addMBB(readMBB);
11403 
11404     BB->addSuccessor(readMBB);
11405     BB->addSuccessor(sinkMBB);
11406   } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
11407     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
11408   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
11409     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
11410   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
11411     BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4);
11412   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
11413     BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8);
11414 
11415   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
11416     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
11417   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
11418     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
11419   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
11420     BB = EmitAtomicBinary(MI, BB, 4, PPC::AND);
11421   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
11422     BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8);
11423 
11424   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
11425     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
11426   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
11427     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
11428   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
11429     BB = EmitAtomicBinary(MI, BB, 4, PPC::OR);
11430   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
11431     BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8);
11432 
11433   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
11434     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
11435   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
11436     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
11437   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
11438     BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR);
11439   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
11440     BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8);
11441 
11442   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
11443     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
11444   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
11445     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
11446   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
11447     BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND);
11448   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
11449     BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8);
11450 
11451   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
11452     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
11453   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
11454     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
11455   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
11456     BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
11457   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
11458     BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);
11459 
11460   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
11461     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE);
11462   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
11463     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE);
11464   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
11465     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE);
11466   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
11467     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE);
11468 
11469   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
11470     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE);
11471   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
11472     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE);
11473   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
11474     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE);
11475   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
11476     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE);
11477 
11478   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
11479     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE);
11480   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
11481     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE);
11482   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
11483     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE);
11484   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
11485     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE);
11486 
11487   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
11488     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE);
11489   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
11490     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE);
11491   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
11492     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE);
11493   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
11494     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE);
11495 
11496   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
11497     BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
11498   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
11499     BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
11500   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
11501     BB = EmitAtomicBinary(MI, BB, 4, 0);
11502   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
11503     BB = EmitAtomicBinary(MI, BB, 8, 0);
11504   else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
11505            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
11506            (Subtarget.hasPartwordAtomics() &&
11507             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
11508            (Subtarget.hasPartwordAtomics() &&
11509             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
11510     bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
11511 
11512     auto LoadMnemonic = PPC::LDARX;
11513     auto StoreMnemonic = PPC::STDCX;
11514     switch (MI.getOpcode()) {
11515     default:
11516       llvm_unreachable("Compare and swap of unknown size");
11517     case PPC::ATOMIC_CMP_SWAP_I8:
11518       LoadMnemonic = PPC::LBARX;
11519       StoreMnemonic = PPC::STBCX;
11520       assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
11521       break;
11522     case PPC::ATOMIC_CMP_SWAP_I16:
11523       LoadMnemonic = PPC::LHARX;
11524       StoreMnemonic = PPC::STHCX;
11525       assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
11526       break;
11527     case PPC::ATOMIC_CMP_SWAP_I32:
11528       LoadMnemonic = PPC::LWARX;
11529       StoreMnemonic = PPC::STWCX;
11530       break;
11531     case PPC::ATOMIC_CMP_SWAP_I64:
11532       LoadMnemonic = PPC::LDARX;
11533       StoreMnemonic = PPC::STDCX;
11534       break;
11535     }
11536     Register dest = MI.getOperand(0).getReg();
11537     Register ptrA = MI.getOperand(1).getReg();
11538     Register ptrB = MI.getOperand(2).getReg();
11539     Register oldval = MI.getOperand(3).getReg();
11540     Register newval = MI.getOperand(4).getReg();
11541     DebugLoc dl = MI.getDebugLoc();
11542 
11543     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
11544     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
11545     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
11546     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11547     F->insert(It, loop1MBB);
11548     F->insert(It, loop2MBB);
11549     F->insert(It, midMBB);
11550     F->insert(It, exitMBB);
11551     exitMBB->splice(exitMBB->begin(), BB,
11552                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11553     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11554 
11555     //  thisMBB:
11556     //   ...
11557     //   fallthrough --> loopMBB
11558     BB->addSuccessor(loop1MBB);
11559 
11560     // loop1MBB:
11561     //   l[bhwd]arx dest, ptr
11562     //   cmp[wd] dest, oldval
11563     //   bne- midMBB
11564     // loop2MBB:
11565     //   st[bhwd]cx. newval, ptr
11566     //   bne- loopMBB
11567     //   b exitBB
11568     // midMBB:
11569     //   st[bhwd]cx. dest, ptr
11570     // exitBB:
11571     BB = loop1MBB;
11572     BuildMI(BB, dl, TII->get(LoadMnemonic), dest).addReg(ptrA).addReg(ptrB);
11573     BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
11574         .addReg(oldval)
11575         .addReg(dest);
11576     BuildMI(BB, dl, TII->get(PPC::BCC))
11577         .addImm(PPC::PRED_NE)
11578         .addReg(PPC::CR0)
11579         .addMBB(midMBB);
11580     BB->addSuccessor(loop2MBB);
11581     BB->addSuccessor(midMBB);
11582 
11583     BB = loop2MBB;
11584     BuildMI(BB, dl, TII->get(StoreMnemonic))
11585         .addReg(newval)
11586         .addReg(ptrA)
11587         .addReg(ptrB);
11588     BuildMI(BB, dl, TII->get(PPC::BCC))
11589         .addImm(PPC::PRED_NE)
11590         .addReg(PPC::CR0)
11591         .addMBB(loop1MBB);
11592     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
11593     BB->addSuccessor(loop1MBB);
11594     BB->addSuccessor(exitMBB);
11595 
11596     BB = midMBB;
11597     BuildMI(BB, dl, TII->get(StoreMnemonic))
11598         .addReg(dest)
11599         .addReg(ptrA)
11600         .addReg(ptrB);
11601     BB->addSuccessor(exitMBB);
11602 
11603     //  exitMBB:
11604     //   ...
11605     BB = exitMBB;
11606   } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
11607              MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
11608     // We must use 64-bit registers for addresses when targeting 64-bit,
11609     // since we're actually doing arithmetic on them.  Other registers
11610     // can be 32-bit.
11611     bool is64bit = Subtarget.isPPC64();
11612     bool isLittleEndian = Subtarget.isLittleEndian();
11613     bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
11614 
11615     Register dest = MI.getOperand(0).getReg();
11616     Register ptrA = MI.getOperand(1).getReg();
11617     Register ptrB = MI.getOperand(2).getReg();
11618     Register oldval = MI.getOperand(3).getReg();
11619     Register newval = MI.getOperand(4).getReg();
11620     DebugLoc dl = MI.getDebugLoc();
11621 
11622     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
11623     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
11624     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
11625     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11626     F->insert(It, loop1MBB);
11627     F->insert(It, loop2MBB);
11628     F->insert(It, midMBB);
11629     F->insert(It, exitMBB);
11630     exitMBB->splice(exitMBB->begin(), BB,
11631                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11632     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11633 
11634     MachineRegisterInfo &RegInfo = F->getRegInfo();
11635     const TargetRegisterClass *RC =
11636         is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11637     const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
11638 
11639     Register PtrReg = RegInfo.createVirtualRegister(RC);
11640     Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
11641     Register ShiftReg =
11642         isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
11643     Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
11644     Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
11645     Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
11646     Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
11647     Register MaskReg = RegInfo.createVirtualRegister(GPRC);
11648     Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
11649     Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
11650     Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
11651     Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
11652     Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
11653     Register Ptr1Reg;
11654     Register TmpReg = RegInfo.createVirtualRegister(GPRC);
11655     Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
11656     //  thisMBB:
11657     //   ...
11658     //   fallthrough --> loopMBB
11659     BB->addSuccessor(loop1MBB);
11660 
11661     // The 4-byte load must be aligned, while a char or short may be
11662     // anywhere in the word.  Hence all this nasty bookkeeping code.
11663     //   add ptr1, ptrA, ptrB [copy if ptrA==0]
11664     //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
11665     //   xori shift, shift1, 24 [16]
11666     //   rlwinm ptr, ptr1, 0, 0, 29
11667     //   slw newval2, newval, shift
11668     //   slw oldval2, oldval,shift
11669     //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
11670     //   slw mask, mask2, shift
11671     //   and newval3, newval2, mask
11672     //   and oldval3, oldval2, mask
11673     // loop1MBB:
11674     //   lwarx tmpDest, ptr
11675     //   and tmp, tmpDest, mask
11676     //   cmpw tmp, oldval3
11677     //   bne- midMBB
11678     // loop2MBB:
11679     //   andc tmp2, tmpDest, mask
11680     //   or tmp4, tmp2, newval3
11681     //   stwcx. tmp4, ptr
11682     //   bne- loop1MBB
11683     //   b exitBB
11684     // midMBB:
11685     //   stwcx. tmpDest, ptr
11686     // exitBB:
11687     //   srw dest, tmpDest, shift
11688     if (ptrA != ZeroReg) {
11689       Ptr1Reg = RegInfo.createVirtualRegister(RC);
11690       BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
11691           .addReg(ptrA)
11692           .addReg(ptrB);
11693     } else {
11694       Ptr1Reg = ptrB;
11695     }
11696 
11697     // We need use 32-bit subregister to avoid mismatch register class in 64-bit
11698     // mode.
11699     BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
11700         .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
11701         .addImm(3)
11702         .addImm(27)
11703         .addImm(is8bit ? 28 : 27);
11704     if (!isLittleEndian)
11705       BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
11706           .addReg(Shift1Reg)
11707           .addImm(is8bit ? 24 : 16);
11708     if (is64bit)
11709       BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
11710           .addReg(Ptr1Reg)
11711           .addImm(0)
11712           .addImm(61);
11713     else
11714       BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
11715           .addReg(Ptr1Reg)
11716           .addImm(0)
11717           .addImm(0)
11718           .addImm(29);
11719     BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
11720         .addReg(newval)
11721         .addReg(ShiftReg);
11722     BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
11723         .addReg(oldval)
11724         .addReg(ShiftReg);
11725     if (is8bit)
11726       BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
11727     else {
11728       BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
11729       BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
11730           .addReg(Mask3Reg)
11731           .addImm(65535);
11732     }
11733     BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
11734         .addReg(Mask2Reg)
11735         .addReg(ShiftReg);
11736     BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
11737         .addReg(NewVal2Reg)
11738         .addReg(MaskReg);
11739     BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
11740         .addReg(OldVal2Reg)
11741         .addReg(MaskReg);
11742 
11743     BB = loop1MBB;
11744     BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
11745         .addReg(ZeroReg)
11746         .addReg(PtrReg);
11747     BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
11748         .addReg(TmpDestReg)
11749         .addReg(MaskReg);
11750     BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
11751         .addReg(TmpReg)
11752         .addReg(OldVal3Reg);
11753     BuildMI(BB, dl, TII->get(PPC::BCC))
11754         .addImm(PPC::PRED_NE)
11755         .addReg(PPC::CR0)
11756         .addMBB(midMBB);
11757     BB->addSuccessor(loop2MBB);
11758     BB->addSuccessor(midMBB);
11759 
11760     BB = loop2MBB;
11761     BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
11762         .addReg(TmpDestReg)
11763         .addReg(MaskReg);
11764     BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
11765         .addReg(Tmp2Reg)
11766         .addReg(NewVal3Reg);
11767     BuildMI(BB, dl, TII->get(PPC::STWCX))
11768         .addReg(Tmp4Reg)
11769         .addReg(ZeroReg)
11770         .addReg(PtrReg);
11771     BuildMI(BB, dl, TII->get(PPC::BCC))
11772         .addImm(PPC::PRED_NE)
11773         .addReg(PPC::CR0)
11774         .addMBB(loop1MBB);
11775     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
11776     BB->addSuccessor(loop1MBB);
11777     BB->addSuccessor(exitMBB);
11778 
11779     BB = midMBB;
11780     BuildMI(BB, dl, TII->get(PPC::STWCX))
11781         .addReg(TmpDestReg)
11782         .addReg(ZeroReg)
11783         .addReg(PtrReg);
11784     BB->addSuccessor(exitMBB);
11785 
11786     //  exitMBB:
11787     //   ...
11788     BB = exitMBB;
11789     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
11790         .addReg(TmpReg)
11791         .addReg(ShiftReg);
11792   } else if (MI.getOpcode() == PPC::FADDrtz) {
11793     // This pseudo performs an FADD with rounding mode temporarily forced
11794     // to round-to-zero.  We emit this via custom inserter since the FPSCR
11795     // is not modeled at the SelectionDAG level.
11796     Register Dest = MI.getOperand(0).getReg();
11797     Register Src1 = MI.getOperand(1).getReg();
11798     Register Src2 = MI.getOperand(2).getReg();
11799     DebugLoc dl = MI.getDebugLoc();
11800 
11801     MachineRegisterInfo &RegInfo = F->getRegInfo();
11802     Register MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
11803 
11804     // Save FPSCR value.
11805     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);
11806 
11807     // Set rounding mode to round-to-zero.
11808     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1))
11809         .addImm(31)
11810         .addReg(PPC::RM, RegState::ImplicitDefine);
11811 
11812     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0))
11813         .addImm(30)
11814         .addReg(PPC::RM, RegState::ImplicitDefine);
11815 
11816     // Perform addition.
11817     auto MIB = BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest)
11818                    .addReg(Src1)
11819                    .addReg(Src2);
11820     if (MI.getFlag(MachineInstr::NoFPExcept))
11821       MIB.setMIFlag(MachineInstr::NoFPExcept);
11822 
11823     // Restore FPSCR value.
11824     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
11825   } else if (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
11826              MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT ||
11827              MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
11828              MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) {
11829     unsigned Opcode = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
11830                        MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8)
11831                           ? PPC::ANDI8_rec
11832                           : PPC::ANDI_rec;
11833     bool IsEQ = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
11834                  MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8);
11835 
11836     MachineRegisterInfo &RegInfo = F->getRegInfo();
11837     Register Dest = RegInfo.createVirtualRegister(
11838         Opcode == PPC::ANDI_rec ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);
11839 
11840     DebugLoc Dl = MI.getDebugLoc();
11841     BuildMI(*BB, MI, Dl, TII->get(Opcode), Dest)
11842         .addReg(MI.getOperand(1).getReg())
11843         .addImm(1);
11844     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
11845             MI.getOperand(0).getReg())
11846         .addReg(IsEQ ? PPC::CR0EQ : PPC::CR0GT);
11847   } else if (MI.getOpcode() == PPC::TCHECK_RET) {
11848     DebugLoc Dl = MI.getDebugLoc();
11849     MachineRegisterInfo &RegInfo = F->getRegInfo();
11850     Register CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
11851     BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
11852     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
11853             MI.getOperand(0).getReg())
11854         .addReg(CRReg);
11855   } else if (MI.getOpcode() == PPC::TBEGIN_RET) {
11856     DebugLoc Dl = MI.getDebugLoc();
11857     unsigned Imm = MI.getOperand(1).getImm();
11858     BuildMI(*BB, MI, Dl, TII->get(PPC::TBEGIN)).addImm(Imm);
11859     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
11860             MI.getOperand(0).getReg())
11861         .addReg(PPC::CR0EQ);
11862   } else if (MI.getOpcode() == PPC::SETRNDi) {
11863     DebugLoc dl = MI.getDebugLoc();
11864     Register OldFPSCRReg = MI.getOperand(0).getReg();
11865 
11866     // Save FPSCR value.
11867     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
11868 
11869     // The floating point rounding mode is in the bits 62:63 of FPCSR, and has
11870     // the following settings:
11871     //   00 Round to nearest
11872     //   01 Round to 0
11873     //   10 Round to +inf
11874     //   11 Round to -inf
11875 
11876     // When the operand is immediate, using the two least significant bits of
11877     // the immediate to set the bits 62:63 of FPSCR.
11878     unsigned Mode = MI.getOperand(1).getImm();
11879     BuildMI(*BB, MI, dl, TII->get((Mode & 1) ? PPC::MTFSB1 : PPC::MTFSB0))
11880         .addImm(31)
11881         .addReg(PPC::RM, RegState::ImplicitDefine);
11882 
11883     BuildMI(*BB, MI, dl, TII->get((Mode & 2) ? PPC::MTFSB1 : PPC::MTFSB0))
11884         .addImm(30)
11885         .addReg(PPC::RM, RegState::ImplicitDefine);
11886   } else if (MI.getOpcode() == PPC::SETRND) {
11887     DebugLoc dl = MI.getDebugLoc();
11888 
11889     // Copy register from F8RCRegClass::SrcReg to G8RCRegClass::DestReg
11890     // or copy register from G8RCRegClass::SrcReg to F8RCRegClass::DestReg.
11891     // If the target doesn't have DirectMove, we should use stack to do the
11892     // conversion, because the target doesn't have the instructions like mtvsrd
11893     // or mfvsrd to do this conversion directly.
11894     auto copyRegFromG8RCOrF8RC = [&] (unsigned DestReg, unsigned SrcReg) {
11895       if (Subtarget.hasDirectMove()) {
11896         BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), DestReg)
11897           .addReg(SrcReg);
11898       } else {
11899         // Use stack to do the register copy.
11900         unsigned StoreOp = PPC::STD, LoadOp = PPC::LFD;
11901         MachineRegisterInfo &RegInfo = F->getRegInfo();
11902         const TargetRegisterClass *RC = RegInfo.getRegClass(SrcReg);
11903         if (RC == &PPC::F8RCRegClass) {
11904           // Copy register from F8RCRegClass to G8RCRegclass.
11905           assert((RegInfo.getRegClass(DestReg) == &PPC::G8RCRegClass) &&
11906                  "Unsupported RegClass.");
11907 
11908           StoreOp = PPC::STFD;
11909           LoadOp = PPC::LD;
11910         } else {
11911           // Copy register from G8RCRegClass to F8RCRegclass.
11912           assert((RegInfo.getRegClass(SrcReg) == &PPC::G8RCRegClass) &&
11913                  (RegInfo.getRegClass(DestReg) == &PPC::F8RCRegClass) &&
11914                  "Unsupported RegClass.");
11915         }
11916 
11917         MachineFrameInfo &MFI = F->getFrameInfo();
11918         int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
11919 
11920         MachineMemOperand *MMOStore = F->getMachineMemOperand(
11921             MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
11922             MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx),
11923             MFI.getObjectAlign(FrameIdx));
11924 
11925         // Store the SrcReg into the stack.
11926         BuildMI(*BB, MI, dl, TII->get(StoreOp))
11927           .addReg(SrcReg)
11928           .addImm(0)
11929           .addFrameIndex(FrameIdx)
11930           .addMemOperand(MMOStore);
11931 
11932         MachineMemOperand *MMOLoad = F->getMachineMemOperand(
11933             MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
11934             MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
11935             MFI.getObjectAlign(FrameIdx));
11936 
11937         // Load from the stack where SrcReg is stored, and save to DestReg,
11938         // so we have done the RegClass conversion from RegClass::SrcReg to
11939         // RegClass::DestReg.
11940         BuildMI(*BB, MI, dl, TII->get(LoadOp), DestReg)
11941           .addImm(0)
11942           .addFrameIndex(FrameIdx)
11943           .addMemOperand(MMOLoad);
11944       }
11945     };
11946 
11947     Register OldFPSCRReg = MI.getOperand(0).getReg();
11948 
11949     // Save FPSCR value.
11950     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
11951 
11952     // When the operand is gprc register, use two least significant bits of the
11953     // register and mtfsf instruction to set the bits 62:63 of FPSCR.
11954     //
11955     // copy OldFPSCRTmpReg, OldFPSCRReg
11956     // (INSERT_SUBREG ExtSrcReg, (IMPLICIT_DEF ImDefReg), SrcOp, 1)
11957     // rldimi NewFPSCRTmpReg, ExtSrcReg, OldFPSCRReg, 0, 62
11958     // copy NewFPSCRReg, NewFPSCRTmpReg
11959     // mtfsf 255, NewFPSCRReg
11960     MachineOperand SrcOp = MI.getOperand(1);
11961     MachineRegisterInfo &RegInfo = F->getRegInfo();
11962     Register OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
11963 
11964     copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg);
11965 
11966     Register ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
11967     Register ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
11968 
11969     // The first operand of INSERT_SUBREG should be a register which has
11970     // subregisters, we only care about its RegClass, so we should use an
11971     // IMPLICIT_DEF register.
11972     BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), ImDefReg);
11973     BuildMI(*BB, MI, dl, TII->get(PPC::INSERT_SUBREG), ExtSrcReg)
11974       .addReg(ImDefReg)
11975       .add(SrcOp)
11976       .addImm(1);
11977 
11978     Register NewFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
11979     BuildMI(*BB, MI, dl, TII->get(PPC::RLDIMI), NewFPSCRTmpReg)
11980       .addReg(OldFPSCRTmpReg)
11981       .addReg(ExtSrcReg)
11982       .addImm(0)
11983       .addImm(62);
11984 
11985     Register NewFPSCRReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
11986     copyRegFromG8RCOrF8RC(NewFPSCRReg, NewFPSCRTmpReg);
11987 
11988     // The mask 255 means that put the 32:63 bits of NewFPSCRReg to the 32:63
11989     // bits of FPSCR.
11990     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF))
11991       .addImm(255)
11992       .addReg(NewFPSCRReg)
11993       .addImm(0)
11994       .addImm(0);
11995   } else if (MI.getOpcode() == PPC::SETFLM) {
11996     DebugLoc Dl = MI.getDebugLoc();
11997 
11998     // Result of setflm is previous FPSCR content, so we need to save it first.
11999     Register OldFPSCRReg = MI.getOperand(0).getReg();
12000     BuildMI(*BB, MI, Dl, TII->get(PPC::MFFS), OldFPSCRReg);
12001 
12002     // Put bits in 32:63 to FPSCR.
12003     Register NewFPSCRReg = MI.getOperand(1).getReg();
12004     BuildMI(*BB, MI, Dl, TII->get(PPC::MTFSF))
12005         .addImm(255)
12006         .addReg(NewFPSCRReg)
12007         .addImm(0)
12008         .addImm(0);
12009   } else if (MI.getOpcode() == PPC::PROBED_ALLOCA_32 ||
12010              MI.getOpcode() == PPC::PROBED_ALLOCA_64) {
12011     return emitProbedAlloca(MI, BB);
12012   } else {
12013     llvm_unreachable("Unexpected instr type to insert");
12014   }
12015 
12016   MI.eraseFromParent(); // The pseudo instruction is gone now.
12017   return BB;
12018 }
12019 
12020 //===----------------------------------------------------------------------===//
12021 // Target Optimization Hooks
12022 //===----------------------------------------------------------------------===//
12023 
12024 static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {
12025   // For the estimates, convergence is quadratic, so we essentially double the
12026   // number of digits correct after every iteration. For both FRE and FRSQRTE,
12027   // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(),
12028   // this is 2^-14. IEEE float has 23 digits and double has 52 digits.
12029   int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
12030   if (VT.getScalarType() == MVT::f64)
12031     RefinementSteps++;
12032   return RefinementSteps;
12033 }
12034 
12035 SDValue PPCTargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG,
12036                                             const DenormalMode &Mode) const {
12037   // We only have VSX Vector Test for software Square Root.
12038   EVT VT = Op.getValueType();
12039   if (!isTypeLegal(MVT::i1) ||
12040       (VT != MVT::f64 &&
12041        ((VT != MVT::v2f64 && VT != MVT::v4f32) || !Subtarget.hasVSX())))
12042     return SDValue();
12043 
12044   SDLoc DL(Op);
12045   // The output register of FTSQRT is CR field.
12046   SDValue FTSQRT = DAG.getNode(PPCISD::FTSQRT, DL, MVT::i32, Op);
12047   // ftsqrt BF,FRB
12048   // Let e_b be the unbiased exponent of the double-precision
12049   // floating-point operand in register FRB.
12050   // fe_flag is set to 1 if either of the following conditions occurs.
12051   //   - The double-precision floating-point operand in register FRB is a zero,
12052   //     a NaN, or an infinity, or a negative value.
12053   //   - e_b is less than or equal to -970.
12054   // Otherwise fe_flag is set to 0.
12055   // Both VSX and non-VSX versions would set EQ bit in the CR if the number is
12056   // not eligible for iteration. (zero/negative/infinity/nan or unbiased
12057   // exponent is less than -970)
12058   SDValue SRIdxVal = DAG.getTargetConstant(PPC::sub_eq, DL, MVT::i32);
12059   return SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, MVT::i1,
12060                                     FTSQRT, SRIdxVal),
12061                  0);
12062 }
12063 
12064 SDValue
12065 PPCTargetLowering::getSqrtResultForDenormInput(SDValue Op,
12066                                                SelectionDAG &DAG) const {
12067   // We only have VSX Vector Square Root.
12068   EVT VT = Op.getValueType();
12069   if (VT != MVT::f64 &&
12070       ((VT != MVT::v2f64 && VT != MVT::v4f32) || !Subtarget.hasVSX()))
12071     return TargetLowering::getSqrtResultForDenormInput(Op, DAG);
12072 
12073   return DAG.getNode(PPCISD::FSQRT, SDLoc(Op), VT, Op);
12074 }
12075 
12076 SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
12077                                            int Enabled, int &RefinementSteps,
12078                                            bool &UseOneConstNR,
12079                                            bool Reciprocal) const {
12080   EVT VT = Operand.getValueType();
12081   if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
12082       (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
12083       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
12084       (VT == MVT::v2f64 && Subtarget.hasVSX())) {
12085     if (RefinementSteps == ReciprocalEstimate::Unspecified)
12086       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
12087 
12088     // The Newton-Raphson computation with a single constant does not provide
12089     // enough accuracy on some CPUs.
12090     UseOneConstNR = !Subtarget.needsTwoConstNR();
12091     return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
12092   }
12093   return SDValue();
12094 }
12095 
12096 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
12097                                             int Enabled,
12098                                             int &RefinementSteps) const {
12099   EVT VT = Operand.getValueType();
12100   if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
12101       (VT == MVT::f64 && Subtarget.hasFRE()) ||
12102       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
12103       (VT == MVT::v2f64 && Subtarget.hasVSX())) {
12104     if (RefinementSteps == ReciprocalEstimate::Unspecified)
12105       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
12106     return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
12107   }
12108   return SDValue();
12109 }
12110 
12111 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
12112   // Note: This functionality is used only when unsafe-fp-math is enabled, and
12113   // on cores with reciprocal estimates (which are used when unsafe-fp-math is
12114   // enabled for division), this functionality is redundant with the default
12115   // combiner logic (once the division -> reciprocal/multiply transformation
12116   // has taken place). As a result, this matters more for older cores than for
12117   // newer ones.
12118 
12119   // Combine multiple FDIVs with the same divisor into multiple FMULs by the
12120   // reciprocal if there are two or more FDIVs (for embedded cores with only
12121   // one FP pipeline) for three or more FDIVs (for generic OOO cores).
12122   switch (Subtarget.getCPUDirective()) {
12123   default:
12124     return 3;
12125   case PPC::DIR_440:
12126   case PPC::DIR_A2:
12127   case PPC::DIR_E500:
12128   case PPC::DIR_E500mc:
12129   case PPC::DIR_E5500:
12130     return 2;
12131   }
12132 }
12133 
12134 // isConsecutiveLSLoc needs to work even if all adds have not yet been
12135 // collapsed, and so we need to look through chains of them.
12136 static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
12137                                      int64_t& Offset, SelectionDAG &DAG) {
12138   if (DAG.isBaseWithConstantOffset(Loc)) {
12139     Base = Loc.getOperand(0);
12140     Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
12141 
12142     // The base might itself be a base plus an offset, and if so, accumulate
12143     // that as well.
12144     getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG);
12145   }
12146 }
12147 
12148 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
12149                             unsigned Bytes, int Dist,
12150                             SelectionDAG &DAG) {
12151   if (VT.getSizeInBits() / 8 != Bytes)
12152     return false;
12153 
12154   SDValue BaseLoc = Base->getBasePtr();
12155   if (Loc.getOpcode() == ISD::FrameIndex) {
12156     if (BaseLoc.getOpcode() != ISD::FrameIndex)
12157       return false;
12158     const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
12159     int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
12160     int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
12161     int FS  = MFI.getObjectSize(FI);
12162     int BFS = MFI.getObjectSize(BFI);
12163     if (FS != BFS || FS != (int)Bytes) return false;
12164     return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
12165   }
12166 
12167   SDValue Base1 = Loc, Base2 = BaseLoc;
12168   int64_t Offset1 = 0, Offset2 = 0;
12169   getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);
12170   getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);
12171   if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
12172     return true;
12173 
12174   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12175   const GlobalValue *GV1 = nullptr;
12176   const GlobalValue *GV2 = nullptr;
12177   Offset1 = 0;
12178   Offset2 = 0;
12179   bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
12180   bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
12181   if (isGA1 && isGA2 && GV1 == GV2)
12182     return Offset1 == (Offset2 + Dist*Bytes);
12183   return false;
12184 }
12185 
12186 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
12187 // not enforce equality of the chain operands.
12188 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
12189                             unsigned Bytes, int Dist,
12190                             SelectionDAG &DAG) {
12191   if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
12192     EVT VT = LS->getMemoryVT();
12193     SDValue Loc = LS->getBasePtr();
12194     return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
12195   }
12196 
12197   if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
12198     EVT VT;
12199     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12200     default: return false;
12201     case Intrinsic::ppc_altivec_lvx:
12202     case Intrinsic::ppc_altivec_lvxl:
12203     case Intrinsic::ppc_vsx_lxvw4x:
12204     case Intrinsic::ppc_vsx_lxvw4x_be:
12205       VT = MVT::v4i32;
12206       break;
12207     case Intrinsic::ppc_vsx_lxvd2x:
12208     case Intrinsic::ppc_vsx_lxvd2x_be:
12209       VT = MVT::v2f64;
12210       break;
12211     case Intrinsic::ppc_altivec_lvebx:
12212       VT = MVT::i8;
12213       break;
12214     case Intrinsic::ppc_altivec_lvehx:
12215       VT = MVT::i16;
12216       break;
12217     case Intrinsic::ppc_altivec_lvewx:
12218       VT = MVT::i32;
12219       break;
12220     }
12221 
12222     return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
12223   }
12224 
12225   if (N->getOpcode() == ISD::INTRINSIC_VOID) {
12226     EVT VT;
12227     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12228     default: return false;
12229     case Intrinsic::ppc_altivec_stvx:
12230     case Intrinsic::ppc_altivec_stvxl:
12231     case Intrinsic::ppc_vsx_stxvw4x:
12232       VT = MVT::v4i32;
12233       break;
12234     case Intrinsic::ppc_vsx_stxvd2x:
12235       VT = MVT::v2f64;
12236       break;
12237     case Intrinsic::ppc_vsx_stxvw4x_be:
12238       VT = MVT::v4i32;
12239       break;
12240     case Intrinsic::ppc_vsx_stxvd2x_be:
12241       VT = MVT::v2f64;
12242       break;
12243     case Intrinsic::ppc_altivec_stvebx:
12244       VT = MVT::i8;
12245       break;
12246     case Intrinsic::ppc_altivec_stvehx:
12247       VT = MVT::i16;
12248       break;
12249     case Intrinsic::ppc_altivec_stvewx:
12250       VT = MVT::i32;
12251       break;
12252     }
12253 
12254     return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
12255   }
12256 
12257   return false;
12258 }
12259 
12260 // Return true is there is a nearyby consecutive load to the one provided
12261 // (regardless of alignment). We search up and down the chain, looking though
12262 // token factors and other loads (but nothing else). As a result, a true result
12263 // indicates that it is safe to create a new consecutive load adjacent to the
12264 // load provided.
12265 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
12266   SDValue Chain = LD->getChain();
12267   EVT VT = LD->getMemoryVT();
12268 
12269   SmallSet<SDNode *, 16> LoadRoots;
12270   SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
12271   SmallSet<SDNode *, 16> Visited;
12272 
12273   // First, search up the chain, branching to follow all token-factor operands.
12274   // If we find a consecutive load, then we're done, otherwise, record all
12275   // nodes just above the top-level loads and token factors.
12276   while (!Queue.empty()) {
12277     SDNode *ChainNext = Queue.pop_back_val();
12278     if (!Visited.insert(ChainNext).second)
12279       continue;
12280 
12281     if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
12282       if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
12283         return true;
12284 
12285       if (!Visited.count(ChainLD->getChain().getNode()))
12286         Queue.push_back(ChainLD->getChain().getNode());
12287     } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
12288       for (const SDUse &O : ChainNext->ops())
12289         if (!Visited.count(O.getNode()))
12290           Queue.push_back(O.getNode());
12291     } else
12292       LoadRoots.insert(ChainNext);
12293   }
12294 
12295   // Second, search down the chain, starting from the top-level nodes recorded
12296   // in the first phase. These top-level nodes are the nodes just above all
12297   // loads and token factors. Starting with their uses, recursively look though
12298   // all loads (just the chain uses) and token factors to find a consecutive
12299   // load.
12300   Visited.clear();
12301   Queue.clear();
12302 
12303   for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
12304        IE = LoadRoots.end(); I != IE; ++I) {
12305     Queue.push_back(*I);
12306 
12307     while (!Queue.empty()) {
12308       SDNode *LoadRoot = Queue.pop_back_val();
12309       if (!Visited.insert(LoadRoot).second)
12310         continue;
12311 
12312       if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
12313         if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
12314           return true;
12315 
12316       for (SDNode::use_iterator UI = LoadRoot->use_begin(),
12317            UE = LoadRoot->use_end(); UI != UE; ++UI)
12318         if (((isa<MemSDNode>(*UI) &&
12319             cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
12320             UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
12321           Queue.push_back(*UI);
12322     }
12323   }
12324 
12325   return false;
12326 }
12327 
12328 /// This function is called when we have proved that a SETCC node can be replaced
12329 /// by subtraction (and other supporting instructions) so that the result of
12330 /// comparison is kept in a GPR instead of CR. This function is purely for
12331 /// codegen purposes and has some flags to guide the codegen process.
12332 static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement,
12333                                      bool Swap, SDLoc &DL, SelectionDAG &DAG) {
12334   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
12335 
12336   // Zero extend the operands to the largest legal integer. Originally, they
12337   // must be of a strictly smaller size.
12338   auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0),
12339                          DAG.getConstant(Size, DL, MVT::i32));
12340   auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1),
12341                          DAG.getConstant(Size, DL, MVT::i32));
12342 
12343   // Swap if needed. Depends on the condition code.
12344   if (Swap)
12345     std::swap(Op0, Op1);
12346 
12347   // Subtract extended integers.
12348   auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1);
12349 
12350   // Move the sign bit to the least significant position and zero out the rest.
12351   // Now the least significant bit carries the result of original comparison.
12352   auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode,
12353                              DAG.getConstant(Size - 1, DL, MVT::i32));
12354   auto Final = Shifted;
12355 
12356   // Complement the result if needed. Based on the condition code.
12357   if (Complement)
12358     Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted,
12359                         DAG.getConstant(1, DL, MVT::i64));
12360 
12361   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final);
12362 }
12363 
12364 SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
12365                                                   DAGCombinerInfo &DCI) const {
12366   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
12367 
12368   SelectionDAG &DAG = DCI.DAG;
12369   SDLoc DL(N);
12370 
12371   // Size of integers being compared has a critical role in the following
12372   // analysis, so we prefer to do this when all types are legal.
12373   if (!DCI.isAfterLegalizeDAG())
12374     return SDValue();
12375 
12376   // If all users of SETCC extend its value to a legal integer type
12377   // then we replace SETCC with a subtraction
12378   for (SDNode::use_iterator UI = N->use_begin(),
12379        UE = N->use_end(); UI != UE; ++UI) {
12380     if (UI->getOpcode() != ISD::ZERO_EXTEND)
12381       return SDValue();
12382   }
12383 
12384   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
12385   auto OpSize = N->getOperand(0).getValueSizeInBits();
12386 
12387   unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();
12388 
12389   if (OpSize < Size) {
12390     switch (CC) {
12391     default: break;
12392     case ISD::SETULT:
12393       return generateEquivalentSub(N, Size, false, false, DL, DAG);
12394     case ISD::SETULE:
12395       return generateEquivalentSub(N, Size, true, true, DL, DAG);
12396     case ISD::SETUGT:
12397       return generateEquivalentSub(N, Size, false, true, DL, DAG);
12398     case ISD::SETUGE:
12399       return generateEquivalentSub(N, Size, true, false, DL, DAG);
12400     }
12401   }
12402 
12403   return SDValue();
12404 }
12405 
12406 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
12407                                                   DAGCombinerInfo &DCI) const {
12408   SelectionDAG &DAG = DCI.DAG;
12409   SDLoc dl(N);
12410 
12411   assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
12412   // If we're tracking CR bits, we need to be careful that we don't have:
12413   //   trunc(binary-ops(zext(x), zext(y)))
12414   // or
12415   //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
12416   // such that we're unnecessarily moving things into GPRs when it would be
12417   // better to keep them in CR bits.
12418 
12419   // Note that trunc here can be an actual i1 trunc, or can be the effective
12420   // truncation that comes from a setcc or select_cc.
12421   if (N->getOpcode() == ISD::TRUNCATE &&
12422       N->getValueType(0) != MVT::i1)
12423     return SDValue();
12424 
12425   if (N->getOperand(0).getValueType() != MVT::i32 &&
12426       N->getOperand(0).getValueType() != MVT::i64)
12427     return SDValue();
12428 
12429   if (N->getOpcode() == ISD::SETCC ||
12430       N->getOpcode() == ISD::SELECT_CC) {
12431     // If we're looking at a comparison, then we need to make sure that the
12432     // high bits (all except for the first) don't matter the result.
12433     ISD::CondCode CC =
12434       cast<CondCodeSDNode>(N->getOperand(
12435         N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
12436     unsigned OpBits = N->getOperand(0).getValueSizeInBits();
12437 
12438     if (ISD::isSignedIntSetCC(CC)) {
12439       if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
12440           DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
12441         return SDValue();
12442     } else if (ISD::isUnsignedIntSetCC(CC)) {
12443       if (!DAG.MaskedValueIsZero(N->getOperand(0),
12444                                  APInt::getHighBitsSet(OpBits, OpBits-1)) ||
12445           !DAG.MaskedValueIsZero(N->getOperand(1),
12446                                  APInt::getHighBitsSet(OpBits, OpBits-1)))
12447         return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
12448                                              : SDValue());
12449     } else {
12450       // This is neither a signed nor an unsigned comparison, just make sure
12451       // that the high bits are equal.
12452       KnownBits Op1Known = DAG.computeKnownBits(N->getOperand(0));
12453       KnownBits Op2Known = DAG.computeKnownBits(N->getOperand(1));
12454 
12455       // We don't really care about what is known about the first bit (if
12456       // anything), so pretend that it is known zero for both to ensure they can
12457       // be compared as constants.
12458       Op1Known.Zero.setBit(0); Op1Known.One.clearBit(0);
12459       Op2Known.Zero.setBit(0); Op2Known.One.clearBit(0);
12460 
12461       if (!Op1Known.isConstant() || !Op2Known.isConstant() ||
12462           Op1Known.getConstant() != Op2Known.getConstant())
12463         return SDValue();
12464     }
12465   }
12466 
12467   // We now know that the higher-order bits are irrelevant, we just need to
12468   // make sure that all of the intermediate operations are bit operations, and
12469   // all inputs are extensions.
12470   if (N->getOperand(0).getOpcode() != ISD::AND &&
12471       N->getOperand(0).getOpcode() != ISD::OR  &&
12472       N->getOperand(0).getOpcode() != ISD::XOR &&
12473       N->getOperand(0).getOpcode() != ISD::SELECT &&
12474       N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
12475       N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
12476       N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
12477       N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
12478       N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
12479     return SDValue();
12480 
12481   if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
12482       N->getOperand(1).getOpcode() != ISD::AND &&
12483       N->getOperand(1).getOpcode() != ISD::OR  &&
12484       N->getOperand(1).getOpcode() != ISD::XOR &&
12485       N->getOperand(1).getOpcode() != ISD::SELECT &&
12486       N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
12487       N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
12488       N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
12489       N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
12490       N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
12491     return SDValue();
12492 
12493   SmallVector<SDValue, 4> Inputs;
12494   SmallVector<SDValue, 8> BinOps, PromOps;
12495   SmallPtrSet<SDNode *, 16> Visited;
12496 
12497   for (unsigned i = 0; i < 2; ++i) {
12498     if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
12499           N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
12500           N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
12501           N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
12502         isa<ConstantSDNode>(N->getOperand(i)))
12503       Inputs.push_back(N->getOperand(i));
12504     else
12505       BinOps.push_back(N->getOperand(i));
12506 
12507     if (N->getOpcode() == ISD::TRUNCATE)
12508       break;
12509   }
12510 
12511   // Visit all inputs, collect all binary operations (and, or, xor and
12512   // select) that are all fed by extensions.
12513   while (!BinOps.empty()) {
12514     SDValue BinOp = BinOps.back();
12515     BinOps.pop_back();
12516 
12517     if (!Visited.insert(BinOp.getNode()).second)
12518       continue;
12519 
12520     PromOps.push_back(BinOp);
12521 
12522     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
12523       // The condition of the select is not promoted.
12524       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
12525         continue;
12526       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
12527         continue;
12528 
12529       if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
12530             BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
12531             BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
12532            BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
12533           isa<ConstantSDNode>(BinOp.getOperand(i))) {
12534         Inputs.push_back(BinOp.getOperand(i));
12535       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
12536                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
12537                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
12538                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
12539                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
12540                  BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
12541                  BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
12542                  BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
12543                  BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
12544         BinOps.push_back(BinOp.getOperand(i));
12545       } else {
12546         // We have an input that is not an extension or another binary
12547         // operation; we'll abort this transformation.
12548         return SDValue();
12549       }
12550     }
12551   }
12552 
12553   // Make sure that this is a self-contained cluster of operations (which
12554   // is not quite the same thing as saying that everything has only one
12555   // use).
12556   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12557     if (isa<ConstantSDNode>(Inputs[i]))
12558       continue;
12559 
12560     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
12561                               UE = Inputs[i].getNode()->use_end();
12562          UI != UE; ++UI) {
12563       SDNode *User = *UI;
12564       if (User != N && !Visited.count(User))
12565         return SDValue();
12566 
12567       // Make sure that we're not going to promote the non-output-value
12568       // operand(s) or SELECT or SELECT_CC.
12569       // FIXME: Although we could sometimes handle this, and it does occur in
12570       // practice that one of the condition inputs to the select is also one of
12571       // the outputs, we currently can't deal with this.
12572       if (User->getOpcode() == ISD::SELECT) {
12573         if (User->getOperand(0) == Inputs[i])
12574           return SDValue();
12575       } else if (User->getOpcode() == ISD::SELECT_CC) {
12576         if (User->getOperand(0) == Inputs[i] ||
12577             User->getOperand(1) == Inputs[i])
12578           return SDValue();
12579       }
12580     }
12581   }
12582 
12583   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
12584     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
12585                               UE = PromOps[i].getNode()->use_end();
12586          UI != UE; ++UI) {
12587       SDNode *User = *UI;
12588       if (User != N && !Visited.count(User))
12589         return SDValue();
12590 
12591       // Make sure that we're not going to promote the non-output-value
12592       // operand(s) or SELECT or SELECT_CC.
12593       // FIXME: Although we could sometimes handle this, and it does occur in
12594       // practice that one of the condition inputs to the select is also one of
12595       // the outputs, we currently can't deal with this.
12596       if (User->getOpcode() == ISD::SELECT) {
12597         if (User->getOperand(0) == PromOps[i])
12598           return SDValue();
12599       } else if (User->getOpcode() == ISD::SELECT_CC) {
12600         if (User->getOperand(0) == PromOps[i] ||
12601             User->getOperand(1) == PromOps[i])
12602           return SDValue();
12603       }
12604     }
12605   }
12606 
12607   // Replace all inputs with the extension operand.
12608   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12609     // Constants may have users outside the cluster of to-be-promoted nodes,
12610     // and so we need to replace those as we do the promotions.
12611     if (isa<ConstantSDNode>(Inputs[i]))
12612       continue;
12613     else
12614       DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
12615   }
12616 
12617   std::list<HandleSDNode> PromOpHandles;
12618   for (auto &PromOp : PromOps)
12619     PromOpHandles.emplace_back(PromOp);
12620 
12621   // Replace all operations (these are all the same, but have a different
12622   // (i1) return type). DAG.getNode will validate that the types of
12623   // a binary operator match, so go through the list in reverse so that
12624   // we've likely promoted both operands first. Any intermediate truncations or
12625   // extensions disappear.
12626   while (!PromOpHandles.empty()) {
12627     SDValue PromOp = PromOpHandles.back().getValue();
12628     PromOpHandles.pop_back();
12629 
12630     if (PromOp.getOpcode() == ISD::TRUNCATE ||
12631         PromOp.getOpcode() == ISD::SIGN_EXTEND ||
12632         PromOp.getOpcode() == ISD::ZERO_EXTEND ||
12633         PromOp.getOpcode() == ISD::ANY_EXTEND) {
12634       if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
12635           PromOp.getOperand(0).getValueType() != MVT::i1) {
12636         // The operand is not yet ready (see comment below).
12637         PromOpHandles.emplace_front(PromOp);
12638         continue;
12639       }
12640 
12641       SDValue RepValue = PromOp.getOperand(0);
12642       if (isa<ConstantSDNode>(RepValue))
12643         RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);
12644 
12645       DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
12646       continue;
12647     }
12648 
12649     unsigned C;
12650     switch (PromOp.getOpcode()) {
12651     default:             C = 0; break;
12652     case ISD::SELECT:    C = 1; break;
12653     case ISD::SELECT_CC: C = 2; break;
12654     }
12655 
12656     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
12657          PromOp.getOperand(C).getValueType() != MVT::i1) ||
12658         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
12659          PromOp.getOperand(C+1).getValueType() != MVT::i1)) {
12660       // The to-be-promoted operands of this node have not yet been
12661       // promoted (this should be rare because we're going through the
12662       // list backward, but if one of the operands has several users in
12663       // this cluster of to-be-promoted nodes, it is possible).
12664       PromOpHandles.emplace_front(PromOp);
12665       continue;
12666     }
12667 
12668     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
12669                                 PromOp.getNode()->op_end());
12670 
12671     // If there are any constant inputs, make sure they're replaced now.
12672     for (unsigned i = 0; i < 2; ++i)
12673       if (isa<ConstantSDNode>(Ops[C+i]))
12674         Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]);
12675 
12676     DAG.ReplaceAllUsesOfValueWith(PromOp,
12677       DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
12678   }
12679 
12680   // Now we're left with the initial truncation itself.
12681   if (N->getOpcode() == ISD::TRUNCATE)
12682     return N->getOperand(0);
12683 
12684   // Otherwise, this is a comparison. The operands to be compared have just
12685   // changed type (to i1), but everything else is the same.
12686   return SDValue(N, 0);
12687 }
12688 
12689 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
12690                                                   DAGCombinerInfo &DCI) const {
12691   SelectionDAG &DAG = DCI.DAG;
12692   SDLoc dl(N);
12693 
12694   // If we're tracking CR bits, we need to be careful that we don't have:
12695   //   zext(binary-ops(trunc(x), trunc(y)))
12696   // or
12697   //   zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
12698   // such that we're unnecessarily moving things into CR bits that can more
12699   // efficiently stay in GPRs. Note that if we're not certain that the high
12700   // bits are set as required by the final extension, we still may need to do
12701   // some masking to get the proper behavior.
12702 
12703   // This same functionality is important on PPC64 when dealing with
12704   // 32-to-64-bit extensions; these occur often when 32-bit values are used as
12705   // the return values of functions. Because it is so similar, it is handled
12706   // here as well.
12707 
12708   if (N->getValueType(0) != MVT::i32 &&
12709       N->getValueType(0) != MVT::i64)
12710     return SDValue();
12711 
12712   if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
12713         (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
12714     return SDValue();
12715 
12716   if (N->getOperand(0).getOpcode() != ISD::AND &&
12717       N->getOperand(0).getOpcode() != ISD::OR  &&
12718       N->getOperand(0).getOpcode() != ISD::XOR &&
12719       N->getOperand(0).getOpcode() != ISD::SELECT &&
12720       N->getOperand(0).getOpcode() != ISD::SELECT_CC)
12721     return SDValue();
12722 
12723   SmallVector<SDValue, 4> Inputs;
12724   SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
12725   SmallPtrSet<SDNode *, 16> Visited;
12726 
12727   // Visit all inputs, collect all binary operations (and, or, xor and
12728   // select) that are all fed by truncations.
12729   while (!BinOps.empty()) {
12730     SDValue BinOp = BinOps.back();
12731     BinOps.pop_back();
12732 
12733     if (!Visited.insert(BinOp.getNode()).second)
12734       continue;
12735 
12736     PromOps.push_back(BinOp);
12737 
12738     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
12739       // The condition of the select is not promoted.
12740       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
12741         continue;
12742       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
12743         continue;
12744 
12745       if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
12746           isa<ConstantSDNode>(BinOp.getOperand(i))) {
12747         Inputs.push_back(BinOp.getOperand(i));
12748       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
12749                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
12750                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
12751                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
12752                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
12753         BinOps.push_back(BinOp.getOperand(i));
12754       } else {
12755         // We have an input that is not a truncation or another binary
12756         // operation; we'll abort this transformation.
12757         return SDValue();
12758       }
12759     }
12760   }
12761 
12762   // The operands of a select that must be truncated when the select is
12763   // promoted because the operand is actually part of the to-be-promoted set.
12764   DenseMap<SDNode *, EVT> SelectTruncOp[2];
12765 
12766   // Make sure that this is a self-contained cluster of operations (which
12767   // is not quite the same thing as saying that everything has only one
12768   // use).
12769   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12770     if (isa<ConstantSDNode>(Inputs[i]))
12771       continue;
12772 
12773     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
12774                               UE = Inputs[i].getNode()->use_end();
12775          UI != UE; ++UI) {
12776       SDNode *User = *UI;
12777       if (User != N && !Visited.count(User))
12778         return SDValue();
12779 
12780       // If we're going to promote the non-output-value operand(s) or SELECT or
12781       // SELECT_CC, record them for truncation.
12782       if (User->getOpcode() == ISD::SELECT) {
12783         if (User->getOperand(0) == Inputs[i])
12784           SelectTruncOp[0].insert(std::make_pair(User,
12785                                     User->getOperand(0).getValueType()));
12786       } else if (User->getOpcode() == ISD::SELECT_CC) {
12787         if (User->getOperand(0) == Inputs[i])
12788           SelectTruncOp[0].insert(std::make_pair(User,
12789                                     User->getOperand(0).getValueType()));
12790         if (User->getOperand(1) == Inputs[i])
12791           SelectTruncOp[1].insert(std::make_pair(User,
12792                                     User->getOperand(1).getValueType()));
12793       }
12794     }
12795   }
12796 
12797   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
12798     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
12799                               UE = PromOps[i].getNode()->use_end();
12800          UI != UE; ++UI) {
12801       SDNode *User = *UI;
12802       if (User != N && !Visited.count(User))
12803         return SDValue();
12804 
12805       // If we're going to promote the non-output-value operand(s) or SELECT or
12806       // SELECT_CC, record them for truncation.
12807       if (User->getOpcode() == ISD::SELECT) {
12808         if (User->getOperand(0) == PromOps[i])
12809           SelectTruncOp[0].insert(std::make_pair(User,
12810                                     User->getOperand(0).getValueType()));
12811       } else if (User->getOpcode() == ISD::SELECT_CC) {
12812         if (User->getOperand(0) == PromOps[i])
12813           SelectTruncOp[0].insert(std::make_pair(User,
12814                                     User->getOperand(0).getValueType()));
12815         if (User->getOperand(1) == PromOps[i])
12816           SelectTruncOp[1].insert(std::make_pair(User,
12817                                     User->getOperand(1).getValueType()));
12818       }
12819     }
12820   }
12821 
12822   unsigned PromBits = N->getOperand(0).getValueSizeInBits();
12823   bool ReallyNeedsExt = false;
12824   if (N->getOpcode() != ISD::ANY_EXTEND) {
12825     // If all of the inputs are not already sign/zero extended, then
12826     // we'll still need to do that at the end.
12827     for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12828       if (isa<ConstantSDNode>(Inputs[i]))
12829         continue;
12830 
12831       unsigned OpBits =
12832         Inputs[i].getOperand(0).getValueSizeInBits();
12833       assert(PromBits < OpBits && "Truncation not to a smaller bit count?");
12834 
12835       if ((N->getOpcode() == ISD::ZERO_EXTEND &&
12836            !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
12837                                   APInt::getHighBitsSet(OpBits,
12838                                                         OpBits-PromBits))) ||
12839           (N->getOpcode() == ISD::SIGN_EXTEND &&
12840            DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
12841              (OpBits-(PromBits-1)))) {
12842         ReallyNeedsExt = true;
12843         break;
12844       }
12845     }
12846   }
12847 
12848   // Replace all inputs, either with the truncation operand, or a
12849   // truncation or extension to the final output type.
12850   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12851     // Constant inputs need to be replaced with the to-be-promoted nodes that
12852     // use them because they might have users outside of the cluster of
12853     // promoted nodes.
12854     if (isa<ConstantSDNode>(Inputs[i]))
12855       continue;
12856 
12857     SDValue InSrc = Inputs[i].getOperand(0);
12858     if (Inputs[i].getValueType() == N->getValueType(0))
12859       DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
12860     else if (N->getOpcode() == ISD::SIGN_EXTEND)
12861       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
12862         DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
12863     else if (N->getOpcode() == ISD::ZERO_EXTEND)
12864       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
12865         DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
12866     else
12867       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
12868         DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
12869   }
12870 
12871   std::list<HandleSDNode> PromOpHandles;
12872   for (auto &PromOp : PromOps)
12873     PromOpHandles.emplace_back(PromOp);
12874 
12875   // Replace all operations (these are all the same, but have a different
12876   // (promoted) return type). DAG.getNode will validate that the types of
12877   // a binary operator match, so go through the list in reverse so that
12878   // we've likely promoted both operands first.
12879   while (!PromOpHandles.empty()) {
12880     SDValue PromOp = PromOpHandles.back().getValue();
12881     PromOpHandles.pop_back();
12882 
12883     unsigned C;
12884     switch (PromOp.getOpcode()) {
12885     default:             C = 0; break;
12886     case ISD::SELECT:    C = 1; break;
12887     case ISD::SELECT_CC: C = 2; break;
12888     }
12889 
12890     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
12891          PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
12892         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
12893          PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) {
12894       // The to-be-promoted operands of this node have not yet been
12895       // promoted (this should be rare because we're going through the
12896       // list backward, but if one of the operands has several users in
12897       // this cluster of to-be-promoted nodes, it is possible).
12898       PromOpHandles.emplace_front(PromOp);
12899       continue;
12900     }
12901 
12902     // For SELECT and SELECT_CC nodes, we do a similar check for any
12903     // to-be-promoted comparison inputs.
12904     if (PromOp.getOpcode() == ISD::SELECT ||
12905         PromOp.getOpcode() == ISD::SELECT_CC) {
12906       if ((SelectTruncOp[0].count(PromOp.getNode()) &&
12907            PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
12908           (SelectTruncOp[1].count(PromOp.getNode()) &&
12909            PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
12910         PromOpHandles.emplace_front(PromOp);
12911         continue;
12912       }
12913     }
12914 
12915     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
12916                                 PromOp.getNode()->op_end());
12917 
12918     // If this node has constant inputs, then they'll need to be promoted here.
12919     for (unsigned i = 0; i < 2; ++i) {
12920       if (!isa<ConstantSDNode>(Ops[C+i]))
12921         continue;
12922       if (Ops[C+i].getValueType() == N->getValueType(0))
12923         continue;
12924 
12925       if (N->getOpcode() == ISD::SIGN_EXTEND)
12926         Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
12927       else if (N->getOpcode() == ISD::ZERO_EXTEND)
12928         Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
12929       else
12930         Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
12931     }
12932 
12933     // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
12934     // truncate them again to the original value type.
12935     if (PromOp.getOpcode() == ISD::SELECT ||
12936         PromOp.getOpcode() == ISD::SELECT_CC) {
12937       auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
12938       if (SI0 != SelectTruncOp[0].end())
12939         Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
12940       auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
12941       if (SI1 != SelectTruncOp[1].end())
12942         Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
12943     }
12944 
12945     DAG.ReplaceAllUsesOfValueWith(PromOp,
12946       DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
12947   }
12948 
12949   // Now we're left with the initial extension itself.
12950   if (!ReallyNeedsExt)
12951     return N->getOperand(0);
12952 
12953   // To zero extend, just mask off everything except for the first bit (in the
12954   // i1 case).
12955   if (N->getOpcode() == ISD::ZERO_EXTEND)
12956     return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
12957                        DAG.getConstant(APInt::getLowBitsSet(
12958                                          N->getValueSizeInBits(0), PromBits),
12959                                        dl, N->getValueType(0)));
12960 
12961   assert(N->getOpcode() == ISD::SIGN_EXTEND &&
12962          "Invalid extension type");
12963   EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
12964   SDValue ShiftCst =
12965       DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
12966   return DAG.getNode(
12967       ISD::SRA, dl, N->getValueType(0),
12968       DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
12969       ShiftCst);
12970 }
12971 
12972 SDValue PPCTargetLowering::combineSetCC(SDNode *N,
12973                                         DAGCombinerInfo &DCI) const {
12974   assert(N->getOpcode() == ISD::SETCC &&
12975          "Should be called with a SETCC node");
12976 
12977   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
12978   if (CC == ISD::SETNE || CC == ISD::SETEQ) {
12979     SDValue LHS = N->getOperand(0);
12980     SDValue RHS = N->getOperand(1);
12981 
12982     // If there is a '0 - y' pattern, canonicalize the pattern to the RHS.
12983     if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
12984         LHS.hasOneUse())
12985       std::swap(LHS, RHS);
12986 
12987     // x == 0-y --> x+y == 0
12988     // x != 0-y --> x+y != 0
12989     if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
12990         RHS.hasOneUse()) {
12991       SDLoc DL(N);
12992       SelectionDAG &DAG = DCI.DAG;
12993       EVT VT = N->getValueType(0);
12994       EVT OpVT = LHS.getValueType();
12995       SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
12996       return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
12997     }
12998   }
12999 
13000   return DAGCombineTruncBoolExt(N, DCI);
13001 }
13002 
13003 // Is this an extending load from an f32 to an f64?
13004 static bool isFPExtLoad(SDValue Op) {
13005   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()))
13006     return LD->getExtensionType() == ISD::EXTLOAD &&
13007       Op.getValueType() == MVT::f64;
13008   return false;
13009 }
13010 
13011 /// Reduces the number of fp-to-int conversion when building a vector.
13012 ///
13013 /// If this vector is built out of floating to integer conversions,
13014 /// transform it to a vector built out of floating point values followed by a
13015 /// single floating to integer conversion of the vector.
13016 /// Namely  (build_vector (fptosi $A), (fptosi $B), ...)
13017 /// becomes (fptosi (build_vector ($A, $B, ...)))
13018 SDValue PPCTargetLowering::
13019 combineElementTruncationToVectorTruncation(SDNode *N,
13020                                            DAGCombinerInfo &DCI) const {
13021   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13022          "Should be called with a BUILD_VECTOR node");
13023 
13024   SelectionDAG &DAG = DCI.DAG;
13025   SDLoc dl(N);
13026 
13027   SDValue FirstInput = N->getOperand(0);
13028   assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
13029          "The input operand must be an fp-to-int conversion.");
13030 
13031   // This combine happens after legalization so the fp_to_[su]i nodes are
13032   // already converted to PPCSISD nodes.
13033   unsigned FirstConversion = FirstInput.getOperand(0).getOpcode();
13034   if (FirstConversion == PPCISD::FCTIDZ ||
13035       FirstConversion == PPCISD::FCTIDUZ ||
13036       FirstConversion == PPCISD::FCTIWZ ||
13037       FirstConversion == PPCISD::FCTIWUZ) {
13038     bool IsSplat = true;
13039     bool Is32Bit = FirstConversion == PPCISD::FCTIWZ ||
13040       FirstConversion == PPCISD::FCTIWUZ;
13041     EVT SrcVT = FirstInput.getOperand(0).getValueType();
13042     SmallVector<SDValue, 4> Ops;
13043     EVT TargetVT = N->getValueType(0);
13044     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
13045       SDValue NextOp = N->getOperand(i);
13046       if (NextOp.getOpcode() != PPCISD::MFVSR)
13047         return SDValue();
13048       unsigned NextConversion = NextOp.getOperand(0).getOpcode();
13049       if (NextConversion != FirstConversion)
13050         return SDValue();
13051       // If we are converting to 32-bit integers, we need to add an FP_ROUND.
13052       // This is not valid if the input was originally double precision. It is
13053       // also not profitable to do unless this is an extending load in which
13054       // case doing this combine will allow us to combine consecutive loads.
13055       if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0)))
13056         return SDValue();
13057       if (N->getOperand(i) != FirstInput)
13058         IsSplat = false;
13059     }
13060 
13061     // If this is a splat, we leave it as-is since there will be only a single
13062     // fp-to-int conversion followed by a splat of the integer. This is better
13063     // for 32-bit and smaller ints and neutral for 64-bit ints.
13064     if (IsSplat)
13065       return SDValue();
13066 
13067     // Now that we know we have the right type of node, get its operands
13068     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
13069       SDValue In = N->getOperand(i).getOperand(0);
13070       if (Is32Bit) {
13071         // For 32-bit values, we need to add an FP_ROUND node (if we made it
13072         // here, we know that all inputs are extending loads so this is safe).
13073         if (In.isUndef())
13074           Ops.push_back(DAG.getUNDEF(SrcVT));
13075         else {
13076           SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl,
13077                                       MVT::f32, In.getOperand(0),
13078                                       DAG.getIntPtrConstant(1, dl));
13079           Ops.push_back(Trunc);
13080         }
13081       } else
13082         Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
13083     }
13084 
13085     unsigned Opcode;
13086     if (FirstConversion == PPCISD::FCTIDZ ||
13087         FirstConversion == PPCISD::FCTIWZ)
13088       Opcode = ISD::FP_TO_SINT;
13089     else
13090       Opcode = ISD::FP_TO_UINT;
13091 
13092     EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;
13093     SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
13094     return DAG.getNode(Opcode, dl, TargetVT, BV);
13095   }
13096   return SDValue();
13097 }
13098 
13099 /// Reduce the number of loads when building a vector.
13100 ///
13101 /// Building a vector out of multiple loads can be converted to a load
13102 /// of the vector type if the loads are consecutive. If the loads are
13103 /// consecutive but in descending order, a shuffle is added at the end
13104 /// to reorder the vector.
13105 static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) {
13106   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13107          "Should be called with a BUILD_VECTOR node");
13108 
13109   SDLoc dl(N);
13110 
13111   // Return early for non byte-sized type, as they can't be consecutive.
13112   if (!N->getValueType(0).getVectorElementType().isByteSized())
13113     return SDValue();
13114 
13115   bool InputsAreConsecutiveLoads = true;
13116   bool InputsAreReverseConsecutive = true;
13117   unsigned ElemSize = N->getValueType(0).getScalarType().getStoreSize();
13118   SDValue FirstInput = N->getOperand(0);
13119   bool IsRoundOfExtLoad = false;
13120 
13121   if (FirstInput.getOpcode() == ISD::FP_ROUND &&
13122       FirstInput.getOperand(0).getOpcode() == ISD::LOAD) {
13123     LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0));
13124     IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD;
13125   }
13126   // Not a build vector of (possibly fp_rounded) loads.
13127   if ((!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) ||
13128       N->getNumOperands() == 1)
13129     return SDValue();
13130 
13131   for (int i = 1, e = N->getNumOperands(); i < e; ++i) {
13132     // If any inputs are fp_round(extload), they all must be.
13133     if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND)
13134       return SDValue();
13135 
13136     SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) :
13137       N->getOperand(i);
13138     if (NextInput.getOpcode() != ISD::LOAD)
13139       return SDValue();
13140 
13141     SDValue PreviousInput =
13142       IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1);
13143     LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput);
13144     LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput);
13145 
13146     // If any inputs are fp_round(extload), they all must be.
13147     if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD)
13148       return SDValue();
13149 
13150     if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG))
13151       InputsAreConsecutiveLoads = false;
13152     if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG))
13153       InputsAreReverseConsecutive = false;
13154 
13155     // Exit early if the loads are neither consecutive nor reverse consecutive.
13156     if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
13157       return SDValue();
13158   }
13159 
13160   assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
13161          "The loads cannot be both consecutive and reverse consecutive.");
13162 
13163   SDValue FirstLoadOp =
13164     IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput;
13165   SDValue LastLoadOp =
13166     IsRoundOfExtLoad ? N->getOperand(N->getNumOperands()-1).getOperand(0) :
13167                        N->getOperand(N->getNumOperands()-1);
13168 
13169   LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
13170   LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
13171   if (InputsAreConsecutiveLoads) {
13172     assert(LD1 && "Input needs to be a LoadSDNode.");
13173     return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
13174                        LD1->getBasePtr(), LD1->getPointerInfo(),
13175                        LD1->getAlignment());
13176   }
13177   if (InputsAreReverseConsecutive) {
13178     assert(LDL && "Input needs to be a LoadSDNode.");
13179     SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
13180                                LDL->getBasePtr(), LDL->getPointerInfo(),
13181                                LDL->getAlignment());
13182     SmallVector<int, 16> Ops;
13183     for (int i = N->getNumOperands() - 1; i >= 0; i--)
13184       Ops.push_back(i);
13185 
13186     return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
13187                                 DAG.getUNDEF(N->getValueType(0)), Ops);
13188   }
13189   return SDValue();
13190 }
13191 
13192 // This function adds the required vector_shuffle needed to get
13193 // the elements of the vector extract in the correct position
13194 // as specified by the CorrectElems encoding.
13195 static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
13196                                       SDValue Input, uint64_t Elems,
13197                                       uint64_t CorrectElems) {
13198   SDLoc dl(N);
13199 
13200   unsigned NumElems = Input.getValueType().getVectorNumElements();
13201   SmallVector<int, 16> ShuffleMask(NumElems, -1);
13202 
13203   // Knowing the element indices being extracted from the original
13204   // vector and the order in which they're being inserted, just put
13205   // them at element indices required for the instruction.
13206   for (unsigned i = 0; i < N->getNumOperands(); i++) {
13207     if (DAG.getDataLayout().isLittleEndian())
13208       ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
13209     else
13210       ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
13211     CorrectElems = CorrectElems >> 8;
13212     Elems = Elems >> 8;
13213   }
13214 
13215   SDValue Shuffle =
13216       DAG.getVectorShuffle(Input.getValueType(), dl, Input,
13217                            DAG.getUNDEF(Input.getValueType()), ShuffleMask);
13218 
13219   EVT VT = N->getValueType(0);
13220   SDValue Conv = DAG.getBitcast(VT, Shuffle);
13221 
13222   EVT ExtVT = EVT::getVectorVT(*DAG.getContext(),
13223                                Input.getValueType().getVectorElementType(),
13224                                VT.getVectorNumElements());
13225   return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Conv,
13226                      DAG.getValueType(ExtVT));
13227 }
13228 
13229 // Look for build vector patterns where input operands come from sign
13230 // extended vector_extract elements of specific indices. If the correct indices
13231 // aren't used, add a vector shuffle to fix up the indices and create
13232 // SIGN_EXTEND_INREG node which selects the vector sign extend instructions
13233 // during instruction selection.
13234 static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
13235   // This array encodes the indices that the vector sign extend instructions
13236   // extract from when extending from one type to another for both BE and LE.
13237   // The right nibble of each byte corresponds to the LE incides.
13238   // and the left nibble of each byte corresponds to the BE incides.
13239   // For example: 0x3074B8FC  byte->word
13240   // For LE: the allowed indices are: 0x0,0x4,0x8,0xC
13241   // For BE: the allowed indices are: 0x3,0x7,0xB,0xF
13242   // For example: 0x000070F8  byte->double word
13243   // For LE: the allowed indices are: 0x0,0x8
13244   // For BE: the allowed indices are: 0x7,0xF
13245   uint64_t TargetElems[] = {
13246       0x3074B8FC, // b->w
13247       0x000070F8, // b->d
13248       0x10325476, // h->w
13249       0x00003074, // h->d
13250       0x00001032, // w->d
13251   };
13252 
13253   uint64_t Elems = 0;
13254   int Index;
13255   SDValue Input;
13256 
13257   auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
13258     if (!Op)
13259       return false;
13260     if (Op.getOpcode() != ISD::SIGN_EXTEND &&
13261         Op.getOpcode() != ISD::SIGN_EXTEND_INREG)
13262       return false;
13263 
13264     // A SIGN_EXTEND_INREG might be fed by an ANY_EXTEND to produce a value
13265     // of the right width.
13266     SDValue Extract = Op.getOperand(0);
13267     if (Extract.getOpcode() == ISD::ANY_EXTEND)
13268       Extract = Extract.getOperand(0);
13269     if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
13270       return false;
13271 
13272     ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
13273     if (!ExtOp)
13274       return false;
13275 
13276     Index = ExtOp->getZExtValue();
13277     if (Input && Input != Extract.getOperand(0))
13278       return false;
13279 
13280     if (!Input)
13281       Input = Extract.getOperand(0);
13282 
13283     Elems = Elems << 8;
13284     Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
13285     Elems |= Index;
13286 
13287     return true;
13288   };
13289 
13290   // If the build vector operands aren't sign extended vector extracts,
13291   // of the same input vector, then return.
13292   for (unsigned i = 0; i < N->getNumOperands(); i++) {
13293     if (!isSExtOfVecExtract(N->getOperand(i))) {
13294       return SDValue();
13295     }
13296   }
13297 
13298   // If the vector extract indicies are not correct, add the appropriate
13299   // vector_shuffle.
13300   int TgtElemArrayIdx;
13301   int InputSize = Input.getValueType().getScalarSizeInBits();
13302   int OutputSize = N->getValueType(0).getScalarSizeInBits();
13303   if (InputSize + OutputSize == 40)
13304     TgtElemArrayIdx = 0;
13305   else if (InputSize + OutputSize == 72)
13306     TgtElemArrayIdx = 1;
13307   else if (InputSize + OutputSize == 48)
13308     TgtElemArrayIdx = 2;
13309   else if (InputSize + OutputSize == 80)
13310     TgtElemArrayIdx = 3;
13311   else if (InputSize + OutputSize == 96)
13312     TgtElemArrayIdx = 4;
13313   else
13314     return SDValue();
13315 
13316   uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
13317   CorrectElems = DAG.getDataLayout().isLittleEndian()
13318                      ? CorrectElems & 0x0F0F0F0F0F0F0F0F
13319                      : CorrectElems & 0xF0F0F0F0F0F0F0F0;
13320   if (Elems != CorrectElems) {
13321     return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
13322   }
13323 
13324   // Regular lowering will catch cases where a shuffle is not needed.
13325   return SDValue();
13326 }
13327 
13328 // Look for the pattern of a load from a narrow width to i128, feeding
13329 // into a BUILD_VECTOR of v1i128. Replace this sequence with a PPCISD node
13330 // (LXVRZX). This node represents a zero extending load that will be matched
13331 // to the Load VSX Vector Rightmost instructions.
13332 static SDValue combineBVZEXTLOAD(SDNode *N, SelectionDAG &DAG) {
13333   SDLoc DL(N);
13334 
13335   // This combine is only eligible for a BUILD_VECTOR of v1i128.
13336   if (N->getValueType(0) != MVT::v1i128)
13337     return SDValue();
13338 
13339   SDValue Operand = N->getOperand(0);
13340   // Proceed with the transformation if the operand to the BUILD_VECTOR
13341   // is a load instruction.
13342   if (Operand.getOpcode() != ISD::LOAD)
13343     return SDValue();
13344 
13345   LoadSDNode *LD = dyn_cast<LoadSDNode>(Operand);
13346   EVT MemoryType = LD->getMemoryVT();
13347 
13348   // This transformation is only valid if the we are loading either a byte,
13349   // halfword, word, or doubleword.
13350   bool ValidLDType = MemoryType == MVT::i8 || MemoryType == MVT::i16 ||
13351                      MemoryType == MVT::i32 || MemoryType == MVT::i64;
13352 
13353   // Ensure that the load from the narrow width is being zero extended to i128.
13354   if (!ValidLDType ||
13355       (LD->getExtensionType() != ISD::ZEXTLOAD &&
13356        LD->getExtensionType() != ISD::EXTLOAD))
13357     return SDValue();
13358 
13359   SDValue LoadOps[] = {
13360       LD->getChain(), LD->getBasePtr(),
13361       DAG.getIntPtrConstant(MemoryType.getScalarSizeInBits(), DL)};
13362 
13363   return DAG.getMemIntrinsicNode(PPCISD::LXVRZX, DL,
13364                                  DAG.getVTList(MVT::v1i128, MVT::Other),
13365                                  LoadOps, MemoryType, LD->getMemOperand());
13366 }
13367 
13368 SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
13369                                                  DAGCombinerInfo &DCI) const {
13370   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13371          "Should be called with a BUILD_VECTOR node");
13372 
13373   SelectionDAG &DAG = DCI.DAG;
13374   SDLoc dl(N);
13375 
13376   if (!Subtarget.hasVSX())
13377     return SDValue();
13378 
13379   // The target independent DAG combiner will leave a build_vector of
13380   // float-to-int conversions intact. We can generate MUCH better code for
13381   // a float-to-int conversion of a vector of floats.
13382   SDValue FirstInput = N->getOperand(0);
13383   if (FirstInput.getOpcode() == PPCISD::MFVSR) {
13384     SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
13385     if (Reduced)
13386       return Reduced;
13387   }
13388 
13389   // If we're building a vector out of consecutive loads, just load that
13390   // vector type.
13391   SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG);
13392   if (Reduced)
13393     return Reduced;
13394 
13395   // If we're building a vector out of extended elements from another vector
13396   // we have P9 vector integer extend instructions. The code assumes legal
13397   // input types (i.e. it can't handle things like v4i16) so do not run before
13398   // legalization.
13399   if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) {
13400     Reduced = combineBVOfVecSExt(N, DAG);
13401     if (Reduced)
13402       return Reduced;
13403   }
13404 
13405   // On Power10, the Load VSX Vector Rightmost instructions can be utilized
13406   // if this is a BUILD_VECTOR of v1i128, and if the operand to the BUILD_VECTOR
13407   // is a load from <valid narrow width> to i128.
13408   if (Subtarget.isISA3_1()) {
13409     SDValue BVOfZLoad = combineBVZEXTLOAD(N, DAG);
13410     if (BVOfZLoad)
13411       return BVOfZLoad;
13412   }
13413 
13414   if (N->getValueType(0) != MVT::v2f64)
13415     return SDValue();
13416 
13417   // Looking for:
13418   // (build_vector ([su]int_to_fp (extractelt 0)), [su]int_to_fp (extractelt 1))
13419   if (FirstInput.getOpcode() != ISD::SINT_TO_FP &&
13420       FirstInput.getOpcode() != ISD::UINT_TO_FP)
13421     return SDValue();
13422   if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
13423       N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
13424     return SDValue();
13425   if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
13426     return SDValue();
13427 
13428   SDValue Ext1 = FirstInput.getOperand(0);
13429   SDValue Ext2 = N->getOperand(1).getOperand(0);
13430   if(Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
13431      Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
13432     return SDValue();
13433 
13434   ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
13435   ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
13436   if (!Ext1Op || !Ext2Op)
13437     return SDValue();
13438   if (Ext1.getOperand(0).getValueType() != MVT::v4i32 ||
13439       Ext1.getOperand(0) != Ext2.getOperand(0))
13440     return SDValue();
13441 
13442   int FirstElem = Ext1Op->getZExtValue();
13443   int SecondElem = Ext2Op->getZExtValue();
13444   int SubvecIdx;
13445   if (FirstElem == 0 && SecondElem == 1)
13446     SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
13447   else if (FirstElem == 2 && SecondElem == 3)
13448     SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
13449   else
13450     return SDValue();
13451 
13452   SDValue SrcVec = Ext1.getOperand(0);
13453   auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
13454     PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
13455   return DAG.getNode(NodeType, dl, MVT::v2f64,
13456                      SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
13457 }
13458 
13459 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
13460                                               DAGCombinerInfo &DCI) const {
13461   assert((N->getOpcode() == ISD::SINT_TO_FP ||
13462           N->getOpcode() == ISD::UINT_TO_FP) &&
13463          "Need an int -> FP conversion node here");
13464 
13465   if (useSoftFloat() || !Subtarget.has64BitSupport())
13466     return SDValue();
13467 
13468   SelectionDAG &DAG = DCI.DAG;
13469   SDLoc dl(N);
13470   SDValue Op(N, 0);
13471 
13472   // Don't handle ppc_fp128 here or conversions that are out-of-range capable
13473   // from the hardware.
13474   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
13475     return SDValue();
13476   if (!Op.getOperand(0).getValueType().isSimple())
13477     return SDValue();
13478   if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
13479       Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
13480     return SDValue();
13481 
13482   SDValue FirstOperand(Op.getOperand(0));
13483   bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
13484     (FirstOperand.getValueType() == MVT::i8 ||
13485      FirstOperand.getValueType() == MVT::i16);
13486   if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
13487     bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
13488     bool DstDouble = Op.getValueType() == MVT::f64;
13489     unsigned ConvOp = Signed ?
13490       (DstDouble ? PPCISD::FCFID  : PPCISD::FCFIDS) :
13491       (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
13492     SDValue WidthConst =
13493       DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
13494                             dl, false);
13495     LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
13496     SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
13497     SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
13498                                          DAG.getVTList(MVT::f64, MVT::Other),
13499                                          Ops, MVT::i8, LDN->getMemOperand());
13500 
13501     // For signed conversion, we need to sign-extend the value in the VSR
13502     if (Signed) {
13503       SDValue ExtOps[] = { Ld, WidthConst };
13504       SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
13505       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
13506     } else
13507       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
13508   }
13509 
13510 
13511   // For i32 intermediate values, unfortunately, the conversion functions
13512   // leave the upper 32 bits of the value are undefined. Within the set of
13513   // scalar instructions, we have no method for zero- or sign-extending the
13514   // value. Thus, we cannot handle i32 intermediate values here.
13515   if (Op.getOperand(0).getValueType() == MVT::i32)
13516     return SDValue();
13517 
13518   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
13519          "UINT_TO_FP is supported only with FPCVT");
13520 
13521   // If we have FCFIDS, then use it when converting to single-precision.
13522   // Otherwise, convert to double-precision and then round.
13523   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
13524                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
13525                                                             : PPCISD::FCFIDS)
13526                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
13527                                                             : PPCISD::FCFID);
13528   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
13529                   ? MVT::f32
13530                   : MVT::f64;
13531 
13532   // If we're converting from a float, to an int, and back to a float again,
13533   // then we don't need the store/load pair at all.
13534   if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
13535        Subtarget.hasFPCVT()) ||
13536       (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
13537     SDValue Src = Op.getOperand(0).getOperand(0);
13538     if (Src.getValueType() == MVT::f32) {
13539       Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
13540       DCI.AddToWorklist(Src.getNode());
13541     } else if (Src.getValueType() != MVT::f64) {
13542       // Make sure that we don't pick up a ppc_fp128 source value.
13543       return SDValue();
13544     }
13545 
13546     unsigned FCTOp =
13547       Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
13548                                                         PPCISD::FCTIDUZ;
13549 
13550     SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
13551     SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);
13552 
13553     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
13554       FP = DAG.getNode(ISD::FP_ROUND, dl,
13555                        MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
13556       DCI.AddToWorklist(FP.getNode());
13557     }
13558 
13559     return FP;
13560   }
13561 
13562   return SDValue();
13563 }
13564 
13565 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
13566 // builtins) into loads with swaps.
13567 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
13568                                               DAGCombinerInfo &DCI) const {
13569   SelectionDAG &DAG = DCI.DAG;
13570   SDLoc dl(N);
13571   SDValue Chain;
13572   SDValue Base;
13573   MachineMemOperand *MMO;
13574 
13575   switch (N->getOpcode()) {
13576   default:
13577     llvm_unreachable("Unexpected opcode for little endian VSX load");
13578   case ISD::LOAD: {
13579     LoadSDNode *LD = cast<LoadSDNode>(N);
13580     Chain = LD->getChain();
13581     Base = LD->getBasePtr();
13582     MMO = LD->getMemOperand();
13583     // If the MMO suggests this isn't a load of a full vector, leave
13584     // things alone.  For a built-in, we have to make the change for
13585     // correctness, so if there is a size problem that will be a bug.
13586     if (MMO->getSize() < 16)
13587       return SDValue();
13588     break;
13589   }
13590   case ISD::INTRINSIC_W_CHAIN: {
13591     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
13592     Chain = Intrin->getChain();
13593     // Similarly to the store case below, Intrin->getBasePtr() doesn't get
13594     // us what we want. Get operand 2 instead.
13595     Base = Intrin->getOperand(2);
13596     MMO = Intrin->getMemOperand();
13597     break;
13598   }
13599   }
13600 
13601   MVT VecTy = N->getValueType(0).getSimpleVT();
13602 
13603   // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is
13604   // aligned and the type is a vector with elements up to 4 bytes
13605   if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
13606       VecTy.getScalarSizeInBits() <= 32) {
13607     return SDValue();
13608   }
13609 
13610   SDValue LoadOps[] = { Chain, Base };
13611   SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
13612                                          DAG.getVTList(MVT::v2f64, MVT::Other),
13613                                          LoadOps, MVT::v2f64, MMO);
13614 
13615   DCI.AddToWorklist(Load.getNode());
13616   Chain = Load.getValue(1);
13617   SDValue Swap = DAG.getNode(
13618       PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load);
13619   DCI.AddToWorklist(Swap.getNode());
13620 
13621   // Add a bitcast if the resulting load type doesn't match v2f64.
13622   if (VecTy != MVT::v2f64) {
13623     SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap);
13624     DCI.AddToWorklist(N.getNode());
13625     // Package {bitcast value, swap's chain} to match Load's shape.
13626     return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other),
13627                        N, Swap.getValue(1));
13628   }
13629 
13630   return Swap;
13631 }
13632 
13633 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
13634 // builtins) into stores with swaps.
13635 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
13636                                                DAGCombinerInfo &DCI) const {
13637   SelectionDAG &DAG = DCI.DAG;
13638   SDLoc dl(N);
13639   SDValue Chain;
13640   SDValue Base;
13641   unsigned SrcOpnd;
13642   MachineMemOperand *MMO;
13643 
13644   switch (N->getOpcode()) {
13645   default:
13646     llvm_unreachable("Unexpected opcode for little endian VSX store");
13647   case ISD::STORE: {
13648     StoreSDNode *ST = cast<StoreSDNode>(N);
13649     Chain = ST->getChain();
13650     Base = ST->getBasePtr();
13651     MMO = ST->getMemOperand();
13652     SrcOpnd = 1;
13653     // If the MMO suggests this isn't a store of a full vector, leave
13654     // things alone.  For a built-in, we have to make the change for
13655     // correctness, so if there is a size problem that will be a bug.
13656     if (MMO->getSize() < 16)
13657       return SDValue();
13658     break;
13659   }
13660   case ISD::INTRINSIC_VOID: {
13661     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
13662     Chain = Intrin->getChain();
13663     // Intrin->getBasePtr() oddly does not get what we want.
13664     Base = Intrin->getOperand(3);
13665     MMO = Intrin->getMemOperand();
13666     SrcOpnd = 2;
13667     break;
13668   }
13669   }
13670 
13671   SDValue Src = N->getOperand(SrcOpnd);
13672   MVT VecTy = Src.getValueType().getSimpleVT();
13673 
13674   // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the load is
13675   // aligned and the type is a vector with elements up to 4 bytes
13676   if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
13677       VecTy.getScalarSizeInBits() <= 32) {
13678     return SDValue();
13679   }
13680 
13681   // All stores are done as v2f64 and possible bit cast.
13682   if (VecTy != MVT::v2f64) {
13683     Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
13684     DCI.AddToWorklist(Src.getNode());
13685   }
13686 
13687   SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
13688                              DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
13689   DCI.AddToWorklist(Swap.getNode());
13690   Chain = Swap.getValue(1);
13691   SDValue StoreOps[] = { Chain, Swap, Base };
13692   SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
13693                                           DAG.getVTList(MVT::Other),
13694                                           StoreOps, VecTy, MMO);
13695   DCI.AddToWorklist(Store.getNode());
13696   return Store;
13697 }
13698 
13699 // Handle DAG combine for STORE (FP_TO_INT F).
13700 SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
13701                                                DAGCombinerInfo &DCI) const {
13702 
13703   SelectionDAG &DAG = DCI.DAG;
13704   SDLoc dl(N);
13705   unsigned Opcode = N->getOperand(1).getOpcode();
13706 
13707   assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT)
13708          && "Not a FP_TO_INT Instruction!");
13709 
13710   SDValue Val = N->getOperand(1).getOperand(0);
13711   EVT Op1VT = N->getOperand(1).getValueType();
13712   EVT ResVT = Val.getValueType();
13713 
13714   if (!isTypeLegal(ResVT))
13715     return SDValue();
13716 
13717   // Only perform combine for conversion to i64/i32 or power9 i16/i8.
13718   bool ValidTypeForStoreFltAsInt =
13719         (Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
13720          (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));
13721 
13722   if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Vector() ||
13723       cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
13724     return SDValue();
13725 
13726   // Extend f32 values to f64
13727   if (ResVT.getScalarSizeInBits() == 32) {
13728     Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
13729     DCI.AddToWorklist(Val.getNode());
13730   }
13731 
13732   // Set signed or unsigned conversion opcode.
13733   unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT) ?
13734                           PPCISD::FP_TO_SINT_IN_VSR :
13735                           PPCISD::FP_TO_UINT_IN_VSR;
13736 
13737   Val = DAG.getNode(ConvOpcode,
13738                     dl, ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val);
13739   DCI.AddToWorklist(Val.getNode());
13740 
13741   // Set number of bytes being converted.
13742   unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8;
13743   SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2),
13744                     DAG.getIntPtrConstant(ByteSize, dl, false),
13745                     DAG.getValueType(Op1VT) };
13746 
13747   Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl,
13748           DAG.getVTList(MVT::Other), Ops,
13749           cast<StoreSDNode>(N)->getMemoryVT(),
13750           cast<StoreSDNode>(N)->getMemOperand());
13751 
13752   DCI.AddToWorklist(Val.getNode());
13753   return Val;
13754 }
13755 
13756 static bool isAlternatingShuffMask(const ArrayRef<int> &Mask, int NumElts) {
13757   // Check that the source of the element keeps flipping
13758   // (i.e. Mask[i] < NumElts -> Mask[i+i] >= NumElts).
13759   bool PrevElemFromFirstVec = Mask[0] < NumElts;
13760   for (int i = 1, e = Mask.size(); i < e; i++) {
13761     if (PrevElemFromFirstVec && Mask[i] < NumElts)
13762       return false;
13763     if (!PrevElemFromFirstVec && Mask[i] >= NumElts)
13764       return false;
13765     PrevElemFromFirstVec = !PrevElemFromFirstVec;
13766   }
13767   return true;
13768 }
13769 
13770 static bool isSplatBV(SDValue Op) {
13771   if (Op.getOpcode() != ISD::BUILD_VECTOR)
13772     return false;
13773   SDValue FirstOp;
13774 
13775   // Find first non-undef input.
13776   for (int i = 0, e = Op.getNumOperands(); i < e; i++) {
13777     FirstOp = Op.getOperand(i);
13778     if (!FirstOp.isUndef())
13779       break;
13780   }
13781 
13782   // All inputs are undef or the same as the first non-undef input.
13783   for (int i = 1, e = Op.getNumOperands(); i < e; i++)
13784     if (Op.getOperand(i) != FirstOp && !Op.getOperand(i).isUndef())
13785       return false;
13786   return true;
13787 }
13788 
13789 static SDValue isScalarToVec(SDValue Op) {
13790   if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
13791     return Op;
13792   if (Op.getOpcode() != ISD::BITCAST)
13793     return SDValue();
13794   Op = Op.getOperand(0);
13795   if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
13796     return Op;
13797   return SDValue();
13798 }
13799 
13800 static void fixupShuffleMaskForPermutedSToV(SmallVectorImpl<int> &ShuffV,
13801                                             int LHSMaxIdx, int RHSMinIdx,
13802                                             int RHSMaxIdx, int HalfVec) {
13803   for (int i = 0, e = ShuffV.size(); i < e; i++) {
13804     int Idx = ShuffV[i];
13805     if ((Idx >= 0 && Idx < LHSMaxIdx) || (Idx >= RHSMinIdx && Idx < RHSMaxIdx))
13806       ShuffV[i] += HalfVec;
13807   }
13808 }
13809 
13810 // Replace a SCALAR_TO_VECTOR with a SCALAR_TO_VECTOR_PERMUTED except if
13811 // the original is:
13812 // (<n x Ty> (scalar_to_vector (Ty (extract_elt <n x Ty> %a, C))))
13813 // In such a case, just change the shuffle mask to extract the element
13814 // from the permuted index.
13815 static SDValue getSToVPermuted(SDValue OrigSToV, SelectionDAG &DAG) {
13816   SDLoc dl(OrigSToV);
13817   EVT VT = OrigSToV.getValueType();
13818   assert(OrigSToV.getOpcode() == ISD::SCALAR_TO_VECTOR &&
13819          "Expecting a SCALAR_TO_VECTOR here");
13820   SDValue Input = OrigSToV.getOperand(0);
13821 
13822   if (Input.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
13823     ConstantSDNode *Idx = dyn_cast<ConstantSDNode>(Input.getOperand(1));
13824     SDValue OrigVector = Input.getOperand(0);
13825 
13826     // Can't handle non-const element indices or different vector types
13827     // for the input to the extract and the output of the scalar_to_vector.
13828     if (Idx && VT == OrigVector.getValueType()) {
13829       SmallVector<int, 16> NewMask(VT.getVectorNumElements(), -1);
13830       NewMask[VT.getVectorNumElements() / 2] = Idx->getZExtValue();
13831       return DAG.getVectorShuffle(VT, dl, OrigVector, OrigVector, NewMask);
13832     }
13833   }
13834   return DAG.getNode(PPCISD::SCALAR_TO_VECTOR_PERMUTED, dl, VT,
13835                      OrigSToV.getOperand(0));
13836 }
13837 
13838 // On little endian subtargets, combine shuffles such as:
13839 // vector_shuffle<16,1,17,3,18,5,19,7,20,9,21,11,22,13,23,15>, <zero>, %b
13840 // into:
13841 // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7>, <zero>, %b
13842 // because the latter can be matched to a single instruction merge.
13843 // Furthermore, SCALAR_TO_VECTOR on little endian always involves a permute
13844 // to put the value into element zero. Adjust the shuffle mask so that the
13845 // vector can remain in permuted form (to prevent a swap prior to a shuffle).
13846 SDValue PPCTargetLowering::combineVectorShuffle(ShuffleVectorSDNode *SVN,
13847                                                 SelectionDAG &DAG) const {
13848   SDValue LHS = SVN->getOperand(0);
13849   SDValue RHS = SVN->getOperand(1);
13850   auto Mask = SVN->getMask();
13851   int NumElts = LHS.getValueType().getVectorNumElements();
13852   SDValue Res(SVN, 0);
13853   SDLoc dl(SVN);
13854 
13855   // None of these combines are useful on big endian systems since the ISA
13856   // already has a big endian bias.
13857   if (!Subtarget.isLittleEndian() || !Subtarget.hasVSX())
13858     return Res;
13859 
13860   // If this is not a shuffle of a shuffle and the first element comes from
13861   // the second vector, canonicalize to the commuted form. This will make it
13862   // more likely to match one of the single instruction patterns.
13863   if (Mask[0] >= NumElts && LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
13864       RHS.getOpcode() != ISD::VECTOR_SHUFFLE) {
13865     std::swap(LHS, RHS);
13866     Res = DAG.getCommutedVectorShuffle(*SVN);
13867     Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
13868   }
13869 
13870   // Adjust the shuffle mask if either input vector comes from a
13871   // SCALAR_TO_VECTOR and keep the respective input vector in permuted
13872   // form (to prevent the need for a swap).
13873   SmallVector<int, 16> ShuffV(Mask.begin(), Mask.end());
13874   SDValue SToVLHS = isScalarToVec(LHS);
13875   SDValue SToVRHS = isScalarToVec(RHS);
13876   if (SToVLHS || SToVRHS) {
13877     int NumEltsIn = SToVLHS ? SToVLHS.getValueType().getVectorNumElements()
13878                             : SToVRHS.getValueType().getVectorNumElements();
13879     int NumEltsOut = ShuffV.size();
13880 
13881     // Initially assume that neither input is permuted. These will be adjusted
13882     // accordingly if either input is.
13883     int LHSMaxIdx = -1;
13884     int RHSMinIdx = -1;
13885     int RHSMaxIdx = -1;
13886     int HalfVec = LHS.getValueType().getVectorNumElements() / 2;
13887 
13888     // Get the permuted scalar to vector nodes for the source(s) that come from
13889     // ISD::SCALAR_TO_VECTOR.
13890     if (SToVLHS) {
13891       // Set up the values for the shuffle vector fixup.
13892       LHSMaxIdx = NumEltsOut / NumEltsIn;
13893       SToVLHS = getSToVPermuted(SToVLHS, DAG);
13894       if (SToVLHS.getValueType() != LHS.getValueType())
13895         SToVLHS = DAG.getBitcast(LHS.getValueType(), SToVLHS);
13896       LHS = SToVLHS;
13897     }
13898     if (SToVRHS) {
13899       RHSMinIdx = NumEltsOut;
13900       RHSMaxIdx = NumEltsOut / NumEltsIn + RHSMinIdx;
13901       SToVRHS = getSToVPermuted(SToVRHS, DAG);
13902       if (SToVRHS.getValueType() != RHS.getValueType())
13903         SToVRHS = DAG.getBitcast(RHS.getValueType(), SToVRHS);
13904       RHS = SToVRHS;
13905     }
13906 
13907     // Fix up the shuffle mask to reflect where the desired element actually is.
13908     // The minimum and maximum indices that correspond to element zero for both
13909     // the LHS and RHS are computed and will control which shuffle mask entries
13910     // are to be changed. For example, if the RHS is permuted, any shuffle mask
13911     // entries in the range [RHSMinIdx,RHSMaxIdx) will be incremented by
13912     // HalfVec to refer to the corresponding element in the permuted vector.
13913     fixupShuffleMaskForPermutedSToV(ShuffV, LHSMaxIdx, RHSMinIdx, RHSMaxIdx,
13914                                     HalfVec);
13915     Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
13916 
13917     // We may have simplified away the shuffle. We won't be able to do anything
13918     // further with it here.
13919     if (!isa<ShuffleVectorSDNode>(Res))
13920       return Res;
13921     Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
13922   }
13923 
13924   // The common case after we commuted the shuffle is that the RHS is a splat
13925   // and we have elements coming in from the splat at indices that are not
13926   // conducive to using a merge.
13927   // Example:
13928   // vector_shuffle<0,17,1,19,2,21,3,23,4,25,5,27,6,29,7,31> t1, <zero>
13929   if (!isSplatBV(RHS))
13930     return Res;
13931 
13932   // We are looking for a mask such that all even elements are from
13933   // one vector and all odd elements from the other.
13934   if (!isAlternatingShuffMask(Mask, NumElts))
13935     return Res;
13936 
13937   // Adjust the mask so we are pulling in the same index from the splat
13938   // as the index from the interesting vector in consecutive elements.
13939   // Example (even elements from first vector):
13940   // vector_shuffle<0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23> t1, <zero>
13941   if (Mask[0] < NumElts)
13942     for (int i = 1, e = Mask.size(); i < e; i += 2)
13943       ShuffV[i] = (ShuffV[i - 1] + NumElts);
13944   // Example (odd elements from first vector):
13945   // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7> t1, <zero>
13946   else
13947     for (int i = 0, e = Mask.size(); i < e; i += 2)
13948       ShuffV[i] = (ShuffV[i + 1] + NumElts);
13949 
13950   // If the RHS has undefs, we need to remove them since we may have created
13951   // a shuffle that adds those instead of the splat value.
13952   SDValue SplatVal = cast<BuildVectorSDNode>(RHS.getNode())->getSplatValue();
13953   RHS = DAG.getSplatBuildVector(RHS.getValueType(), dl, SplatVal);
13954 
13955   Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
13956   return Res;
13957 }
13958 
13959 SDValue PPCTargetLowering::combineVReverseMemOP(ShuffleVectorSDNode *SVN,
13960                                                 LSBaseSDNode *LSBase,
13961                                                 DAGCombinerInfo &DCI) const {
13962   assert((ISD::isNormalLoad(LSBase) || ISD::isNormalStore(LSBase)) &&
13963         "Not a reverse memop pattern!");
13964 
13965   auto IsElementReverse = [](const ShuffleVectorSDNode *SVN) -> bool {
13966     auto Mask = SVN->getMask();
13967     int i = 0;
13968     auto I = Mask.rbegin();
13969     auto E = Mask.rend();
13970 
13971     for (; I != E; ++I) {
13972       if (*I != i)
13973         return false;
13974       i++;
13975     }
13976     return true;
13977   };
13978 
13979   SelectionDAG &DAG = DCI.DAG;
13980   EVT VT = SVN->getValueType(0);
13981 
13982   if (!isTypeLegal(VT) || !Subtarget.isLittleEndian() || !Subtarget.hasVSX())
13983     return SDValue();
13984 
13985   // Before P9, we have PPCVSXSwapRemoval pass to hack the element order.
13986   // See comment in PPCVSXSwapRemoval.cpp.
13987   // It is conflict with PPCVSXSwapRemoval opt. So we don't do it.
13988   if (!Subtarget.hasP9Vector())
13989     return SDValue();
13990 
13991   if(!IsElementReverse(SVN))
13992     return SDValue();
13993 
13994   if (LSBase->getOpcode() == ISD::LOAD) {
13995     SDLoc dl(SVN);
13996     SDValue LoadOps[] = {LSBase->getChain(), LSBase->getBasePtr()};
13997     return DAG.getMemIntrinsicNode(
13998         PPCISD::LOAD_VEC_BE, dl, DAG.getVTList(VT, MVT::Other), LoadOps,
13999         LSBase->getMemoryVT(), LSBase->getMemOperand());
14000   }
14001 
14002   if (LSBase->getOpcode() == ISD::STORE) {
14003     SDLoc dl(LSBase);
14004     SDValue StoreOps[] = {LSBase->getChain(), SVN->getOperand(0),
14005                           LSBase->getBasePtr()};
14006     return DAG.getMemIntrinsicNode(
14007         PPCISD::STORE_VEC_BE, dl, DAG.getVTList(MVT::Other), StoreOps,
14008         LSBase->getMemoryVT(), LSBase->getMemOperand());
14009   }
14010 
14011   llvm_unreachable("Expected a load or store node here");
14012 }
14013 
14014 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
14015                                              DAGCombinerInfo &DCI) const {
14016   SelectionDAG &DAG = DCI.DAG;
14017   SDLoc dl(N);
14018   switch (N->getOpcode()) {
14019   default: break;
14020   case ISD::ADD:
14021     return combineADD(N, DCI);
14022   case ISD::SHL:
14023     return combineSHL(N, DCI);
14024   case ISD::SRA:
14025     return combineSRA(N, DCI);
14026   case ISD::SRL:
14027     return combineSRL(N, DCI);
14028   case ISD::MUL:
14029     return combineMUL(N, DCI);
14030   case ISD::FMA:
14031   case PPCISD::FNMSUB:
14032     return combineFMALike(N, DCI);
14033   case PPCISD::SHL:
14034     if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
14035         return N->getOperand(0);
14036     break;
14037   case PPCISD::SRL:
14038     if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
14039         return N->getOperand(0);
14040     break;
14041   case PPCISD::SRA:
14042     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
14043       if (C->isNullValue() ||   //  0 >>s V -> 0.
14044           C->isAllOnesValue())    // -1 >>s V -> -1.
14045         return N->getOperand(0);
14046     }
14047     break;
14048   case ISD::SIGN_EXTEND:
14049   case ISD::ZERO_EXTEND:
14050   case ISD::ANY_EXTEND:
14051     return DAGCombineExtBoolTrunc(N, DCI);
14052   case ISD::TRUNCATE:
14053     return combineTRUNCATE(N, DCI);
14054   case ISD::SETCC:
14055     if (SDValue CSCC = combineSetCC(N, DCI))
14056       return CSCC;
14057     LLVM_FALLTHROUGH;
14058   case ISD::SELECT_CC:
14059     return DAGCombineTruncBoolExt(N, DCI);
14060   case ISD::SINT_TO_FP:
14061   case ISD::UINT_TO_FP:
14062     return combineFPToIntToFP(N, DCI);
14063   case ISD::VECTOR_SHUFFLE:
14064     if (ISD::isNormalLoad(N->getOperand(0).getNode())) {
14065       LSBaseSDNode* LSBase = cast<LSBaseSDNode>(N->getOperand(0));
14066       return combineVReverseMemOP(cast<ShuffleVectorSDNode>(N), LSBase, DCI);
14067     }
14068     return combineVectorShuffle(cast<ShuffleVectorSDNode>(N), DCI.DAG);
14069   case ISD::STORE: {
14070 
14071     EVT Op1VT = N->getOperand(1).getValueType();
14072     unsigned Opcode = N->getOperand(1).getOpcode();
14073 
14074     if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) {
14075       SDValue Val= combineStoreFPToInt(N, DCI);
14076       if (Val)
14077         return Val;
14078     }
14079 
14080     if (Opcode == ISD::VECTOR_SHUFFLE && ISD::isNormalStore(N)) {
14081       ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N->getOperand(1));
14082       SDValue Val= combineVReverseMemOP(SVN, cast<LSBaseSDNode>(N), DCI);
14083       if (Val)
14084         return Val;
14085     }
14086 
14087     // Turn STORE (BSWAP) -> sthbrx/stwbrx.
14088     if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&
14089         N->getOperand(1).getNode()->hasOneUse() &&
14090         (Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
14091          (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {
14092 
14093       // STBRX can only handle simple types and it makes no sense to store less
14094       // two bytes in byte-reversed order.
14095       EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
14096       if (mVT.isExtended() || mVT.getSizeInBits() < 16)
14097         break;
14098 
14099       SDValue BSwapOp = N->getOperand(1).getOperand(0);
14100       // Do an any-extend to 32-bits if this is a half-word input.
14101       if (BSwapOp.getValueType() == MVT::i16)
14102         BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
14103 
14104       // If the type of BSWAP operand is wider than stored memory width
14105       // it need to be shifted to the right side before STBRX.
14106       if (Op1VT.bitsGT(mVT)) {
14107         int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
14108         BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
14109                               DAG.getConstant(Shift, dl, MVT::i32));
14110         // Need to truncate if this is a bswap of i64 stored as i32/i16.
14111         if (Op1VT == MVT::i64)
14112           BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
14113       }
14114 
14115       SDValue Ops[] = {
14116         N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
14117       };
14118       return
14119         DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
14120                                 Ops, cast<StoreSDNode>(N)->getMemoryVT(),
14121                                 cast<StoreSDNode>(N)->getMemOperand());
14122     }
14123 
14124     // STORE Constant:i32<0>  ->  STORE<trunc to i32> Constant:i64<0>
14125     // So it can increase the chance of CSE constant construction.
14126     if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
14127         isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
14128       // Need to sign-extended to 64-bits to handle negative values.
14129       EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
14130       uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
14131                                     MemVT.getSizeInBits());
14132       SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);
14133 
14134       // DAG.getTruncStore() can't be used here because it doesn't accept
14135       // the general (base + offset) addressing mode.
14136       // So we use UpdateNodeOperands and setTruncatingStore instead.
14137       DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
14138                              N->getOperand(3));
14139       cast<StoreSDNode>(N)->setTruncatingStore(true);
14140       return SDValue(N, 0);
14141     }
14142 
14143     // For little endian, VSX stores require generating xxswapd/lxvd2x.
14144     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
14145     if (Op1VT.isSimple()) {
14146       MVT StoreVT = Op1VT.getSimpleVT();
14147       if (Subtarget.needsSwapsForVSXMemOps() &&
14148           (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
14149            StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
14150         return expandVSXStoreForLE(N, DCI);
14151     }
14152     break;
14153   }
14154   case ISD::LOAD: {
14155     LoadSDNode *LD = cast<LoadSDNode>(N);
14156     EVT VT = LD->getValueType(0);
14157 
14158     // For little endian, VSX loads require generating lxvd2x/xxswapd.
14159     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
14160     if (VT.isSimple()) {
14161       MVT LoadVT = VT.getSimpleVT();
14162       if (Subtarget.needsSwapsForVSXMemOps() &&
14163           (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
14164            LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
14165         return expandVSXLoadForLE(N, DCI);
14166     }
14167 
14168     // We sometimes end up with a 64-bit integer load, from which we extract
14169     // two single-precision floating-point numbers. This happens with
14170     // std::complex<float>, and other similar structures, because of the way we
14171     // canonicalize structure copies. However, if we lack direct moves,
14172     // then the final bitcasts from the extracted integer values to the
14173     // floating-point numbers turn into store/load pairs. Even with direct moves,
14174     // just loading the two floating-point numbers is likely better.
14175     auto ReplaceTwoFloatLoad = [&]() {
14176       if (VT != MVT::i64)
14177         return false;
14178 
14179       if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
14180           LD->isVolatile())
14181         return false;
14182 
14183       //  We're looking for a sequence like this:
14184       //  t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64
14185       //      t16: i64 = srl t13, Constant:i32<32>
14186       //    t17: i32 = truncate t16
14187       //  t18: f32 = bitcast t17
14188       //    t19: i32 = truncate t13
14189       //  t20: f32 = bitcast t19
14190 
14191       if (!LD->hasNUsesOfValue(2, 0))
14192         return false;
14193 
14194       auto UI = LD->use_begin();
14195       while (UI.getUse().getResNo() != 0) ++UI;
14196       SDNode *Trunc = *UI++;
14197       while (UI.getUse().getResNo() != 0) ++UI;
14198       SDNode *RightShift = *UI;
14199       if (Trunc->getOpcode() != ISD::TRUNCATE)
14200         std::swap(Trunc, RightShift);
14201 
14202       if (Trunc->getOpcode() != ISD::TRUNCATE ||
14203           Trunc->getValueType(0) != MVT::i32 ||
14204           !Trunc->hasOneUse())
14205         return false;
14206       if (RightShift->getOpcode() != ISD::SRL ||
14207           !isa<ConstantSDNode>(RightShift->getOperand(1)) ||
14208           RightShift->getConstantOperandVal(1) != 32 ||
14209           !RightShift->hasOneUse())
14210         return false;
14211 
14212       SDNode *Trunc2 = *RightShift->use_begin();
14213       if (Trunc2->getOpcode() != ISD::TRUNCATE ||
14214           Trunc2->getValueType(0) != MVT::i32 ||
14215           !Trunc2->hasOneUse())
14216         return false;
14217 
14218       SDNode *Bitcast = *Trunc->use_begin();
14219       SDNode *Bitcast2 = *Trunc2->use_begin();
14220 
14221       if (Bitcast->getOpcode() != ISD::BITCAST ||
14222           Bitcast->getValueType(0) != MVT::f32)
14223         return false;
14224       if (Bitcast2->getOpcode() != ISD::BITCAST ||
14225           Bitcast2->getValueType(0) != MVT::f32)
14226         return false;
14227 
14228       if (Subtarget.isLittleEndian())
14229         std::swap(Bitcast, Bitcast2);
14230 
14231       // Bitcast has the second float (in memory-layout order) and Bitcast2
14232       // has the first one.
14233 
14234       SDValue BasePtr = LD->getBasePtr();
14235       if (LD->isIndexed()) {
14236         assert(LD->getAddressingMode() == ISD::PRE_INC &&
14237                "Non-pre-inc AM on PPC?");
14238         BasePtr =
14239           DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
14240                       LD->getOffset());
14241       }
14242 
14243       auto MMOFlags =
14244           LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
14245       SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
14246                                       LD->getPointerInfo(), LD->getAlignment(),
14247                                       MMOFlags, LD->getAAInfo());
14248       SDValue AddPtr =
14249         DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
14250                     BasePtr, DAG.getIntPtrConstant(4, dl));
14251       SDValue FloatLoad2 = DAG.getLoad(
14252           MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
14253           LD->getPointerInfo().getWithOffset(4),
14254           MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo());
14255 
14256       if (LD->isIndexed()) {
14257         // Note that DAGCombine should re-form any pre-increment load(s) from
14258         // what is produced here if that makes sense.
14259         DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr);
14260       }
14261 
14262       DCI.CombineTo(Bitcast2, FloatLoad);
14263       DCI.CombineTo(Bitcast, FloatLoad2);
14264 
14265       DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 2 : 1),
14266                                     SDValue(FloatLoad2.getNode(), 1));
14267       return true;
14268     };
14269 
14270     if (ReplaceTwoFloatLoad())
14271       return SDValue(N, 0);
14272 
14273     EVT MemVT = LD->getMemoryVT();
14274     Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
14275     Align ABIAlignment = DAG.getDataLayout().getABITypeAlign(Ty);
14276     if (LD->isUnindexed() && VT.isVector() &&
14277         ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
14278           // P8 and later hardware should just use LOAD.
14279           !Subtarget.hasP8Vector() &&
14280           (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
14281            VT == MVT::v4f32))) &&
14282         LD->getAlign() < ABIAlignment) {
14283       // This is a type-legal unaligned Altivec load.
14284       SDValue Chain = LD->getChain();
14285       SDValue Ptr = LD->getBasePtr();
14286       bool isLittleEndian = Subtarget.isLittleEndian();
14287 
14288       // This implements the loading of unaligned vectors as described in
14289       // the venerable Apple Velocity Engine overview. Specifically:
14290       // https://developer.apple.com/hardwaredrivers/ve/alignment.html
14291       // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
14292       //
14293       // The general idea is to expand a sequence of one or more unaligned
14294       // loads into an alignment-based permutation-control instruction (lvsl
14295       // or lvsr), a series of regular vector loads (which always truncate
14296       // their input address to an aligned address), and a series of
14297       // permutations.  The results of these permutations are the requested
14298       // loaded values.  The trick is that the last "extra" load is not taken
14299       // from the address you might suspect (sizeof(vector) bytes after the
14300       // last requested load), but rather sizeof(vector) - 1 bytes after the
14301       // last requested vector. The point of this is to avoid a page fault if
14302       // the base address happened to be aligned. This works because if the
14303       // base address is aligned, then adding less than a full vector length
14304       // will cause the last vector in the sequence to be (re)loaded.
14305       // Otherwise, the next vector will be fetched as you might suspect was
14306       // necessary.
14307 
14308       // We might be able to reuse the permutation generation from
14309       // a different base address offset from this one by an aligned amount.
14310       // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
14311       // optimization later.
14312       Intrinsic::ID Intr, IntrLD, IntrPerm;
14313       MVT PermCntlTy, PermTy, LDTy;
14314       Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr
14315                             : Intrinsic::ppc_altivec_lvsl;
14316       IntrLD = Intrinsic::ppc_altivec_lvx;
14317       IntrPerm = Intrinsic::ppc_altivec_vperm;
14318       PermCntlTy = MVT::v16i8;
14319       PermTy = MVT::v4i32;
14320       LDTy = MVT::v4i32;
14321 
14322       SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);
14323 
14324       // Create the new MMO for the new base load. It is like the original MMO,
14325       // but represents an area in memory almost twice the vector size centered
14326       // on the original address. If the address is unaligned, we might start
14327       // reading up to (sizeof(vector)-1) bytes below the address of the
14328       // original unaligned load.
14329       MachineFunction &MF = DAG.getMachineFunction();
14330       MachineMemOperand *BaseMMO =
14331         MF.getMachineMemOperand(LD->getMemOperand(),
14332                                 -(long)MemVT.getStoreSize()+1,
14333                                 2*MemVT.getStoreSize()-1);
14334 
14335       // Create the new base load.
14336       SDValue LDXIntID =
14337           DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout()));
14338       SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
14339       SDValue BaseLoad =
14340         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
14341                                 DAG.getVTList(PermTy, MVT::Other),
14342                                 BaseLoadOps, LDTy, BaseMMO);
14343 
14344       // Note that the value of IncOffset (which is provided to the next
14345       // load's pointer info offset value, and thus used to calculate the
14346       // alignment), and the value of IncValue (which is actually used to
14347       // increment the pointer value) are different! This is because we
14348       // require the next load to appear to be aligned, even though it
14349       // is actually offset from the base pointer by a lesser amount.
14350       int IncOffset = VT.getSizeInBits() / 8;
14351       int IncValue = IncOffset;
14352 
14353       // Walk (both up and down) the chain looking for another load at the real
14354       // (aligned) offset (the alignment of the other load does not matter in
14355       // this case). If found, then do not use the offset reduction trick, as
14356       // that will prevent the loads from being later combined (as they would
14357       // otherwise be duplicates).
14358       if (!findConsecutiveLoad(LD, DAG))
14359         --IncValue;
14360 
14361       SDValue Increment =
14362           DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout()));
14363       Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
14364 
14365       MachineMemOperand *ExtraMMO =
14366         MF.getMachineMemOperand(LD->getMemOperand(),
14367                                 1, 2*MemVT.getStoreSize()-1);
14368       SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
14369       SDValue ExtraLoad =
14370         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
14371                                 DAG.getVTList(PermTy, MVT::Other),
14372                                 ExtraLoadOps, LDTy, ExtraMMO);
14373 
14374       SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
14375         BaseLoad.getValue(1), ExtraLoad.getValue(1));
14376 
14377       // Because vperm has a big-endian bias, we must reverse the order
14378       // of the input vectors and complement the permute control vector
14379       // when generating little endian code.  We have already handled the
14380       // latter by using lvsr instead of lvsl, so just reverse BaseLoad
14381       // and ExtraLoad here.
14382       SDValue Perm;
14383       if (isLittleEndian)
14384         Perm = BuildIntrinsicOp(IntrPerm,
14385                                 ExtraLoad, BaseLoad, PermCntl, DAG, dl);
14386       else
14387         Perm = BuildIntrinsicOp(IntrPerm,
14388                                 BaseLoad, ExtraLoad, PermCntl, DAG, dl);
14389 
14390       if (VT != PermTy)
14391         Perm = Subtarget.hasAltivec()
14392                    ? DAG.getNode(ISD::BITCAST, dl, VT, Perm)
14393                    : DAG.getNode(ISD::FP_ROUND, dl, VT, Perm,
14394                                  DAG.getTargetConstant(1, dl, MVT::i64));
14395                                // second argument is 1 because this rounding
14396                                // is always exact.
14397 
14398       // The output of the permutation is our loaded result, the TokenFactor is
14399       // our new chain.
14400       DCI.CombineTo(N, Perm, TF);
14401       return SDValue(N, 0);
14402     }
14403     }
14404     break;
14405     case ISD::INTRINSIC_WO_CHAIN: {
14406       bool isLittleEndian = Subtarget.isLittleEndian();
14407       unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
14408       Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
14409                                            : Intrinsic::ppc_altivec_lvsl);
14410       if (IID == Intr && N->getOperand(1)->getOpcode() == ISD::ADD) {
14411         SDValue Add = N->getOperand(1);
14412 
14413         int Bits = 4 /* 16 byte alignment */;
14414 
14415         if (DAG.MaskedValueIsZero(Add->getOperand(1),
14416                                   APInt::getAllOnesValue(Bits /* alignment */)
14417                                       .zext(Add.getScalarValueSizeInBits()))) {
14418           SDNode *BasePtr = Add->getOperand(0).getNode();
14419           for (SDNode::use_iterator UI = BasePtr->use_begin(),
14420                                     UE = BasePtr->use_end();
14421                UI != UE; ++UI) {
14422             if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
14423                 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() ==
14424                     IID) {
14425               // We've found another LVSL/LVSR, and this address is an aligned
14426               // multiple of that one. The results will be the same, so use the
14427               // one we've just found instead.
14428 
14429               return SDValue(*UI, 0);
14430             }
14431           }
14432         }
14433 
14434         if (isa<ConstantSDNode>(Add->getOperand(1))) {
14435           SDNode *BasePtr = Add->getOperand(0).getNode();
14436           for (SDNode::use_iterator UI = BasePtr->use_begin(),
14437                UE = BasePtr->use_end(); UI != UE; ++UI) {
14438             if (UI->getOpcode() == ISD::ADD &&
14439                 isa<ConstantSDNode>(UI->getOperand(1)) &&
14440                 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
14441                  cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
14442                 (1ULL << Bits) == 0) {
14443               SDNode *OtherAdd = *UI;
14444               for (SDNode::use_iterator VI = OtherAdd->use_begin(),
14445                    VE = OtherAdd->use_end(); VI != VE; ++VI) {
14446                 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
14447                     cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) {
14448                   return SDValue(*VI, 0);
14449                 }
14450               }
14451             }
14452           }
14453         }
14454       }
14455 
14456       // Combine vmaxsw/h/b(a, a's negation) to abs(a)
14457       // Expose the vabsduw/h/b opportunity for down stream
14458       if (!DCI.isAfterLegalizeDAG() && Subtarget.hasP9Altivec() &&
14459           (IID == Intrinsic::ppc_altivec_vmaxsw ||
14460            IID == Intrinsic::ppc_altivec_vmaxsh ||
14461            IID == Intrinsic::ppc_altivec_vmaxsb)) {
14462         SDValue V1 = N->getOperand(1);
14463         SDValue V2 = N->getOperand(2);
14464         if ((V1.getSimpleValueType() == MVT::v4i32 ||
14465              V1.getSimpleValueType() == MVT::v8i16 ||
14466              V1.getSimpleValueType() == MVT::v16i8) &&
14467             V1.getSimpleValueType() == V2.getSimpleValueType()) {
14468           // (0-a, a)
14469           if (V1.getOpcode() == ISD::SUB &&
14470               ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
14471               V1.getOperand(1) == V2) {
14472             return DAG.getNode(ISD::ABS, dl, V2.getValueType(), V2);
14473           }
14474           // (a, 0-a)
14475           if (V2.getOpcode() == ISD::SUB &&
14476               ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
14477               V2.getOperand(1) == V1) {
14478             return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
14479           }
14480           // (x-y, y-x)
14481           if (V1.getOpcode() == ISD::SUB && V2.getOpcode() == ISD::SUB &&
14482               V1.getOperand(0) == V2.getOperand(1) &&
14483               V1.getOperand(1) == V2.getOperand(0)) {
14484             return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
14485           }
14486         }
14487       }
14488     }
14489 
14490     break;
14491   case ISD::INTRINSIC_W_CHAIN:
14492     // For little endian, VSX loads require generating lxvd2x/xxswapd.
14493     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
14494     if (Subtarget.needsSwapsForVSXMemOps()) {
14495       switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
14496       default:
14497         break;
14498       case Intrinsic::ppc_vsx_lxvw4x:
14499       case Intrinsic::ppc_vsx_lxvd2x:
14500         return expandVSXLoadForLE(N, DCI);
14501       }
14502     }
14503     break;
14504   case ISD::INTRINSIC_VOID:
14505     // For little endian, VSX stores require generating xxswapd/stxvd2x.
14506     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
14507     if (Subtarget.needsSwapsForVSXMemOps()) {
14508       switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
14509       default:
14510         break;
14511       case Intrinsic::ppc_vsx_stxvw4x:
14512       case Intrinsic::ppc_vsx_stxvd2x:
14513         return expandVSXStoreForLE(N, DCI);
14514       }
14515     }
14516     break;
14517   case ISD::BSWAP:
14518     // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
14519     if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
14520         N->getOperand(0).hasOneUse() &&
14521         (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
14522          (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
14523           N->getValueType(0) == MVT::i64))) {
14524       SDValue Load = N->getOperand(0);
14525       LoadSDNode *LD = cast<LoadSDNode>(Load);
14526       // Create the byte-swapping load.
14527       SDValue Ops[] = {
14528         LD->getChain(),    // Chain
14529         LD->getBasePtr(),  // Ptr
14530         DAG.getValueType(N->getValueType(0)) // VT
14531       };
14532       SDValue BSLoad =
14533         DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
14534                                 DAG.getVTList(N->getValueType(0) == MVT::i64 ?
14535                                               MVT::i64 : MVT::i32, MVT::Other),
14536                                 Ops, LD->getMemoryVT(), LD->getMemOperand());
14537 
14538       // If this is an i16 load, insert the truncate.
14539       SDValue ResVal = BSLoad;
14540       if (N->getValueType(0) == MVT::i16)
14541         ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
14542 
14543       // First, combine the bswap away.  This makes the value produced by the
14544       // load dead.
14545       DCI.CombineTo(N, ResVal);
14546 
14547       // Next, combine the load away, we give it a bogus result value but a real
14548       // chain result.  The result value is dead because the bswap is dead.
14549       DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
14550 
14551       // Return N so it doesn't get rechecked!
14552       return SDValue(N, 0);
14553     }
14554     break;
14555   case PPCISD::VCMP:
14556     // If a VCMP_rec node already exists with exactly the same operands as this
14557     // node, use its result instead of this node (VCMP_rec computes both a CR6
14558     // and a normal output).
14559     //
14560     if (!N->getOperand(0).hasOneUse() &&
14561         !N->getOperand(1).hasOneUse() &&
14562         !N->getOperand(2).hasOneUse()) {
14563 
14564       // Scan all of the users of the LHS, looking for VCMP_rec's that match.
14565       SDNode *VCMPrecNode = nullptr;
14566 
14567       SDNode *LHSN = N->getOperand(0).getNode();
14568       for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
14569            UI != E; ++UI)
14570         if (UI->getOpcode() == PPCISD::VCMP_rec &&
14571             UI->getOperand(1) == N->getOperand(1) &&
14572             UI->getOperand(2) == N->getOperand(2) &&
14573             UI->getOperand(0) == N->getOperand(0)) {
14574           VCMPrecNode = *UI;
14575           break;
14576         }
14577 
14578       // If there is no VCMP_rec node, or if the flag value has a single use,
14579       // don't transform this.
14580       if (!VCMPrecNode || VCMPrecNode->hasNUsesOfValue(0, 1))
14581         break;
14582 
14583       // Look at the (necessarily single) use of the flag value.  If it has a
14584       // chain, this transformation is more complex.  Note that multiple things
14585       // could use the value result, which we should ignore.
14586       SDNode *FlagUser = nullptr;
14587       for (SDNode::use_iterator UI = VCMPrecNode->use_begin();
14588            FlagUser == nullptr; ++UI) {
14589         assert(UI != VCMPrecNode->use_end() && "Didn't find user!");
14590         SDNode *User = *UI;
14591         for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
14592           if (User->getOperand(i) == SDValue(VCMPrecNode, 1)) {
14593             FlagUser = User;
14594             break;
14595           }
14596         }
14597       }
14598 
14599       // If the user is a MFOCRF instruction, we know this is safe.
14600       // Otherwise we give up for right now.
14601       if (FlagUser->getOpcode() == PPCISD::MFOCRF)
14602         return SDValue(VCMPrecNode, 0);
14603     }
14604     break;
14605   case ISD::BRCOND: {
14606     SDValue Cond = N->getOperand(1);
14607     SDValue Target = N->getOperand(2);
14608 
14609     if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
14610         cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
14611           Intrinsic::loop_decrement) {
14612 
14613       // We now need to make the intrinsic dead (it cannot be instruction
14614       // selected).
14615       DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0));
14616       assert(Cond.getNode()->hasOneUse() &&
14617              "Counter decrement has more than one use");
14618 
14619       return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other,
14620                          N->getOperand(0), Target);
14621     }
14622   }
14623   break;
14624   case ISD::BR_CC: {
14625     // If this is a branch on an altivec predicate comparison, lower this so
14626     // that we don't have to do a MFOCRF: instead, branch directly on CR6.  This
14627     // lowering is done pre-legalize, because the legalizer lowers the predicate
14628     // compare down to code that is difficult to reassemble.
14629     ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
14630     SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);
14631 
14632     // Sometimes the promoted value of the intrinsic is ANDed by some non-zero
14633     // value. If so, pass-through the AND to get to the intrinsic.
14634     if (LHS.getOpcode() == ISD::AND &&
14635         LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
14636         cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() ==
14637           Intrinsic::loop_decrement &&
14638         isa<ConstantSDNode>(LHS.getOperand(1)) &&
14639         !isNullConstant(LHS.getOperand(1)))
14640       LHS = LHS.getOperand(0);
14641 
14642     if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
14643         cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
14644           Intrinsic::loop_decrement &&
14645         isa<ConstantSDNode>(RHS)) {
14646       assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
14647              "Counter decrement comparison is not EQ or NE");
14648 
14649       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
14650       bool isBDNZ = (CC == ISD::SETEQ && Val) ||
14651                     (CC == ISD::SETNE && !Val);
14652 
14653       // We now need to make the intrinsic dead (it cannot be instruction
14654       // selected).
14655       DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
14656       assert(LHS.getNode()->hasOneUse() &&
14657              "Counter decrement has more than one use");
14658 
14659       return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
14660                          N->getOperand(0), N->getOperand(4));
14661     }
14662 
14663     int CompareOpc;
14664     bool isDot;
14665 
14666     if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
14667         isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
14668         getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
14669       assert(isDot && "Can't compare against a vector result!");
14670 
14671       // If this is a comparison against something other than 0/1, then we know
14672       // that the condition is never/always true.
14673       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
14674       if (Val != 0 && Val != 1) {
14675         if (CC == ISD::SETEQ)      // Cond never true, remove branch.
14676           return N->getOperand(0);
14677         // Always !=, turn it into an unconditional branch.
14678         return DAG.getNode(ISD::BR, dl, MVT::Other,
14679                            N->getOperand(0), N->getOperand(4));
14680       }
14681 
14682       bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
14683 
14684       // Create the PPCISD altivec 'dot' comparison node.
14685       SDValue Ops[] = {
14686         LHS.getOperand(2),  // LHS of compare
14687         LHS.getOperand(3),  // RHS of compare
14688         DAG.getConstant(CompareOpc, dl, MVT::i32)
14689       };
14690       EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
14691       SDValue CompNode = DAG.getNode(PPCISD::VCMP_rec, dl, VTs, Ops);
14692 
14693       // Unpack the result based on how the target uses it.
14694       PPC::Predicate CompOpc;
14695       switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
14696       default:  // Can't happen, don't crash on invalid number though.
14697       case 0:   // Branch on the value of the EQ bit of CR6.
14698         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
14699         break;
14700       case 1:   // Branch on the inverted value of the EQ bit of CR6.
14701         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
14702         break;
14703       case 2:   // Branch on the value of the LT bit of CR6.
14704         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
14705         break;
14706       case 3:   // Branch on the inverted value of the LT bit of CR6.
14707         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
14708         break;
14709       }
14710 
14711       return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
14712                          DAG.getConstant(CompOpc, dl, MVT::i32),
14713                          DAG.getRegister(PPC::CR6, MVT::i32),
14714                          N->getOperand(4), CompNode.getValue(1));
14715     }
14716     break;
14717   }
14718   case ISD::BUILD_VECTOR:
14719     return DAGCombineBuildVector(N, DCI);
14720   case ISD::ABS:
14721     return combineABS(N, DCI);
14722   case ISD::VSELECT:
14723     return combineVSelect(N, DCI);
14724   }
14725 
14726   return SDValue();
14727 }
14728 
14729 SDValue
14730 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
14731                                  SelectionDAG &DAG,
14732                                  SmallVectorImpl<SDNode *> &Created) const {
14733   // fold (sdiv X, pow2)
14734   EVT VT = N->getValueType(0);
14735   if (VT == MVT::i64 && !Subtarget.isPPC64())
14736     return SDValue();
14737   if ((VT != MVT::i32 && VT != MVT::i64) ||
14738       !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
14739     return SDValue();
14740 
14741   SDLoc DL(N);
14742   SDValue N0 = N->getOperand(0);
14743 
14744   bool IsNegPow2 = (-Divisor).isPowerOf2();
14745   unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
14746   SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);
14747 
14748   SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
14749   Created.push_back(Op.getNode());
14750 
14751   if (IsNegPow2) {
14752     Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
14753     Created.push_back(Op.getNode());
14754   }
14755 
14756   return Op;
14757 }
14758 
14759 //===----------------------------------------------------------------------===//
14760 // Inline Assembly Support
14761 //===----------------------------------------------------------------------===//
14762 
14763 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
14764                                                       KnownBits &Known,
14765                                                       const APInt &DemandedElts,
14766                                                       const SelectionDAG &DAG,
14767                                                       unsigned Depth) const {
14768   Known.resetAll();
14769   switch (Op.getOpcode()) {
14770   default: break;
14771   case PPCISD::LBRX: {
14772     // lhbrx is known to have the top bits cleared out.
14773     if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
14774       Known.Zero = 0xFFFF0000;
14775     break;
14776   }
14777   case ISD::INTRINSIC_WO_CHAIN: {
14778     switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
14779     default: break;
14780     case Intrinsic::ppc_altivec_vcmpbfp_p:
14781     case Intrinsic::ppc_altivec_vcmpeqfp_p:
14782     case Intrinsic::ppc_altivec_vcmpequb_p:
14783     case Intrinsic::ppc_altivec_vcmpequh_p:
14784     case Intrinsic::ppc_altivec_vcmpequw_p:
14785     case Intrinsic::ppc_altivec_vcmpequd_p:
14786     case Intrinsic::ppc_altivec_vcmpequq_p:
14787     case Intrinsic::ppc_altivec_vcmpgefp_p:
14788     case Intrinsic::ppc_altivec_vcmpgtfp_p:
14789     case Intrinsic::ppc_altivec_vcmpgtsb_p:
14790     case Intrinsic::ppc_altivec_vcmpgtsh_p:
14791     case Intrinsic::ppc_altivec_vcmpgtsw_p:
14792     case Intrinsic::ppc_altivec_vcmpgtsd_p:
14793     case Intrinsic::ppc_altivec_vcmpgtsq_p:
14794     case Intrinsic::ppc_altivec_vcmpgtub_p:
14795     case Intrinsic::ppc_altivec_vcmpgtuh_p:
14796     case Intrinsic::ppc_altivec_vcmpgtuw_p:
14797     case Intrinsic::ppc_altivec_vcmpgtud_p:
14798     case Intrinsic::ppc_altivec_vcmpgtuq_p:
14799       Known.Zero = ~1U;  // All bits but the low one are known to be zero.
14800       break;
14801     }
14802   }
14803   }
14804 }
14805 
14806 Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
14807   switch (Subtarget.getCPUDirective()) {
14808   default: break;
14809   case PPC::DIR_970:
14810   case PPC::DIR_PWR4:
14811   case PPC::DIR_PWR5:
14812   case PPC::DIR_PWR5X:
14813   case PPC::DIR_PWR6:
14814   case PPC::DIR_PWR6X:
14815   case PPC::DIR_PWR7:
14816   case PPC::DIR_PWR8:
14817   case PPC::DIR_PWR9:
14818   case PPC::DIR_PWR10:
14819   case PPC::DIR_PWR_FUTURE: {
14820     if (!ML)
14821       break;
14822 
14823     if (!DisableInnermostLoopAlign32) {
14824       // If the nested loop is an innermost loop, prefer to a 32-byte alignment,
14825       // so that we can decrease cache misses and branch-prediction misses.
14826       // Actual alignment of the loop will depend on the hotness check and other
14827       // logic in alignBlocks.
14828       if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
14829         return Align(32);
14830     }
14831 
14832     const PPCInstrInfo *TII = Subtarget.getInstrInfo();
14833 
14834     // For small loops (between 5 and 8 instructions), align to a 32-byte
14835     // boundary so that the entire loop fits in one instruction-cache line.
14836     uint64_t LoopSize = 0;
14837     for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
14838       for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
14839         LoopSize += TII->getInstSizeInBytes(*J);
14840         if (LoopSize > 32)
14841           break;
14842       }
14843 
14844     if (LoopSize > 16 && LoopSize <= 32)
14845       return Align(32);
14846 
14847     break;
14848   }
14849   }
14850 
14851   return TargetLowering::getPrefLoopAlignment(ML);
14852 }
14853 
14854 /// getConstraintType - Given a constraint, return the type of
14855 /// constraint it is for this target.
14856 PPCTargetLowering::ConstraintType
14857 PPCTargetLowering::getConstraintType(StringRef Constraint) const {
14858   if (Constraint.size() == 1) {
14859     switch (Constraint[0]) {
14860     default: break;
14861     case 'b':
14862     case 'r':
14863     case 'f':
14864     case 'd':
14865     case 'v':
14866     case 'y':
14867       return C_RegisterClass;
14868     case 'Z':
14869       // FIXME: While Z does indicate a memory constraint, it specifically
14870       // indicates an r+r address (used in conjunction with the 'y' modifier
14871       // in the replacement string). Currently, we're forcing the base
14872       // register to be r0 in the asm printer (which is interpreted as zero)
14873       // and forming the complete address in the second register. This is
14874       // suboptimal.
14875       return C_Memory;
14876     }
14877   } else if (Constraint == "wc") { // individual CR bits.
14878     return C_RegisterClass;
14879   } else if (Constraint == "wa" || Constraint == "wd" ||
14880              Constraint == "wf" || Constraint == "ws" ||
14881              Constraint == "wi" || Constraint == "ww") {
14882     return C_RegisterClass; // VSX registers.
14883   }
14884   return TargetLowering::getConstraintType(Constraint);
14885 }
14886 
14887 /// Examine constraint type and operand type and determine a weight value.
14888 /// This object must already have been set up with the operand type
14889 /// and the current alternative constraint selected.
14890 TargetLowering::ConstraintWeight
14891 PPCTargetLowering::getSingleConstraintMatchWeight(
14892     AsmOperandInfo &info, const char *constraint) const {
14893   ConstraintWeight weight = CW_Invalid;
14894   Value *CallOperandVal = info.CallOperandVal;
14895     // If we don't have a value, we can't do a match,
14896     // but allow it at the lowest weight.
14897   if (!CallOperandVal)
14898     return CW_Default;
14899   Type *type = CallOperandVal->getType();
14900 
14901   // Look at the constraint type.
14902   if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
14903     return CW_Register; // an individual CR bit.
14904   else if ((StringRef(constraint) == "wa" ||
14905             StringRef(constraint) == "wd" ||
14906             StringRef(constraint) == "wf") &&
14907            type->isVectorTy())
14908     return CW_Register;
14909   else if (StringRef(constraint) == "wi" && type->isIntegerTy(64))
14910     return CW_Register; // just hold 64-bit integers data.
14911   else if (StringRef(constraint) == "ws" && type->isDoubleTy())
14912     return CW_Register;
14913   else if (StringRef(constraint) == "ww" && type->isFloatTy())
14914     return CW_Register;
14915 
14916   switch (*constraint) {
14917   default:
14918     weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
14919     break;
14920   case 'b':
14921     if (type->isIntegerTy())
14922       weight = CW_Register;
14923     break;
14924   case 'f':
14925     if (type->isFloatTy())
14926       weight = CW_Register;
14927     break;
14928   case 'd':
14929     if (type->isDoubleTy())
14930       weight = CW_Register;
14931     break;
14932   case 'v':
14933     if (type->isVectorTy())
14934       weight = CW_Register;
14935     break;
14936   case 'y':
14937     weight = CW_Register;
14938     break;
14939   case 'Z':
14940     weight = CW_Memory;
14941     break;
14942   }
14943   return weight;
14944 }
14945 
14946 std::pair<unsigned, const TargetRegisterClass *>
14947 PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
14948                                                 StringRef Constraint,
14949                                                 MVT VT) const {
14950   if (Constraint.size() == 1) {
14951     // GCC RS6000 Constraint Letters
14952     switch (Constraint[0]) {
14953     case 'b':   // R1-R31
14954       if (VT == MVT::i64 && Subtarget.isPPC64())
14955         return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
14956       return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
14957     case 'r':   // R0-R31
14958       if (VT == MVT::i64 && Subtarget.isPPC64())
14959         return std::make_pair(0U, &PPC::G8RCRegClass);
14960       return std::make_pair(0U, &PPC::GPRCRegClass);
14961     // 'd' and 'f' constraints are both defined to be "the floating point
14962     // registers", where one is for 32-bit and the other for 64-bit. We don't
14963     // really care overly much here so just give them all the same reg classes.
14964     case 'd':
14965     case 'f':
14966       if (Subtarget.hasSPE()) {
14967         if (VT == MVT::f32 || VT == MVT::i32)
14968           return std::make_pair(0U, &PPC::GPRCRegClass);
14969         if (VT == MVT::f64 || VT == MVT::i64)
14970           return std::make_pair(0U, &PPC::SPERCRegClass);
14971       } else {
14972         if (VT == MVT::f32 || VT == MVT::i32)
14973           return std::make_pair(0U, &PPC::F4RCRegClass);
14974         if (VT == MVT::f64 || VT == MVT::i64)
14975           return std::make_pair(0U, &PPC::F8RCRegClass);
14976       }
14977       break;
14978     case 'v':
14979       if (Subtarget.hasAltivec())
14980         return std::make_pair(0U, &PPC::VRRCRegClass);
14981       break;
14982     case 'y':   // crrc
14983       return std::make_pair(0U, &PPC::CRRCRegClass);
14984     }
14985   } else if (Constraint == "wc" && Subtarget.useCRBits()) {
14986     // An individual CR bit.
14987     return std::make_pair(0U, &PPC::CRBITRCRegClass);
14988   } else if ((Constraint == "wa" || Constraint == "wd" ||
14989              Constraint == "wf" || Constraint == "wi") &&
14990              Subtarget.hasVSX()) {
14991     return std::make_pair(0U, &PPC::VSRCRegClass);
14992   } else if ((Constraint == "ws" || Constraint == "ww") && Subtarget.hasVSX()) {
14993     if (VT == MVT::f32 && Subtarget.hasP8Vector())
14994       return std::make_pair(0U, &PPC::VSSRCRegClass);
14995     else
14996       return std::make_pair(0U, &PPC::VSFRCRegClass);
14997   }
14998 
14999   // If we name a VSX register, we can't defer to the base class because it
15000   // will not recognize the correct register (their names will be VSL{0-31}
15001   // and V{0-31} so they won't match). So we match them here.
15002   if (Constraint.size() > 3 && Constraint[1] == 'v' && Constraint[2] == 's') {
15003     int VSNum = atoi(Constraint.data() + 3);
15004     assert(VSNum >= 0 && VSNum <= 63 &&
15005            "Attempted to access a vsr out of range");
15006     if (VSNum < 32)
15007       return std::make_pair(PPC::VSL0 + VSNum, &PPC::VSRCRegClass);
15008     return std::make_pair(PPC::V0 + VSNum - 32, &PPC::VSRCRegClass);
15009   }
15010   std::pair<unsigned, const TargetRegisterClass *> R =
15011       TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
15012 
15013   // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
15014   // (which we call X[0-9]+). If a 64-bit value has been requested, and a
15015   // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent
15016   // register.
15017   // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
15018   // the AsmName field from *RegisterInfo.td, then this would not be necessary.
15019   if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
15020       PPC::GPRCRegClass.contains(R.first))
15021     return std::make_pair(TRI->getMatchingSuperReg(R.first,
15022                             PPC::sub_32, &PPC::G8RCRegClass),
15023                           &PPC::G8RCRegClass);
15024 
15025   // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
15026   if (!R.second && StringRef("{cc}").equals_lower(Constraint)) {
15027     R.first = PPC::CR0;
15028     R.second = &PPC::CRRCRegClass;
15029   }
15030 
15031   return R;
15032 }
15033 
15034 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
15035 /// vector.  If it is invalid, don't add anything to Ops.
15036 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
15037                                                      std::string &Constraint,
15038                                                      std::vector<SDValue>&Ops,
15039                                                      SelectionDAG &DAG) const {
15040   SDValue Result;
15041 
15042   // Only support length 1 constraints.
15043   if (Constraint.length() > 1) return;
15044 
15045   char Letter = Constraint[0];
15046   switch (Letter) {
15047   default: break;
15048   case 'I':
15049   case 'J':
15050   case 'K':
15051   case 'L':
15052   case 'M':
15053   case 'N':
15054   case 'O':
15055   case 'P': {
15056     ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
15057     if (!CST) return; // Must be an immediate to match.
15058     SDLoc dl(Op);
15059     int64_t Value = CST->getSExtValue();
15060     EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
15061                          // numbers are printed as such.
15062     switch (Letter) {
15063     default: llvm_unreachable("Unknown constraint letter!");
15064     case 'I':  // "I" is a signed 16-bit constant.
15065       if (isInt<16>(Value))
15066         Result = DAG.getTargetConstant(Value, dl, TCVT);
15067       break;
15068     case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
15069       if (isShiftedUInt<16, 16>(Value))
15070         Result = DAG.getTargetConstant(Value, dl, TCVT);
15071       break;
15072     case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
15073       if (isShiftedInt<16, 16>(Value))
15074         Result = DAG.getTargetConstant(Value, dl, TCVT);
15075       break;
15076     case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
15077       if (isUInt<16>(Value))
15078         Result = DAG.getTargetConstant(Value, dl, TCVT);
15079       break;
15080     case 'M':  // "M" is a constant that is greater than 31.
15081       if (Value > 31)
15082         Result = DAG.getTargetConstant(Value, dl, TCVT);
15083       break;
15084     case 'N':  // "N" is a positive constant that is an exact power of two.
15085       if (Value > 0 && isPowerOf2_64(Value))
15086         Result = DAG.getTargetConstant(Value, dl, TCVT);
15087       break;
15088     case 'O':  // "O" is the constant zero.
15089       if (Value == 0)
15090         Result = DAG.getTargetConstant(Value, dl, TCVT);
15091       break;
15092     case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
15093       if (isInt<16>(-Value))
15094         Result = DAG.getTargetConstant(Value, dl, TCVT);
15095       break;
15096     }
15097     break;
15098   }
15099   }
15100 
15101   if (Result.getNode()) {
15102     Ops.push_back(Result);
15103     return;
15104   }
15105 
15106   // Handle standard constraint letters.
15107   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
15108 }
15109 
15110 // isLegalAddressingMode - Return true if the addressing mode represented
15111 // by AM is legal for this target, for a load/store of the specified type.
15112 bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
15113                                               const AddrMode &AM, Type *Ty,
15114                                               unsigned AS,
15115                                               Instruction *I) const {
15116   // Vector type r+i form is supported since power9 as DQ form. We don't check
15117   // the offset matching DQ form requirement(off % 16 == 0), because on PowerPC,
15118   // imm form is preferred and the offset can be adjusted to use imm form later
15119   // in pass PPCLoopInstrFormPrep. Also in LSR, for one LSRUse, it uses min and
15120   // max offset to check legal addressing mode, we should be a little aggressive
15121   // to contain other offsets for that LSRUse.
15122   if (Ty->isVectorTy() && AM.BaseOffs != 0 && !Subtarget.hasP9Vector())
15123     return false;
15124 
15125   // PPC allows a sign-extended 16-bit immediate field.
15126   if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
15127     return false;
15128 
15129   // No global is ever allowed as a base.
15130   if (AM.BaseGV)
15131     return false;
15132 
15133   // PPC only support r+r,
15134   switch (AM.Scale) {
15135   case 0:  // "r+i" or just "i", depending on HasBaseReg.
15136     break;
15137   case 1:
15138     if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
15139       return false;
15140     // Otherwise we have r+r or r+i.
15141     break;
15142   case 2:
15143     if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r  or  2*r+i is not allowed.
15144       return false;
15145     // Allow 2*r as r+r.
15146     break;
15147   default:
15148     // No other scales are supported.
15149     return false;
15150   }
15151 
15152   return true;
15153 }
15154 
15155 SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
15156                                            SelectionDAG &DAG) const {
15157   MachineFunction &MF = DAG.getMachineFunction();
15158   MachineFrameInfo &MFI = MF.getFrameInfo();
15159   MFI.setReturnAddressIsTaken(true);
15160 
15161   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
15162     return SDValue();
15163 
15164   SDLoc dl(Op);
15165   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
15166 
15167   // Make sure the function does not optimize away the store of the RA to
15168   // the stack.
15169   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
15170   FuncInfo->setLRStoreRequired();
15171   bool isPPC64 = Subtarget.isPPC64();
15172   auto PtrVT = getPointerTy(MF.getDataLayout());
15173 
15174   if (Depth > 0) {
15175     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
15176     SDValue Offset =
15177         DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
15178                         isPPC64 ? MVT::i64 : MVT::i32);
15179     return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
15180                        DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
15181                        MachinePointerInfo());
15182   }
15183 
15184   // Just load the return address off the stack.
15185   SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
15186   return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
15187                      MachinePointerInfo());
15188 }
15189 
15190 SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
15191                                           SelectionDAG &DAG) const {
15192   SDLoc dl(Op);
15193   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
15194 
15195   MachineFunction &MF = DAG.getMachineFunction();
15196   MachineFrameInfo &MFI = MF.getFrameInfo();
15197   MFI.setFrameAddressIsTaken(true);
15198 
15199   EVT PtrVT = getPointerTy(MF.getDataLayout());
15200   bool isPPC64 = PtrVT == MVT::i64;
15201 
15202   // Naked functions never have a frame pointer, and so we use r1. For all
15203   // other functions, this decision must be delayed until during PEI.
15204   unsigned FrameReg;
15205   if (MF.getFunction().hasFnAttribute(Attribute::Naked))
15206     FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
15207   else
15208     FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;
15209 
15210   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
15211                                          PtrVT);
15212   while (Depth--)
15213     FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
15214                             FrameAddr, MachinePointerInfo());
15215   return FrameAddr;
15216 }
15217 
15218 // FIXME? Maybe this could be a TableGen attribute on some registers and
15219 // this table could be generated automatically from RegInfo.
15220 Register PPCTargetLowering::getRegisterByName(const char* RegName, LLT VT,
15221                                               const MachineFunction &MF) const {
15222   bool isPPC64 = Subtarget.isPPC64();
15223 
15224   bool is64Bit = isPPC64 && VT == LLT::scalar(64);
15225   if (!is64Bit && VT != LLT::scalar(32))
15226     report_fatal_error("Invalid register global variable type");
15227 
15228   Register Reg = StringSwitch<Register>(RegName)
15229                      .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
15230                      .Case("r2", isPPC64 ? Register() : PPC::R2)
15231                      .Case("r13", (is64Bit ? PPC::X13 : PPC::R13))
15232                      .Default(Register());
15233 
15234   if (Reg)
15235     return Reg;
15236   report_fatal_error("Invalid register name global variable");
15237 }
15238 
15239 bool PPCTargetLowering::isAccessedAsGotIndirect(SDValue GA) const {
15240   // 32-bit SVR4 ABI access everything as got-indirect.
15241   if (Subtarget.is32BitELFABI())
15242     return true;
15243 
15244   // AIX accesses everything indirectly through the TOC, which is similar to
15245   // the GOT.
15246   if (Subtarget.isAIXABI())
15247     return true;
15248 
15249   CodeModel::Model CModel = getTargetMachine().getCodeModel();
15250   // If it is small or large code model, module locals are accessed
15251   // indirectly by loading their address from .toc/.got.
15252   if (CModel == CodeModel::Small || CModel == CodeModel::Large)
15253     return true;
15254 
15255   // JumpTable and BlockAddress are accessed as got-indirect.
15256   if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA))
15257     return true;
15258 
15259   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA))
15260     return Subtarget.isGVIndirectSymbol(G->getGlobal());
15261 
15262   return false;
15263 }
15264 
15265 bool
15266 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
15267   // The PowerPC target isn't yet aware of offsets.
15268   return false;
15269 }
15270 
15271 bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
15272                                            const CallInst &I,
15273                                            MachineFunction &MF,
15274                                            unsigned Intrinsic) const {
15275   switch (Intrinsic) {
15276   case Intrinsic::ppc_altivec_lvx:
15277   case Intrinsic::ppc_altivec_lvxl:
15278   case Intrinsic::ppc_altivec_lvebx:
15279   case Intrinsic::ppc_altivec_lvehx:
15280   case Intrinsic::ppc_altivec_lvewx:
15281   case Intrinsic::ppc_vsx_lxvd2x:
15282   case Intrinsic::ppc_vsx_lxvw4x:
15283   case Intrinsic::ppc_vsx_lxvd2x_be:
15284   case Intrinsic::ppc_vsx_lxvw4x_be:
15285   case Intrinsic::ppc_vsx_lxvl:
15286   case Intrinsic::ppc_vsx_lxvll: {
15287     EVT VT;
15288     switch (Intrinsic) {
15289     case Intrinsic::ppc_altivec_lvebx:
15290       VT = MVT::i8;
15291       break;
15292     case Intrinsic::ppc_altivec_lvehx:
15293       VT = MVT::i16;
15294       break;
15295     case Intrinsic::ppc_altivec_lvewx:
15296       VT = MVT::i32;
15297       break;
15298     case Intrinsic::ppc_vsx_lxvd2x:
15299     case Intrinsic::ppc_vsx_lxvd2x_be:
15300       VT = MVT::v2f64;
15301       break;
15302     default:
15303       VT = MVT::v4i32;
15304       break;
15305     }
15306 
15307     Info.opc = ISD::INTRINSIC_W_CHAIN;
15308     Info.memVT = VT;
15309     Info.ptrVal = I.getArgOperand(0);
15310     Info.offset = -VT.getStoreSize()+1;
15311     Info.size = 2*VT.getStoreSize()-1;
15312     Info.align = Align(1);
15313     Info.flags = MachineMemOperand::MOLoad;
15314     return true;
15315   }
15316   case Intrinsic::ppc_altivec_stvx:
15317   case Intrinsic::ppc_altivec_stvxl:
15318   case Intrinsic::ppc_altivec_stvebx:
15319   case Intrinsic::ppc_altivec_stvehx:
15320   case Intrinsic::ppc_altivec_stvewx:
15321   case Intrinsic::ppc_vsx_stxvd2x:
15322   case Intrinsic::ppc_vsx_stxvw4x:
15323   case Intrinsic::ppc_vsx_stxvd2x_be:
15324   case Intrinsic::ppc_vsx_stxvw4x_be:
15325   case Intrinsic::ppc_vsx_stxvl:
15326   case Intrinsic::ppc_vsx_stxvll: {
15327     EVT VT;
15328     switch (Intrinsic) {
15329     case Intrinsic::ppc_altivec_stvebx:
15330       VT = MVT::i8;
15331       break;
15332     case Intrinsic::ppc_altivec_stvehx:
15333       VT = MVT::i16;
15334       break;
15335     case Intrinsic::ppc_altivec_stvewx:
15336       VT = MVT::i32;
15337       break;
15338     case Intrinsic::ppc_vsx_stxvd2x:
15339     case Intrinsic::ppc_vsx_stxvd2x_be:
15340       VT = MVT::v2f64;
15341       break;
15342     default:
15343       VT = MVT::v4i32;
15344       break;
15345     }
15346 
15347     Info.opc = ISD::INTRINSIC_VOID;
15348     Info.memVT = VT;
15349     Info.ptrVal = I.getArgOperand(1);
15350     Info.offset = -VT.getStoreSize()+1;
15351     Info.size = 2*VT.getStoreSize()-1;
15352     Info.align = Align(1);
15353     Info.flags = MachineMemOperand::MOStore;
15354     return true;
15355   }
15356   default:
15357     break;
15358   }
15359 
15360   return false;
15361 }
15362 
15363 /// It returns EVT::Other if the type should be determined using generic
15364 /// target-independent logic.
15365 EVT PPCTargetLowering::getOptimalMemOpType(
15366     const MemOp &Op, const AttributeList &FuncAttributes) const {
15367   if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
15368     // We should use Altivec/VSX loads and stores when available. For unaligned
15369     // addresses, unaligned VSX loads are only fast starting with the P8.
15370     if (Subtarget.hasAltivec() && Op.size() >= 16 &&
15371         (Op.isAligned(Align(16)) ||
15372          ((Op.isMemset() && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
15373       return MVT::v4i32;
15374   }
15375 
15376   if (Subtarget.isPPC64()) {
15377     return MVT::i64;
15378   }
15379 
15380   return MVT::i32;
15381 }
15382 
15383 /// Returns true if it is beneficial to convert a load of a constant
15384 /// to just the constant itself.
15385 bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
15386                                                           Type *Ty) const {
15387   assert(Ty->isIntegerTy());
15388 
15389   unsigned BitSize = Ty->getPrimitiveSizeInBits();
15390   return !(BitSize == 0 || BitSize > 64);
15391 }
15392 
15393 bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
15394   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
15395     return false;
15396   unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
15397   unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
15398   return NumBits1 == 64 && NumBits2 == 32;
15399 }
15400 
15401 bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
15402   if (!VT1.isInteger() || !VT2.isInteger())
15403     return false;
15404   unsigned NumBits1 = VT1.getSizeInBits();
15405   unsigned NumBits2 = VT2.getSizeInBits();
15406   return NumBits1 == 64 && NumBits2 == 32;
15407 }
15408 
15409 bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
15410   // Generally speaking, zexts are not free, but they are free when they can be
15411   // folded with other operations.
15412   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
15413     EVT MemVT = LD->getMemoryVT();
15414     if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
15415          (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
15416         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
15417          LD->getExtensionType() == ISD::ZEXTLOAD))
15418       return true;
15419   }
15420 
15421   // FIXME: Add other cases...
15422   //  - 32-bit shifts with a zext to i64
15423   //  - zext after ctlz, bswap, etc.
15424   //  - zext after and by a constant mask
15425 
15426   return TargetLowering::isZExtFree(Val, VT2);
15427 }
15428 
15429 bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
15430   assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
15431          "invalid fpext types");
15432   // Extending to float128 is not free.
15433   if (DestVT == MVT::f128)
15434     return false;
15435   return true;
15436 }
15437 
15438 bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
15439   return isInt<16>(Imm) || isUInt<16>(Imm);
15440 }
15441 
15442 bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
15443   return isInt<16>(Imm) || isUInt<16>(Imm);
15444 }
15445 
15446 bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
15447                                                        unsigned,
15448                                                        unsigned,
15449                                                        MachineMemOperand::Flags,
15450                                                        bool *Fast) const {
15451   if (DisablePPCUnaligned)
15452     return false;
15453 
15454   // PowerPC supports unaligned memory access for simple non-vector types.
15455   // Although accessing unaligned addresses is not as efficient as accessing
15456   // aligned addresses, it is generally more efficient than manual expansion,
15457   // and generally only traps for software emulation when crossing page
15458   // boundaries.
15459 
15460   if (!VT.isSimple())
15461     return false;
15462 
15463   if (VT.isFloatingPoint() && !VT.isVector() &&
15464       !Subtarget.allowsUnalignedFPAccess())
15465     return false;
15466 
15467   if (VT.getSimpleVT().isVector()) {
15468     if (Subtarget.hasVSX()) {
15469       if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
15470           VT != MVT::v4f32 && VT != MVT::v4i32)
15471         return false;
15472     } else {
15473       return false;
15474     }
15475   }
15476 
15477   if (VT == MVT::ppcf128)
15478     return false;
15479 
15480   if (Fast)
15481     *Fast = true;
15482 
15483   return true;
15484 }
15485 
15486 bool PPCTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
15487                                                SDValue C) const {
15488   // Check integral scalar types.
15489   if (!VT.isScalarInteger())
15490     return false;
15491   if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
15492     if (!ConstNode->getAPIntValue().isSignedIntN(64))
15493       return false;
15494     // This transformation will generate >= 2 operations. But the following
15495     // cases will generate <= 2 instructions during ISEL. So exclude them.
15496     // 1. If the constant multiplier fits 16 bits, it can be handled by one
15497     // HW instruction, ie. MULLI
15498     // 2. If the multiplier after shifted fits 16 bits, an extra shift
15499     // instruction is needed than case 1, ie. MULLI and RLDICR
15500     int64_t Imm = ConstNode->getSExtValue();
15501     unsigned Shift = countTrailingZeros<uint64_t>(Imm);
15502     Imm >>= Shift;
15503     if (isInt<16>(Imm))
15504       return false;
15505     uint64_t UImm = static_cast<uint64_t>(Imm);
15506     if (isPowerOf2_64(UImm + 1) || isPowerOf2_64(UImm - 1) ||
15507         isPowerOf2_64(1 - UImm) || isPowerOf2_64(-1 - UImm))
15508       return true;
15509   }
15510   return false;
15511 }
15512 
15513 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
15514                                                    EVT VT) const {
15515   return isFMAFasterThanFMulAndFAdd(
15516       MF.getFunction(), VT.getTypeForEVT(MF.getFunction().getContext()));
15517 }
15518 
15519 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const Function &F,
15520                                                    Type *Ty) const {
15521   switch (Ty->getScalarType()->getTypeID()) {
15522   case Type::FloatTyID:
15523   case Type::DoubleTyID:
15524     return true;
15525   case Type::FP128TyID:
15526     return Subtarget.hasP9Vector();
15527   default:
15528     return false;
15529   }
15530 }
15531 
15532 // FIXME: add more patterns which are not profitable to hoist.
15533 bool PPCTargetLowering::isProfitableToHoist(Instruction *I) const {
15534   if (!I->hasOneUse())
15535     return true;
15536 
15537   Instruction *User = I->user_back();
15538   assert(User && "A single use instruction with no uses.");
15539 
15540   switch (I->getOpcode()) {
15541   case Instruction::FMul: {
15542     // Don't break FMA, PowerPC prefers FMA.
15543     if (User->getOpcode() != Instruction::FSub &&
15544         User->getOpcode() != Instruction::FAdd)
15545       return true;
15546 
15547     const TargetOptions &Options = getTargetMachine().Options;
15548     const Function *F = I->getFunction();
15549     const DataLayout &DL = F->getParent()->getDataLayout();
15550     Type *Ty = User->getOperand(0)->getType();
15551 
15552     return !(
15553         isFMAFasterThanFMulAndFAdd(*F, Ty) &&
15554         isOperationLegalOrCustom(ISD::FMA, getValueType(DL, Ty)) &&
15555         (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath));
15556   }
15557   case Instruction::Load: {
15558     // Don't break "store (load float*)" pattern, this pattern will be combined
15559     // to "store (load int32)" in later InstCombine pass. See function
15560     // combineLoadToOperationType. On PowerPC, loading a float point takes more
15561     // cycles than loading a 32 bit integer.
15562     LoadInst *LI = cast<LoadInst>(I);
15563     // For the loads that combineLoadToOperationType does nothing, like
15564     // ordered load, it should be profitable to hoist them.
15565     // For swifterror load, it can only be used for pointer to pointer type, so
15566     // later type check should get rid of this case.
15567     if (!LI->isUnordered())
15568       return true;
15569 
15570     if (User->getOpcode() != Instruction::Store)
15571       return true;
15572 
15573     if (I->getType()->getTypeID() != Type::FloatTyID)
15574       return true;
15575 
15576     return false;
15577   }
15578   default:
15579     return true;
15580   }
15581   return true;
15582 }
15583 
15584 const MCPhysReg *
15585 PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
15586   // LR is a callee-save register, but we must treat it as clobbered by any call
15587   // site. Hence we include LR in the scratch registers, which are in turn added
15588   // as implicit-defs for stackmaps and patchpoints. The same reasoning applies
15589   // to CTR, which is used by any indirect call.
15590   static const MCPhysReg ScratchRegs[] = {
15591     PPC::X12, PPC::LR8, PPC::CTR8, 0
15592   };
15593 
15594   return ScratchRegs;
15595 }
15596 
15597 Register PPCTargetLowering::getExceptionPointerRegister(
15598     const Constant *PersonalityFn) const {
15599   return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
15600 }
15601 
15602 Register PPCTargetLowering::getExceptionSelectorRegister(
15603     const Constant *PersonalityFn) const {
15604   return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
15605 }
15606 
15607 bool
15608 PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
15609                      EVT VT , unsigned DefinedValues) const {
15610   if (VT == MVT::v2i64)
15611     return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves
15612 
15613   if (Subtarget.hasVSX())
15614     return true;
15615 
15616   return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
15617 }
15618 
15619 Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
15620   if (DisableILPPref || Subtarget.enableMachineScheduler())
15621     return TargetLowering::getSchedulingPreference(N);
15622 
15623   return Sched::ILP;
15624 }
15625 
15626 // Create a fast isel object.
15627 FastISel *
15628 PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
15629                                   const TargetLibraryInfo *LibInfo) const {
15630   return PPC::createFastISel(FuncInfo, LibInfo);
15631 }
15632 
15633 // 'Inverted' means the FMA opcode after negating one multiplicand.
15634 // For example, (fma -a b c) = (fnmsub a b c)
15635 static unsigned invertFMAOpcode(unsigned Opc) {
15636   switch (Opc) {
15637   default:
15638     llvm_unreachable("Invalid FMA opcode for PowerPC!");
15639   case ISD::FMA:
15640     return PPCISD::FNMSUB;
15641   case PPCISD::FNMSUB:
15642     return ISD::FMA;
15643   }
15644 }
15645 
15646 SDValue PPCTargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
15647                                                 bool LegalOps, bool OptForSize,
15648                                                 NegatibleCost &Cost,
15649                                                 unsigned Depth) const {
15650   if (Depth > SelectionDAG::MaxRecursionDepth)
15651     return SDValue();
15652 
15653   unsigned Opc = Op.getOpcode();
15654   EVT VT = Op.getValueType();
15655   SDNodeFlags Flags = Op.getNode()->getFlags();
15656 
15657   switch (Opc) {
15658   case PPCISD::FNMSUB:
15659     if (!Op.hasOneUse() || !isTypeLegal(VT))
15660       break;
15661 
15662     const TargetOptions &Options = getTargetMachine().Options;
15663     SDValue N0 = Op.getOperand(0);
15664     SDValue N1 = Op.getOperand(1);
15665     SDValue N2 = Op.getOperand(2);
15666     SDLoc Loc(Op);
15667 
15668     NegatibleCost N2Cost = NegatibleCost::Expensive;
15669     SDValue NegN2 =
15670         getNegatedExpression(N2, DAG, LegalOps, OptForSize, N2Cost, Depth + 1);
15671 
15672     if (!NegN2)
15673       return SDValue();
15674 
15675     // (fneg (fnmsub a b c)) => (fnmsub (fneg a) b (fneg c))
15676     // (fneg (fnmsub a b c)) => (fnmsub a (fneg b) (fneg c))
15677     // These transformations may change sign of zeroes. For example,
15678     // -(-ab-(-c))=-0 while -(-(ab-c))=+0 when a=b=c=1.
15679     if (Flags.hasNoSignedZeros() || Options.NoSignedZerosFPMath) {
15680       // Try and choose the cheaper one to negate.
15681       NegatibleCost N0Cost = NegatibleCost::Expensive;
15682       SDValue NegN0 = getNegatedExpression(N0, DAG, LegalOps, OptForSize,
15683                                            N0Cost, Depth + 1);
15684 
15685       NegatibleCost N1Cost = NegatibleCost::Expensive;
15686       SDValue NegN1 = getNegatedExpression(N1, DAG, LegalOps, OptForSize,
15687                                            N1Cost, Depth + 1);
15688 
15689       if (NegN0 && N0Cost <= N1Cost) {
15690         Cost = std::min(N0Cost, N2Cost);
15691         return DAG.getNode(Opc, Loc, VT, NegN0, N1, NegN2, Flags);
15692       } else if (NegN1) {
15693         Cost = std::min(N1Cost, N2Cost);
15694         return DAG.getNode(Opc, Loc, VT, N0, NegN1, NegN2, Flags);
15695       }
15696     }
15697 
15698     // (fneg (fnmsub a b c)) => (fma a b (fneg c))
15699     if (isOperationLegal(ISD::FMA, VT)) {
15700       Cost = N2Cost;
15701       return DAG.getNode(ISD::FMA, Loc, VT, N0, N1, NegN2, Flags);
15702     }
15703 
15704     break;
15705   }
15706 
15707   return TargetLowering::getNegatedExpression(Op, DAG, LegalOps, OptForSize,
15708                                               Cost, Depth);
15709 }
15710 
15711 // Override to enable LOAD_STACK_GUARD lowering on Linux.
15712 bool PPCTargetLowering::useLoadStackGuardNode() const {
15713   if (!Subtarget.isTargetLinux())
15714     return TargetLowering::useLoadStackGuardNode();
15715   return true;
15716 }
15717 
15718 // Override to disable global variable loading on Linux.
15719 void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
15720   if (!Subtarget.isTargetLinux())
15721     return TargetLowering::insertSSPDeclarations(M);
15722 }
15723 
15724 bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
15725                                      bool ForCodeSize) const {
15726   if (!VT.isSimple() || !Subtarget.hasVSX())
15727     return false;
15728 
15729   switch(VT.getSimpleVT().SimpleTy) {
15730   default:
15731     // For FP types that are currently not supported by PPC backend, return
15732     // false. Examples: f16, f80.
15733     return false;
15734   case MVT::f32:
15735   case MVT::f64:
15736     if (Subtarget.hasPrefixInstrs()) {
15737       // With prefixed instructions, we can materialize anything that can be
15738       // represented with a 32-bit immediate, not just positive zero.
15739       APFloat APFloatOfImm = Imm;
15740       return convertToNonDenormSingle(APFloatOfImm);
15741     }
15742     LLVM_FALLTHROUGH;
15743   case MVT::ppcf128:
15744     return Imm.isPosZero();
15745   }
15746 }
15747 
15748 // For vector shift operation op, fold
15749 // (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
15750 static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
15751                                   SelectionDAG &DAG) {
15752   SDValue N0 = N->getOperand(0);
15753   SDValue N1 = N->getOperand(1);
15754   EVT VT = N0.getValueType();
15755   unsigned OpSizeInBits = VT.getScalarSizeInBits();
15756   unsigned Opcode = N->getOpcode();
15757   unsigned TargetOpcode;
15758 
15759   switch (Opcode) {
15760   default:
15761     llvm_unreachable("Unexpected shift operation");
15762   case ISD::SHL:
15763     TargetOpcode = PPCISD::SHL;
15764     break;
15765   case ISD::SRL:
15766     TargetOpcode = PPCISD::SRL;
15767     break;
15768   case ISD::SRA:
15769     TargetOpcode = PPCISD::SRA;
15770     break;
15771   }
15772 
15773   if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
15774       N1->getOpcode() == ISD::AND)
15775     if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
15776       if (Mask->getZExtValue() == OpSizeInBits - 1)
15777         return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));
15778 
15779   return SDValue();
15780 }
15781 
15782 SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
15783   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
15784     return Value;
15785 
15786   SDValue N0 = N->getOperand(0);
15787   ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
15788   if (!Subtarget.isISA3_0() || !Subtarget.isPPC64() ||
15789       N0.getOpcode() != ISD::SIGN_EXTEND ||
15790       N0.getOperand(0).getValueType() != MVT::i32 || CN1 == nullptr ||
15791       N->getValueType(0) != MVT::i64)
15792     return SDValue();
15793 
15794   // We can't save an operation here if the value is already extended, and
15795   // the existing shift is easier to combine.
15796   SDValue ExtsSrc = N0.getOperand(0);
15797   if (ExtsSrc.getOpcode() == ISD::TRUNCATE &&
15798       ExtsSrc.getOperand(0).getOpcode() == ISD::AssertSext)
15799     return SDValue();
15800 
15801   SDLoc DL(N0);
15802   SDValue ShiftBy = SDValue(CN1, 0);
15803   // We want the shift amount to be i32 on the extswli, but the shift could
15804   // have an i64.
15805   if (ShiftBy.getValueType() == MVT::i64)
15806     ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32);
15807 
15808   return DCI.DAG.getNode(PPCISD::EXTSWSLI, DL, MVT::i64, N0->getOperand(0),
15809                          ShiftBy);
15810 }
15811 
15812 SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
15813   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
15814     return Value;
15815 
15816   return SDValue();
15817 }
15818 
15819 SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
15820   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
15821     return Value;
15822 
15823   return SDValue();
15824 }
15825 
15826 // Transform (add X, (zext(setne Z, C))) -> (addze X, (addic (addi Z, -C), -1))
15827 // Transform (add X, (zext(sete  Z, C))) -> (addze X, (subfic (addi Z, -C), 0))
15828 // When C is zero, the equation (addi Z, -C) can be simplified to Z
15829 // Requirement: -C in [-32768, 32767], X and Z are MVT::i64 types
15830 static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG,
15831                                  const PPCSubtarget &Subtarget) {
15832   if (!Subtarget.isPPC64())
15833     return SDValue();
15834 
15835   SDValue LHS = N->getOperand(0);
15836   SDValue RHS = N->getOperand(1);
15837 
15838   auto isZextOfCompareWithConstant = [](SDValue Op) {
15839     if (Op.getOpcode() != ISD::ZERO_EXTEND || !Op.hasOneUse() ||
15840         Op.getValueType() != MVT::i64)
15841       return false;
15842 
15843     SDValue Cmp = Op.getOperand(0);
15844     if (Cmp.getOpcode() != ISD::SETCC || !Cmp.hasOneUse() ||
15845         Cmp.getOperand(0).getValueType() != MVT::i64)
15846       return false;
15847 
15848     if (auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1))) {
15849       int64_t NegConstant = 0 - Constant->getSExtValue();
15850       // Due to the limitations of the addi instruction,
15851       // -C is required to be [-32768, 32767].
15852       return isInt<16>(NegConstant);
15853     }
15854 
15855     return false;
15856   };
15857 
15858   bool LHSHasPattern = isZextOfCompareWithConstant(LHS);
15859   bool RHSHasPattern = isZextOfCompareWithConstant(RHS);
15860 
15861   // If there is a pattern, canonicalize a zext operand to the RHS.
15862   if (LHSHasPattern && !RHSHasPattern)
15863     std::swap(LHS, RHS);
15864   else if (!LHSHasPattern && !RHSHasPattern)
15865     return SDValue();
15866 
15867   SDLoc DL(N);
15868   SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Glue);
15869   SDValue Cmp = RHS.getOperand(0);
15870   SDValue Z = Cmp.getOperand(0);
15871   auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1));
15872 
15873   assert(Constant && "Constant Should not be a null pointer.");
15874   int64_t NegConstant = 0 - Constant->getSExtValue();
15875 
15876   switch(cast<CondCodeSDNode>(Cmp.getOperand(2))->get()) {
15877   default: break;
15878   case ISD::SETNE: {
15879     //                                 when C == 0
15880     //                             --> addze X, (addic Z, -1).carry
15881     //                            /
15882     // add X, (zext(setne Z, C))--
15883     //                            \    when -32768 <= -C <= 32767 && C != 0
15884     //                             --> addze X, (addic (addi Z, -C), -1).carry
15885     SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
15886                               DAG.getConstant(NegConstant, DL, MVT::i64));
15887     SDValue AddOrZ = NegConstant != 0 ? Add : Z;
15888     SDValue Addc = DAG.getNode(ISD::ADDC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
15889                                AddOrZ, DAG.getConstant(-1ULL, DL, MVT::i64));
15890     return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
15891                        SDValue(Addc.getNode(), 1));
15892     }
15893   case ISD::SETEQ: {
15894     //                                 when C == 0
15895     //                             --> addze X, (subfic Z, 0).carry
15896     //                            /
15897     // add X, (zext(sete  Z, C))--
15898     //                            \    when -32768 <= -C <= 32767 && C != 0
15899     //                             --> addze X, (subfic (addi Z, -C), 0).carry
15900     SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
15901                               DAG.getConstant(NegConstant, DL, MVT::i64));
15902     SDValue AddOrZ = NegConstant != 0 ? Add : Z;
15903     SDValue Subc = DAG.getNode(ISD::SUBC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
15904                                DAG.getConstant(0, DL, MVT::i64), AddOrZ);
15905     return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
15906                        SDValue(Subc.getNode(), 1));
15907     }
15908   }
15909 
15910   return SDValue();
15911 }
15912 
15913 // Transform
15914 // (add C1, (MAT_PCREL_ADDR GlobalAddr+C2)) to
15915 // (MAT_PCREL_ADDR GlobalAddr+(C1+C2))
15916 // In this case both C1 and C2 must be known constants.
15917 // C1+C2 must fit into a 34 bit signed integer.
15918 static SDValue combineADDToMAT_PCREL_ADDR(SDNode *N, SelectionDAG &DAG,
15919                                           const PPCSubtarget &Subtarget) {
15920   if (!Subtarget.isUsingPCRelativeCalls())
15921     return SDValue();
15922 
15923   // Check both Operand 0 and Operand 1 of the ADD node for the PCRel node.
15924   // If we find that node try to cast the Global Address and the Constant.
15925   SDValue LHS = N->getOperand(0);
15926   SDValue RHS = N->getOperand(1);
15927 
15928   if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
15929     std::swap(LHS, RHS);
15930 
15931   if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
15932     return SDValue();
15933 
15934   // Operand zero of PPCISD::MAT_PCREL_ADDR is the GA node.
15935   GlobalAddressSDNode *GSDN = dyn_cast<GlobalAddressSDNode>(LHS.getOperand(0));
15936   ConstantSDNode* ConstNode = dyn_cast<ConstantSDNode>(RHS);
15937 
15938   // Check that both casts succeeded.
15939   if (!GSDN || !ConstNode)
15940     return SDValue();
15941 
15942   int64_t NewOffset = GSDN->getOffset() + ConstNode->getSExtValue();
15943   SDLoc DL(GSDN);
15944 
15945   // The signed int offset needs to fit in 34 bits.
15946   if (!isInt<34>(NewOffset))
15947     return SDValue();
15948 
15949   // The new global address is a copy of the old global address except
15950   // that it has the updated Offset.
15951   SDValue GA =
15952       DAG.getTargetGlobalAddress(GSDN->getGlobal(), DL, GSDN->getValueType(0),
15953                                  NewOffset, GSDN->getTargetFlags());
15954   SDValue MatPCRel =
15955       DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, GSDN->getValueType(0), GA);
15956   return MatPCRel;
15957 }
15958 
15959 SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {
15960   if (auto Value = combineADDToADDZE(N, DCI.DAG, Subtarget))
15961     return Value;
15962 
15963   if (auto Value = combineADDToMAT_PCREL_ADDR(N, DCI.DAG, Subtarget))
15964     return Value;
15965 
15966   return SDValue();
15967 }
15968 
15969 // Detect TRUNCATE operations on bitcasts of float128 values.
15970 // What we are looking for here is the situtation where we extract a subset
15971 // of bits from a 128 bit float.
15972 // This can be of two forms:
15973 // 1) BITCAST of f128 feeding TRUNCATE
15974 // 2) BITCAST of f128 feeding SRL (a shift) feeding TRUNCATE
15975 // The reason this is required is because we do not have a legal i128 type
15976 // and so we want to prevent having to store the f128 and then reload part
15977 // of it.
15978 SDValue PPCTargetLowering::combineTRUNCATE(SDNode *N,
15979                                            DAGCombinerInfo &DCI) const {
15980   // If we are using CRBits then try that first.
15981   if (Subtarget.useCRBits()) {
15982     // Check if CRBits did anything and return that if it did.
15983     if (SDValue CRTruncValue = DAGCombineTruncBoolExt(N, DCI))
15984       return CRTruncValue;
15985   }
15986 
15987   SDLoc dl(N);
15988   SDValue Op0 = N->getOperand(0);
15989 
15990   // fold (truncate (abs (sub (zext a), (zext b)))) -> (vabsd a, b)
15991   if (Subtarget.hasP9Altivec() && Op0.getOpcode() == ISD::ABS) {
15992     EVT VT = N->getValueType(0);
15993     if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
15994       return SDValue();
15995     SDValue Sub = Op0.getOperand(0);
15996     if (Sub.getOpcode() == ISD::SUB) {
15997       SDValue SubOp0 = Sub.getOperand(0);
15998       SDValue SubOp1 = Sub.getOperand(1);
15999       if ((SubOp0.getOpcode() == ISD::ZERO_EXTEND) &&
16000           (SubOp1.getOpcode() == ISD::ZERO_EXTEND)) {
16001         return DCI.DAG.getNode(PPCISD::VABSD, dl, VT, SubOp0.getOperand(0),
16002                                SubOp1.getOperand(0),
16003                                DCI.DAG.getTargetConstant(0, dl, MVT::i32));
16004       }
16005     }
16006   }
16007 
16008   // Looking for a truncate of i128 to i64.
16009   if (Op0.getValueType() != MVT::i128 || N->getValueType(0) != MVT::i64)
16010     return SDValue();
16011 
16012   int EltToExtract = DCI.DAG.getDataLayout().isBigEndian() ? 1 : 0;
16013 
16014   // SRL feeding TRUNCATE.
16015   if (Op0.getOpcode() == ISD::SRL) {
16016     ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
16017     // The right shift has to be by 64 bits.
16018     if (!ConstNode || ConstNode->getZExtValue() != 64)
16019       return SDValue();
16020 
16021     // Switch the element number to extract.
16022     EltToExtract = EltToExtract ? 0 : 1;
16023     // Update Op0 past the SRL.
16024     Op0 = Op0.getOperand(0);
16025   }
16026 
16027   // BITCAST feeding a TRUNCATE possibly via SRL.
16028   if (Op0.getOpcode() == ISD::BITCAST &&
16029       Op0.getValueType() == MVT::i128 &&
16030       Op0.getOperand(0).getValueType() == MVT::f128) {
16031     SDValue Bitcast = DCI.DAG.getBitcast(MVT::v2i64, Op0.getOperand(0));
16032     return DCI.DAG.getNode(
16033         ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Bitcast,
16034         DCI.DAG.getTargetConstant(EltToExtract, dl, MVT::i32));
16035   }
16036   return SDValue();
16037 }
16038 
16039 SDValue PPCTargetLowering::combineMUL(SDNode *N, DAGCombinerInfo &DCI) const {
16040   SelectionDAG &DAG = DCI.DAG;
16041 
16042   ConstantSDNode *ConstOpOrElement = isConstOrConstSplat(N->getOperand(1));
16043   if (!ConstOpOrElement)
16044     return SDValue();
16045 
16046   // An imul is usually smaller than the alternative sequence for legal type.
16047   if (DAG.getMachineFunction().getFunction().hasMinSize() &&
16048       isOperationLegal(ISD::MUL, N->getValueType(0)))
16049     return SDValue();
16050 
16051   auto IsProfitable = [this](bool IsNeg, bool IsAddOne, EVT VT) -> bool {
16052     switch (this->Subtarget.getCPUDirective()) {
16053     default:
16054       // TODO: enhance the condition for subtarget before pwr8
16055       return false;
16056     case PPC::DIR_PWR8:
16057       //  type        mul     add    shl
16058       // scalar        4       1      1
16059       // vector        7       2      2
16060       return true;
16061     case PPC::DIR_PWR9:
16062     case PPC::DIR_PWR10:
16063     case PPC::DIR_PWR_FUTURE:
16064       //  type        mul     add    shl
16065       // scalar        5       2      2
16066       // vector        7       2      2
16067 
16068       // The cycle RATIO of related operations are showed as a table above.
16069       // Because mul is 5(scalar)/7(vector), add/sub/shl are all 2 for both
16070       // scalar and vector type. For 2 instrs patterns, add/sub + shl
16071       // are 4, it is always profitable; but for 3 instrs patterns
16072       // (mul x, -(2^N + 1)) => -(add (shl x, N), x), sub + add + shl are 6.
16073       // So we should only do it for vector type.
16074       return IsAddOne && IsNeg ? VT.isVector() : true;
16075     }
16076   };
16077 
16078   EVT VT = N->getValueType(0);
16079   SDLoc DL(N);
16080 
16081   const APInt &MulAmt = ConstOpOrElement->getAPIntValue();
16082   bool IsNeg = MulAmt.isNegative();
16083   APInt MulAmtAbs = MulAmt.abs();
16084 
16085   if ((MulAmtAbs - 1).isPowerOf2()) {
16086     // (mul x, 2^N + 1) => (add (shl x, N), x)
16087     // (mul x, -(2^N + 1)) => -(add (shl x, N), x)
16088 
16089     if (!IsProfitable(IsNeg, true, VT))
16090       return SDValue();
16091 
16092     SDValue Op0 = N->getOperand(0);
16093     SDValue Op1 =
16094         DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
16095                     DAG.getConstant((MulAmtAbs - 1).logBase2(), DL, VT));
16096     SDValue Res = DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);
16097 
16098     if (!IsNeg)
16099       return Res;
16100 
16101     return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
16102   } else if ((MulAmtAbs + 1).isPowerOf2()) {
16103     // (mul x, 2^N - 1) => (sub (shl x, N), x)
16104     // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
16105 
16106     if (!IsProfitable(IsNeg, false, VT))
16107       return SDValue();
16108 
16109     SDValue Op0 = N->getOperand(0);
16110     SDValue Op1 =
16111         DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
16112                     DAG.getConstant((MulAmtAbs + 1).logBase2(), DL, VT));
16113 
16114     if (!IsNeg)
16115       return DAG.getNode(ISD::SUB, DL, VT, Op1, Op0);
16116     else
16117       return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);
16118 
16119   } else {
16120     return SDValue();
16121   }
16122 }
16123 
16124 // Combine fma-like op (like fnmsub) with fnegs to appropriate op. Do this
16125 // in combiner since we need to check SD flags and other subtarget features.
16126 SDValue PPCTargetLowering::combineFMALike(SDNode *N,
16127                                           DAGCombinerInfo &DCI) const {
16128   SDValue N0 = N->getOperand(0);
16129   SDValue N1 = N->getOperand(1);
16130   SDValue N2 = N->getOperand(2);
16131   SDNodeFlags Flags = N->getFlags();
16132   EVT VT = N->getValueType(0);
16133   SelectionDAG &DAG = DCI.DAG;
16134   const TargetOptions &Options = getTargetMachine().Options;
16135   unsigned Opc = N->getOpcode();
16136   bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
16137   bool LegalOps = !DCI.isBeforeLegalizeOps();
16138   SDLoc Loc(N);
16139 
16140   if (!isOperationLegal(ISD::FMA, VT))
16141     return SDValue();
16142 
16143   // Allowing transformation to FNMSUB may change sign of zeroes when ab-c=0
16144   // since (fnmsub a b c)=-0 while c-ab=+0.
16145   if (!Flags.hasNoSignedZeros() && !Options.NoSignedZerosFPMath)
16146     return SDValue();
16147 
16148   // (fma (fneg a) b c) => (fnmsub a b c)
16149   // (fnmsub (fneg a) b c) => (fma a b c)
16150   if (SDValue NegN0 = getCheaperNegatedExpression(N0, DAG, LegalOps, CodeSize))
16151     return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, NegN0, N1, N2, Flags);
16152 
16153   // (fma a (fneg b) c) => (fnmsub a b c)
16154   // (fnmsub a (fneg b) c) => (fma a b c)
16155   if (SDValue NegN1 = getCheaperNegatedExpression(N1, DAG, LegalOps, CodeSize))
16156     return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, N0, NegN1, N2, Flags);
16157 
16158   return SDValue();
16159 }
16160 
16161 bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
16162   // Only duplicate to increase tail-calls for the 64bit SysV ABIs.
16163   if (!Subtarget.is64BitELFABI())
16164     return false;
16165 
16166   // If not a tail call then no need to proceed.
16167   if (!CI->isTailCall())
16168     return false;
16169 
16170   // If sibling calls have been disabled and tail-calls aren't guaranteed
16171   // there is no reason to duplicate.
16172   auto &TM = getTargetMachine();
16173   if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
16174     return false;
16175 
16176   // Can't tail call a function called indirectly, or if it has variadic args.
16177   const Function *Callee = CI->getCalledFunction();
16178   if (!Callee || Callee->isVarArg())
16179     return false;
16180 
16181   // Make sure the callee and caller calling conventions are eligible for tco.
16182   const Function *Caller = CI->getParent()->getParent();
16183   if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
16184                                            CI->getCallingConv()))
16185       return false;
16186 
16187   // If the function is local then we have a good chance at tail-calling it
16188   return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
16189 }
16190 
16191 bool PPCTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
16192   if (!Subtarget.hasVSX())
16193     return false;
16194   if (Subtarget.hasP9Vector() && VT == MVT::f128)
16195     return true;
16196   return VT == MVT::f32 || VT == MVT::f64 ||
16197     VT == MVT::v4f32 || VT == MVT::v2f64;
16198 }
16199 
16200 bool PPCTargetLowering::
16201 isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
16202   const Value *Mask = AndI.getOperand(1);
16203   // If the mask is suitable for andi. or andis. we should sink the and.
16204   if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
16205     // Can't handle constants wider than 64-bits.
16206     if (CI->getBitWidth() > 64)
16207       return false;
16208     int64_t ConstVal = CI->getZExtValue();
16209     return isUInt<16>(ConstVal) ||
16210       (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
16211   }
16212 
16213   // For non-constant masks, we can always use the record-form and.
16214   return true;
16215 }
16216 
16217 // Transform (abs (sub (zext a), (zext b))) to (vabsd a b 0)
16218 // Transform (abs (sub (zext a), (zext_invec b))) to (vabsd a b 0)
16219 // Transform (abs (sub (zext_invec a), (zext_invec b))) to (vabsd a b 0)
16220 // Transform (abs (sub (zext_invec a), (zext b))) to (vabsd a b 0)
16221 // Transform (abs (sub a, b) to (vabsd a b 1)) if a & b of type v4i32
16222 SDValue PPCTargetLowering::combineABS(SDNode *N, DAGCombinerInfo &DCI) const {
16223   assert((N->getOpcode() == ISD::ABS) && "Need ABS node here");
16224   assert(Subtarget.hasP9Altivec() &&
16225          "Only combine this when P9 altivec supported!");
16226   EVT VT = N->getValueType(0);
16227   if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
16228     return SDValue();
16229 
16230   SelectionDAG &DAG = DCI.DAG;
16231   SDLoc dl(N);
16232   if (N->getOperand(0).getOpcode() == ISD::SUB) {
16233     // Even for signed integers, if it's known to be positive (as signed
16234     // integer) due to zero-extended inputs.
16235     unsigned SubOpcd0 = N->getOperand(0)->getOperand(0).getOpcode();
16236     unsigned SubOpcd1 = N->getOperand(0)->getOperand(1).getOpcode();
16237     if ((SubOpcd0 == ISD::ZERO_EXTEND ||
16238          SubOpcd0 == ISD::ZERO_EXTEND_VECTOR_INREG) &&
16239         (SubOpcd1 == ISD::ZERO_EXTEND ||
16240          SubOpcd1 == ISD::ZERO_EXTEND_VECTOR_INREG)) {
16241       return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
16242                          N->getOperand(0)->getOperand(0),
16243                          N->getOperand(0)->getOperand(1),
16244                          DAG.getTargetConstant(0, dl, MVT::i32));
16245     }
16246 
16247     // For type v4i32, it can be optimized with xvnegsp + vabsduw
16248     if (N->getOperand(0).getValueType() == MVT::v4i32 &&
16249         N->getOperand(0).hasOneUse()) {
16250       return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
16251                          N->getOperand(0)->getOperand(0),
16252                          N->getOperand(0)->getOperand(1),
16253                          DAG.getTargetConstant(1, dl, MVT::i32));
16254     }
16255   }
16256 
16257   return SDValue();
16258 }
16259 
16260 // For type v4i32/v8ii16/v16i8, transform
16261 // from (vselect (setcc a, b, setugt), (sub a, b), (sub b, a)) to (vabsd a, b)
16262 // from (vselect (setcc a, b, setuge), (sub a, b), (sub b, a)) to (vabsd a, b)
16263 // from (vselect (setcc a, b, setult), (sub b, a), (sub a, b)) to (vabsd a, b)
16264 // from (vselect (setcc a, b, setule), (sub b, a), (sub a, b)) to (vabsd a, b)
16265 SDValue PPCTargetLowering::combineVSelect(SDNode *N,
16266                                           DAGCombinerInfo &DCI) const {
16267   assert((N->getOpcode() == ISD::VSELECT) && "Need VSELECT node here");
16268   assert(Subtarget.hasP9Altivec() &&
16269          "Only combine this when P9 altivec supported!");
16270 
16271   SelectionDAG &DAG = DCI.DAG;
16272   SDLoc dl(N);
16273   SDValue Cond = N->getOperand(0);
16274   SDValue TrueOpnd = N->getOperand(1);
16275   SDValue FalseOpnd = N->getOperand(2);
16276   EVT VT = N->getOperand(1).getValueType();
16277 
16278   if (Cond.getOpcode() != ISD::SETCC || TrueOpnd.getOpcode() != ISD::SUB ||
16279       FalseOpnd.getOpcode() != ISD::SUB)
16280     return SDValue();
16281 
16282   // ABSD only available for type v4i32/v8i16/v16i8
16283   if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
16284     return SDValue();
16285 
16286   // At least to save one more dependent computation
16287   if (!(Cond.hasOneUse() || TrueOpnd.hasOneUse() || FalseOpnd.hasOneUse()))
16288     return SDValue();
16289 
16290   ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
16291 
16292   // Can only handle unsigned comparison here
16293   switch (CC) {
16294   default:
16295     return SDValue();
16296   case ISD::SETUGT:
16297   case ISD::SETUGE:
16298     break;
16299   case ISD::SETULT:
16300   case ISD::SETULE:
16301     std::swap(TrueOpnd, FalseOpnd);
16302     break;
16303   }
16304 
16305   SDValue CmpOpnd1 = Cond.getOperand(0);
16306   SDValue CmpOpnd2 = Cond.getOperand(1);
16307 
16308   // SETCC CmpOpnd1 CmpOpnd2 cond
16309   // TrueOpnd = CmpOpnd1 - CmpOpnd2
16310   // FalseOpnd = CmpOpnd2 - CmpOpnd1
16311   if (TrueOpnd.getOperand(0) == CmpOpnd1 &&
16312       TrueOpnd.getOperand(1) == CmpOpnd2 &&
16313       FalseOpnd.getOperand(0) == CmpOpnd2 &&
16314       FalseOpnd.getOperand(1) == CmpOpnd1) {
16315     return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(1).getValueType(),
16316                        CmpOpnd1, CmpOpnd2,
16317                        DAG.getTargetConstant(0, dl, MVT::i32));
16318   }
16319 
16320   return SDValue();
16321 }
16322