1 //===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the PPCISelLowering class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "PPCISelLowering.h"
14 #include "MCTargetDesc/PPCPredicates.h"
15 #include "PPC.h"
16 #include "PPCCCState.h"
17 #include "PPCCallingConv.h"
18 #include "PPCFrameLowering.h"
19 #include "PPCInstrInfo.h"
20 #include "PPCMachineFunctionInfo.h"
21 #include "PPCPerfectShuffle.h"
22 #include "PPCRegisterInfo.h"
23 #include "PPCSubtarget.h"
24 #include "PPCTargetMachine.h"
25 #include "llvm/ADT/APFloat.h"
26 #include "llvm/ADT/APInt.h"
27 #include "llvm/ADT/ArrayRef.h"
28 #include "llvm/ADT/DenseMap.h"
29 #include "llvm/ADT/None.h"
30 #include "llvm/ADT/STLExtras.h"
31 #include "llvm/ADT/SmallPtrSet.h"
32 #include "llvm/ADT/SmallSet.h"
33 #include "llvm/ADT/SmallVector.h"
34 #include "llvm/ADT/Statistic.h"
35 #include "llvm/ADT/StringRef.h"
36 #include "llvm/ADT/StringSwitch.h"
37 #include "llvm/CodeGen/CallingConvLower.h"
38 #include "llvm/CodeGen/ISDOpcodes.h"
39 #include "llvm/CodeGen/MachineBasicBlock.h"
40 #include "llvm/CodeGen/MachineFrameInfo.h"
41 #include "llvm/CodeGen/MachineFunction.h"
42 #include "llvm/CodeGen/MachineInstr.h"
43 #include "llvm/CodeGen/MachineInstrBuilder.h"
44 #include "llvm/CodeGen/MachineJumpTableInfo.h"
45 #include "llvm/CodeGen/MachineLoopInfo.h"
46 #include "llvm/CodeGen/MachineMemOperand.h"
47 #include "llvm/CodeGen/MachineModuleInfo.h"
48 #include "llvm/CodeGen/MachineOperand.h"
49 #include "llvm/CodeGen/MachineRegisterInfo.h"
50 #include "llvm/CodeGen/RuntimeLibcalls.h"
51 #include "llvm/CodeGen/SelectionDAG.h"
52 #include "llvm/CodeGen/SelectionDAGNodes.h"
53 #include "llvm/CodeGen/TargetInstrInfo.h"
54 #include "llvm/CodeGen/TargetLowering.h"
55 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
56 #include "llvm/CodeGen/TargetRegisterInfo.h"
57 #include "llvm/CodeGen/ValueTypes.h"
58 #include "llvm/IR/CallingConv.h"
59 #include "llvm/IR/Constant.h"
60 #include "llvm/IR/Constants.h"
61 #include "llvm/IR/DataLayout.h"
62 #include "llvm/IR/DebugLoc.h"
63 #include "llvm/IR/DerivedTypes.h"
64 #include "llvm/IR/Function.h"
65 #include "llvm/IR/GlobalValue.h"
66 #include "llvm/IR/IRBuilder.h"
67 #include "llvm/IR/Instructions.h"
68 #include "llvm/IR/Intrinsics.h"
69 #include "llvm/IR/IntrinsicsPowerPC.h"
70 #include "llvm/IR/Module.h"
71 #include "llvm/IR/Type.h"
72 #include "llvm/IR/Use.h"
73 #include "llvm/IR/Value.h"
74 #include "llvm/MC/MCContext.h"
75 #include "llvm/MC/MCExpr.h"
76 #include "llvm/MC/MCRegisterInfo.h"
77 #include "llvm/MC/MCSectionXCOFF.h"
78 #include "llvm/MC/MCSymbolXCOFF.h"
79 #include "llvm/Support/AtomicOrdering.h"
80 #include "llvm/Support/BranchProbability.h"
81 #include "llvm/Support/Casting.h"
82 #include "llvm/Support/CodeGen.h"
83 #include "llvm/Support/CommandLine.h"
84 #include "llvm/Support/Compiler.h"
85 #include "llvm/Support/Debug.h"
86 #include "llvm/Support/ErrorHandling.h"
87 #include "llvm/Support/Format.h"
88 #include "llvm/Support/KnownBits.h"
89 #include "llvm/Support/MachineValueType.h"
90 #include "llvm/Support/MathExtras.h"
91 #include "llvm/Support/raw_ostream.h"
92 #include "llvm/Target/TargetMachine.h"
93 #include "llvm/Target/TargetOptions.h"
94 #include <algorithm>
95 #include <cassert>
96 #include <cstdint>
97 #include <iterator>
98 #include <list>
99 #include <utility>
100 #include <vector>
101 
102 using namespace llvm;
103 
104 #define DEBUG_TYPE "ppc-lowering"
105 
106 static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
107 cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);
108 
109 static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
110 cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden);
111 
112 static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
113 cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);
114 
115 static cl::opt<bool> DisableSCO("disable-ppc-sco",
116 cl::desc("disable sibling call optimization on ppc"), cl::Hidden);
117 
118 static cl::opt<bool> DisableInnermostLoopAlign32("disable-ppc-innermost-loop-align32",
119 cl::desc("don't always align innermost loop to 32 bytes on ppc"), cl::Hidden);
120 
121 static cl::opt<bool> UseAbsoluteJumpTables("ppc-use-absolute-jumptables",
122 cl::desc("use absolute jump tables on ppc"), cl::Hidden);
123 
124 // TODO - Remove this option if soft fp128 has been fully supported .
125 static cl::opt<bool>
126     EnableSoftFP128("enable-soft-fp128",
127                     cl::desc("temp option to enable soft fp128"), cl::Hidden);
128 
129 STATISTIC(NumTailCalls, "Number of tail calls");
130 STATISTIC(NumSiblingCalls, "Number of sibling calls");
131 STATISTIC(ShufflesHandledWithVPERM, "Number of shuffles lowered to a VPERM");
132 STATISTIC(NumDynamicAllocaProbed, "Number of dynamic stack allocation probed");
133 
134 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int);
135 
136 static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl);
137 
138 // FIXME: Remove this once the bug has been fixed!
139 extern cl::opt<bool> ANDIGlueBug;
140 
141 PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
142                                      const PPCSubtarget &STI)
143     : TargetLowering(TM), Subtarget(STI) {
144   // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
145   // arguments are at least 4/8 bytes aligned.
146   bool isPPC64 = Subtarget.isPPC64();
147   setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4));
148 
149   // Set up the register classes.
150   addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
151   if (!useSoftFloat()) {
152     if (hasSPE()) {
153       addRegisterClass(MVT::f32, &PPC::GPRCRegClass);
154       addRegisterClass(MVT::f64, &PPC::SPERCRegClass);
155     } else {
156       addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
157       addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
158     }
159   }
160 
161   // Match BITREVERSE to customized fast code sequence in the td file.
162   setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
163   setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
164 
165   // Sub-word ATOMIC_CMP_SWAP need to ensure that the input is zero-extended.
166   setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
167 
168   // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
169   for (MVT VT : MVT::integer_valuetypes()) {
170     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
171     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
172   }
173 
174   if (Subtarget.isISA3_0()) {
175     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Legal);
176     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Legal);
177     setTruncStoreAction(MVT::f64, MVT::f16, Legal);
178     setTruncStoreAction(MVT::f32, MVT::f16, Legal);
179   } else {
180     // No extending loads from f16 or HW conversions back and forth.
181     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
182     setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
183     setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
184     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
185     setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
186     setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
187     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
188     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
189   }
190 
191   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
192 
193   // PowerPC has pre-inc load and store's.
194   setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
195   setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
196   setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
197   setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
198   setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
199   setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
200   setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
201   setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
202   setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
203   setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
204   if (!Subtarget.hasSPE()) {
205     setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
206     setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
207     setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
208     setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
209   }
210 
211   // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
212   const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
213   for (MVT VT : ScalarIntVTs) {
214     setOperationAction(ISD::ADDC, VT, Legal);
215     setOperationAction(ISD::ADDE, VT, Legal);
216     setOperationAction(ISD::SUBC, VT, Legal);
217     setOperationAction(ISD::SUBE, VT, Legal);
218   }
219 
220   if (Subtarget.useCRBits()) {
221     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
222 
223     if (isPPC64 || Subtarget.hasFPCVT()) {
224       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Promote);
225       AddPromotedToType(ISD::STRICT_SINT_TO_FP, MVT::i1,
226                         isPPC64 ? MVT::i64 : MVT::i32);
227       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Promote);
228       AddPromotedToType(ISD::STRICT_UINT_TO_FP, MVT::i1,
229                         isPPC64 ? MVT::i64 : MVT::i32);
230 
231       setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
232       AddPromotedToType (ISD::SINT_TO_FP, MVT::i1,
233                          isPPC64 ? MVT::i64 : MVT::i32);
234       setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
235       AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
236                         isPPC64 ? MVT::i64 : MVT::i32);
237 
238       setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i1, Promote);
239       AddPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::i1,
240                         isPPC64 ? MVT::i64 : MVT::i32);
241       setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i1, Promote);
242       AddPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::i1,
243                         isPPC64 ? MVT::i64 : MVT::i32);
244 
245       setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
246       AddPromotedToType(ISD::FP_TO_SINT, MVT::i1,
247                         isPPC64 ? MVT::i64 : MVT::i32);
248       setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
249       AddPromotedToType(ISD::FP_TO_UINT, MVT::i1,
250                         isPPC64 ? MVT::i64 : MVT::i32);
251     } else {
252       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Custom);
253       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Custom);
254       setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
255       setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
256     }
257 
258     // PowerPC does not support direct load/store of condition registers.
259     setOperationAction(ISD::LOAD, MVT::i1, Custom);
260     setOperationAction(ISD::STORE, MVT::i1, Custom);
261 
262     // FIXME: Remove this once the ANDI glue bug is fixed:
263     if (ANDIGlueBug)
264       setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
265 
266     for (MVT VT : MVT::integer_valuetypes()) {
267       setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
268       setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
269       setTruncStoreAction(VT, MVT::i1, Expand);
270     }
271 
272     addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
273   }
274 
275   // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
276   // PPC (the libcall is not available).
277   setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
278   setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);
279   setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::ppcf128, Custom);
280   setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::ppcf128, Custom);
281 
282   // We do not currently implement these libm ops for PowerPC.
283   setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
284   setOperationAction(ISD::FCEIL,  MVT::ppcf128, Expand);
285   setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
286   setOperationAction(ISD::FRINT,  MVT::ppcf128, Expand);
287   setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
288   setOperationAction(ISD::FREM, MVT::ppcf128, Expand);
289 
290   // PowerPC has no SREM/UREM instructions unless we are on P9
291   // On P9 we may use a hardware instruction to compute the remainder.
292   // When the result of both the remainder and the division is required it is
293   // more efficient to compute the remainder from the result of the division
294   // rather than use the remainder instruction. The instructions are legalized
295   // directly because the DivRemPairsPass performs the transformation at the IR
296   // level.
297   if (Subtarget.isISA3_0()) {
298     setOperationAction(ISD::SREM, MVT::i32, Legal);
299     setOperationAction(ISD::UREM, MVT::i32, Legal);
300     setOperationAction(ISD::SREM, MVT::i64, Legal);
301     setOperationAction(ISD::UREM, MVT::i64, Legal);
302   } else {
303     setOperationAction(ISD::SREM, MVT::i32, Expand);
304     setOperationAction(ISD::UREM, MVT::i32, Expand);
305     setOperationAction(ISD::SREM, MVT::i64, Expand);
306     setOperationAction(ISD::UREM, MVT::i64, Expand);
307   }
308 
309   // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
310   setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
311   setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
312   setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
313   setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
314   setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
315   setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
316   setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
317   setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
318 
319   // Handle constrained floating-point operations of scalar.
320   // TODO: Handle SPE specific operation.
321   setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
322   setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
323   setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
324   setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
325   setOperationAction(ISD::STRICT_FMA, MVT::f32, Legal);
326   setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
327 
328   setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
329   setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
330   setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
331   setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);
332   setOperationAction(ISD::STRICT_FMA, MVT::f64, Legal);
333   if (Subtarget.hasVSX()) {
334     setOperationAction(ISD::STRICT_FRINT, MVT::f32, Legal);
335     setOperationAction(ISD::STRICT_FRINT, MVT::f64, Legal);
336   }
337 
338   if (Subtarget.hasFSQRT()) {
339     setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
340     setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);
341   }
342 
343   if (Subtarget.hasFPRND()) {
344     setOperationAction(ISD::STRICT_FFLOOR, MVT::f32, Legal);
345     setOperationAction(ISD::STRICT_FCEIL,  MVT::f32, Legal);
346     setOperationAction(ISD::STRICT_FTRUNC, MVT::f32, Legal);
347     setOperationAction(ISD::STRICT_FROUND, MVT::f32, Legal);
348 
349     setOperationAction(ISD::STRICT_FFLOOR, MVT::f64, Legal);
350     setOperationAction(ISD::STRICT_FCEIL,  MVT::f64, Legal);
351     setOperationAction(ISD::STRICT_FTRUNC, MVT::f64, Legal);
352     setOperationAction(ISD::STRICT_FROUND, MVT::f64, Legal);
353   }
354 
355   // We don't support sin/cos/sqrt/fmod/pow
356   setOperationAction(ISD::FSIN , MVT::f64, Expand);
357   setOperationAction(ISD::FCOS , MVT::f64, Expand);
358   setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
359   setOperationAction(ISD::FREM , MVT::f64, Expand);
360   setOperationAction(ISD::FPOW , MVT::f64, Expand);
361   setOperationAction(ISD::FSIN , MVT::f32, Expand);
362   setOperationAction(ISD::FCOS , MVT::f32, Expand);
363   setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
364   setOperationAction(ISD::FREM , MVT::f32, Expand);
365   setOperationAction(ISD::FPOW , MVT::f32, Expand);
366   if (Subtarget.hasSPE()) {
367     setOperationAction(ISD::FMA  , MVT::f64, Expand);
368     setOperationAction(ISD::FMA  , MVT::f32, Expand);
369   } else {
370     setOperationAction(ISD::FMA  , MVT::f64, Legal);
371     setOperationAction(ISD::FMA  , MVT::f32, Legal);
372   }
373 
374   if (Subtarget.hasSPE())
375     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
376 
377   setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
378 
379   // If we're enabling GP optimizations, use hardware square root
380   if (!Subtarget.hasFSQRT() &&
381       !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
382         Subtarget.hasFRE()))
383     setOperationAction(ISD::FSQRT, MVT::f64, Expand);
384 
385   if (!Subtarget.hasFSQRT() &&
386       !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
387         Subtarget.hasFRES()))
388     setOperationAction(ISD::FSQRT, MVT::f32, Expand);
389 
390   if (Subtarget.hasFCPSGN()) {
391     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
392     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
393   } else {
394     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
395     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
396   }
397 
398   if (Subtarget.hasFPRND()) {
399     setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
400     setOperationAction(ISD::FCEIL,  MVT::f64, Legal);
401     setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
402     setOperationAction(ISD::FROUND, MVT::f64, Legal);
403 
404     setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
405     setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
406     setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
407     setOperationAction(ISD::FROUND, MVT::f32, Legal);
408   }
409 
410   // PowerPC does not have BSWAP, but we can use vector BSWAP instruction xxbrd
411   // to speed up scalar BSWAP64.
412   // CTPOP or CTTZ were introduced in P8/P9 respectively
413   setOperationAction(ISD::BSWAP, MVT::i32  , Expand);
414   if (Subtarget.hasP9Vector())
415     setOperationAction(ISD::BSWAP, MVT::i64  , Custom);
416   else
417     setOperationAction(ISD::BSWAP, MVT::i64  , Expand);
418   if (Subtarget.isISA3_0()) {
419     setOperationAction(ISD::CTTZ , MVT::i32  , Legal);
420     setOperationAction(ISD::CTTZ , MVT::i64  , Legal);
421   } else {
422     setOperationAction(ISD::CTTZ , MVT::i32  , Expand);
423     setOperationAction(ISD::CTTZ , MVT::i64  , Expand);
424   }
425 
426   if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
427     setOperationAction(ISD::CTPOP, MVT::i32  , Legal);
428     setOperationAction(ISD::CTPOP, MVT::i64  , Legal);
429   } else {
430     setOperationAction(ISD::CTPOP, MVT::i32  , Expand);
431     setOperationAction(ISD::CTPOP, MVT::i64  , Expand);
432   }
433 
434   // PowerPC does not have ROTR
435   setOperationAction(ISD::ROTR, MVT::i32   , Expand);
436   setOperationAction(ISD::ROTR, MVT::i64   , Expand);
437 
438   if (!Subtarget.useCRBits()) {
439     // PowerPC does not have Select
440     setOperationAction(ISD::SELECT, MVT::i32, Expand);
441     setOperationAction(ISD::SELECT, MVT::i64, Expand);
442     setOperationAction(ISD::SELECT, MVT::f32, Expand);
443     setOperationAction(ISD::SELECT, MVT::f64, Expand);
444   }
445 
446   // PowerPC wants to turn select_cc of FP into fsel when possible.
447   setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
448   setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
449 
450   // PowerPC wants to optimize integer setcc a bit
451   if (!Subtarget.useCRBits())
452     setOperationAction(ISD::SETCC, MVT::i32, Custom);
453 
454   if (Subtarget.hasFPU()) {
455     setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Legal);
456     setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Legal);
457     setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Legal);
458 
459     setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal);
460     setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal);
461     setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Legal);
462   }
463 
464   // PowerPC does not have BRCOND which requires SetCC
465   if (!Subtarget.useCRBits())
466     setOperationAction(ISD::BRCOND, MVT::Other, Expand);
467 
468   setOperationAction(ISD::BR_JT,  MVT::Other, Expand);
469 
470   if (Subtarget.hasSPE()) {
471     // SPE has built-in conversions
472     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Legal);
473     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Legal);
474     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Legal);
475     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
476     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
477     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
478   } else {
479     // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
480     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
481     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
482 
483     // PowerPC does not have [U|S]INT_TO_FP
484     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Expand);
485     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Expand);
486     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
487     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
488   }
489 
490   if (Subtarget.hasDirectMove() && isPPC64) {
491     setOperationAction(ISD::BITCAST, MVT::f32, Legal);
492     setOperationAction(ISD::BITCAST, MVT::i32, Legal);
493     setOperationAction(ISD::BITCAST, MVT::i64, Legal);
494     setOperationAction(ISD::BITCAST, MVT::f64, Legal);
495     if (TM.Options.UnsafeFPMath) {
496       setOperationAction(ISD::LRINT, MVT::f64, Legal);
497       setOperationAction(ISD::LRINT, MVT::f32, Legal);
498       setOperationAction(ISD::LLRINT, MVT::f64, Legal);
499       setOperationAction(ISD::LLRINT, MVT::f32, Legal);
500       setOperationAction(ISD::LROUND, MVT::f64, Legal);
501       setOperationAction(ISD::LROUND, MVT::f32, Legal);
502       setOperationAction(ISD::LLROUND, MVT::f64, Legal);
503       setOperationAction(ISD::LLROUND, MVT::f32, Legal);
504     }
505   } else {
506     setOperationAction(ISD::BITCAST, MVT::f32, Expand);
507     setOperationAction(ISD::BITCAST, MVT::i32, Expand);
508     setOperationAction(ISD::BITCAST, MVT::i64, Expand);
509     setOperationAction(ISD::BITCAST, MVT::f64, Expand);
510   }
511 
512   // We cannot sextinreg(i1).  Expand to shifts.
513   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
514 
515   // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
516   // SjLj exception handling but a light-weight setjmp/longjmp replacement to
517   // support continuation, user-level threading, and etc.. As a result, no
518   // other SjLj exception interfaces are implemented and please don't build
519   // your own exception handling based on them.
520   // LLVM/Clang supports zero-cost DWARF exception handling.
521   setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
522   setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
523 
524   // We want to legalize GlobalAddress and ConstantPool nodes into the
525   // appropriate instructions to materialize the address.
526   setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
527   setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
528   setOperationAction(ISD::BlockAddress,  MVT::i32, Custom);
529   setOperationAction(ISD::ConstantPool,  MVT::i32, Custom);
530   setOperationAction(ISD::JumpTable,     MVT::i32, Custom);
531   setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
532   setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
533   setOperationAction(ISD::BlockAddress,  MVT::i64, Custom);
534   setOperationAction(ISD::ConstantPool,  MVT::i64, Custom);
535   setOperationAction(ISD::JumpTable,     MVT::i64, Custom);
536 
537   // TRAP is legal.
538   setOperationAction(ISD::TRAP, MVT::Other, Legal);
539 
540   // TRAMPOLINE is custom lowered.
541   setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
542   setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
543 
544   // VASTART needs to be custom lowered to use the VarArgsFrameIndex
545   setOperationAction(ISD::VASTART           , MVT::Other, Custom);
546 
547   if (Subtarget.is64BitELFABI()) {
548     // VAARG always uses double-word chunks, so promote anything smaller.
549     setOperationAction(ISD::VAARG, MVT::i1, Promote);
550     AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
551     setOperationAction(ISD::VAARG, MVT::i8, Promote);
552     AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
553     setOperationAction(ISD::VAARG, MVT::i16, Promote);
554     AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
555     setOperationAction(ISD::VAARG, MVT::i32, Promote);
556     AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
557     setOperationAction(ISD::VAARG, MVT::Other, Expand);
558   } else if (Subtarget.is32BitELFABI()) {
559     // VAARG is custom lowered with the 32-bit SVR4 ABI.
560     setOperationAction(ISD::VAARG, MVT::Other, Custom);
561     setOperationAction(ISD::VAARG, MVT::i64, Custom);
562   } else
563     setOperationAction(ISD::VAARG, MVT::Other, Expand);
564 
565   // VACOPY is custom lowered with the 32-bit SVR4 ABI.
566   if (Subtarget.is32BitELFABI())
567     setOperationAction(ISD::VACOPY            , MVT::Other, Custom);
568   else
569     setOperationAction(ISD::VACOPY            , MVT::Other, Expand);
570 
571   // Use the default implementation.
572   setOperationAction(ISD::VAEND             , MVT::Other, Expand);
573   setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
574   setOperationAction(ISD::STACKRESTORE      , MVT::Other, Custom);
575   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
576   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64  , Custom);
577   setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
578   setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
579   setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
580   setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);
581 
582   // We want to custom lower some of our intrinsics.
583   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
584 
585   // To handle counter-based loop conditions.
586   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);
587 
588   setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
589   setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
590   setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
591   setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
592 
593   // Comparisons that require checking two conditions.
594   if (Subtarget.hasSPE()) {
595     setCondCodeAction(ISD::SETO, MVT::f32, Expand);
596     setCondCodeAction(ISD::SETO, MVT::f64, Expand);
597     setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
598     setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
599   }
600   setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
601   setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
602   setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
603   setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
604   setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
605   setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
606   setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
607   setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
608   setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
609   setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
610   setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
611   setCondCodeAction(ISD::SETONE, MVT::f64, Expand);
612 
613   setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
614   setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
615 
616   if (Subtarget.has64BitSupport()) {
617     // They also have instructions for converting between i64 and fp.
618     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
619     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Expand);
620     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
621     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Expand);
622     setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
623     setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
624     setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
625     setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
626     // This is just the low 32 bits of a (signed) fp->i64 conversion.
627     // We cannot do this with Promote because i64 is not a legal type.
628     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
629     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
630 
631     if (Subtarget.hasLFIWAX() || Subtarget.isPPC64()) {
632       setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
633       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
634     }
635   } else {
636     // PowerPC does not have FP_TO_UINT on 32-bit implementations.
637     if (Subtarget.hasSPE()) {
638       setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Legal);
639       setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
640     } else {
641       setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Expand);
642       setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
643     }
644   }
645 
646   // With the instructions enabled under FPCVT, we can do everything.
647   if (Subtarget.hasFPCVT()) {
648     if (Subtarget.has64BitSupport()) {
649       setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
650       setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
651       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
652       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);
653       setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
654       setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
655       setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
656       setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
657     }
658 
659     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
660     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
661     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
662     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
663     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
664     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
665     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
666     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
667   }
668 
669   if (Subtarget.use64BitRegs()) {
670     // 64-bit PowerPC implementations can support i64 types directly
671     addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
672     // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
673     setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
674     // 64-bit PowerPC wants to expand i128 shifts itself.
675     setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
676     setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
677     setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
678   } else {
679     // 32-bit PowerPC wants to expand i64 shifts itself.
680     setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
681     setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
682     setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
683   }
684 
685   // PowerPC has better expansions for funnel shifts than the generic
686   // TargetLowering::expandFunnelShift.
687   if (Subtarget.has64BitSupport()) {
688     setOperationAction(ISD::FSHL, MVT::i64, Custom);
689     setOperationAction(ISD::FSHR, MVT::i64, Custom);
690   }
691   setOperationAction(ISD::FSHL, MVT::i32, Custom);
692   setOperationAction(ISD::FSHR, MVT::i32, Custom);
693 
694   if (Subtarget.hasVSX()) {
695     setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
696     setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
697     setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
698     setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
699   }
700 
701   if (Subtarget.hasAltivec()) {
702     for (MVT VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
703       setOperationAction(ISD::SADDSAT, VT, Legal);
704       setOperationAction(ISD::SSUBSAT, VT, Legal);
705       setOperationAction(ISD::UADDSAT, VT, Legal);
706       setOperationAction(ISD::USUBSAT, VT, Legal);
707     }
708     // First set operation action for all vector types to expand. Then we
709     // will selectively turn on ones that can be effectively codegen'd.
710     for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
711       // add/sub are legal for all supported vector VT's.
712       setOperationAction(ISD::ADD, VT, Legal);
713       setOperationAction(ISD::SUB, VT, Legal);
714 
715       // For v2i64, these are only valid with P8Vector. This is corrected after
716       // the loop.
717       if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) {
718         setOperationAction(ISD::SMAX, VT, Legal);
719         setOperationAction(ISD::SMIN, VT, Legal);
720         setOperationAction(ISD::UMAX, VT, Legal);
721         setOperationAction(ISD::UMIN, VT, Legal);
722       }
723       else {
724         setOperationAction(ISD::SMAX, VT, Expand);
725         setOperationAction(ISD::SMIN, VT, Expand);
726         setOperationAction(ISD::UMAX, VT, Expand);
727         setOperationAction(ISD::UMIN, VT, Expand);
728       }
729 
730       if (Subtarget.hasVSX()) {
731         setOperationAction(ISD::FMAXNUM, VT, Legal);
732         setOperationAction(ISD::FMINNUM, VT, Legal);
733       }
734 
735       // Vector instructions introduced in P8
736       if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
737         setOperationAction(ISD::CTPOP, VT, Legal);
738         setOperationAction(ISD::CTLZ, VT, Legal);
739       }
740       else {
741         setOperationAction(ISD::CTPOP, VT, Expand);
742         setOperationAction(ISD::CTLZ, VT, Expand);
743       }
744 
745       // Vector instructions introduced in P9
746       if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
747         setOperationAction(ISD::CTTZ, VT, Legal);
748       else
749         setOperationAction(ISD::CTTZ, VT, Expand);
750 
751       // We promote all shuffles to v16i8.
752       setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
753       AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);
754 
755       // We promote all non-typed operations to v4i32.
756       setOperationAction(ISD::AND   , VT, Promote);
757       AddPromotedToType (ISD::AND   , VT, MVT::v4i32);
758       setOperationAction(ISD::OR    , VT, Promote);
759       AddPromotedToType (ISD::OR    , VT, MVT::v4i32);
760       setOperationAction(ISD::XOR   , VT, Promote);
761       AddPromotedToType (ISD::XOR   , VT, MVT::v4i32);
762       setOperationAction(ISD::LOAD  , VT, Promote);
763       AddPromotedToType (ISD::LOAD  , VT, MVT::v4i32);
764       setOperationAction(ISD::SELECT, VT, Promote);
765       AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
766       setOperationAction(ISD::VSELECT, VT, Legal);
767       setOperationAction(ISD::SELECT_CC, VT, Promote);
768       AddPromotedToType (ISD::SELECT_CC, VT, MVT::v4i32);
769       setOperationAction(ISD::STORE, VT, Promote);
770       AddPromotedToType (ISD::STORE, VT, MVT::v4i32);
771 
772       // No other operations are legal.
773       setOperationAction(ISD::MUL , VT, Expand);
774       setOperationAction(ISD::SDIV, VT, Expand);
775       setOperationAction(ISD::SREM, VT, Expand);
776       setOperationAction(ISD::UDIV, VT, Expand);
777       setOperationAction(ISD::UREM, VT, Expand);
778       setOperationAction(ISD::FDIV, VT, Expand);
779       setOperationAction(ISD::FREM, VT, Expand);
780       setOperationAction(ISD::FNEG, VT, Expand);
781       setOperationAction(ISD::FSQRT, VT, Expand);
782       setOperationAction(ISD::FLOG, VT, Expand);
783       setOperationAction(ISD::FLOG10, VT, Expand);
784       setOperationAction(ISD::FLOG2, VT, Expand);
785       setOperationAction(ISD::FEXP, VT, Expand);
786       setOperationAction(ISD::FEXP2, VT, Expand);
787       setOperationAction(ISD::FSIN, VT, Expand);
788       setOperationAction(ISD::FCOS, VT, Expand);
789       setOperationAction(ISD::FABS, VT, Expand);
790       setOperationAction(ISD::FFLOOR, VT, Expand);
791       setOperationAction(ISD::FCEIL,  VT, Expand);
792       setOperationAction(ISD::FTRUNC, VT, Expand);
793       setOperationAction(ISD::FRINT,  VT, Expand);
794       setOperationAction(ISD::FNEARBYINT, VT, Expand);
795       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
796       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
797       setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
798       setOperationAction(ISD::MULHU, VT, Expand);
799       setOperationAction(ISD::MULHS, VT, Expand);
800       setOperationAction(ISD::UMUL_LOHI, VT, Expand);
801       setOperationAction(ISD::SMUL_LOHI, VT, Expand);
802       setOperationAction(ISD::UDIVREM, VT, Expand);
803       setOperationAction(ISD::SDIVREM, VT, Expand);
804       setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
805       setOperationAction(ISD::FPOW, VT, Expand);
806       setOperationAction(ISD::BSWAP, VT, Expand);
807       setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
808       setOperationAction(ISD::ROTL, VT, Expand);
809       setOperationAction(ISD::ROTR, VT, Expand);
810 
811       for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
812         setTruncStoreAction(VT, InnerVT, Expand);
813         setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
814         setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
815         setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
816       }
817     }
818     setOperationAction(ISD::SELECT_CC, MVT::v4i32, Expand);
819     if (!Subtarget.hasP8Vector()) {
820       setOperationAction(ISD::SMAX, MVT::v2i64, Expand);
821       setOperationAction(ISD::SMIN, MVT::v2i64, Expand);
822       setOperationAction(ISD::UMAX, MVT::v2i64, Expand);
823       setOperationAction(ISD::UMIN, MVT::v2i64, Expand);
824     }
825 
826     // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
827     // with merges, splats, etc.
828     setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
829 
830     // Vector truncates to sub-word integer that fit in an Altivec/VSX register
831     // are cheap, so handle them before they get expanded to scalar.
832     setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
833     setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
834     setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
835     setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
836     setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);
837 
838     setOperationAction(ISD::AND   , MVT::v4i32, Legal);
839     setOperationAction(ISD::OR    , MVT::v4i32, Legal);
840     setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
841     setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
842     setOperationAction(ISD::SELECT, MVT::v4i32,
843                        Subtarget.useCRBits() ? Legal : Expand);
844     setOperationAction(ISD::STORE , MVT::v4i32, Legal);
845     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal);
846     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32, Legal);
847     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i32, Legal);
848     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Legal);
849     setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
850     setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
851     setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
852     setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
853     setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
854     setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
855     setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
856     setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
857 
858     // Custom lowering ROTL v1i128 to VECTOR_SHUFFLE v16i8.
859     setOperationAction(ISD::ROTL, MVT::v1i128, Custom);
860     // With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w).
861     if (Subtarget.hasAltivec())
862       for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})
863         setOperationAction(ISD::ROTL, VT, Legal);
864     // With hasP8Altivec set, we can lower ISD::ROTL to vrld.
865     if (Subtarget.hasP8Altivec())
866       setOperationAction(ISD::ROTL, MVT::v2i64, Legal);
867 
868     addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
869     addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
870     addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
871     addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);
872 
873     setOperationAction(ISD::MUL, MVT::v4f32, Legal);
874     setOperationAction(ISD::FMA, MVT::v4f32, Legal);
875 
876     if (Subtarget.hasVSX()) {
877       setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
878       setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
879     }
880 
881     if (Subtarget.hasP8Altivec())
882       setOperationAction(ISD::MUL, MVT::v4i32, Legal);
883     else
884       setOperationAction(ISD::MUL, MVT::v4i32, Custom);
885 
886     if (Subtarget.isISA3_1()) {
887       setOperationAction(ISD::MUL, MVT::v2i64, Legal);
888       setOperationAction(ISD::MULHS, MVT::v2i64, Legal);
889       setOperationAction(ISD::MULHU, MVT::v2i64, Legal);
890       setOperationAction(ISD::MULHS, MVT::v4i32, Legal);
891       setOperationAction(ISD::MULHU, MVT::v4i32, Legal);
892       setOperationAction(ISD::UDIV, MVT::v2i64, Legal);
893       setOperationAction(ISD::SDIV, MVT::v2i64, Legal);
894       setOperationAction(ISD::UDIV, MVT::v4i32, Legal);
895       setOperationAction(ISD::SDIV, MVT::v4i32, Legal);
896       setOperationAction(ISD::UREM, MVT::v2i64, Legal);
897       setOperationAction(ISD::SREM, MVT::v2i64, Legal);
898       setOperationAction(ISD::UREM, MVT::v4i32, Legal);
899       setOperationAction(ISD::SREM, MVT::v4i32, Legal);
900       setOperationAction(ISD::UREM, MVT::v1i128, Legal);
901       setOperationAction(ISD::SREM, MVT::v1i128, Legal);
902       setOperationAction(ISD::UDIV, MVT::v1i128, Legal);
903       setOperationAction(ISD::SDIV, MVT::v1i128, Legal);
904       setOperationAction(ISD::ROTL, MVT::v1i128, Legal);
905     }
906 
907     setOperationAction(ISD::MUL, MVT::v8i16, Legal);
908     setOperationAction(ISD::MUL, MVT::v16i8, Custom);
909 
910     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
911     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);
912 
913     setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
914     setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
915     setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
916     setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
917 
918     // Altivec does not contain unordered floating-point compare instructions
919     setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
920     setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
921     setCondCodeAction(ISD::SETO,   MVT::v4f32, Expand);
922     setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);
923 
924     if (Subtarget.hasVSX()) {
925       setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
926       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
927       if (Subtarget.hasP8Vector()) {
928         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
929         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
930       }
931       if (Subtarget.hasDirectMove() && isPPC64) {
932         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
933         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
934         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
935         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
936         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
937         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
938         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
939         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
940       }
941       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
942 
943       // The nearbyint variants are not allowed to raise the inexact exception
944       // so we can only code-gen them with unsafe math.
945       if (TM.Options.UnsafeFPMath) {
946         setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
947         setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
948       }
949 
950       setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
951       setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
952       setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
953       setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
954       setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
955       setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
956       setOperationAction(ISD::FROUND, MVT::f64, Legal);
957       setOperationAction(ISD::FRINT, MVT::f64, Legal);
958 
959       setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
960       setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
961       setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
962       setOperationAction(ISD::FROUND, MVT::f32, Legal);
963       setOperationAction(ISD::FRINT, MVT::f32, Legal);
964 
965       setOperationAction(ISD::MUL, MVT::v2f64, Legal);
966       setOperationAction(ISD::FMA, MVT::v2f64, Legal);
967 
968       setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
969       setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
970 
971       // Share the Altivec comparison restrictions.
972       setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
973       setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
974       setCondCodeAction(ISD::SETO,   MVT::v2f64, Expand);
975       setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);
976 
977       setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
978       setOperationAction(ISD::STORE, MVT::v2f64, Legal);
979 
980       setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);
981 
982       if (Subtarget.hasP8Vector())
983         addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);
984 
985       addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);
986 
987       addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
988       addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
989       addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);
990 
991       if (Subtarget.hasP8Altivec()) {
992         setOperationAction(ISD::SHL, MVT::v2i64, Legal);
993         setOperationAction(ISD::SRA, MVT::v2i64, Legal);
994         setOperationAction(ISD::SRL, MVT::v2i64, Legal);
995 
996         // 128 bit shifts can be accomplished via 3 instructions for SHL and
997         // SRL, but not for SRA because of the instructions available:
998         // VS{RL} and VS{RL}O. However due to direct move costs, it's not worth
999         // doing
1000         setOperationAction(ISD::SHL, MVT::v1i128, Expand);
1001         setOperationAction(ISD::SRL, MVT::v1i128, Expand);
1002         setOperationAction(ISD::SRA, MVT::v1i128, Expand);
1003 
1004         setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
1005       }
1006       else {
1007         setOperationAction(ISD::SHL, MVT::v2i64, Expand);
1008         setOperationAction(ISD::SRA, MVT::v2i64, Expand);
1009         setOperationAction(ISD::SRL, MVT::v2i64, Expand);
1010 
1011         setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
1012 
1013         // VSX v2i64 only supports non-arithmetic operations.
1014         setOperationAction(ISD::ADD, MVT::v2i64, Expand);
1015         setOperationAction(ISD::SUB, MVT::v2i64, Expand);
1016       }
1017 
1018       if (Subtarget.isISA3_1())
1019         setOperationAction(ISD::SETCC, MVT::v1i128, Legal);
1020       else
1021         setOperationAction(ISD::SETCC, MVT::v1i128, Expand);
1022 
1023       setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
1024       AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64);
1025       setOperationAction(ISD::STORE, MVT::v2i64, Promote);
1026       AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64);
1027 
1028       setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);
1029 
1030       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i64, Legal);
1031       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i64, Legal);
1032       setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i64, Legal);
1033       setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i64, Legal);
1034       setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
1035       setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
1036       setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
1037       setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
1038 
1039       // Custom handling for partial vectors of integers converted to
1040       // floating point. We already have optimal handling for v2i32 through
1041       // the DAG combine, so those aren't necessary.
1042       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i8, Custom);
1043       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i8, Custom);
1044       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i16, Custom);
1045       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i16, Custom);
1046       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i8, Custom);
1047       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i8, Custom);
1048       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i16, Custom);
1049       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i16, Custom);
1050       setOperationAction(ISD::UINT_TO_FP, MVT::v2i8, Custom);
1051       setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
1052       setOperationAction(ISD::UINT_TO_FP, MVT::v2i16, Custom);
1053       setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
1054       setOperationAction(ISD::SINT_TO_FP, MVT::v2i8, Custom);
1055       setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Custom);
1056       setOperationAction(ISD::SINT_TO_FP, MVT::v2i16, Custom);
1057       setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
1058 
1059       setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
1060       setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
1061       setOperationAction(ISD::FABS, MVT::v4f32, Legal);
1062       setOperationAction(ISD::FABS, MVT::v2f64, Legal);
1063       setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
1064       setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Legal);
1065 
1066       if (Subtarget.hasDirectMove())
1067         setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
1068       setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
1069 
1070       // Handle constrained floating-point operations of vector.
1071       // The predictor is `hasVSX` because altivec instruction has
1072       // no exception but VSX vector instruction has.
1073       setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
1074       setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
1075       setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
1076       setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
1077       setOperationAction(ISD::STRICT_FMA, MVT::v4f32, Legal);
1078       setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
1079       setOperationAction(ISD::STRICT_FMAXNUM, MVT::v4f32, Legal);
1080       setOperationAction(ISD::STRICT_FMINNUM, MVT::v4f32, Legal);
1081       setOperationAction(ISD::STRICT_FRINT, MVT::v4f32, Legal);
1082       setOperationAction(ISD::STRICT_FFLOOR, MVT::v4f32, Legal);
1083       setOperationAction(ISD::STRICT_FCEIL,  MVT::v4f32, Legal);
1084       setOperationAction(ISD::STRICT_FTRUNC, MVT::v4f32, Legal);
1085       setOperationAction(ISD::STRICT_FROUND, MVT::v4f32, Legal);
1086 
1087       setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
1088       setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
1089       setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
1090       setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
1091       setOperationAction(ISD::STRICT_FMA, MVT::v2f64, Legal);
1092       setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
1093       setOperationAction(ISD::STRICT_FMAXNUM, MVT::v2f64, Legal);
1094       setOperationAction(ISD::STRICT_FMINNUM, MVT::v2f64, Legal);
1095       setOperationAction(ISD::STRICT_FRINT, MVT::v2f64, Legal);
1096       setOperationAction(ISD::STRICT_FFLOOR, MVT::v2f64, Legal);
1097       setOperationAction(ISD::STRICT_FCEIL,  MVT::v2f64, Legal);
1098       setOperationAction(ISD::STRICT_FTRUNC, MVT::v2f64, Legal);
1099       setOperationAction(ISD::STRICT_FROUND, MVT::v2f64, Legal);
1100 
1101       addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
1102     }
1103 
1104     if (Subtarget.hasP8Altivec()) {
1105       addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
1106       addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
1107     }
1108 
1109     if (Subtarget.hasP9Vector()) {
1110       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
1111       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
1112 
1113       // 128 bit shifts can be accomplished via 3 instructions for SHL and
1114       // SRL, but not for SRA because of the instructions available:
1115       // VS{RL} and VS{RL}O.
1116       setOperationAction(ISD::SHL, MVT::v1i128, Legal);
1117       setOperationAction(ISD::SRL, MVT::v1i128, Legal);
1118       setOperationAction(ISD::SRA, MVT::v1i128, Expand);
1119 
1120       addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
1121       setOperationAction(ISD::FADD, MVT::f128, Legal);
1122       setOperationAction(ISD::FSUB, MVT::f128, Legal);
1123       setOperationAction(ISD::FDIV, MVT::f128, Legal);
1124       setOperationAction(ISD::FMUL, MVT::f128, Legal);
1125       setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
1126       // No extending loads to f128 on PPC.
1127       for (MVT FPT : MVT::fp_valuetypes())
1128         setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
1129       setOperationAction(ISD::FMA, MVT::f128, Legal);
1130       setCondCodeAction(ISD::SETULT, MVT::f128, Expand);
1131       setCondCodeAction(ISD::SETUGT, MVT::f128, Expand);
1132       setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand);
1133       setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
1134       setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
1135       setCondCodeAction(ISD::SETONE, MVT::f128, Expand);
1136 
1137       setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
1138       setOperationAction(ISD::FRINT, MVT::f128, Legal);
1139       setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
1140       setOperationAction(ISD::FCEIL, MVT::f128, Legal);
1141       setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
1142       setOperationAction(ISD::FROUND, MVT::f128, Legal);
1143 
1144       setOperationAction(ISD::SELECT, MVT::f128, Expand);
1145       setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
1146       setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
1147       setTruncStoreAction(MVT::f128, MVT::f64, Expand);
1148       setTruncStoreAction(MVT::f128, MVT::f32, Expand);
1149       setOperationAction(ISD::BITCAST, MVT::i128, Custom);
1150       // No implementation for these ops for PowerPC.
1151       setOperationAction(ISD::FSIN, MVT::f128, Expand);
1152       setOperationAction(ISD::FCOS, MVT::f128, Expand);
1153       setOperationAction(ISD::FPOW, MVT::f128, Expand);
1154       setOperationAction(ISD::FPOWI, MVT::f128, Expand);
1155       setOperationAction(ISD::FREM, MVT::f128, Expand);
1156 
1157       // Handle constrained floating-point operations of fp128
1158       setOperationAction(ISD::STRICT_FADD, MVT::f128, Legal);
1159       setOperationAction(ISD::STRICT_FSUB, MVT::f128, Legal);
1160       setOperationAction(ISD::STRICT_FMUL, MVT::f128, Legal);
1161       setOperationAction(ISD::STRICT_FDIV, MVT::f128, Legal);
1162       setOperationAction(ISD::STRICT_FMA, MVT::f128, Legal);
1163       setOperationAction(ISD::STRICT_FSQRT, MVT::f128, Legal);
1164       setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Legal);
1165       setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Legal);
1166       setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
1167       setOperationAction(ISD::STRICT_FRINT, MVT::f128, Legal);
1168       setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f128, Legal);
1169       setOperationAction(ISD::STRICT_FFLOOR, MVT::f128, Legal);
1170       setOperationAction(ISD::STRICT_FCEIL, MVT::f128, Legal);
1171       setOperationAction(ISD::STRICT_FTRUNC, MVT::f128, Legal);
1172       setOperationAction(ISD::STRICT_FROUND, MVT::f128, Legal);
1173       setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
1174       setOperationAction(ISD::BSWAP, MVT::v8i16, Legal);
1175       setOperationAction(ISD::BSWAP, MVT::v4i32, Legal);
1176       setOperationAction(ISD::BSWAP, MVT::v2i64, Legal);
1177       setOperationAction(ISD::BSWAP, MVT::v1i128, Legal);
1178     } else if (Subtarget.hasAltivec() && EnableSoftFP128) {
1179       addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
1180 
1181       for (MVT FPT : MVT::fp_valuetypes())
1182         setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
1183 
1184       setOperationAction(ISD::LOAD, MVT::f128, Promote);
1185       setOperationAction(ISD::STORE, MVT::f128, Promote);
1186 
1187       AddPromotedToType(ISD::LOAD, MVT::f128, MVT::v4i32);
1188       AddPromotedToType(ISD::STORE, MVT::f128, MVT::v4i32);
1189 
1190       // Set FADD/FSUB as libcall to avoid the legalizer to expand the
1191       // fp_to_uint and int_to_fp.
1192       setOperationAction(ISD::FADD, MVT::f128, LibCall);
1193       setOperationAction(ISD::FSUB, MVT::f128, LibCall);
1194 
1195       setOperationAction(ISD::FMUL, MVT::f128, Expand);
1196       setOperationAction(ISD::FDIV, MVT::f128, Expand);
1197       setOperationAction(ISD::FNEG, MVT::f128, Expand);
1198       setOperationAction(ISD::FABS, MVT::f128, Expand);
1199       setOperationAction(ISD::FSIN, MVT::f128, Expand);
1200       setOperationAction(ISD::FCOS, MVT::f128, Expand);
1201       setOperationAction(ISD::FPOW, MVT::f128, Expand);
1202       setOperationAction(ISD::FPOWI, MVT::f128, Expand);
1203       setOperationAction(ISD::FREM, MVT::f128, Expand);
1204       setOperationAction(ISD::FSQRT, MVT::f128, Expand);
1205       setOperationAction(ISD::FMA, MVT::f128, Expand);
1206       setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
1207 
1208       setTruncStoreAction(MVT::f128, MVT::f64, Expand);
1209       setTruncStoreAction(MVT::f128, MVT::f32, Expand);
1210 
1211       // Expand the fp_extend if the target type is fp128.
1212       setOperationAction(ISD::FP_EXTEND, MVT::f128, Expand);
1213       setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Expand);
1214 
1215       // Expand the fp_round if the source type is fp128.
1216       for (MVT VT : {MVT::f32, MVT::f64}) {
1217         setOperationAction(ISD::FP_ROUND, VT, Custom);
1218         setOperationAction(ISD::STRICT_FP_ROUND, VT, Custom);
1219       }
1220     }
1221 
1222     if (Subtarget.hasP9Altivec()) {
1223       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
1224       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
1225 
1226       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8,  Legal);
1227       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
1228       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
1229       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8,  Legal);
1230       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal);
1231       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
1232       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
1233     }
1234   }
1235 
1236   if (Subtarget.pairedVectorMemops()) {
1237     addRegisterClass(MVT::v256i1, &PPC::VSRpRCRegClass);
1238     setOperationAction(ISD::LOAD, MVT::v256i1, Custom);
1239     setOperationAction(ISD::STORE, MVT::v256i1, Custom);
1240   }
1241   if (Subtarget.hasMMA()) {
1242     addRegisterClass(MVT::v512i1, &PPC::UACCRCRegClass);
1243     setOperationAction(ISD::LOAD, MVT::v512i1, Custom);
1244     setOperationAction(ISD::STORE, MVT::v512i1, Custom);
1245     setOperationAction(ISD::BUILD_VECTOR, MVT::v512i1, Custom);
1246   }
1247 
1248   if (Subtarget.has64BitSupport())
1249     setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
1250 
1251   if (Subtarget.isISA3_1())
1252     setOperationAction(ISD::SRA, MVT::v1i128, Legal);
1253 
1254   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);
1255 
1256   if (!isPPC64) {
1257     setOperationAction(ISD::ATOMIC_LOAD,  MVT::i64, Expand);
1258     setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
1259   }
1260 
1261   setBooleanContents(ZeroOrOneBooleanContent);
1262 
1263   if (Subtarget.hasAltivec()) {
1264     // Altivec instructions set fields to all zeros or all ones.
1265     setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
1266   }
1267 
1268   if (!isPPC64) {
1269     // These libcalls are not available in 32-bit.
1270     setLibcallName(RTLIB::SHL_I128, nullptr);
1271     setLibcallName(RTLIB::SRL_I128, nullptr);
1272     setLibcallName(RTLIB::SRA_I128, nullptr);
1273   }
1274 
1275   if (!isPPC64)
1276     setMaxAtomicSizeInBitsSupported(32);
1277 
1278   setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);
1279 
1280   // We have target-specific dag combine patterns for the following nodes:
1281   setTargetDAGCombine(ISD::ADD);
1282   setTargetDAGCombine(ISD::SHL);
1283   setTargetDAGCombine(ISD::SRA);
1284   setTargetDAGCombine(ISD::SRL);
1285   setTargetDAGCombine(ISD::MUL);
1286   setTargetDAGCombine(ISD::FMA);
1287   setTargetDAGCombine(ISD::SINT_TO_FP);
1288   setTargetDAGCombine(ISD::BUILD_VECTOR);
1289   if (Subtarget.hasFPCVT())
1290     setTargetDAGCombine(ISD::UINT_TO_FP);
1291   setTargetDAGCombine(ISD::LOAD);
1292   setTargetDAGCombine(ISD::STORE);
1293   setTargetDAGCombine(ISD::BR_CC);
1294   if (Subtarget.useCRBits())
1295     setTargetDAGCombine(ISD::BRCOND);
1296   setTargetDAGCombine(ISD::BSWAP);
1297   setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1298   setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
1299   setTargetDAGCombine(ISD::INTRINSIC_VOID);
1300 
1301   setTargetDAGCombine(ISD::SIGN_EXTEND);
1302   setTargetDAGCombine(ISD::ZERO_EXTEND);
1303   setTargetDAGCombine(ISD::ANY_EXTEND);
1304 
1305   setTargetDAGCombine(ISD::TRUNCATE);
1306   setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1307 
1308 
1309   if (Subtarget.useCRBits()) {
1310     setTargetDAGCombine(ISD::TRUNCATE);
1311     setTargetDAGCombine(ISD::SETCC);
1312     setTargetDAGCombine(ISD::SELECT_CC);
1313   }
1314 
1315   if (Subtarget.hasP9Altivec()) {
1316     setTargetDAGCombine(ISD::ABS);
1317     setTargetDAGCombine(ISD::VSELECT);
1318   }
1319 
1320   setLibcallName(RTLIB::LOG_F128, "logf128");
1321   setLibcallName(RTLIB::LOG2_F128, "log2f128");
1322   setLibcallName(RTLIB::LOG10_F128, "log10f128");
1323   setLibcallName(RTLIB::EXP_F128, "expf128");
1324   setLibcallName(RTLIB::EXP2_F128, "exp2f128");
1325   setLibcallName(RTLIB::SIN_F128, "sinf128");
1326   setLibcallName(RTLIB::COS_F128, "cosf128");
1327   setLibcallName(RTLIB::POW_F128, "powf128");
1328   setLibcallName(RTLIB::FMIN_F128, "fminf128");
1329   setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
1330   setLibcallName(RTLIB::REM_F128, "fmodf128");
1331   setLibcallName(RTLIB::SQRT_F128, "sqrtf128");
1332   setLibcallName(RTLIB::CEIL_F128, "ceilf128");
1333   setLibcallName(RTLIB::FLOOR_F128, "floorf128");
1334   setLibcallName(RTLIB::TRUNC_F128, "truncf128");
1335   setLibcallName(RTLIB::ROUND_F128, "roundf128");
1336   setLibcallName(RTLIB::LROUND_F128, "lroundf128");
1337   setLibcallName(RTLIB::LLROUND_F128, "llroundf128");
1338   setLibcallName(RTLIB::RINT_F128, "rintf128");
1339   setLibcallName(RTLIB::LRINT_F128, "lrintf128");
1340   setLibcallName(RTLIB::LLRINT_F128, "llrintf128");
1341   setLibcallName(RTLIB::NEARBYINT_F128, "nearbyintf128");
1342   setLibcallName(RTLIB::FMA_F128, "fmaf128");
1343 
1344   // With 32 condition bits, we don't need to sink (and duplicate) compares
1345   // aggressively in CodeGenPrep.
1346   if (Subtarget.useCRBits()) {
1347     setHasMultipleConditionRegisters();
1348     setJumpIsExpensive();
1349   }
1350 
1351   setMinFunctionAlignment(Align(4));
1352 
1353   switch (Subtarget.getCPUDirective()) {
1354   default: break;
1355   case PPC::DIR_970:
1356   case PPC::DIR_A2:
1357   case PPC::DIR_E500:
1358   case PPC::DIR_E500mc:
1359   case PPC::DIR_E5500:
1360   case PPC::DIR_PWR4:
1361   case PPC::DIR_PWR5:
1362   case PPC::DIR_PWR5X:
1363   case PPC::DIR_PWR6:
1364   case PPC::DIR_PWR6X:
1365   case PPC::DIR_PWR7:
1366   case PPC::DIR_PWR8:
1367   case PPC::DIR_PWR9:
1368   case PPC::DIR_PWR10:
1369   case PPC::DIR_PWR_FUTURE:
1370     setPrefLoopAlignment(Align(16));
1371     setPrefFunctionAlignment(Align(16));
1372     break;
1373   }
1374 
1375   if (Subtarget.enableMachineScheduler())
1376     setSchedulingPreference(Sched::Source);
1377   else
1378     setSchedulingPreference(Sched::Hybrid);
1379 
1380   computeRegisterProperties(STI.getRegisterInfo());
1381 
1382   // The Freescale cores do better with aggressive inlining of memcpy and
1383   // friends. GCC uses same threshold of 128 bytes (= 32 word stores).
1384   if (Subtarget.getCPUDirective() == PPC::DIR_E500mc ||
1385       Subtarget.getCPUDirective() == PPC::DIR_E5500) {
1386     MaxStoresPerMemset = 32;
1387     MaxStoresPerMemsetOptSize = 16;
1388     MaxStoresPerMemcpy = 32;
1389     MaxStoresPerMemcpyOptSize = 8;
1390     MaxStoresPerMemmove = 32;
1391     MaxStoresPerMemmoveOptSize = 8;
1392   } else if (Subtarget.getCPUDirective() == PPC::DIR_A2) {
1393     // The A2 also benefits from (very) aggressive inlining of memcpy and
1394     // friends. The overhead of a the function call, even when warm, can be
1395     // over one hundred cycles.
1396     MaxStoresPerMemset = 128;
1397     MaxStoresPerMemcpy = 128;
1398     MaxStoresPerMemmove = 128;
1399     MaxLoadsPerMemcmp = 128;
1400   } else {
1401     MaxLoadsPerMemcmp = 8;
1402     MaxLoadsPerMemcmpOptSize = 4;
1403   }
1404 
1405   IsStrictFPEnabled = true;
1406 
1407   // Let the subtarget (CPU) decide if a predictable select is more expensive
1408   // than the corresponding branch. This information is used in CGP to decide
1409   // when to convert selects into branches.
1410   PredictableSelectIsExpensive = Subtarget.isPredictableSelectIsExpensive();
1411 }
1412 
1413 /// getMaxByValAlign - Helper for getByValTypeAlignment to determine
1414 /// the desired ByVal argument alignment.
1415 static void getMaxByValAlign(Type *Ty, Align &MaxAlign, Align MaxMaxAlign) {
1416   if (MaxAlign == MaxMaxAlign)
1417     return;
1418   if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1419     if (MaxMaxAlign >= 32 &&
1420         VTy->getPrimitiveSizeInBits().getFixedSize() >= 256)
1421       MaxAlign = Align(32);
1422     else if (VTy->getPrimitiveSizeInBits().getFixedSize() >= 128 &&
1423              MaxAlign < 16)
1424       MaxAlign = Align(16);
1425   } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1426     Align EltAlign;
1427     getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
1428     if (EltAlign > MaxAlign)
1429       MaxAlign = EltAlign;
1430   } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1431     for (auto *EltTy : STy->elements()) {
1432       Align EltAlign;
1433       getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
1434       if (EltAlign > MaxAlign)
1435         MaxAlign = EltAlign;
1436       if (MaxAlign == MaxMaxAlign)
1437         break;
1438     }
1439   }
1440 }
1441 
1442 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1443 /// function arguments in the caller parameter area.
1444 unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
1445                                                   const DataLayout &DL) const {
1446   // 16byte and wider vectors are passed on 16byte boundary.
1447   // The rest is 8 on PPC64 and 4 on PPC32 boundary.
1448   Align Alignment = Subtarget.isPPC64() ? Align(8) : Align(4);
1449   if (Subtarget.hasAltivec())
1450     getMaxByValAlign(Ty, Alignment, Align(16));
1451   return Alignment.value();
1452 }
1453 
1454 bool PPCTargetLowering::useSoftFloat() const {
1455   return Subtarget.useSoftFloat();
1456 }
1457 
1458 bool PPCTargetLowering::hasSPE() const {
1459   return Subtarget.hasSPE();
1460 }
1461 
1462 bool PPCTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
1463   return VT.isScalarInteger();
1464 }
1465 
1466 const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
1467   switch ((PPCISD::NodeType)Opcode) {
1468   case PPCISD::FIRST_NUMBER:    break;
1469   case PPCISD::FSEL:            return "PPCISD::FSEL";
1470   case PPCISD::XSMAXCDP:        return "PPCISD::XSMAXCDP";
1471   case PPCISD::XSMINCDP:        return "PPCISD::XSMINCDP";
1472   case PPCISD::FCFID:           return "PPCISD::FCFID";
1473   case PPCISD::FCFIDU:          return "PPCISD::FCFIDU";
1474   case PPCISD::FCFIDS:          return "PPCISD::FCFIDS";
1475   case PPCISD::FCFIDUS:         return "PPCISD::FCFIDUS";
1476   case PPCISD::FCTIDZ:          return "PPCISD::FCTIDZ";
1477   case PPCISD::FCTIWZ:          return "PPCISD::FCTIWZ";
1478   case PPCISD::FCTIDUZ:         return "PPCISD::FCTIDUZ";
1479   case PPCISD::FCTIWUZ:         return "PPCISD::FCTIWUZ";
1480   case PPCISD::FP_TO_UINT_IN_VSR:
1481                                 return "PPCISD::FP_TO_UINT_IN_VSR,";
1482   case PPCISD::FP_TO_SINT_IN_VSR:
1483                                 return "PPCISD::FP_TO_SINT_IN_VSR";
1484   case PPCISD::FRE:             return "PPCISD::FRE";
1485   case PPCISD::FRSQRTE:         return "PPCISD::FRSQRTE";
1486   case PPCISD::FTSQRT:
1487     return "PPCISD::FTSQRT";
1488   case PPCISD::FSQRT:
1489     return "PPCISD::FSQRT";
1490   case PPCISD::STFIWX:          return "PPCISD::STFIWX";
1491   case PPCISD::VPERM:           return "PPCISD::VPERM";
1492   case PPCISD::XXSPLT:          return "PPCISD::XXSPLT";
1493   case PPCISD::XXSPLTI_SP_TO_DP:
1494     return "PPCISD::XXSPLTI_SP_TO_DP";
1495   case PPCISD::XXSPLTI32DX:
1496     return "PPCISD::XXSPLTI32DX";
1497   case PPCISD::VECINSERT:       return "PPCISD::VECINSERT";
1498   case PPCISD::XXPERMDI:        return "PPCISD::XXPERMDI";
1499   case PPCISD::VECSHL:          return "PPCISD::VECSHL";
1500   case PPCISD::CMPB:            return "PPCISD::CMPB";
1501   case PPCISD::Hi:              return "PPCISD::Hi";
1502   case PPCISD::Lo:              return "PPCISD::Lo";
1503   case PPCISD::TOC_ENTRY:       return "PPCISD::TOC_ENTRY";
1504   case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
1505   case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
1506   case PPCISD::DYNALLOC:        return "PPCISD::DYNALLOC";
1507   case PPCISD::DYNAREAOFFSET:   return "PPCISD::DYNAREAOFFSET";
1508   case PPCISD::PROBED_ALLOCA:   return "PPCISD::PROBED_ALLOCA";
1509   case PPCISD::GlobalBaseReg:   return "PPCISD::GlobalBaseReg";
1510   case PPCISD::SRL:             return "PPCISD::SRL";
1511   case PPCISD::SRA:             return "PPCISD::SRA";
1512   case PPCISD::SHL:             return "PPCISD::SHL";
1513   case PPCISD::SRA_ADDZE:       return "PPCISD::SRA_ADDZE";
1514   case PPCISD::CALL:            return "PPCISD::CALL";
1515   case PPCISD::CALL_NOP:        return "PPCISD::CALL_NOP";
1516   case PPCISD::CALL_NOTOC:      return "PPCISD::CALL_NOTOC";
1517   case PPCISD::MTCTR:           return "PPCISD::MTCTR";
1518   case PPCISD::BCTRL:           return "PPCISD::BCTRL";
1519   case PPCISD::BCTRL_LOAD_TOC:  return "PPCISD::BCTRL_LOAD_TOC";
1520   case PPCISD::RET_FLAG:        return "PPCISD::RET_FLAG";
1521   case PPCISD::READ_TIME_BASE:  return "PPCISD::READ_TIME_BASE";
1522   case PPCISD::EH_SJLJ_SETJMP:  return "PPCISD::EH_SJLJ_SETJMP";
1523   case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
1524   case PPCISD::MFOCRF:          return "PPCISD::MFOCRF";
1525   case PPCISD::MFVSR:           return "PPCISD::MFVSR";
1526   case PPCISD::MTVSRA:          return "PPCISD::MTVSRA";
1527   case PPCISD::MTVSRZ:          return "PPCISD::MTVSRZ";
1528   case PPCISD::SINT_VEC_TO_FP:  return "PPCISD::SINT_VEC_TO_FP";
1529   case PPCISD::UINT_VEC_TO_FP:  return "PPCISD::UINT_VEC_TO_FP";
1530   case PPCISD::SCALAR_TO_VECTOR_PERMUTED:
1531     return "PPCISD::SCALAR_TO_VECTOR_PERMUTED";
1532   case PPCISD::ANDI_rec_1_EQ_BIT:
1533     return "PPCISD::ANDI_rec_1_EQ_BIT";
1534   case PPCISD::ANDI_rec_1_GT_BIT:
1535     return "PPCISD::ANDI_rec_1_GT_BIT";
1536   case PPCISD::VCMP:            return "PPCISD::VCMP";
1537   case PPCISD::VCMP_rec:        return "PPCISD::VCMP_rec";
1538   case PPCISD::LBRX:            return "PPCISD::LBRX";
1539   case PPCISD::STBRX:           return "PPCISD::STBRX";
1540   case PPCISD::LFIWAX:          return "PPCISD::LFIWAX";
1541   case PPCISD::LFIWZX:          return "PPCISD::LFIWZX";
1542   case PPCISD::LXSIZX:          return "PPCISD::LXSIZX";
1543   case PPCISD::STXSIX:          return "PPCISD::STXSIX";
1544   case PPCISD::VEXTS:           return "PPCISD::VEXTS";
1545   case PPCISD::LXVD2X:          return "PPCISD::LXVD2X";
1546   case PPCISD::STXVD2X:         return "PPCISD::STXVD2X";
1547   case PPCISD::LOAD_VEC_BE:     return "PPCISD::LOAD_VEC_BE";
1548   case PPCISD::STORE_VEC_BE:    return "PPCISD::STORE_VEC_BE";
1549   case PPCISD::ST_VSR_SCAL_INT:
1550                                 return "PPCISD::ST_VSR_SCAL_INT";
1551   case PPCISD::COND_BRANCH:     return "PPCISD::COND_BRANCH";
1552   case PPCISD::BDNZ:            return "PPCISD::BDNZ";
1553   case PPCISD::BDZ:             return "PPCISD::BDZ";
1554   case PPCISD::MFFS:            return "PPCISD::MFFS";
1555   case PPCISD::FADDRTZ:         return "PPCISD::FADDRTZ";
1556   case PPCISD::TC_RETURN:       return "PPCISD::TC_RETURN";
1557   case PPCISD::CR6SET:          return "PPCISD::CR6SET";
1558   case PPCISD::CR6UNSET:        return "PPCISD::CR6UNSET";
1559   case PPCISD::PPC32_GOT:       return "PPCISD::PPC32_GOT";
1560   case PPCISD::PPC32_PICGOT:    return "PPCISD::PPC32_PICGOT";
1561   case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
1562   case PPCISD::LD_GOT_TPREL_L:  return "PPCISD::LD_GOT_TPREL_L";
1563   case PPCISD::ADD_TLS:         return "PPCISD::ADD_TLS";
1564   case PPCISD::ADDIS_TLSGD_HA:  return "PPCISD::ADDIS_TLSGD_HA";
1565   case PPCISD::ADDI_TLSGD_L:    return "PPCISD::ADDI_TLSGD_L";
1566   case PPCISD::GET_TLS_ADDR:    return "PPCISD::GET_TLS_ADDR";
1567   case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
1568   case PPCISD::ADDIS_TLSLD_HA:  return "PPCISD::ADDIS_TLSLD_HA";
1569   case PPCISD::ADDI_TLSLD_L:    return "PPCISD::ADDI_TLSLD_L";
1570   case PPCISD::GET_TLSLD_ADDR:  return "PPCISD::GET_TLSLD_ADDR";
1571   case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
1572   case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
1573   case PPCISD::ADDI_DTPREL_L:   return "PPCISD::ADDI_DTPREL_L";
1574   case PPCISD::PADDI_DTPREL:
1575     return "PPCISD::PADDI_DTPREL";
1576   case PPCISD::VADD_SPLAT:      return "PPCISD::VADD_SPLAT";
1577   case PPCISD::SC:              return "PPCISD::SC";
1578   case PPCISD::CLRBHRB:         return "PPCISD::CLRBHRB";
1579   case PPCISD::MFBHRBE:         return "PPCISD::MFBHRBE";
1580   case PPCISD::RFEBB:           return "PPCISD::RFEBB";
1581   case PPCISD::XXSWAPD:         return "PPCISD::XXSWAPD";
1582   case PPCISD::SWAP_NO_CHAIN:   return "PPCISD::SWAP_NO_CHAIN";
1583   case PPCISD::VABSD:           return "PPCISD::VABSD";
1584   case PPCISD::BUILD_FP128:     return "PPCISD::BUILD_FP128";
1585   case PPCISD::BUILD_SPE64:     return "PPCISD::BUILD_SPE64";
1586   case PPCISD::EXTRACT_SPE:     return "PPCISD::EXTRACT_SPE";
1587   case PPCISD::EXTSWSLI:        return "PPCISD::EXTSWSLI";
1588   case PPCISD::LD_VSX_LH:       return "PPCISD::LD_VSX_LH";
1589   case PPCISD::FP_EXTEND_HALF:  return "PPCISD::FP_EXTEND_HALF";
1590   case PPCISD::MAT_PCREL_ADDR:  return "PPCISD::MAT_PCREL_ADDR";
1591   case PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR:
1592     return "PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR";
1593   case PPCISD::TLS_LOCAL_EXEC_MAT_ADDR:
1594     return "PPCISD::TLS_LOCAL_EXEC_MAT_ADDR";
1595   case PPCISD::ACC_BUILD:       return "PPCISD::ACC_BUILD";
1596   case PPCISD::PAIR_BUILD:      return "PPCISD::PAIR_BUILD";
1597   case PPCISD::EXTRACT_VSX_REG: return "PPCISD::EXTRACT_VSX_REG";
1598   case PPCISD::XXMFACC:         return "PPCISD::XXMFACC";
1599   case PPCISD::LD_SPLAT:        return "PPCISD::LD_SPLAT";
1600   case PPCISD::FNMSUB:          return "PPCISD::FNMSUB";
1601   case PPCISD::STRICT_FADDRTZ:
1602     return "PPCISD::STRICT_FADDRTZ";
1603   case PPCISD::STRICT_FCTIDZ:
1604     return "PPCISD::STRICT_FCTIDZ";
1605   case PPCISD::STRICT_FCTIWZ:
1606     return "PPCISD::STRICT_FCTIWZ";
1607   case PPCISD::STRICT_FCTIDUZ:
1608     return "PPCISD::STRICT_FCTIDUZ";
1609   case PPCISD::STRICT_FCTIWUZ:
1610     return "PPCISD::STRICT_FCTIWUZ";
1611   case PPCISD::STRICT_FCFID:
1612     return "PPCISD::STRICT_FCFID";
1613   case PPCISD::STRICT_FCFIDU:
1614     return "PPCISD::STRICT_FCFIDU";
1615   case PPCISD::STRICT_FCFIDS:
1616     return "PPCISD::STRICT_FCFIDS";
1617   case PPCISD::STRICT_FCFIDUS:
1618     return "PPCISD::STRICT_FCFIDUS";
1619   case PPCISD::LXVRZX:          return "PPCISD::LXVRZX";
1620   }
1621   return nullptr;
1622 }
1623 
1624 EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
1625                                           EVT VT) const {
1626   if (!VT.isVector())
1627     return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;
1628 
1629   return VT.changeVectorElementTypeToInteger();
1630 }
1631 
1632 bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
1633   assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
1634   return true;
1635 }
1636 
1637 //===----------------------------------------------------------------------===//
1638 // Node matching predicates, for use by the tblgen matching code.
1639 //===----------------------------------------------------------------------===//
1640 
1641 /// isFloatingPointZero - Return true if this is 0.0 or -0.0.
1642 static bool isFloatingPointZero(SDValue Op) {
1643   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
1644     return CFP->getValueAPF().isZero();
1645   else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
1646     // Maybe this has already been legalized into the constant pool?
1647     if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
1648       if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
1649         return CFP->getValueAPF().isZero();
1650   }
1651   return false;
1652 }
1653 
1654 /// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
1655 /// true if Op is undef or if it matches the specified value.
1656 static bool isConstantOrUndef(int Op, int Val) {
1657   return Op < 0 || Op == Val;
1658 }
1659 
1660 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
1661 /// VPKUHUM instruction.
1662 /// The ShuffleKind distinguishes between big-endian operations with
1663 /// two different inputs (0), either-endian operations with two identical
1664 /// inputs (1), and little-endian operations with two different inputs (2).
1665 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1666 bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1667                                SelectionDAG &DAG) {
1668   bool IsLE = DAG.getDataLayout().isLittleEndian();
1669   if (ShuffleKind == 0) {
1670     if (IsLE)
1671       return false;
1672     for (unsigned i = 0; i != 16; ++i)
1673       if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
1674         return false;
1675   } else if (ShuffleKind == 2) {
1676     if (!IsLE)
1677       return false;
1678     for (unsigned i = 0; i != 16; ++i)
1679       if (!isConstantOrUndef(N->getMaskElt(i), i*2))
1680         return false;
1681   } else if (ShuffleKind == 1) {
1682     unsigned j = IsLE ? 0 : 1;
1683     for (unsigned i = 0; i != 8; ++i)
1684       if (!isConstantOrUndef(N->getMaskElt(i),    i*2+j) ||
1685           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j))
1686         return false;
1687   }
1688   return true;
1689 }
1690 
1691 /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
1692 /// VPKUWUM instruction.
1693 /// The ShuffleKind distinguishes between big-endian operations with
1694 /// two different inputs (0), either-endian operations with two identical
1695 /// inputs (1), and little-endian operations with two different inputs (2).
1696 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1697 bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1698                                SelectionDAG &DAG) {
1699   bool IsLE = DAG.getDataLayout().isLittleEndian();
1700   if (ShuffleKind == 0) {
1701     if (IsLE)
1702       return false;
1703     for (unsigned i = 0; i != 16; i += 2)
1704       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+2) ||
1705           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+3))
1706         return false;
1707   } else if (ShuffleKind == 2) {
1708     if (!IsLE)
1709       return false;
1710     for (unsigned i = 0; i != 16; i += 2)
1711       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
1712           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1))
1713         return false;
1714   } else if (ShuffleKind == 1) {
1715     unsigned j = IsLE ? 0 : 2;
1716     for (unsigned i = 0; i != 8; i += 2)
1717       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
1718           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
1719           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
1720           !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1))
1721         return false;
1722   }
1723   return true;
1724 }
1725 
1726 /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
1727 /// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
1728 /// current subtarget.
1729 ///
1730 /// The ShuffleKind distinguishes between big-endian operations with
1731 /// two different inputs (0), either-endian operations with two identical
1732 /// inputs (1), and little-endian operations with two different inputs (2).
1733 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1734 bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1735                                SelectionDAG &DAG) {
1736   const PPCSubtarget& Subtarget =
1737       static_cast<const PPCSubtarget&>(DAG.getSubtarget());
1738   if (!Subtarget.hasP8Vector())
1739     return false;
1740 
1741   bool IsLE = DAG.getDataLayout().isLittleEndian();
1742   if (ShuffleKind == 0) {
1743     if (IsLE)
1744       return false;
1745     for (unsigned i = 0; i != 16; i += 4)
1746       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+4) ||
1747           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+5) ||
1748           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+6) ||
1749           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+7))
1750         return false;
1751   } else if (ShuffleKind == 2) {
1752     if (!IsLE)
1753       return false;
1754     for (unsigned i = 0; i != 16; i += 4)
1755       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
1756           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1) ||
1757           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+2) ||
1758           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+3))
1759         return false;
1760   } else if (ShuffleKind == 1) {
1761     unsigned j = IsLE ? 0 : 4;
1762     for (unsigned i = 0; i != 8; i += 4)
1763       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
1764           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
1765           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+j+2) ||
1766           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+j+3) ||
1767           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
1768           !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1) ||
1769           !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
1770           !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
1771         return false;
1772   }
1773   return true;
1774 }
1775 
1776 /// isVMerge - Common function, used to match vmrg* shuffles.
1777 ///
1778 static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
1779                      unsigned LHSStart, unsigned RHSStart) {
1780   if (N->getValueType(0) != MVT::v16i8)
1781     return false;
1782   assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
1783          "Unsupported merge size!");
1784 
1785   for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
1786     for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
1787       if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
1788                              LHSStart+j+i*UnitSize) ||
1789           !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
1790                              RHSStart+j+i*UnitSize))
1791         return false;
1792     }
1793   return true;
1794 }
1795 
1796 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
1797 /// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
1798 /// The ShuffleKind distinguishes between big-endian merges with two
1799 /// different inputs (0), either-endian merges with two identical inputs (1),
1800 /// and little-endian merges with two different inputs (2).  For the latter,
1801 /// the input operands are swapped (see PPCInstrAltivec.td).
1802 bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1803                              unsigned ShuffleKind, SelectionDAG &DAG) {
1804   if (DAG.getDataLayout().isLittleEndian()) {
1805     if (ShuffleKind == 1) // unary
1806       return isVMerge(N, UnitSize, 0, 0);
1807     else if (ShuffleKind == 2) // swapped
1808       return isVMerge(N, UnitSize, 0, 16);
1809     else
1810       return false;
1811   } else {
1812     if (ShuffleKind == 1) // unary
1813       return isVMerge(N, UnitSize, 8, 8);
1814     else if (ShuffleKind == 0) // normal
1815       return isVMerge(N, UnitSize, 8, 24);
1816     else
1817       return false;
1818   }
1819 }
1820 
1821 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
1822 /// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
1823 /// The ShuffleKind distinguishes between big-endian merges with two
1824 /// different inputs (0), either-endian merges with two identical inputs (1),
1825 /// and little-endian merges with two different inputs (2).  For the latter,
1826 /// the input operands are swapped (see PPCInstrAltivec.td).
1827 bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1828                              unsigned ShuffleKind, SelectionDAG &DAG) {
1829   if (DAG.getDataLayout().isLittleEndian()) {
1830     if (ShuffleKind == 1) // unary
1831       return isVMerge(N, UnitSize, 8, 8);
1832     else if (ShuffleKind == 2) // swapped
1833       return isVMerge(N, UnitSize, 8, 24);
1834     else
1835       return false;
1836   } else {
1837     if (ShuffleKind == 1) // unary
1838       return isVMerge(N, UnitSize, 0, 0);
1839     else if (ShuffleKind == 0) // normal
1840       return isVMerge(N, UnitSize, 0, 16);
1841     else
1842       return false;
1843   }
1844 }
1845 
1846 /**
1847  * Common function used to match vmrgew and vmrgow shuffles
1848  *
1849  * The indexOffset determines whether to look for even or odd words in
1850  * the shuffle mask. This is based on the of the endianness of the target
1851  * machine.
1852  *   - Little Endian:
1853  *     - Use offset of 0 to check for odd elements
1854  *     - Use offset of 4 to check for even elements
1855  *   - Big Endian:
1856  *     - Use offset of 0 to check for even elements
1857  *     - Use offset of 4 to check for odd elements
1858  * A detailed description of the vector element ordering for little endian and
1859  * big endian can be found at
1860  * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
1861  * Targeting your applications - what little endian and big endian IBM XL C/C++
1862  * compiler differences mean to you
1863  *
1864  * The mask to the shuffle vector instruction specifies the indices of the
1865  * elements from the two input vectors to place in the result. The elements are
1866  * numbered in array-access order, starting with the first vector. These vectors
1867  * are always of type v16i8, thus each vector will contain 16 elements of size
1868  * 8. More info on the shuffle vector can be found in the
1869  * http://llvm.org/docs/LangRef.html#shufflevector-instruction
1870  * Language Reference.
1871  *
1872  * The RHSStartValue indicates whether the same input vectors are used (unary)
1873  * or two different input vectors are used, based on the following:
1874  *   - If the instruction uses the same vector for both inputs, the range of the
1875  *     indices will be 0 to 15. In this case, the RHSStart value passed should
1876  *     be 0.
1877  *   - If the instruction has two different vectors then the range of the
1878  *     indices will be 0 to 31. In this case, the RHSStart value passed should
1879  *     be 16 (indices 0-15 specify elements in the first vector while indices 16
1880  *     to 31 specify elements in the second vector).
1881  *
1882  * \param[in] N The shuffle vector SD Node to analyze
1883  * \param[in] IndexOffset Specifies whether to look for even or odd elements
1884  * \param[in] RHSStartValue Specifies the starting index for the righthand input
1885  * vector to the shuffle_vector instruction
1886  * \return true iff this shuffle vector represents an even or odd word merge
1887  */
1888 static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
1889                      unsigned RHSStartValue) {
1890   if (N->getValueType(0) != MVT::v16i8)
1891     return false;
1892 
1893   for (unsigned i = 0; i < 2; ++i)
1894     for (unsigned j = 0; j < 4; ++j)
1895       if (!isConstantOrUndef(N->getMaskElt(i*4+j),
1896                              i*RHSStartValue+j+IndexOffset) ||
1897           !isConstantOrUndef(N->getMaskElt(i*4+j+8),
1898                              i*RHSStartValue+j+IndexOffset+8))
1899         return false;
1900   return true;
1901 }
1902 
1903 /**
1904  * Determine if the specified shuffle mask is suitable for the vmrgew or
1905  * vmrgow instructions.
1906  *
1907  * \param[in] N The shuffle vector SD Node to analyze
1908  * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
1909  * \param[in] ShuffleKind Identify the type of merge:
1910  *   - 0 = big-endian merge with two different inputs;
1911  *   - 1 = either-endian merge with two identical inputs;
1912  *   - 2 = little-endian merge with two different inputs (inputs are swapped for
1913  *     little-endian merges).
1914  * \param[in] DAG The current SelectionDAG
1915  * \return true iff this shuffle mask
1916  */
1917 bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
1918                               unsigned ShuffleKind, SelectionDAG &DAG) {
1919   if (DAG.getDataLayout().isLittleEndian()) {
1920     unsigned indexOffset = CheckEven ? 4 : 0;
1921     if (ShuffleKind == 1) // Unary
1922       return isVMerge(N, indexOffset, 0);
1923     else if (ShuffleKind == 2) // swapped
1924       return isVMerge(N, indexOffset, 16);
1925     else
1926       return false;
1927   }
1928   else {
1929     unsigned indexOffset = CheckEven ? 0 : 4;
1930     if (ShuffleKind == 1) // Unary
1931       return isVMerge(N, indexOffset, 0);
1932     else if (ShuffleKind == 0) // Normal
1933       return isVMerge(N, indexOffset, 16);
1934     else
1935       return false;
1936   }
1937   return false;
1938 }
1939 
1940 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
1941 /// amount, otherwise return -1.
1942 /// The ShuffleKind distinguishes between big-endian operations with two
1943 /// different inputs (0), either-endian operations with two identical inputs
1944 /// (1), and little-endian operations with two different inputs (2).  For the
1945 /// latter, the input operands are swapped (see PPCInstrAltivec.td).
1946 int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
1947                              SelectionDAG &DAG) {
1948   if (N->getValueType(0) != MVT::v16i8)
1949     return -1;
1950 
1951   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1952 
1953   // Find the first non-undef value in the shuffle mask.
1954   unsigned i;
1955   for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
1956     /*search*/;
1957 
1958   if (i == 16) return -1;  // all undef.
1959 
1960   // Otherwise, check to see if the rest of the elements are consecutively
1961   // numbered from this value.
1962   unsigned ShiftAmt = SVOp->getMaskElt(i);
1963   if (ShiftAmt < i) return -1;
1964 
1965   ShiftAmt -= i;
1966   bool isLE = DAG.getDataLayout().isLittleEndian();
1967 
1968   if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
1969     // Check the rest of the elements to see if they are consecutive.
1970     for (++i; i != 16; ++i)
1971       if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
1972         return -1;
1973   } else if (ShuffleKind == 1) {
1974     // Check the rest of the elements to see if they are consecutive.
1975     for (++i; i != 16; ++i)
1976       if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
1977         return -1;
1978   } else
1979     return -1;
1980 
1981   if (isLE)
1982     ShiftAmt = 16 - ShiftAmt;
1983 
1984   return ShiftAmt;
1985 }
1986 
1987 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
1988 /// specifies a splat of a single element that is suitable for input to
1989 /// one of the splat operations (VSPLTB/VSPLTH/VSPLTW/XXSPLTW/LXVDSX/etc.).
1990 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
1991   assert(N->getValueType(0) == MVT::v16i8 && isPowerOf2_32(EltSize) &&
1992          EltSize <= 8 && "Can only handle 1,2,4,8 byte element sizes");
1993 
1994   // The consecutive indices need to specify an element, not part of two
1995   // different elements.  So abandon ship early if this isn't the case.
1996   if (N->getMaskElt(0) % EltSize != 0)
1997     return false;
1998 
1999   // This is a splat operation if each element of the permute is the same, and
2000   // if the value doesn't reference the second vector.
2001   unsigned ElementBase = N->getMaskElt(0);
2002 
2003   // FIXME: Handle UNDEF elements too!
2004   if (ElementBase >= 16)
2005     return false;
2006 
2007   // Check that the indices are consecutive, in the case of a multi-byte element
2008   // splatted with a v16i8 mask.
2009   for (unsigned i = 1; i != EltSize; ++i)
2010     if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
2011       return false;
2012 
2013   for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
2014     if (N->getMaskElt(i) < 0) continue;
2015     for (unsigned j = 0; j != EltSize; ++j)
2016       if (N->getMaskElt(i+j) != N->getMaskElt(j))
2017         return false;
2018   }
2019   return true;
2020 }
2021 
2022 /// Check that the mask is shuffling N byte elements. Within each N byte
2023 /// element of the mask, the indices could be either in increasing or
2024 /// decreasing order as long as they are consecutive.
2025 /// \param[in] N the shuffle vector SD Node to analyze
2026 /// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/
2027 /// Word/DoubleWord/QuadWord).
2028 /// \param[in] StepLen the delta indices number among the N byte element, if
2029 /// the mask is in increasing/decreasing order then it is 1/-1.
2030 /// \return true iff the mask is shuffling N byte elements.
2031 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
2032                                    int StepLen) {
2033   assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
2034          "Unexpected element width.");
2035   assert((StepLen == 1 || StepLen == -1) && "Unexpected element width.");
2036 
2037   unsigned NumOfElem = 16 / Width;
2038   unsigned MaskVal[16]; //  Width is never greater than 16
2039   for (unsigned i = 0; i < NumOfElem; ++i) {
2040     MaskVal[0] = N->getMaskElt(i * Width);
2041     if ((StepLen == 1) && (MaskVal[0] % Width)) {
2042       return false;
2043     } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
2044       return false;
2045     }
2046 
2047     for (unsigned int j = 1; j < Width; ++j) {
2048       MaskVal[j] = N->getMaskElt(i * Width + j);
2049       if (MaskVal[j] != MaskVal[j-1] + StepLen) {
2050         return false;
2051       }
2052     }
2053   }
2054 
2055   return true;
2056 }
2057 
2058 bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
2059                           unsigned &InsertAtByte, bool &Swap, bool IsLE) {
2060   if (!isNByteElemShuffleMask(N, 4, 1))
2061     return false;
2062 
2063   // Now we look at mask elements 0,4,8,12
2064   unsigned M0 = N->getMaskElt(0) / 4;
2065   unsigned M1 = N->getMaskElt(4) / 4;
2066   unsigned M2 = N->getMaskElt(8) / 4;
2067   unsigned M3 = N->getMaskElt(12) / 4;
2068   unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
2069   unsigned BigEndianShifts[] = { 3, 0, 1, 2 };
2070 
2071   // Below, let H and L be arbitrary elements of the shuffle mask
2072   // where H is in the range [4,7] and L is in the range [0,3].
2073   // H, 1, 2, 3 or L, 5, 6, 7
2074   if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
2075       (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
2076     ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
2077     InsertAtByte = IsLE ? 12 : 0;
2078     Swap = M0 < 4;
2079     return true;
2080   }
2081   // 0, H, 2, 3 or 4, L, 6, 7
2082   if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
2083       (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
2084     ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
2085     InsertAtByte = IsLE ? 8 : 4;
2086     Swap = M1 < 4;
2087     return true;
2088   }
2089   // 0, 1, H, 3 or 4, 5, L, 7
2090   if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
2091       (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
2092     ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
2093     InsertAtByte = IsLE ? 4 : 8;
2094     Swap = M2 < 4;
2095     return true;
2096   }
2097   // 0, 1, 2, H or 4, 5, 6, L
2098   if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
2099       (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
2100     ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
2101     InsertAtByte = IsLE ? 0 : 12;
2102     Swap = M3 < 4;
2103     return true;
2104   }
2105 
2106   // If both vector operands for the shuffle are the same vector, the mask will
2107   // contain only elements from the first one and the second one will be undef.
2108   if (N->getOperand(1).isUndef()) {
2109     ShiftElts = 0;
2110     Swap = true;
2111     unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
2112     if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
2113       InsertAtByte = IsLE ? 12 : 0;
2114       return true;
2115     }
2116     if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
2117       InsertAtByte = IsLE ? 8 : 4;
2118       return true;
2119     }
2120     if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
2121       InsertAtByte = IsLE ? 4 : 8;
2122       return true;
2123     }
2124     if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
2125       InsertAtByte = IsLE ? 0 : 12;
2126       return true;
2127     }
2128   }
2129 
2130   return false;
2131 }
2132 
2133 bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
2134                                bool &Swap, bool IsLE) {
2135   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2136   // Ensure each byte index of the word is consecutive.
2137   if (!isNByteElemShuffleMask(N, 4, 1))
2138     return false;
2139 
2140   // Now we look at mask elements 0,4,8,12, which are the beginning of words.
2141   unsigned M0 = N->getMaskElt(0) / 4;
2142   unsigned M1 = N->getMaskElt(4) / 4;
2143   unsigned M2 = N->getMaskElt(8) / 4;
2144   unsigned M3 = N->getMaskElt(12) / 4;
2145 
2146   // If both vector operands for the shuffle are the same vector, the mask will
2147   // contain only elements from the first one and the second one will be undef.
2148   if (N->getOperand(1).isUndef()) {
2149     assert(M0 < 4 && "Indexing into an undef vector?");
2150     if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
2151       return false;
2152 
2153     ShiftElts = IsLE ? (4 - M0) % 4 : M0;
2154     Swap = false;
2155     return true;
2156   }
2157 
2158   // Ensure each word index of the ShuffleVector Mask is consecutive.
2159   if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)
2160     return false;
2161 
2162   if (IsLE) {
2163     if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
2164       // Input vectors don't need to be swapped if the leading element
2165       // of the result is one of the 3 left elements of the second vector
2166       // (or if there is no shift to be done at all).
2167       Swap = false;
2168       ShiftElts = (8 - M0) % 8;
2169     } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
2170       // Input vectors need to be swapped if the leading element
2171       // of the result is one of the 3 left elements of the first vector
2172       // (or if we're shifting by 4 - thereby simply swapping the vectors).
2173       Swap = true;
2174       ShiftElts = (4 - M0) % 4;
2175     }
2176 
2177     return true;
2178   } else {                                          // BE
2179     if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
2180       // Input vectors don't need to be swapped if the leading element
2181       // of the result is one of the 4 elements of the first vector.
2182       Swap = false;
2183       ShiftElts = M0;
2184     } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
2185       // Input vectors need to be swapped if the leading element
2186       // of the result is one of the 4 elements of the right vector.
2187       Swap = true;
2188       ShiftElts = M0 - 4;
2189     }
2190 
2191     return true;
2192   }
2193 }
2194 
2195 bool static isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) {
2196   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2197 
2198   if (!isNByteElemShuffleMask(N, Width, -1))
2199     return false;
2200 
2201   for (int i = 0; i < 16; i += Width)
2202     if (N->getMaskElt(i) != i + Width - 1)
2203       return false;
2204 
2205   return true;
2206 }
2207 
2208 bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) {
2209   return isXXBRShuffleMaskHelper(N, 2);
2210 }
2211 
2212 bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) {
2213   return isXXBRShuffleMaskHelper(N, 4);
2214 }
2215 
2216 bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) {
2217   return isXXBRShuffleMaskHelper(N, 8);
2218 }
2219 
2220 bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) {
2221   return isXXBRShuffleMaskHelper(N, 16);
2222 }
2223 
2224 /// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap
2225 /// if the inputs to the instruction should be swapped and set \p DM to the
2226 /// value for the immediate.
2227 /// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI
2228 /// AND element 0 of the result comes from the first input (LE) or second input
2229 /// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered.
2230 /// \return true iff the given mask of shuffle node \p N is a XXPERMDI shuffle
2231 /// mask.
2232 bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
2233                                bool &Swap, bool IsLE) {
2234   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2235 
2236   // Ensure each byte index of the double word is consecutive.
2237   if (!isNByteElemShuffleMask(N, 8, 1))
2238     return false;
2239 
2240   unsigned M0 = N->getMaskElt(0) / 8;
2241   unsigned M1 = N->getMaskElt(8) / 8;
2242   assert(((M0 | M1) < 4) && "A mask element out of bounds?");
2243 
2244   // If both vector operands for the shuffle are the same vector, the mask will
2245   // contain only elements from the first one and the second one will be undef.
2246   if (N->getOperand(1).isUndef()) {
2247     if ((M0 | M1) < 2) {
2248       DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
2249       Swap = false;
2250       return true;
2251     } else
2252       return false;
2253   }
2254 
2255   if (IsLE) {
2256     if (M0 > 1 && M1 < 2) {
2257       Swap = false;
2258     } else if (M0 < 2 && M1 > 1) {
2259       M0 = (M0 + 2) % 4;
2260       M1 = (M1 + 2) % 4;
2261       Swap = true;
2262     } else
2263       return false;
2264 
2265     // Note: if control flow comes here that means Swap is already set above
2266     DM = (((~M1) & 1) << 1) + ((~M0) & 1);
2267     return true;
2268   } else { // BE
2269     if (M0 < 2 && M1 > 1) {
2270       Swap = false;
2271     } else if (M0 > 1 && M1 < 2) {
2272       M0 = (M0 + 2) % 4;
2273       M1 = (M1 + 2) % 4;
2274       Swap = true;
2275     } else
2276       return false;
2277 
2278     // Note: if control flow comes here that means Swap is already set above
2279     DM = (M0 << 1) + (M1 & 1);
2280     return true;
2281   }
2282 }
2283 
2284 
2285 /// getSplatIdxForPPCMnemonics - Return the splat index as a value that is
2286 /// appropriate for PPC mnemonics (which have a big endian bias - namely
2287 /// elements are counted from the left of the vector register).
2288 unsigned PPC::getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
2289                                          SelectionDAG &DAG) {
2290   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2291   assert(isSplatShuffleMask(SVOp, EltSize));
2292   if (DAG.getDataLayout().isLittleEndian())
2293     return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
2294   else
2295     return SVOp->getMaskElt(0) / EltSize;
2296 }
2297 
2298 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
2299 /// by using a vspltis[bhw] instruction of the specified element size, return
2300 /// the constant being splatted.  The ByteSize field indicates the number of
2301 /// bytes of each element [124] -> [bhw].
2302 SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
2303   SDValue OpVal(nullptr, 0);
2304 
2305   // If ByteSize of the splat is bigger than the element size of the
2306   // build_vector, then we have a case where we are checking for a splat where
2307   // multiple elements of the buildvector are folded together into a single
2308   // logical element of the splat (e.g. "vsplish 1" to splat {0,1}*8).
2309   unsigned EltSize = 16/N->getNumOperands();
2310   if (EltSize < ByteSize) {
2311     unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
2312     SDValue UniquedVals[4];
2313     assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
2314 
2315     // See if all of the elements in the buildvector agree across.
2316     for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2317       if (N->getOperand(i).isUndef()) continue;
2318       // If the element isn't a constant, bail fully out.
2319       if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();
2320 
2321       if (!UniquedVals[i&(Multiple-1)].getNode())
2322         UniquedVals[i&(Multiple-1)] = N->getOperand(i);
2323       else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
2324         return SDValue();  // no match.
2325     }
2326 
2327     // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
2328     // either constant or undef values that are identical for each chunk.  See
2329     // if these chunks can form into a larger vspltis*.
2330 
2331     // Check to see if all of the leading entries are either 0 or -1.  If
2332     // neither, then this won't fit into the immediate field.
2333     bool LeadingZero = true;
2334     bool LeadingOnes = true;
2335     for (unsigned i = 0; i != Multiple-1; ++i) {
2336       if (!UniquedVals[i].getNode()) continue;  // Must have been undefs.
2337 
2338       LeadingZero &= isNullConstant(UniquedVals[i]);
2339       LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
2340     }
2341     // Finally, check the least significant entry.
2342     if (LeadingZero) {
2343       if (!UniquedVals[Multiple-1].getNode())
2344         return DAG.getTargetConstant(0, SDLoc(N), MVT::i32);  // 0,0,0,undef
2345       int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
2346       if (Val < 16)                                   // 0,0,0,4 -> vspltisw(4)
2347         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2348     }
2349     if (LeadingOnes) {
2350       if (!UniquedVals[Multiple-1].getNode())
2351         return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
2352       int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
2353       if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
2354         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2355     }
2356 
2357     return SDValue();
2358   }
2359 
2360   // Check to see if this buildvec has a single non-undef value in its elements.
2361   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2362     if (N->getOperand(i).isUndef()) continue;
2363     if (!OpVal.getNode())
2364       OpVal = N->getOperand(i);
2365     else if (OpVal != N->getOperand(i))
2366       return SDValue();
2367   }
2368 
2369   if (!OpVal.getNode()) return SDValue();  // All UNDEF: use implicit def.
2370 
2371   unsigned ValSizeInBytes = EltSize;
2372   uint64_t Value = 0;
2373   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
2374     Value = CN->getZExtValue();
2375   } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
2376     assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
2377     Value = FloatToBits(CN->getValueAPF().convertToFloat());
2378   }
2379 
2380   // If the splat value is larger than the element value, then we can never do
2381   // this splat.  The only case that we could fit the replicated bits into our
2382   // immediate field for would be zero, and we prefer to use vxor for it.
2383   if (ValSizeInBytes < ByteSize) return SDValue();
2384 
2385   // If the element value is larger than the splat value, check if it consists
2386   // of a repeated bit pattern of size ByteSize.
2387   if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
2388     return SDValue();
2389 
2390   // Properly sign extend the value.
2391   int MaskVal = SignExtend32(Value, ByteSize * 8);
2392 
2393   // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
2394   if (MaskVal == 0) return SDValue();
2395 
2396   // Finally, if this value fits in a 5 bit sext field, return it
2397   if (SignExtend32<5>(MaskVal) == MaskVal)
2398     return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
2399   return SDValue();
2400 }
2401 
2402 //===----------------------------------------------------------------------===//
2403 //  Addressing Mode Selection
2404 //===----------------------------------------------------------------------===//
2405 
2406 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit
2407 /// or 64-bit immediate, and if the value can be accurately represented as a
2408 /// sign extension from a 16-bit value.  If so, this returns true and the
2409 /// immediate.
2410 bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
2411   if (!isa<ConstantSDNode>(N))
2412     return false;
2413 
2414   Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
2415   if (N->getValueType(0) == MVT::i32)
2416     return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
2417   else
2418     return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
2419 }
2420 bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
2421   return isIntS16Immediate(Op.getNode(), Imm);
2422 }
2423 
2424 
2425 /// SelectAddressEVXRegReg - Given the specified address, check to see if it can
2426 /// be represented as an indexed [r+r] operation.
2427 bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base,
2428                                                SDValue &Index,
2429                                                SelectionDAG &DAG) const {
2430   for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
2431       UI != E; ++UI) {
2432     if (MemSDNode *Memop = dyn_cast<MemSDNode>(*UI)) {
2433       if (Memop->getMemoryVT() == MVT::f64) {
2434           Base = N.getOperand(0);
2435           Index = N.getOperand(1);
2436           return true;
2437       }
2438     }
2439   }
2440   return false;
2441 }
2442 
2443 /// isIntS34Immediate - This method tests if value of node given can be
2444 /// accurately represented as a sign extension from a 34-bit value.  If so,
2445 /// this returns true and the immediate.
2446 bool llvm::isIntS34Immediate(SDNode *N, int64_t &Imm) {
2447   if (!isa<ConstantSDNode>(N))
2448     return false;
2449 
2450   Imm = (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
2451   return isInt<34>(Imm);
2452 }
2453 bool llvm::isIntS34Immediate(SDValue Op, int64_t &Imm) {
2454   return isIntS34Immediate(Op.getNode(), Imm);
2455 }
2456 
2457 /// SelectAddressRegReg - Given the specified addressed, check to see if it
2458 /// can be represented as an indexed [r+r] operation.  Returns false if it
2459 /// can be more efficiently represented as [r+imm]. If \p EncodingAlignment is
2460 /// non-zero and N can be represented by a base register plus a signed 16-bit
2461 /// displacement, make a more precise judgement by checking (displacement % \p
2462 /// EncodingAlignment).
2463 bool PPCTargetLowering::SelectAddressRegReg(
2464     SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG,
2465     MaybeAlign EncodingAlignment) const {
2466   // If we have a PC Relative target flag don't select as [reg+reg]. It will be
2467   // a [pc+imm].
2468   if (SelectAddressPCRel(N, Base))
2469     return false;
2470 
2471   int16_t Imm = 0;
2472   if (N.getOpcode() == ISD::ADD) {
2473     // Is there any SPE load/store (f64), which can't handle 16bit offset?
2474     // SPE load/store can only handle 8-bit offsets.
2475     if (hasSPE() && SelectAddressEVXRegReg(N, Base, Index, DAG))
2476         return true;
2477     if (isIntS16Immediate(N.getOperand(1), Imm) &&
2478         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
2479       return false; // r+i
2480     if (N.getOperand(1).getOpcode() == PPCISD::Lo)
2481       return false;    // r+i
2482 
2483     Base = N.getOperand(0);
2484     Index = N.getOperand(1);
2485     return true;
2486   } else if (N.getOpcode() == ISD::OR) {
2487     if (isIntS16Immediate(N.getOperand(1), Imm) &&
2488         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
2489       return false; // r+i can fold it if we can.
2490 
2491     // If this is an or of disjoint bitfields, we can codegen this as an add
2492     // (for better address arithmetic) if the LHS and RHS of the OR are provably
2493     // disjoint.
2494     KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2495 
2496     if (LHSKnown.Zero.getBoolValue()) {
2497       KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
2498       // If all of the bits are known zero on the LHS or RHS, the add won't
2499       // carry.
2500       if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
2501         Base = N.getOperand(0);
2502         Index = N.getOperand(1);
2503         return true;
2504       }
2505     }
2506   }
2507 
2508   return false;
2509 }
2510 
2511 // If we happen to be doing an i64 load or store into a stack slot that has
2512 // less than a 4-byte alignment, then the frame-index elimination may need to
2513 // use an indexed load or store instruction (because the offset may not be a
2514 // multiple of 4). The extra register needed to hold the offset comes from the
2515 // register scavenger, and it is possible that the scavenger will need to use
2516 // an emergency spill slot. As a result, we need to make sure that a spill slot
2517 // is allocated when doing an i64 load/store into a less-than-4-byte-aligned
2518 // stack slot.
2519 static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
2520   // FIXME: This does not handle the LWA case.
2521   if (VT != MVT::i64)
2522     return;
2523 
2524   // NOTE: We'll exclude negative FIs here, which come from argument
2525   // lowering, because there are no known test cases triggering this problem
2526   // using packed structures (or similar). We can remove this exclusion if
2527   // we find such a test case. The reason why this is so test-case driven is
2528   // because this entire 'fixup' is only to prevent crashes (from the
2529   // register scavenger) on not-really-valid inputs. For example, if we have:
2530   //   %a = alloca i1
2531   //   %b = bitcast i1* %a to i64*
2532   //   store i64* a, i64 b
2533   // then the store should really be marked as 'align 1', but is not. If it
2534   // were marked as 'align 1' then the indexed form would have been
2535   // instruction-selected initially, and the problem this 'fixup' is preventing
2536   // won't happen regardless.
2537   if (FrameIdx < 0)
2538     return;
2539 
2540   MachineFunction &MF = DAG.getMachineFunction();
2541   MachineFrameInfo &MFI = MF.getFrameInfo();
2542 
2543   if (MFI.getObjectAlign(FrameIdx) >= Align(4))
2544     return;
2545 
2546   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2547   FuncInfo->setHasNonRISpills();
2548 }
2549 
2550 /// Returns true if the address N can be represented by a base register plus
2551 /// a signed 16-bit displacement [r+imm], and if it is not better
2552 /// represented as reg+reg.  If \p EncodingAlignment is non-zero, only accept
2553 /// displacements that are multiples of that value.
2554 bool PPCTargetLowering::SelectAddressRegImm(
2555     SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG,
2556     MaybeAlign EncodingAlignment) const {
2557   // FIXME dl should come from parent load or store, not from address
2558   SDLoc dl(N);
2559 
2560   // If we have a PC Relative target flag don't select as [reg+imm]. It will be
2561   // a [pc+imm].
2562   if (SelectAddressPCRel(N, Base))
2563     return false;
2564 
2565   // If this can be more profitably realized as r+r, fail.
2566   if (SelectAddressRegReg(N, Disp, Base, DAG, EncodingAlignment))
2567     return false;
2568 
2569   if (N.getOpcode() == ISD::ADD) {
2570     int16_t imm = 0;
2571     if (isIntS16Immediate(N.getOperand(1), imm) &&
2572         (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
2573       Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2574       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2575         Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2576         fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2577       } else {
2578         Base = N.getOperand(0);
2579       }
2580       return true; // [r+i]
2581     } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
2582       // Match LOAD (ADD (X, Lo(G))).
2583       assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
2584              && "Cannot handle constant offsets yet!");
2585       Disp = N.getOperand(1).getOperand(0);  // The global address.
2586       assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
2587              Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
2588              Disp.getOpcode() == ISD::TargetConstantPool ||
2589              Disp.getOpcode() == ISD::TargetJumpTable);
2590       Base = N.getOperand(0);
2591       return true;  // [&g+r]
2592     }
2593   } else if (N.getOpcode() == ISD::OR) {
2594     int16_t imm = 0;
2595     if (isIntS16Immediate(N.getOperand(1), imm) &&
2596         (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
2597       // If this is an or of disjoint bitfields, we can codegen this as an add
2598       // (for better address arithmetic) if the LHS and RHS of the OR are
2599       // provably disjoint.
2600       KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2601 
2602       if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
2603         // If all of the bits are known zero on the LHS or RHS, the add won't
2604         // carry.
2605         if (FrameIndexSDNode *FI =
2606               dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2607           Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2608           fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2609         } else {
2610           Base = N.getOperand(0);
2611         }
2612         Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2613         return true;
2614       }
2615     }
2616   } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
2617     // Loading from a constant address.
2618 
2619     // If this address fits entirely in a 16-bit sext immediate field, codegen
2620     // this as "d, 0"
2621     int16_t Imm;
2622     if (isIntS16Immediate(CN, Imm) &&
2623         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm))) {
2624       Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
2625       Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2626                              CN->getValueType(0));
2627       return true;
2628     }
2629 
2630     // Handle 32-bit sext immediates with LIS + addr mode.
2631     if ((CN->getValueType(0) == MVT::i32 ||
2632          (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
2633         (!EncodingAlignment ||
2634          isAligned(*EncodingAlignment, CN->getZExtValue()))) {
2635       int Addr = (int)CN->getZExtValue();
2636 
2637       // Otherwise, break this down into an LIS + disp.
2638       Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);
2639 
2640       Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
2641                                    MVT::i32);
2642       unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
2643       Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
2644       return true;
2645     }
2646   }
2647 
2648   Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
2649   if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
2650     Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2651     fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2652   } else
2653     Base = N;
2654   return true;      // [r+0]
2655 }
2656 
2657 /// Similar to the 16-bit case but for instructions that take a 34-bit
2658 /// displacement field (prefixed loads/stores).
2659 bool PPCTargetLowering::SelectAddressRegImm34(SDValue N, SDValue &Disp,
2660                                               SDValue &Base,
2661                                               SelectionDAG &DAG) const {
2662   // Only on 64-bit targets.
2663   if (N.getValueType() != MVT::i64)
2664     return false;
2665 
2666   SDLoc dl(N);
2667   int64_t Imm = 0;
2668 
2669   if (N.getOpcode() == ISD::ADD) {
2670     if (!isIntS34Immediate(N.getOperand(1), Imm))
2671       return false;
2672     Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
2673     if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0)))
2674       Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2675     else
2676       Base = N.getOperand(0);
2677     return true;
2678   }
2679 
2680   if (N.getOpcode() == ISD::OR) {
2681     if (!isIntS34Immediate(N.getOperand(1), Imm))
2682       return false;
2683     // If this is an or of disjoint bitfields, we can codegen this as an add
2684     // (for better address arithmetic) if the LHS and RHS of the OR are
2685     // provably disjoint.
2686     KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2687     if ((LHSKnown.Zero.getZExtValue() | ~(uint64_t)Imm) != ~0ULL)
2688       return false;
2689     if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0)))
2690       Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2691     else
2692       Base = N.getOperand(0);
2693     Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
2694     return true;
2695   }
2696 
2697   if (isIntS34Immediate(N, Imm)) { // If the address is a 34-bit const.
2698     Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
2699     Base = DAG.getRegister(PPC::ZERO8, N.getValueType());
2700     return true;
2701   }
2702 
2703   return false;
2704 }
2705 
2706 /// SelectAddressRegRegOnly - Given the specified addressed, force it to be
2707 /// represented as an indexed [r+r] operation.
2708 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
2709                                                 SDValue &Index,
2710                                                 SelectionDAG &DAG) const {
2711   // Check to see if we can easily represent this as an [r+r] address.  This
2712   // will fail if it thinks that the address is more profitably represented as
2713   // reg+imm, e.g. where imm = 0.
2714   if (SelectAddressRegReg(N, Base, Index, DAG))
2715     return true;
2716 
2717   // If the address is the result of an add, we will utilize the fact that the
2718   // address calculation includes an implicit add.  However, we can reduce
2719   // register pressure if we do not materialize a constant just for use as the
2720   // index register.  We only get rid of the add if it is not an add of a
2721   // value and a 16-bit signed constant and both have a single use.
2722   int16_t imm = 0;
2723   if (N.getOpcode() == ISD::ADD &&
2724       (!isIntS16Immediate(N.getOperand(1), imm) ||
2725        !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
2726     Base = N.getOperand(0);
2727     Index = N.getOperand(1);
2728     return true;
2729   }
2730 
2731   // Otherwise, do it the hard way, using R0 as the base register.
2732   Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2733                          N.getValueType());
2734   Index = N;
2735   return true;
2736 }
2737 
2738 template <typename Ty> static bool isValidPCRelNode(SDValue N) {
2739   Ty *PCRelCand = dyn_cast<Ty>(N);
2740   return PCRelCand && (PCRelCand->getTargetFlags() & PPCII::MO_PCREL_FLAG);
2741 }
2742 
2743 /// Returns true if this address is a PC Relative address.
2744 /// PC Relative addresses are marked with the flag PPCII::MO_PCREL_FLAG
2745 /// or if the node opcode is PPCISD::MAT_PCREL_ADDR.
2746 bool PPCTargetLowering::SelectAddressPCRel(SDValue N, SDValue &Base) const {
2747   // This is a materialize PC Relative node. Always select this as PC Relative.
2748   Base = N;
2749   if (N.getOpcode() == PPCISD::MAT_PCREL_ADDR)
2750     return true;
2751   if (isValidPCRelNode<ConstantPoolSDNode>(N) ||
2752       isValidPCRelNode<GlobalAddressSDNode>(N) ||
2753       isValidPCRelNode<JumpTableSDNode>(N) ||
2754       isValidPCRelNode<BlockAddressSDNode>(N))
2755     return true;
2756   return false;
2757 }
2758 
2759 /// Returns true if we should use a direct load into vector instruction
2760 /// (such as lxsd or lfd), instead of a load into gpr + direct move sequence.
2761 static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget& ST) {
2762 
2763   // If there are any other uses other than scalar to vector, then we should
2764   // keep it as a scalar load -> direct move pattern to prevent multiple
2765   // loads.
2766   LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
2767   if (!LD)
2768     return false;
2769 
2770   EVT MemVT = LD->getMemoryVT();
2771   if (!MemVT.isSimple())
2772     return false;
2773   switch(MemVT.getSimpleVT().SimpleTy) {
2774   case MVT::i64:
2775     break;
2776   case MVT::i32:
2777     if (!ST.hasP8Vector())
2778       return false;
2779     break;
2780   case MVT::i16:
2781   case MVT::i8:
2782     if (!ST.hasP9Vector())
2783       return false;
2784     break;
2785   default:
2786     return false;
2787   }
2788 
2789   SDValue LoadedVal(N, 0);
2790   if (!LoadedVal.hasOneUse())
2791     return false;
2792 
2793   for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end();
2794        UI != UE; ++UI)
2795     if (UI.getUse().get().getResNo() == 0 &&
2796         UI->getOpcode() != ISD::SCALAR_TO_VECTOR &&
2797         UI->getOpcode() != PPCISD::SCALAR_TO_VECTOR_PERMUTED)
2798       return false;
2799 
2800   return true;
2801 }
2802 
2803 /// getPreIndexedAddressParts - returns true by value, base pointer and
2804 /// offset pointer and addressing mode by reference if the node's address
2805 /// can be legally represented as pre-indexed load / store address.
2806 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
2807                                                   SDValue &Offset,
2808                                                   ISD::MemIndexedMode &AM,
2809                                                   SelectionDAG &DAG) const {
2810   if (DisablePPCPreinc) return false;
2811 
2812   bool isLoad = true;
2813   SDValue Ptr;
2814   EVT VT;
2815   unsigned Alignment;
2816   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2817     Ptr = LD->getBasePtr();
2818     VT = LD->getMemoryVT();
2819     Alignment = LD->getAlignment();
2820   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
2821     Ptr = ST->getBasePtr();
2822     VT  = ST->getMemoryVT();
2823     Alignment = ST->getAlignment();
2824     isLoad = false;
2825   } else
2826     return false;
2827 
2828   // Do not generate pre-inc forms for specific loads that feed scalar_to_vector
2829   // instructions because we can fold these into a more efficient instruction
2830   // instead, (such as LXSD).
2831   if (isLoad && usePartialVectorLoads(N, Subtarget)) {
2832     return false;
2833   }
2834 
2835   // PowerPC doesn't have preinc load/store instructions for vectors
2836   if (VT.isVector())
2837     return false;
2838 
2839   if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
2840     // Common code will reject creating a pre-inc form if the base pointer
2841     // is a frame index, or if N is a store and the base pointer is either
2842     // the same as or a predecessor of the value being stored.  Check for
2843     // those situations here, and try with swapped Base/Offset instead.
2844     bool Swap = false;
2845 
2846     if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
2847       Swap = true;
2848     else if (!isLoad) {
2849       SDValue Val = cast<StoreSDNode>(N)->getValue();
2850       if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
2851         Swap = true;
2852     }
2853 
2854     if (Swap)
2855       std::swap(Base, Offset);
2856 
2857     AM = ISD::PRE_INC;
2858     return true;
2859   }
2860 
2861   // LDU/STU can only handle immediates that are a multiple of 4.
2862   if (VT != MVT::i64) {
2863     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, None))
2864       return false;
2865   } else {
2866     // LDU/STU need an address with at least 4-byte alignment.
2867     if (Alignment < 4)
2868       return false;
2869 
2870     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, Align(4)))
2871       return false;
2872   }
2873 
2874   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2875     // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
2876     // sext i32 to i64 when addr mode is r+i.
2877     if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
2878         LD->getExtensionType() == ISD::SEXTLOAD &&
2879         isa<ConstantSDNode>(Offset))
2880       return false;
2881   }
2882 
2883   AM = ISD::PRE_INC;
2884   return true;
2885 }
2886 
2887 //===----------------------------------------------------------------------===//
2888 //  LowerOperation implementation
2889 //===----------------------------------------------------------------------===//
2890 
2891 /// Return true if we should reference labels using a PICBase, set the HiOpFlags
2892 /// and LoOpFlags to the target MO flags.
2893 static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
2894                                unsigned &HiOpFlags, unsigned &LoOpFlags,
2895                                const GlobalValue *GV = nullptr) {
2896   HiOpFlags = PPCII::MO_HA;
2897   LoOpFlags = PPCII::MO_LO;
2898 
2899   // Don't use the pic base if not in PIC relocation model.
2900   if (IsPIC) {
2901     HiOpFlags |= PPCII::MO_PIC_FLAG;
2902     LoOpFlags |= PPCII::MO_PIC_FLAG;
2903   }
2904 }
2905 
2906 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
2907                              SelectionDAG &DAG) {
2908   SDLoc DL(HiPart);
2909   EVT PtrVT = HiPart.getValueType();
2910   SDValue Zero = DAG.getConstant(0, DL, PtrVT);
2911 
2912   SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
2913   SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);
2914 
2915   // With PIC, the first instruction is actually "GR+hi(&G)".
2916   if (isPIC)
2917     Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
2918                      DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);
2919 
2920   // Generate non-pic code that has direct accesses to the constant pool.
2921   // The address of the global is just (hi(&g)+lo(&g)).
2922   return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
2923 }
2924 
2925 static void setUsesTOCBasePtr(MachineFunction &MF) {
2926   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2927   FuncInfo->setUsesTOCBasePtr();
2928 }
2929 
2930 static void setUsesTOCBasePtr(SelectionDAG &DAG) {
2931   setUsesTOCBasePtr(DAG.getMachineFunction());
2932 }
2933 
2934 SDValue PPCTargetLowering::getTOCEntry(SelectionDAG &DAG, const SDLoc &dl,
2935                                        SDValue GA) const {
2936   const bool Is64Bit = Subtarget.isPPC64();
2937   EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
2938   SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT)
2939                         : Subtarget.isAIXABI()
2940                               ? DAG.getRegister(PPC::R2, VT)
2941                               : DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);
2942   SDValue Ops[] = { GA, Reg };
2943   return DAG.getMemIntrinsicNode(
2944       PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
2945       MachinePointerInfo::getGOT(DAG.getMachineFunction()), None,
2946       MachineMemOperand::MOLoad);
2947 }
2948 
2949 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
2950                                              SelectionDAG &DAG) const {
2951   EVT PtrVT = Op.getValueType();
2952   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
2953   const Constant *C = CP->getConstVal();
2954 
2955   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
2956   // The actual address of the GlobalValue is stored in the TOC.
2957   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
2958     if (Subtarget.isUsingPCRelativeCalls()) {
2959       SDLoc DL(CP);
2960       EVT Ty = getPointerTy(DAG.getDataLayout());
2961       SDValue ConstPool = DAG.getTargetConstantPool(
2962           C, Ty, CP->getAlign(), CP->getOffset(), PPCII::MO_PCREL_FLAG);
2963       return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, ConstPool);
2964     }
2965     setUsesTOCBasePtr(DAG);
2966     SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0);
2967     return getTOCEntry(DAG, SDLoc(CP), GA);
2968   }
2969 
2970   unsigned MOHiFlag, MOLoFlag;
2971   bool IsPIC = isPositionIndependent();
2972   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2973 
2974   if (IsPIC && Subtarget.isSVR4ABI()) {
2975     SDValue GA =
2976         DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), PPCII::MO_PIC_FLAG);
2977     return getTOCEntry(DAG, SDLoc(CP), GA);
2978   }
2979 
2980   SDValue CPIHi =
2981       DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOHiFlag);
2982   SDValue CPILo =
2983       DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOLoFlag);
2984   return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG);
2985 }
2986 
2987 // For 64-bit PowerPC, prefer the more compact relative encodings.
2988 // This trades 32 bits per jump table entry for one or two instructions
2989 // on the jump site.
2990 unsigned PPCTargetLowering::getJumpTableEncoding() const {
2991   if (isJumpTableRelative())
2992     return MachineJumpTableInfo::EK_LabelDifference32;
2993 
2994   return TargetLowering::getJumpTableEncoding();
2995 }
2996 
2997 bool PPCTargetLowering::isJumpTableRelative() const {
2998   if (UseAbsoluteJumpTables)
2999     return false;
3000   if (Subtarget.isPPC64() || Subtarget.isAIXABI())
3001     return true;
3002   return TargetLowering::isJumpTableRelative();
3003 }
3004 
3005 SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table,
3006                                                     SelectionDAG &DAG) const {
3007   if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
3008     return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
3009 
3010   switch (getTargetMachine().getCodeModel()) {
3011   case CodeModel::Small:
3012   case CodeModel::Medium:
3013     return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
3014   default:
3015     return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(),
3016                        getPointerTy(DAG.getDataLayout()));
3017   }
3018 }
3019 
3020 const MCExpr *
3021 PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
3022                                                 unsigned JTI,
3023                                                 MCContext &Ctx) const {
3024   if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
3025     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
3026 
3027   switch (getTargetMachine().getCodeModel()) {
3028   case CodeModel::Small:
3029   case CodeModel::Medium:
3030     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
3031   default:
3032     return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
3033   }
3034 }
3035 
3036 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
3037   EVT PtrVT = Op.getValueType();
3038   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
3039 
3040   // isUsingPCRelativeCalls() returns true when PCRelative is enabled
3041   if (Subtarget.isUsingPCRelativeCalls()) {
3042     SDLoc DL(JT);
3043     EVT Ty = getPointerTy(DAG.getDataLayout());
3044     SDValue GA =
3045         DAG.getTargetJumpTable(JT->getIndex(), Ty, PPCII::MO_PCREL_FLAG);
3046     SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3047     return MatAddr;
3048   }
3049 
3050   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
3051   // The actual address of the GlobalValue is stored in the TOC.
3052   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3053     setUsesTOCBasePtr(DAG);
3054     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
3055     return getTOCEntry(DAG, SDLoc(JT), GA);
3056   }
3057 
3058   unsigned MOHiFlag, MOLoFlag;
3059   bool IsPIC = isPositionIndependent();
3060   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
3061 
3062   if (IsPIC && Subtarget.isSVR4ABI()) {
3063     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
3064                                         PPCII::MO_PIC_FLAG);
3065     return getTOCEntry(DAG, SDLoc(GA), GA);
3066   }
3067 
3068   SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
3069   SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
3070   return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG);
3071 }
3072 
3073 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
3074                                              SelectionDAG &DAG) const {
3075   EVT PtrVT = Op.getValueType();
3076   BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
3077   const BlockAddress *BA = BASDN->getBlockAddress();
3078 
3079   // isUsingPCRelativeCalls() returns true when PCRelative is enabled
3080   if (Subtarget.isUsingPCRelativeCalls()) {
3081     SDLoc DL(BASDN);
3082     EVT Ty = getPointerTy(DAG.getDataLayout());
3083     SDValue GA = DAG.getTargetBlockAddress(BA, Ty, BASDN->getOffset(),
3084                                            PPCII::MO_PCREL_FLAG);
3085     SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3086     return MatAddr;
3087   }
3088 
3089   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
3090   // The actual BlockAddress is stored in the TOC.
3091   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3092     setUsesTOCBasePtr(DAG);
3093     SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
3094     return getTOCEntry(DAG, SDLoc(BASDN), GA);
3095   }
3096 
3097   // 32-bit position-independent ELF stores the BlockAddress in the .got.
3098   if (Subtarget.is32BitELFABI() && isPositionIndependent())
3099     return getTOCEntry(
3100         DAG, SDLoc(BASDN),
3101         DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()));
3102 
3103   unsigned MOHiFlag, MOLoFlag;
3104   bool IsPIC = isPositionIndependent();
3105   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
3106   SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
3107   SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
3108   return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG);
3109 }
3110 
3111 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
3112                                               SelectionDAG &DAG) const {
3113   // FIXME: TLS addresses currently use medium model code sequences,
3114   // which is the most useful form.  Eventually support for small and
3115   // large models could be added if users need it, at the cost of
3116   // additional complexity.
3117   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
3118   if (DAG.getTarget().useEmulatedTLS())
3119     return LowerToTLSEmulatedModel(GA, DAG);
3120 
3121   SDLoc dl(GA);
3122   const GlobalValue *GV = GA->getGlobal();
3123   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3124   bool is64bit = Subtarget.isPPC64();
3125   const Module *M = DAG.getMachineFunction().getFunction().getParent();
3126   PICLevel::Level picLevel = M->getPICLevel();
3127 
3128   const TargetMachine &TM = getTargetMachine();
3129   TLSModel::Model Model = TM.getTLSModel(GV);
3130 
3131   if (Model == TLSModel::LocalExec) {
3132     if (Subtarget.isUsingPCRelativeCalls()) {
3133       SDValue TLSReg = DAG.getRegister(PPC::X13, MVT::i64);
3134       SDValue TGA = DAG.getTargetGlobalAddress(
3135           GV, dl, PtrVT, 0, (PPCII::MO_PCREL_FLAG | PPCII::MO_TPREL_FLAG));
3136       SDValue MatAddr =
3137           DAG.getNode(PPCISD::TLS_LOCAL_EXEC_MAT_ADDR, dl, PtrVT, TGA);
3138       return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TLSReg, MatAddr);
3139     }
3140 
3141     SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3142                                                PPCII::MO_TPREL_HA);
3143     SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3144                                                PPCII::MO_TPREL_LO);
3145     SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64)
3146                              : DAG.getRegister(PPC::R2, MVT::i32);
3147 
3148     SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
3149     return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
3150   }
3151 
3152   if (Model == TLSModel::InitialExec) {
3153     bool IsPCRel = Subtarget.isUsingPCRelativeCalls();
3154     SDValue TGA = DAG.getTargetGlobalAddress(
3155         GV, dl, PtrVT, 0, IsPCRel ? PPCII::MO_GOT_TPREL_PCREL_FLAG : 0);
3156     SDValue TGATLS = DAG.getTargetGlobalAddress(
3157         GV, dl, PtrVT, 0,
3158         IsPCRel ? (PPCII::MO_TLS | PPCII::MO_PCREL_FLAG) : PPCII::MO_TLS);
3159     SDValue TPOffset;
3160     if (IsPCRel) {
3161       SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, dl, PtrVT, TGA);
3162       TPOffset = DAG.getLoad(MVT::i64, dl, DAG.getEntryNode(), MatPCRel,
3163                              MachinePointerInfo());
3164     } else {
3165       SDValue GOTPtr;
3166       if (is64bit) {
3167         setUsesTOCBasePtr(DAG);
3168         SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3169         GOTPtr =
3170             DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, PtrVT, GOTReg, TGA);
3171       } else {
3172         if (!TM.isPositionIndependent())
3173           GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT);
3174         else if (picLevel == PICLevel::SmallPIC)
3175           GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3176         else
3177           GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3178       }
3179       TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, PtrVT, TGA, GOTPtr);
3180     }
3181     return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
3182   }
3183 
3184   if (Model == TLSModel::GeneralDynamic) {
3185     if (Subtarget.isUsingPCRelativeCalls()) {
3186       SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3187                                                PPCII::MO_GOT_TLSGD_PCREL_FLAG);
3188       return DAG.getNode(PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR, dl, PtrVT, TGA);
3189     }
3190 
3191     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
3192     SDValue GOTPtr;
3193     if (is64bit) {
3194       setUsesTOCBasePtr(DAG);
3195       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3196       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
3197                                    GOTReg, TGA);
3198     } else {
3199       if (picLevel == PICLevel::SmallPIC)
3200         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3201       else
3202         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3203     }
3204     return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT,
3205                        GOTPtr, TGA, TGA);
3206   }
3207 
3208   if (Model == TLSModel::LocalDynamic) {
3209     if (Subtarget.isUsingPCRelativeCalls()) {
3210       SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3211                                                PPCII::MO_GOT_TLSLD_PCREL_FLAG);
3212       SDValue MatPCRel =
3213           DAG.getNode(PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR, dl, PtrVT, TGA);
3214       return DAG.getNode(PPCISD::PADDI_DTPREL, dl, PtrVT, MatPCRel, TGA);
3215     }
3216 
3217     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
3218     SDValue GOTPtr;
3219     if (is64bit) {
3220       setUsesTOCBasePtr(DAG);
3221       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3222       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
3223                            GOTReg, TGA);
3224     } else {
3225       if (picLevel == PICLevel::SmallPIC)
3226         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3227       else
3228         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3229     }
3230     SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
3231                                   PtrVT, GOTPtr, TGA, TGA);
3232     SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
3233                                       PtrVT, TLSAddr, TGA);
3234     return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
3235   }
3236 
3237   llvm_unreachable("Unknown TLS model!");
3238 }
3239 
3240 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
3241                                               SelectionDAG &DAG) const {
3242   EVT PtrVT = Op.getValueType();
3243   GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
3244   SDLoc DL(GSDN);
3245   const GlobalValue *GV = GSDN->getGlobal();
3246 
3247   // 64-bit SVR4 ABI & AIX ABI code is always position-independent.
3248   // The actual address of the GlobalValue is stored in the TOC.
3249   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3250     if (Subtarget.isUsingPCRelativeCalls()) {
3251       EVT Ty = getPointerTy(DAG.getDataLayout());
3252       if (isAccessedAsGotIndirect(Op)) {
3253         SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
3254                                                 PPCII::MO_PCREL_FLAG |
3255                                                     PPCII::MO_GOT_FLAG);
3256         SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3257         SDValue Load = DAG.getLoad(MVT::i64, DL, DAG.getEntryNode(), MatPCRel,
3258                                    MachinePointerInfo());
3259         return Load;
3260       } else {
3261         SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
3262                                                 PPCII::MO_PCREL_FLAG);
3263         return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3264       }
3265     }
3266     setUsesTOCBasePtr(DAG);
3267     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
3268     return getTOCEntry(DAG, DL, GA);
3269   }
3270 
3271   unsigned MOHiFlag, MOLoFlag;
3272   bool IsPIC = isPositionIndependent();
3273   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV);
3274 
3275   if (IsPIC && Subtarget.isSVR4ABI()) {
3276     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
3277                                             GSDN->getOffset(),
3278                                             PPCII::MO_PIC_FLAG);
3279     return getTOCEntry(DAG, DL, GA);
3280   }
3281 
3282   SDValue GAHi =
3283     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
3284   SDValue GALo =
3285     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);
3286 
3287   return LowerLabelRef(GAHi, GALo, IsPIC, DAG);
3288 }
3289 
3290 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
3291   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
3292   SDLoc dl(Op);
3293 
3294   if (Op.getValueType() == MVT::v2i64) {
3295     // When the operands themselves are v2i64 values, we need to do something
3296     // special because VSX has no underlying comparison operations for these.
3297     if (Op.getOperand(0).getValueType() == MVT::v2i64) {
3298       // Equality can be handled by casting to the legal type for Altivec
3299       // comparisons, everything else needs to be expanded.
3300       if (CC == ISD::SETEQ || CC == ISD::SETNE) {
3301         return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
3302                  DAG.getSetCC(dl, MVT::v4i32,
3303                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)),
3304                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)),
3305                    CC));
3306       }
3307 
3308       return SDValue();
3309     }
3310 
3311     // We handle most of these in the usual way.
3312     return Op;
3313   }
3314 
3315   // If we're comparing for equality to zero, expose the fact that this is
3316   // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
3317   // fold the new nodes.
3318   if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG))
3319     return V;
3320 
3321   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
3322     // Leave comparisons against 0 and -1 alone for now, since they're usually
3323     // optimized.  FIXME: revisit this when we can custom lower all setcc
3324     // optimizations.
3325     if (C->isAllOnesValue() || C->isNullValue())
3326       return SDValue();
3327   }
3328 
3329   // If we have an integer seteq/setne, turn it into a compare against zero
3330   // by xor'ing the rhs with the lhs, which is faster than setting a
3331   // condition register, reading it back out, and masking the correct bit.  The
3332   // normal approach here uses sub to do this instead of xor.  Using xor exposes
3333   // the result to other bit-twiddling opportunities.
3334   EVT LHSVT = Op.getOperand(0).getValueType();
3335   if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
3336     EVT VT = Op.getValueType();
3337     SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
3338                                 Op.getOperand(1));
3339     return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
3340   }
3341   return SDValue();
3342 }
3343 
3344 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
3345   SDNode *Node = Op.getNode();
3346   EVT VT = Node->getValueType(0);
3347   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3348   SDValue InChain = Node->getOperand(0);
3349   SDValue VAListPtr = Node->getOperand(1);
3350   const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
3351   SDLoc dl(Node);
3352 
3353   assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
3354 
3355   // gpr_index
3356   SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3357                                     VAListPtr, MachinePointerInfo(SV), MVT::i8);
3358   InChain = GprIndex.getValue(1);
3359 
3360   if (VT == MVT::i64) {
3361     // Check if GprIndex is even
3362     SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
3363                                  DAG.getConstant(1, dl, MVT::i32));
3364     SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
3365                                 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
3366     SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
3367                                           DAG.getConstant(1, dl, MVT::i32));
3368     // Align GprIndex to be even if it isn't
3369     GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
3370                            GprIndex);
3371   }
3372 
3373   // fpr index is 1 byte after gpr
3374   SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3375                                DAG.getConstant(1, dl, MVT::i32));
3376 
3377   // fpr
3378   SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3379                                     FprPtr, MachinePointerInfo(SV), MVT::i8);
3380   InChain = FprIndex.getValue(1);
3381 
3382   SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3383                                        DAG.getConstant(8, dl, MVT::i32));
3384 
3385   SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3386                                         DAG.getConstant(4, dl, MVT::i32));
3387 
3388   // areas
3389   SDValue OverflowArea =
3390       DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
3391   InChain = OverflowArea.getValue(1);
3392 
3393   SDValue RegSaveArea =
3394       DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
3395   InChain = RegSaveArea.getValue(1);
3396 
3397   // select overflow_area if index > 8
3398   SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
3399                             DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);
3400 
3401   // adjustment constant gpr_index * 4/8
3402   SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
3403                                     VT.isInteger() ? GprIndex : FprIndex,
3404                                     DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
3405                                                     MVT::i32));
3406 
3407   // OurReg = RegSaveArea + RegConstant
3408   SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
3409                                RegConstant);
3410 
3411   // Floating types are 32 bytes into RegSaveArea
3412   if (VT.isFloatingPoint())
3413     OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
3414                          DAG.getConstant(32, dl, MVT::i32));
3415 
3416   // increase {f,g}pr_index by 1 (or 2 if VT is i64)
3417   SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3418                                    VT.isInteger() ? GprIndex : FprIndex,
3419                                    DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
3420                                                    MVT::i32));
3421 
3422   InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
3423                               VT.isInteger() ? VAListPtr : FprPtr,
3424                               MachinePointerInfo(SV), MVT::i8);
3425 
3426   // determine if we should load from reg_save_area or overflow_area
3427   SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
3428 
3429   // increase overflow_area by 4/8 if gpr/fpr > 8
3430   SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
3431                                           DAG.getConstant(VT.isInteger() ? 4 : 8,
3432                                           dl, MVT::i32));
3433 
3434   OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
3435                              OverflowAreaPlusN);
3436 
3437   InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
3438                               MachinePointerInfo(), MVT::i32);
3439 
3440   return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
3441 }
3442 
3443 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
3444   assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
3445 
3446   // We have to copy the entire va_list struct:
3447   // 2*sizeof(char) + 2 Byte alignment + 2*sizeof(char*) = 12 Byte
3448   return DAG.getMemcpy(Op.getOperand(0), Op, Op.getOperand(1), Op.getOperand(2),
3449                        DAG.getConstant(12, SDLoc(Op), MVT::i32), Align(8),
3450                        false, true, false, MachinePointerInfo(),
3451                        MachinePointerInfo());
3452 }
3453 
3454 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
3455                                                   SelectionDAG &DAG) const {
3456   if (Subtarget.isAIXABI())
3457     report_fatal_error("ADJUST_TRAMPOLINE operation is not supported on AIX.");
3458 
3459   return Op.getOperand(0);
3460 }
3461 
3462 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
3463                                                 SelectionDAG &DAG) const {
3464   if (Subtarget.isAIXABI())
3465     report_fatal_error("INIT_TRAMPOLINE operation is not supported on AIX.");
3466 
3467   SDValue Chain = Op.getOperand(0);
3468   SDValue Trmp = Op.getOperand(1); // trampoline
3469   SDValue FPtr = Op.getOperand(2); // nested function
3470   SDValue Nest = Op.getOperand(3); // 'nest' parameter value
3471   SDLoc dl(Op);
3472 
3473   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3474   bool isPPC64 = (PtrVT == MVT::i64);
3475   Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
3476 
3477   TargetLowering::ArgListTy Args;
3478   TargetLowering::ArgListEntry Entry;
3479 
3480   Entry.Ty = IntPtrTy;
3481   Entry.Node = Trmp; Args.push_back(Entry);
3482 
3483   // TrampSize == (isPPC64 ? 48 : 40);
3484   Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
3485                                isPPC64 ? MVT::i64 : MVT::i32);
3486   Args.push_back(Entry);
3487 
3488   Entry.Node = FPtr; Args.push_back(Entry);
3489   Entry.Node = Nest; Args.push_back(Entry);
3490 
3491   // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
3492   TargetLowering::CallLoweringInfo CLI(DAG);
3493   CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
3494       CallingConv::C, Type::getVoidTy(*DAG.getContext()),
3495       DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args));
3496 
3497   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
3498   return CallResult.second;
3499 }
3500 
3501 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3502   MachineFunction &MF = DAG.getMachineFunction();
3503   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3504   EVT PtrVT = getPointerTy(MF.getDataLayout());
3505 
3506   SDLoc dl(Op);
3507 
3508   if (Subtarget.isPPC64() || Subtarget.isAIXABI()) {
3509     // vastart just stores the address of the VarArgsFrameIndex slot into the
3510     // memory location argument.
3511     SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3512     const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3513     return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
3514                         MachinePointerInfo(SV));
3515   }
3516 
3517   // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
3518   // We suppose the given va_list is already allocated.
3519   //
3520   // typedef struct {
3521   //  char gpr;     /* index into the array of 8 GPRs
3522   //                 * stored in the register save area
3523   //                 * gpr=0 corresponds to r3,
3524   //                 * gpr=1 to r4, etc.
3525   //                 */
3526   //  char fpr;     /* index into the array of 8 FPRs
3527   //                 * stored in the register save area
3528   //                 * fpr=0 corresponds to f1,
3529   //                 * fpr=1 to f2, etc.
3530   //                 */
3531   //  char *overflow_arg_area;
3532   //                /* location on stack that holds
3533   //                 * the next overflow argument
3534   //                 */
3535   //  char *reg_save_area;
3536   //               /* where r3:r10 and f1:f8 (if saved)
3537   //                * are stored
3538   //                */
3539   // } va_list[1];
3540 
3541   SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
3542   SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);
3543   SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
3544                                             PtrVT);
3545   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3546                                  PtrVT);
3547 
3548   uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
3549   SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT);
3550 
3551   uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
3552   SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT);
3553 
3554   uint64_t FPROffset = 1;
3555   SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT);
3556 
3557   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3558 
3559   // Store first byte : number of int regs
3560   SDValue firstStore =
3561       DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1),
3562                         MachinePointerInfo(SV), MVT::i8);
3563   uint64_t nextOffset = FPROffset;
3564   SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
3565                                   ConstFPROffset);
3566 
3567   // Store second byte : number of float regs
3568   SDValue secondStore =
3569       DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
3570                         MachinePointerInfo(SV, nextOffset), MVT::i8);
3571   nextOffset += StackOffset;
3572   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
3573 
3574   // Store second word : arguments given on stack
3575   SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
3576                                     MachinePointerInfo(SV, nextOffset));
3577   nextOffset += FrameOffset;
3578   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
3579 
3580   // Store third word : arguments given in registers
3581   return DAG.getStore(thirdStore, dl, FR, nextPtr,
3582                       MachinePointerInfo(SV, nextOffset));
3583 }
3584 
3585 /// FPR - The set of FP registers that should be allocated for arguments
3586 /// on Darwin and AIX.
3587 static const MCPhysReg FPR[] = {PPC::F1,  PPC::F2,  PPC::F3, PPC::F4, PPC::F5,
3588                                 PPC::F6,  PPC::F7,  PPC::F8, PPC::F9, PPC::F10,
3589                                 PPC::F11, PPC::F12, PPC::F13};
3590 
3591 /// CalculateStackSlotSize - Calculates the size reserved for this argument on
3592 /// the stack.
3593 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
3594                                        unsigned PtrByteSize) {
3595   unsigned ArgSize = ArgVT.getStoreSize();
3596   if (Flags.isByVal())
3597     ArgSize = Flags.getByValSize();
3598 
3599   // Round up to multiples of the pointer size, except for array members,
3600   // which are always packed.
3601   if (!Flags.isInConsecutiveRegs())
3602     ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3603 
3604   return ArgSize;
3605 }
3606 
3607 /// CalculateStackSlotAlignment - Calculates the alignment of this argument
3608 /// on the stack.
3609 static Align CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
3610                                          ISD::ArgFlagsTy Flags,
3611                                          unsigned PtrByteSize) {
3612   Align Alignment(PtrByteSize);
3613 
3614   // Altivec parameters are padded to a 16 byte boundary.
3615   if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3616       ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3617       ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3618       ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3619     Alignment = Align(16);
3620 
3621   // ByVal parameters are aligned as requested.
3622   if (Flags.isByVal()) {
3623     auto BVAlign = Flags.getNonZeroByValAlign();
3624     if (BVAlign > PtrByteSize) {
3625       if (BVAlign.value() % PtrByteSize != 0)
3626         llvm_unreachable(
3627             "ByVal alignment is not a multiple of the pointer size");
3628 
3629       Alignment = BVAlign;
3630     }
3631   }
3632 
3633   // Array members are always packed to their original alignment.
3634   if (Flags.isInConsecutiveRegs()) {
3635     // If the array member was split into multiple registers, the first
3636     // needs to be aligned to the size of the full type.  (Except for
3637     // ppcf128, which is only aligned as its f64 components.)
3638     if (Flags.isSplit() && OrigVT != MVT::ppcf128)
3639       Alignment = Align(OrigVT.getStoreSize());
3640     else
3641       Alignment = Align(ArgVT.getStoreSize());
3642   }
3643 
3644   return Alignment;
3645 }
3646 
3647 /// CalculateStackSlotUsed - Return whether this argument will use its
3648 /// stack slot (instead of being passed in registers).  ArgOffset,
3649 /// AvailableFPRs, and AvailableVRs must hold the current argument
3650 /// position, and will be updated to account for this argument.
3651 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, ISD::ArgFlagsTy Flags,
3652                                    unsigned PtrByteSize, unsigned LinkageSize,
3653                                    unsigned ParamAreaSize, unsigned &ArgOffset,
3654                                    unsigned &AvailableFPRs,
3655                                    unsigned &AvailableVRs) {
3656   bool UseMemory = false;
3657 
3658   // Respect alignment of argument on the stack.
3659   Align Alignment =
3660       CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
3661   ArgOffset = alignTo(ArgOffset, Alignment);
3662   // If there's no space left in the argument save area, we must
3663   // use memory (this check also catches zero-sized arguments).
3664   if (ArgOffset >= LinkageSize + ParamAreaSize)
3665     UseMemory = true;
3666 
3667   // Allocate argument on the stack.
3668   ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
3669   if (Flags.isInConsecutiveRegsLast())
3670     ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3671   // If we overran the argument save area, we must use memory
3672   // (this check catches arguments passed partially in memory)
3673   if (ArgOffset > LinkageSize + ParamAreaSize)
3674     UseMemory = true;
3675 
3676   // However, if the argument is actually passed in an FPR or a VR,
3677   // we don't use memory after all.
3678   if (!Flags.isByVal()) {
3679     if (ArgVT == MVT::f32 || ArgVT == MVT::f64)
3680       if (AvailableFPRs > 0) {
3681         --AvailableFPRs;
3682         return false;
3683       }
3684     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3685         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3686         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3687         ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3688       if (AvailableVRs > 0) {
3689         --AvailableVRs;
3690         return false;
3691       }
3692   }
3693 
3694   return UseMemory;
3695 }
3696 
3697 /// EnsureStackAlignment - Round stack frame size up from NumBytes to
3698 /// ensure minimum alignment required for target.
3699 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
3700                                      unsigned NumBytes) {
3701   return alignTo(NumBytes, Lowering->getStackAlign());
3702 }
3703 
3704 SDValue PPCTargetLowering::LowerFormalArguments(
3705     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3706     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3707     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3708   if (Subtarget.isAIXABI())
3709     return LowerFormalArguments_AIX(Chain, CallConv, isVarArg, Ins, dl, DAG,
3710                                     InVals);
3711   if (Subtarget.is64BitELFABI())
3712     return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3713                                        InVals);
3714   assert(Subtarget.is32BitELFABI());
3715   return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3716                                      InVals);
3717 }
3718 
3719 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
3720     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3721     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3722     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3723 
3724   // 32-bit SVR4 ABI Stack Frame Layout:
3725   //              +-----------------------------------+
3726   //        +-->  |            Back chain             |
3727   //        |     +-----------------------------------+
3728   //        |     | Floating-point register save area |
3729   //        |     +-----------------------------------+
3730   //        |     |    General register save area     |
3731   //        |     +-----------------------------------+
3732   //        |     |          CR save word             |
3733   //        |     +-----------------------------------+
3734   //        |     |         VRSAVE save word          |
3735   //        |     +-----------------------------------+
3736   //        |     |         Alignment padding         |
3737   //        |     +-----------------------------------+
3738   //        |     |     Vector register save area     |
3739   //        |     +-----------------------------------+
3740   //        |     |       Local variable space        |
3741   //        |     +-----------------------------------+
3742   //        |     |        Parameter list area        |
3743   //        |     +-----------------------------------+
3744   //        |     |           LR save word            |
3745   //        |     +-----------------------------------+
3746   // SP-->  +---  |            Back chain             |
3747   //              +-----------------------------------+
3748   //
3749   // Specifications:
3750   //   System V Application Binary Interface PowerPC Processor Supplement
3751   //   AltiVec Technology Programming Interface Manual
3752 
3753   MachineFunction &MF = DAG.getMachineFunction();
3754   MachineFrameInfo &MFI = MF.getFrameInfo();
3755   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3756 
3757   EVT PtrVT = getPointerTy(MF.getDataLayout());
3758   // Potential tail calls could cause overwriting of argument stack slots.
3759   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3760                        (CallConv == CallingConv::Fast));
3761   const Align PtrAlign(4);
3762 
3763   // Assign locations to all of the incoming arguments.
3764   SmallVector<CCValAssign, 16> ArgLocs;
3765   PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
3766                  *DAG.getContext());
3767 
3768   // Reserve space for the linkage area on the stack.
3769   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3770   CCInfo.AllocateStack(LinkageSize, PtrAlign);
3771   if (useSoftFloat())
3772     CCInfo.PreAnalyzeFormalArguments(Ins);
3773 
3774   CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
3775   CCInfo.clearWasPPCF128();
3776 
3777   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3778     CCValAssign &VA = ArgLocs[i];
3779 
3780     // Arguments stored in registers.
3781     if (VA.isRegLoc()) {
3782       const TargetRegisterClass *RC;
3783       EVT ValVT = VA.getValVT();
3784 
3785       switch (ValVT.getSimpleVT().SimpleTy) {
3786         default:
3787           llvm_unreachable("ValVT not supported by formal arguments Lowering");
3788         case MVT::i1:
3789         case MVT::i32:
3790           RC = &PPC::GPRCRegClass;
3791           break;
3792         case MVT::f32:
3793           if (Subtarget.hasP8Vector())
3794             RC = &PPC::VSSRCRegClass;
3795           else if (Subtarget.hasSPE())
3796             RC = &PPC::GPRCRegClass;
3797           else
3798             RC = &PPC::F4RCRegClass;
3799           break;
3800         case MVT::f64:
3801           if (Subtarget.hasVSX())
3802             RC = &PPC::VSFRCRegClass;
3803           else if (Subtarget.hasSPE())
3804             // SPE passes doubles in GPR pairs.
3805             RC = &PPC::GPRCRegClass;
3806           else
3807             RC = &PPC::F8RCRegClass;
3808           break;
3809         case MVT::v16i8:
3810         case MVT::v8i16:
3811         case MVT::v4i32:
3812           RC = &PPC::VRRCRegClass;
3813           break;
3814         case MVT::v4f32:
3815           RC = &PPC::VRRCRegClass;
3816           break;
3817         case MVT::v2f64:
3818         case MVT::v2i64:
3819           RC = &PPC::VRRCRegClass;
3820           break;
3821       }
3822 
3823       SDValue ArgValue;
3824       // Transform the arguments stored in physical registers into
3825       // virtual ones.
3826       if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) {
3827         assert(i + 1 < e && "No second half of double precision argument");
3828         unsigned RegLo = MF.addLiveIn(VA.getLocReg(), RC);
3829         unsigned RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC);
3830         SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32);
3831         SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32);
3832         if (!Subtarget.isLittleEndian())
3833           std::swap (ArgValueLo, ArgValueHi);
3834         ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo,
3835                                ArgValueHi);
3836       } else {
3837         unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3838         ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
3839                                       ValVT == MVT::i1 ? MVT::i32 : ValVT);
3840         if (ValVT == MVT::i1)
3841           ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);
3842       }
3843 
3844       InVals.push_back(ArgValue);
3845     } else {
3846       // Argument stored in memory.
3847       assert(VA.isMemLoc());
3848 
3849       // Get the extended size of the argument type in stack
3850       unsigned ArgSize = VA.getLocVT().getStoreSize();
3851       // Get the actual size of the argument type
3852       unsigned ObjSize = VA.getValVT().getStoreSize();
3853       unsigned ArgOffset = VA.getLocMemOffset();
3854       // Stack objects in PPC32 are right justified.
3855       ArgOffset += ArgSize - ObjSize;
3856       int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, isImmutable);
3857 
3858       // Create load nodes to retrieve arguments from the stack.
3859       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3860       InVals.push_back(
3861           DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo()));
3862     }
3863   }
3864 
3865   // Assign locations to all of the incoming aggregate by value arguments.
3866   // Aggregates passed by value are stored in the local variable space of the
3867   // caller's stack frame, right above the parameter list area.
3868   SmallVector<CCValAssign, 16> ByValArgLocs;
3869   CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
3870                       ByValArgLocs, *DAG.getContext());
3871 
3872   // Reserve stack space for the allocations in CCInfo.
3873   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
3874 
3875   CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);
3876 
3877   // Area that is at least reserved in the caller of this function.
3878   unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
3879   MinReservedArea = std::max(MinReservedArea, LinkageSize);
3880 
3881   // Set the size that is at least reserved in caller of this function.  Tail
3882   // call optimized function's reserved stack space needs to be aligned so that
3883   // taking the difference between two stack areas will result in an aligned
3884   // stack.
3885   MinReservedArea =
3886       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
3887   FuncInfo->setMinReservedArea(MinReservedArea);
3888 
3889   SmallVector<SDValue, 8> MemOps;
3890 
3891   // If the function takes variable number of arguments, make a frame index for
3892   // the start of the first vararg value... for expansion of llvm.va_start.
3893   if (isVarArg) {
3894     static const MCPhysReg GPArgRegs[] = {
3895       PPC::R3, PPC::R4, PPC::R5, PPC::R6,
3896       PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3897     };
3898     const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
3899 
3900     static const MCPhysReg FPArgRegs[] = {
3901       PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
3902       PPC::F8
3903     };
3904     unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
3905 
3906     if (useSoftFloat() || hasSPE())
3907        NumFPArgRegs = 0;
3908 
3909     FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
3910     FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));
3911 
3912     // Make room for NumGPArgRegs and NumFPArgRegs.
3913     int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
3914                 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;
3915 
3916     FuncInfo->setVarArgsStackOffset(
3917       MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
3918                             CCInfo.getNextStackOffset(), true));
3919 
3920     FuncInfo->setVarArgsFrameIndex(
3921         MFI.CreateStackObject(Depth, Align(8), false));
3922     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3923 
3924     // The fixed integer arguments of a variadic function are stored to the
3925     // VarArgsFrameIndex on the stack so that they may be loaded by
3926     // dereferencing the result of va_next.
3927     for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
3928       // Get an existing live-in vreg, or add a new one.
3929       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
3930       if (!VReg)
3931         VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
3932 
3933       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3934       SDValue Store =
3935           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3936       MemOps.push_back(Store);
3937       // Increment the address by four for the next argument to store
3938       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
3939       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3940     }
3941 
3942     // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
3943     // is set.
3944     // The double arguments are stored to the VarArgsFrameIndex
3945     // on the stack.
3946     for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
3947       // Get an existing live-in vreg, or add a new one.
3948       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
3949       if (!VReg)
3950         VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
3951 
3952       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
3953       SDValue Store =
3954           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3955       MemOps.push_back(Store);
3956       // Increment the address by eight for the next argument to store
3957       SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
3958                                          PtrVT);
3959       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3960     }
3961   }
3962 
3963   if (!MemOps.empty())
3964     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3965 
3966   return Chain;
3967 }
3968 
3969 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
3970 // value to MVT::i64 and then truncate to the correct register size.
3971 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
3972                                              EVT ObjectVT, SelectionDAG &DAG,
3973                                              SDValue ArgVal,
3974                                              const SDLoc &dl) const {
3975   if (Flags.isSExt())
3976     ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
3977                          DAG.getValueType(ObjectVT));
3978   else if (Flags.isZExt())
3979     ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
3980                          DAG.getValueType(ObjectVT));
3981 
3982   return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
3983 }
3984 
3985 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
3986     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3987     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3988     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3989   // TODO: add description of PPC stack frame format, or at least some docs.
3990   //
3991   bool isELFv2ABI = Subtarget.isELFv2ABI();
3992   bool isLittleEndian = Subtarget.isLittleEndian();
3993   MachineFunction &MF = DAG.getMachineFunction();
3994   MachineFrameInfo &MFI = MF.getFrameInfo();
3995   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3996 
3997   assert(!(CallConv == CallingConv::Fast && isVarArg) &&
3998          "fastcc not supported on varargs functions");
3999 
4000   EVT PtrVT = getPointerTy(MF.getDataLayout());
4001   // Potential tail calls could cause overwriting of argument stack slots.
4002   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
4003                        (CallConv == CallingConv::Fast));
4004   unsigned PtrByteSize = 8;
4005   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4006 
4007   static const MCPhysReg GPR[] = {
4008     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4009     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4010   };
4011   static const MCPhysReg VR[] = {
4012     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4013     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4014   };
4015 
4016   const unsigned Num_GPR_Regs = array_lengthof(GPR);
4017   const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
4018   const unsigned Num_VR_Regs  = array_lengthof(VR);
4019 
4020   // Do a first pass over the arguments to determine whether the ABI
4021   // guarantees that our caller has allocated the parameter save area
4022   // on its stack frame.  In the ELFv1 ABI, this is always the case;
4023   // in the ELFv2 ABI, it is true if this is a vararg function or if
4024   // any parameter is located in a stack slot.
4025 
4026   bool HasParameterArea = !isELFv2ABI || isVarArg;
4027   unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
4028   unsigned NumBytes = LinkageSize;
4029   unsigned AvailableFPRs = Num_FPR_Regs;
4030   unsigned AvailableVRs = Num_VR_Regs;
4031   for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
4032     if (Ins[i].Flags.isNest())
4033       continue;
4034 
4035     if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
4036                                PtrByteSize, LinkageSize, ParamAreaSize,
4037                                NumBytes, AvailableFPRs, AvailableVRs))
4038       HasParameterArea = true;
4039   }
4040 
4041   // Add DAG nodes to load the arguments or copy them out of registers.  On
4042   // entry to a function on PPC, the arguments start after the linkage area,
4043   // although the first ones are often in registers.
4044 
4045   unsigned ArgOffset = LinkageSize;
4046   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
4047   SmallVector<SDValue, 8> MemOps;
4048   Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
4049   unsigned CurArgIdx = 0;
4050   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
4051     SDValue ArgVal;
4052     bool needsLoad = false;
4053     EVT ObjectVT = Ins[ArgNo].VT;
4054     EVT OrigVT = Ins[ArgNo].ArgVT;
4055     unsigned ObjSize = ObjectVT.getStoreSize();
4056     unsigned ArgSize = ObjSize;
4057     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4058     if (Ins[ArgNo].isOrigArg()) {
4059       std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
4060       CurArgIdx = Ins[ArgNo].getOrigArgIndex();
4061     }
4062     // We re-align the argument offset for each argument, except when using the
4063     // fast calling convention, when we need to make sure we do that only when
4064     // we'll actually use a stack slot.
4065     unsigned CurArgOffset;
4066     Align Alignment;
4067     auto ComputeArgOffset = [&]() {
4068       /* Respect alignment of argument on the stack.  */
4069       Alignment =
4070           CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
4071       ArgOffset = alignTo(ArgOffset, Alignment);
4072       CurArgOffset = ArgOffset;
4073     };
4074 
4075     if (CallConv != CallingConv::Fast) {
4076       ComputeArgOffset();
4077 
4078       /* Compute GPR index associated with argument offset.  */
4079       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4080       GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
4081     }
4082 
4083     // FIXME the codegen can be much improved in some cases.
4084     // We do not have to keep everything in memory.
4085     if (Flags.isByVal()) {
4086       assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
4087 
4088       if (CallConv == CallingConv::Fast)
4089         ComputeArgOffset();
4090 
4091       // ObjSize is the true size, ArgSize rounded up to multiple of registers.
4092       ObjSize = Flags.getByValSize();
4093       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4094       // Empty aggregate parameters do not take up registers.  Examples:
4095       //   struct { } a;
4096       //   union  { } b;
4097       //   int c[0];
4098       // etc.  However, we have to provide a place-holder in InVals, so
4099       // pretend we have an 8-byte item at the current address for that
4100       // purpose.
4101       if (!ObjSize) {
4102         int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
4103         SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4104         InVals.push_back(FIN);
4105         continue;
4106       }
4107 
4108       // Create a stack object covering all stack doublewords occupied
4109       // by the argument.  If the argument is (fully or partially) on
4110       // the stack, or if the argument is fully in registers but the
4111       // caller has allocated the parameter save anyway, we can refer
4112       // directly to the caller's stack frame.  Otherwise, create a
4113       // local copy in our own frame.
4114       int FI;
4115       if (HasParameterArea ||
4116           ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
4117         FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true);
4118       else
4119         FI = MFI.CreateStackObject(ArgSize, Alignment, false);
4120       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4121 
4122       // Handle aggregates smaller than 8 bytes.
4123       if (ObjSize < PtrByteSize) {
4124         // The value of the object is its address, which differs from the
4125         // address of the enclosing doubleword on big-endian systems.
4126         SDValue Arg = FIN;
4127         if (!isLittleEndian) {
4128           SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
4129           Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
4130         }
4131         InVals.push_back(Arg);
4132 
4133         if (GPR_idx != Num_GPR_Regs) {
4134           unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4135           FuncInfo->addLiveInAttr(VReg, Flags);
4136           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4137           SDValue Store;
4138 
4139           if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
4140             EVT ObjType = (ObjSize == 1 ? MVT::i8 :
4141                            (ObjSize == 2 ? MVT::i16 : MVT::i32));
4142             Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
4143                                       MachinePointerInfo(&*FuncArg), ObjType);
4144           } else {
4145             // For sizes that don't fit a truncating store (3, 5, 6, 7),
4146             // store the whole register as-is to the parameter save area
4147             // slot.
4148             Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
4149                                  MachinePointerInfo(&*FuncArg));
4150           }
4151 
4152           MemOps.push_back(Store);
4153         }
4154         // Whether we copied from a register or not, advance the offset
4155         // into the parameter save area by a full doubleword.
4156         ArgOffset += PtrByteSize;
4157         continue;
4158       }
4159 
4160       // The value of the object is its address, which is the address of
4161       // its first stack doubleword.
4162       InVals.push_back(FIN);
4163 
4164       // Store whatever pieces of the object are in registers to memory.
4165       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
4166         if (GPR_idx == Num_GPR_Regs)
4167           break;
4168 
4169         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4170         FuncInfo->addLiveInAttr(VReg, Flags);
4171         SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4172         SDValue Addr = FIN;
4173         if (j) {
4174           SDValue Off = DAG.getConstant(j, dl, PtrVT);
4175           Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
4176         }
4177         SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
4178                                      MachinePointerInfo(&*FuncArg, j));
4179         MemOps.push_back(Store);
4180         ++GPR_idx;
4181       }
4182       ArgOffset += ArgSize;
4183       continue;
4184     }
4185 
4186     switch (ObjectVT.getSimpleVT().SimpleTy) {
4187     default: llvm_unreachable("Unhandled argument type!");
4188     case MVT::i1:
4189     case MVT::i32:
4190     case MVT::i64:
4191       if (Flags.isNest()) {
4192         // The 'nest' parameter, if any, is passed in R11.
4193         unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
4194         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4195 
4196         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4197           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4198 
4199         break;
4200       }
4201 
4202       // These can be scalar arguments or elements of an integer array type
4203       // passed directly.  Clang may use those instead of "byval" aggregate
4204       // types to avoid forcing arguments to memory unnecessarily.
4205       if (GPR_idx != Num_GPR_Regs) {
4206         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4207         FuncInfo->addLiveInAttr(VReg, Flags);
4208         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4209 
4210         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4211           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4212           // value to MVT::i64 and then truncate to the correct register size.
4213           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4214       } else {
4215         if (CallConv == CallingConv::Fast)
4216           ComputeArgOffset();
4217 
4218         needsLoad = true;
4219         ArgSize = PtrByteSize;
4220       }
4221       if (CallConv != CallingConv::Fast || needsLoad)
4222         ArgOffset += 8;
4223       break;
4224 
4225     case MVT::f32:
4226     case MVT::f64:
4227       // These can be scalar arguments or elements of a float array type
4228       // passed directly.  The latter are used to implement ELFv2 homogenous
4229       // float aggregates.
4230       if (FPR_idx != Num_FPR_Regs) {
4231         unsigned VReg;
4232 
4233         if (ObjectVT == MVT::f32)
4234           VReg = MF.addLiveIn(FPR[FPR_idx],
4235                               Subtarget.hasP8Vector()
4236                                   ? &PPC::VSSRCRegClass
4237                                   : &PPC::F4RCRegClass);
4238         else
4239           VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
4240                                                 ? &PPC::VSFRCRegClass
4241                                                 : &PPC::F8RCRegClass);
4242 
4243         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4244         ++FPR_idx;
4245       } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
4246         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
4247         // once we support fp <-> gpr moves.
4248 
4249         // This can only ever happen in the presence of f32 array types,
4250         // since otherwise we never run out of FPRs before running out
4251         // of GPRs.
4252         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4253         FuncInfo->addLiveInAttr(VReg, Flags);
4254         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4255 
4256         if (ObjectVT == MVT::f32) {
4257           if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
4258             ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
4259                                  DAG.getConstant(32, dl, MVT::i32));
4260           ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
4261         }
4262 
4263         ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
4264       } else {
4265         if (CallConv == CallingConv::Fast)
4266           ComputeArgOffset();
4267 
4268         needsLoad = true;
4269       }
4270 
4271       // When passing an array of floats, the array occupies consecutive
4272       // space in the argument area; only round up to the next doubleword
4273       // at the end of the array.  Otherwise, each float takes 8 bytes.
4274       if (CallConv != CallingConv::Fast || needsLoad) {
4275         ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
4276         ArgOffset += ArgSize;
4277         if (Flags.isInConsecutiveRegsLast())
4278           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4279       }
4280       break;
4281     case MVT::v4f32:
4282     case MVT::v4i32:
4283     case MVT::v8i16:
4284     case MVT::v16i8:
4285     case MVT::v2f64:
4286     case MVT::v2i64:
4287     case MVT::v1i128:
4288     case MVT::f128:
4289       // These can be scalar arguments or elements of a vector array type
4290       // passed directly.  The latter are used to implement ELFv2 homogenous
4291       // vector aggregates.
4292       if (VR_idx != Num_VR_Regs) {
4293         unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
4294         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4295         ++VR_idx;
4296       } else {
4297         if (CallConv == CallingConv::Fast)
4298           ComputeArgOffset();
4299         needsLoad = true;
4300       }
4301       if (CallConv != CallingConv::Fast || needsLoad)
4302         ArgOffset += 16;
4303       break;
4304     }
4305 
4306     // We need to load the argument to a virtual register if we determined
4307     // above that we ran out of physical registers of the appropriate type.
4308     if (needsLoad) {
4309       if (ObjSize < ArgSize && !isLittleEndian)
4310         CurArgOffset += ArgSize - ObjSize;
4311       int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
4312       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4313       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4314     }
4315 
4316     InVals.push_back(ArgVal);
4317   }
4318 
4319   // Area that is at least reserved in the caller of this function.
4320   unsigned MinReservedArea;
4321   if (HasParameterArea)
4322     MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
4323   else
4324     MinReservedArea = LinkageSize;
4325 
4326   // Set the size that is at least reserved in caller of this function.  Tail
4327   // call optimized functions' reserved stack space needs to be aligned so that
4328   // taking the difference between two stack areas will result in an aligned
4329   // stack.
4330   MinReservedArea =
4331       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4332   FuncInfo->setMinReservedArea(MinReservedArea);
4333 
4334   // If the function takes variable number of arguments, make a frame index for
4335   // the start of the first vararg value... for expansion of llvm.va_start.
4336   // On ELFv2ABI spec, it writes:
4337   // C programs that are intended to be *portable* across different compilers
4338   // and architectures must use the header file <stdarg.h> to deal with variable
4339   // argument lists.
4340   if (isVarArg && MFI.hasVAStart()) {
4341     int Depth = ArgOffset;
4342 
4343     FuncInfo->setVarArgsFrameIndex(
4344       MFI.CreateFixedObject(PtrByteSize, Depth, true));
4345     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4346 
4347     // If this function is vararg, store any remaining integer argument regs
4348     // to their spots on the stack so that they may be loaded by dereferencing
4349     // the result of va_next.
4350     for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4351          GPR_idx < Num_GPR_Regs; ++GPR_idx) {
4352       unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4353       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4354       SDValue Store =
4355           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4356       MemOps.push_back(Store);
4357       // Increment the address by four for the next argument to store
4358       SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
4359       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4360     }
4361   }
4362 
4363   if (!MemOps.empty())
4364     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4365 
4366   return Chain;
4367 }
4368 
4369 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
4370 /// adjusted to accommodate the arguments for the tailcall.
4371 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
4372                                    unsigned ParamSize) {
4373 
4374   if (!isTailCall) return 0;
4375 
4376   PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
4377   unsigned CallerMinReservedArea = FI->getMinReservedArea();
4378   int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
4379   // Remember only if the new adjustment is bigger.
4380   if (SPDiff < FI->getTailCallSPDelta())
4381     FI->setTailCallSPDelta(SPDiff);
4382 
4383   return SPDiff;
4384 }
4385 
4386 static bool isFunctionGlobalAddress(SDValue Callee);
4387 
4388 static bool callsShareTOCBase(const Function *Caller, SDValue Callee,
4389                               const TargetMachine &TM) {
4390   // It does not make sense to call callsShareTOCBase() with a caller that
4391   // is PC Relative since PC Relative callers do not have a TOC.
4392 #ifndef NDEBUG
4393   const PPCSubtarget *STICaller = &TM.getSubtarget<PPCSubtarget>(*Caller);
4394   assert(!STICaller->isUsingPCRelativeCalls() &&
4395          "PC Relative callers do not have a TOC and cannot share a TOC Base");
4396 #endif
4397 
4398   // Callee is either a GlobalAddress or an ExternalSymbol. ExternalSymbols
4399   // don't have enough information to determine if the caller and callee share
4400   // the same  TOC base, so we have to pessimistically assume they don't for
4401   // correctness.
4402   GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4403   if (!G)
4404     return false;
4405 
4406   const GlobalValue *GV = G->getGlobal();
4407 
4408   // If the callee is preemptable, then the static linker will use a plt-stub
4409   // which saves the toc to the stack, and needs a nop after the call
4410   // instruction to convert to a toc-restore.
4411   if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
4412     return false;
4413 
4414   // Functions with PC Relative enabled may clobber the TOC in the same DSO.
4415   // We may need a TOC restore in the situation where the caller requires a
4416   // valid TOC but the callee is PC Relative and does not.
4417   const Function *F = dyn_cast<Function>(GV);
4418   const GlobalAlias *Alias = dyn_cast<GlobalAlias>(GV);
4419 
4420   // If we have an Alias we can try to get the function from there.
4421   if (Alias) {
4422     const GlobalObject *GlobalObj = Alias->getBaseObject();
4423     F = dyn_cast<Function>(GlobalObj);
4424   }
4425 
4426   // If we still have no valid function pointer we do not have enough
4427   // information to determine if the callee uses PC Relative calls so we must
4428   // assume that it does.
4429   if (!F)
4430     return false;
4431 
4432   // If the callee uses PC Relative we cannot guarantee that the callee won't
4433   // clobber the TOC of the caller and so we must assume that the two
4434   // functions do not share a TOC base.
4435   const PPCSubtarget *STICallee = &TM.getSubtarget<PPCSubtarget>(*F);
4436   if (STICallee->isUsingPCRelativeCalls())
4437     return false;
4438 
4439   // If the GV is not a strong definition then we need to assume it can be
4440   // replaced by another function at link time. The function that replaces
4441   // it may not share the same TOC as the caller since the callee may be
4442   // replaced by a PC Relative version of the same function.
4443   if (!GV->isStrongDefinitionForLinker())
4444     return false;
4445 
4446   // The medium and large code models are expected to provide a sufficiently
4447   // large TOC to provide all data addressing needs of a module with a
4448   // single TOC.
4449   if (CodeModel::Medium == TM.getCodeModel() ||
4450       CodeModel::Large == TM.getCodeModel())
4451     return true;
4452 
4453   // Any explicitly-specified sections and section prefixes must also match.
4454   // Also, if we're using -ffunction-sections, then each function is always in
4455   // a different section (the same is true for COMDAT functions).
4456   if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
4457       GV->getSection() != Caller->getSection())
4458     return false;
4459   if (const auto *F = dyn_cast<Function>(GV)) {
4460     if (F->getSectionPrefix() != Caller->getSectionPrefix())
4461       return false;
4462   }
4463 
4464   return true;
4465 }
4466 
4467 static bool
4468 needStackSlotPassParameters(const PPCSubtarget &Subtarget,
4469                             const SmallVectorImpl<ISD::OutputArg> &Outs) {
4470   assert(Subtarget.is64BitELFABI());
4471 
4472   const unsigned PtrByteSize = 8;
4473   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4474 
4475   static const MCPhysReg GPR[] = {
4476     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4477     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4478   };
4479   static const MCPhysReg VR[] = {
4480     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4481     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4482   };
4483 
4484   const unsigned NumGPRs = array_lengthof(GPR);
4485   const unsigned NumFPRs = 13;
4486   const unsigned NumVRs = array_lengthof(VR);
4487   const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
4488 
4489   unsigned NumBytes = LinkageSize;
4490   unsigned AvailableFPRs = NumFPRs;
4491   unsigned AvailableVRs = NumVRs;
4492 
4493   for (const ISD::OutputArg& Param : Outs) {
4494     if (Param.Flags.isNest()) continue;
4495 
4496     if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags, PtrByteSize,
4497                                LinkageSize, ParamAreaSize, NumBytes,
4498                                AvailableFPRs, AvailableVRs))
4499       return true;
4500   }
4501   return false;
4502 }
4503 
4504 static bool hasSameArgumentList(const Function *CallerFn, const CallBase &CB) {
4505   if (CB.arg_size() != CallerFn->arg_size())
4506     return false;
4507 
4508   auto CalleeArgIter = CB.arg_begin();
4509   auto CalleeArgEnd = CB.arg_end();
4510   Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();
4511 
4512   for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
4513     const Value* CalleeArg = *CalleeArgIter;
4514     const Value* CallerArg = &(*CallerArgIter);
4515     if (CalleeArg == CallerArg)
4516       continue;
4517 
4518     // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
4519     //        tail call @callee([4 x i64] undef, [4 x i64] %b)
4520     //      }
4521     // 1st argument of callee is undef and has the same type as caller.
4522     if (CalleeArg->getType() == CallerArg->getType() &&
4523         isa<UndefValue>(CalleeArg))
4524       continue;
4525 
4526     return false;
4527   }
4528 
4529   return true;
4530 }
4531 
4532 // Returns true if TCO is possible between the callers and callees
4533 // calling conventions.
4534 static bool
4535 areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
4536                                     CallingConv::ID CalleeCC) {
4537   // Tail calls are possible with fastcc and ccc.
4538   auto isTailCallableCC  = [] (CallingConv::ID CC){
4539       return  CC == CallingConv::C || CC == CallingConv::Fast;
4540   };
4541   if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
4542     return false;
4543 
4544   // We can safely tail call both fastcc and ccc callees from a c calling
4545   // convention caller. If the caller is fastcc, we may have less stack space
4546   // than a non-fastcc caller with the same signature so disable tail-calls in
4547   // that case.
4548   return CallerCC == CallingConv::C || CallerCC == CalleeCC;
4549 }
4550 
4551 bool PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
4552     SDValue Callee, CallingConv::ID CalleeCC, const CallBase *CB, bool isVarArg,
4553     const SmallVectorImpl<ISD::OutputArg> &Outs,
4554     const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
4555   bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
4556 
4557   if (DisableSCO && !TailCallOpt) return false;
4558 
4559   // Variadic argument functions are not supported.
4560   if (isVarArg) return false;
4561 
4562   auto &Caller = DAG.getMachineFunction().getFunction();
4563   // Check that the calling conventions are compatible for tco.
4564   if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
4565     return false;
4566 
4567   // Caller contains any byval parameter is not supported.
4568   if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
4569     return false;
4570 
4571   // Callee contains any byval parameter is not supported, too.
4572   // Note: This is a quick work around, because in some cases, e.g.
4573   // caller's stack size > callee's stack size, we are still able to apply
4574   // sibling call optimization. For example, gcc is able to do SCO for caller1
4575   // in the following example, but not for caller2.
4576   //   struct test {
4577   //     long int a;
4578   //     char ary[56];
4579   //   } gTest;
4580   //   __attribute__((noinline)) int callee(struct test v, struct test *b) {
4581   //     b->a = v.a;
4582   //     return 0;
4583   //   }
4584   //   void caller1(struct test a, struct test c, struct test *b) {
4585   //     callee(gTest, b); }
4586   //   void caller2(struct test *b) { callee(gTest, b); }
4587   if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
4588     return false;
4589 
4590   // If callee and caller use different calling conventions, we cannot pass
4591   // parameters on stack since offsets for the parameter area may be different.
4592   if (Caller.getCallingConv() != CalleeCC &&
4593       needStackSlotPassParameters(Subtarget, Outs))
4594     return false;
4595 
4596   // All variants of 64-bit ELF ABIs without PC-Relative addressing require that
4597   // the caller and callee share the same TOC for TCO/SCO. If the caller and
4598   // callee potentially have different TOC bases then we cannot tail call since
4599   // we need to restore the TOC pointer after the call.
4600   // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
4601   // We cannot guarantee this for indirect calls or calls to external functions.
4602   // When PC-Relative addressing is used, the concept of the TOC is no longer
4603   // applicable so this check is not required.
4604   // Check first for indirect calls.
4605   if (!Subtarget.isUsingPCRelativeCalls() &&
4606       !isFunctionGlobalAddress(Callee) && !isa<ExternalSymbolSDNode>(Callee))
4607     return false;
4608 
4609   // Check if we share the TOC base.
4610   if (!Subtarget.isUsingPCRelativeCalls() &&
4611       !callsShareTOCBase(&Caller, Callee, getTargetMachine()))
4612     return false;
4613 
4614   // TCO allows altering callee ABI, so we don't have to check further.
4615   if (CalleeCC == CallingConv::Fast && TailCallOpt)
4616     return true;
4617 
4618   if (DisableSCO) return false;
4619 
4620   // If callee use the same argument list that caller is using, then we can
4621   // apply SCO on this case. If it is not, then we need to check if callee needs
4622   // stack for passing arguments.
4623   // PC Relative tail calls may not have a CallBase.
4624   // If there is no CallBase we cannot verify if we have the same argument
4625   // list so assume that we don't have the same argument list.
4626   if (CB && !hasSameArgumentList(&Caller, *CB) &&
4627       needStackSlotPassParameters(Subtarget, Outs))
4628     return false;
4629   else if (!CB && needStackSlotPassParameters(Subtarget, Outs))
4630     return false;
4631 
4632   return true;
4633 }
4634 
4635 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
4636 /// for tail call optimization. Targets which want to do tail call
4637 /// optimization should implement this function.
4638 bool
4639 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
4640                                                      CallingConv::ID CalleeCC,
4641                                                      bool isVarArg,
4642                                       const SmallVectorImpl<ISD::InputArg> &Ins,
4643                                                      SelectionDAG& DAG) const {
4644   if (!getTargetMachine().Options.GuaranteedTailCallOpt)
4645     return false;
4646 
4647   // Variable argument functions are not supported.
4648   if (isVarArg)
4649     return false;
4650 
4651   MachineFunction &MF = DAG.getMachineFunction();
4652   CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
4653   if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
4654     // Functions containing by val parameters are not supported.
4655     for (unsigned i = 0; i != Ins.size(); i++) {
4656        ISD::ArgFlagsTy Flags = Ins[i].Flags;
4657        if (Flags.isByVal()) return false;
4658     }
4659 
4660     // Non-PIC/GOT tail calls are supported.
4661     if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
4662       return true;
4663 
4664     // At the moment we can only do local tail calls (in same module, hidden
4665     // or protected) if we are generating PIC.
4666     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
4667       return G->getGlobal()->hasHiddenVisibility()
4668           || G->getGlobal()->hasProtectedVisibility();
4669   }
4670 
4671   return false;
4672 }
4673 
4674 /// isCallCompatibleAddress - Return the immediate to use if the specified
4675 /// 32-bit value is representable in the immediate field of a BxA instruction.
4676 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
4677   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
4678   if (!C) return nullptr;
4679 
4680   int Addr = C->getZExtValue();
4681   if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
4682       SignExtend32<26>(Addr) != Addr)
4683     return nullptr;  // Top 6 bits have to be sext of immediate.
4684 
4685   return DAG
4686       .getConstant(
4687           (int)C->getZExtValue() >> 2, SDLoc(Op),
4688           DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
4689       .getNode();
4690 }
4691 
4692 namespace {
4693 
4694 struct TailCallArgumentInfo {
4695   SDValue Arg;
4696   SDValue FrameIdxOp;
4697   int FrameIdx = 0;
4698 
4699   TailCallArgumentInfo() = default;
4700 };
4701 
4702 } // end anonymous namespace
4703 
4704 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
4705 static void StoreTailCallArgumentsToStackSlot(
4706     SelectionDAG &DAG, SDValue Chain,
4707     const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
4708     SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
4709   for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
4710     SDValue Arg = TailCallArgs[i].Arg;
4711     SDValue FIN = TailCallArgs[i].FrameIdxOp;
4712     int FI = TailCallArgs[i].FrameIdx;
4713     // Store relative to framepointer.
4714     MemOpChains.push_back(DAG.getStore(
4715         Chain, dl, Arg, FIN,
4716         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
4717   }
4718 }
4719 
4720 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
4721 /// the appropriate stack slot for the tail call optimized function call.
4722 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
4723                                              SDValue OldRetAddr, SDValue OldFP,
4724                                              int SPDiff, const SDLoc &dl) {
4725   if (SPDiff) {
4726     // Calculate the new stack slot for the return address.
4727     MachineFunction &MF = DAG.getMachineFunction();
4728     const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
4729     const PPCFrameLowering *FL = Subtarget.getFrameLowering();
4730     bool isPPC64 = Subtarget.isPPC64();
4731     int SlotSize = isPPC64 ? 8 : 4;
4732     int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
4733     int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize,
4734                                                          NewRetAddrLoc, true);
4735     EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4736     SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
4737     Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
4738                          MachinePointerInfo::getFixedStack(MF, NewRetAddr));
4739   }
4740   return Chain;
4741 }
4742 
4743 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate
4744 /// the position of the argument.
4745 static void
4746 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
4747                          SDValue Arg, int SPDiff, unsigned ArgOffset,
4748                      SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
4749   int Offset = ArgOffset + SPDiff;
4750   uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
4751   int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
4752   EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4753   SDValue FIN = DAG.getFrameIndex(FI, VT);
4754   TailCallArgumentInfo Info;
4755   Info.Arg = Arg;
4756   Info.FrameIdxOp = FIN;
4757   Info.FrameIdx = FI;
4758   TailCallArguments.push_back(Info);
4759 }
4760 
4761 /// EmitTCFPAndRetAddrLoad - Emit load from frame pointer and return address
4762 /// stack slot. Returns the chain as result and the loaded frame pointers in
4763 /// LROpOut/FPOpout. Used when tail calling.
4764 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
4765     SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
4766     SDValue &FPOpOut, const SDLoc &dl) const {
4767   if (SPDiff) {
4768     // Load the LR and FP stack slot for later adjusting.
4769     EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
4770     LROpOut = getReturnAddrFrameIndex(DAG);
4771     LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
4772     Chain = SDValue(LROpOut.getNode(), 1);
4773   }
4774   return Chain;
4775 }
4776 
4777 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
4778 /// by "Src" to address "Dst" of size "Size".  Alignment information is
4779 /// specified by the specific parameter attribute. The copy will be passed as
4780 /// a byval function parameter.
4781 /// Sometimes what we are copying is the end of a larger object, the part that
4782 /// does not fit in registers.
4783 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
4784                                          SDValue Chain, ISD::ArgFlagsTy Flags,
4785                                          SelectionDAG &DAG, const SDLoc &dl) {
4786   SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
4787   return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode,
4788                        Flags.getNonZeroByValAlign(), false, false, false,
4789                        MachinePointerInfo(), MachinePointerInfo());
4790 }
4791 
4792 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
4793 /// tail calls.
4794 static void LowerMemOpCallTo(
4795     SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
4796     SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
4797     bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
4798     SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
4799   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4800   if (!isTailCall) {
4801     if (isVector) {
4802       SDValue StackPtr;
4803       if (isPPC64)
4804         StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
4805       else
4806         StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
4807       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
4808                            DAG.getConstant(ArgOffset, dl, PtrVT));
4809     }
4810     MemOpChains.push_back(
4811         DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
4812     // Calculate and remember argument location.
4813   } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
4814                                   TailCallArguments);
4815 }
4816 
4817 static void
4818 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
4819                 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp,
4820                 SDValue FPOp,
4821                 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
4822   // Emit a sequence of copyto/copyfrom virtual registers for arguments that
4823   // might overwrite each other in case of tail call optimization.
4824   SmallVector<SDValue, 8> MemOpChains2;
4825   // Do not flag preceding copytoreg stuff together with the following stuff.
4826   InFlag = SDValue();
4827   StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
4828                                     MemOpChains2, dl);
4829   if (!MemOpChains2.empty())
4830     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
4831 
4832   // Store the return address to the appropriate stack slot.
4833   Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl);
4834 
4835   // Emit callseq_end just before tailcall node.
4836   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
4837                              DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
4838   InFlag = Chain.getValue(1);
4839 }
4840 
4841 // Is this global address that of a function that can be called by name? (as
4842 // opposed to something that must hold a descriptor for an indirect call).
4843 static bool isFunctionGlobalAddress(SDValue Callee) {
4844   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
4845     if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
4846         Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
4847       return false;
4848 
4849     return G->getGlobal()->getValueType()->isFunctionTy();
4850   }
4851 
4852   return false;
4853 }
4854 
4855 SDValue PPCTargetLowering::LowerCallResult(
4856     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
4857     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4858     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4859   SmallVector<CCValAssign, 16> RVLocs;
4860   CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
4861                     *DAG.getContext());
4862 
4863   CCRetInfo.AnalyzeCallResult(
4864       Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
4865                ? RetCC_PPC_Cold
4866                : RetCC_PPC);
4867 
4868   // Copy all of the result registers out of their specified physreg.
4869   for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
4870     CCValAssign &VA = RVLocs[i];
4871     assert(VA.isRegLoc() && "Can only return in registers!");
4872 
4873     SDValue Val;
4874 
4875     if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
4876       SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
4877                                       InFlag);
4878       Chain = Lo.getValue(1);
4879       InFlag = Lo.getValue(2);
4880       VA = RVLocs[++i]; // skip ahead to next loc
4881       SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
4882                                       InFlag);
4883       Chain = Hi.getValue(1);
4884       InFlag = Hi.getValue(2);
4885       if (!Subtarget.isLittleEndian())
4886         std::swap (Lo, Hi);
4887       Val = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, Lo, Hi);
4888     } else {
4889       Val = DAG.getCopyFromReg(Chain, dl,
4890                                VA.getLocReg(), VA.getLocVT(), InFlag);
4891       Chain = Val.getValue(1);
4892       InFlag = Val.getValue(2);
4893     }
4894 
4895     switch (VA.getLocInfo()) {
4896     default: llvm_unreachable("Unknown loc info!");
4897     case CCValAssign::Full: break;
4898     case CCValAssign::AExt:
4899       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
4900       break;
4901     case CCValAssign::ZExt:
4902       Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
4903                         DAG.getValueType(VA.getValVT()));
4904       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
4905       break;
4906     case CCValAssign::SExt:
4907       Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
4908                         DAG.getValueType(VA.getValVT()));
4909       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
4910       break;
4911     }
4912 
4913     InVals.push_back(Val);
4914   }
4915 
4916   return Chain;
4917 }
4918 
4919 static bool isIndirectCall(const SDValue &Callee, SelectionDAG &DAG,
4920                            const PPCSubtarget &Subtarget, bool isPatchPoint) {
4921   // PatchPoint calls are not indirect.
4922   if (isPatchPoint)
4923     return false;
4924 
4925   if (isFunctionGlobalAddress(Callee) || dyn_cast<ExternalSymbolSDNode>(Callee))
4926     return false;
4927 
4928   // Darwin, and 32-bit ELF can use a BLA. The descriptor based ABIs can not
4929   // becuase the immediate function pointer points to a descriptor instead of
4930   // a function entry point. The ELFv2 ABI cannot use a BLA because the function
4931   // pointer immediate points to the global entry point, while the BLA would
4932   // need to jump to the local entry point (see rL211174).
4933   if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI() &&
4934       isBLACompatibleAddress(Callee, DAG))
4935     return false;
4936 
4937   return true;
4938 }
4939 
4940 // AIX and 64-bit ELF ABIs w/o PCRel require a TOC save/restore around calls.
4941 static inline bool isTOCSaveRestoreRequired(const PPCSubtarget &Subtarget) {
4942   return Subtarget.isAIXABI() ||
4943          (Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls());
4944 }
4945 
4946 static unsigned getCallOpcode(PPCTargetLowering::CallFlags CFlags,
4947                               const Function &Caller,
4948                               const SDValue &Callee,
4949                               const PPCSubtarget &Subtarget,
4950                               const TargetMachine &TM) {
4951   if (CFlags.IsTailCall)
4952     return PPCISD::TC_RETURN;
4953 
4954   // This is a call through a function pointer.
4955   if (CFlags.IsIndirect) {
4956     // AIX and the 64-bit ELF ABIs need to maintain the TOC pointer accross
4957     // indirect calls. The save of the caller's TOC pointer to the stack will be
4958     // inserted into the DAG as part of call lowering. The restore of the TOC
4959     // pointer is modeled by using a pseudo instruction for the call opcode that
4960     // represents the 2 instruction sequence of an indirect branch and link,
4961     // immediately followed by a load of the TOC pointer from the the stack save
4962     // slot into gpr2. For 64-bit ELFv2 ABI with PCRel, do not restore the TOC
4963     // as it is not saved or used.
4964     return isTOCSaveRestoreRequired(Subtarget) ? PPCISD::BCTRL_LOAD_TOC
4965                                                : PPCISD::BCTRL;
4966   }
4967 
4968   if (Subtarget.isUsingPCRelativeCalls()) {
4969     assert(Subtarget.is64BitELFABI() && "PC Relative is only on ELF ABI.");
4970     return PPCISD::CALL_NOTOC;
4971   }
4972 
4973   // The ABIs that maintain a TOC pointer accross calls need to have a nop
4974   // immediately following the call instruction if the caller and callee may
4975   // have different TOC bases. At link time if the linker determines the calls
4976   // may not share a TOC base, the call is redirected to a trampoline inserted
4977   // by the linker. The trampoline will (among other things) save the callers
4978   // TOC pointer at an ABI designated offset in the linkage area and the linker
4979   // will rewrite the nop to be a load of the TOC pointer from the linkage area
4980   // into gpr2.
4981   if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI())
4982     return callsShareTOCBase(&Caller, Callee, TM) ? PPCISD::CALL
4983                                                   : PPCISD::CALL_NOP;
4984 
4985   return PPCISD::CALL;
4986 }
4987 
4988 static SDValue transformCallee(const SDValue &Callee, SelectionDAG &DAG,
4989                                const SDLoc &dl, const PPCSubtarget &Subtarget) {
4990   if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI())
4991     if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
4992       return SDValue(Dest, 0);
4993 
4994   // Returns true if the callee is local, and false otherwise.
4995   auto isLocalCallee = [&]() {
4996     const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4997     const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
4998     const GlobalValue *GV = G ? G->getGlobal() : nullptr;
4999 
5000     return DAG.getTarget().shouldAssumeDSOLocal(*Mod, GV) &&
5001            !dyn_cast_or_null<GlobalIFunc>(GV);
5002   };
5003 
5004   // The PLT is only used in 32-bit ELF PIC mode.  Attempting to use the PLT in
5005   // a static relocation model causes some versions of GNU LD (2.17.50, at
5006   // least) to force BSS-PLT, instead of secure-PLT, even if all objects are
5007   // built with secure-PLT.
5008   bool UsePlt =
5009       Subtarget.is32BitELFABI() && !isLocalCallee() &&
5010       Subtarget.getTargetMachine().getRelocationModel() == Reloc::PIC_;
5011 
5012   const auto getAIXFuncEntryPointSymbolSDNode = [&](const GlobalValue *GV) {
5013     const TargetMachine &TM = Subtarget.getTargetMachine();
5014     const TargetLoweringObjectFile *TLOF = TM.getObjFileLowering();
5015     MCSymbolXCOFF *S =
5016         cast<MCSymbolXCOFF>(TLOF->getFunctionEntryPointSymbol(GV, TM));
5017 
5018     MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
5019     return DAG.getMCSymbol(S, PtrVT);
5020   };
5021 
5022   if (isFunctionGlobalAddress(Callee)) {
5023     const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
5024 
5025     if (Subtarget.isAIXABI()) {
5026       assert(!isa<GlobalIFunc>(GV) && "IFunc is not supported on AIX.");
5027       return getAIXFuncEntryPointSymbolSDNode(GV);
5028     }
5029     return DAG.getTargetGlobalAddress(GV, dl, Callee.getValueType(), 0,
5030                                       UsePlt ? PPCII::MO_PLT : 0);
5031   }
5032 
5033   if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
5034     const char *SymName = S->getSymbol();
5035     if (Subtarget.isAIXABI()) {
5036       // If there exists a user-declared function whose name is the same as the
5037       // ExternalSymbol's, then we pick up the user-declared version.
5038       const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
5039       if (const Function *F =
5040               dyn_cast_or_null<Function>(Mod->getNamedValue(SymName)))
5041         return getAIXFuncEntryPointSymbolSDNode(F);
5042 
5043       // On AIX, direct function calls reference the symbol for the function's
5044       // entry point, which is named by prepending a "." before the function's
5045       // C-linkage name. A Qualname is returned here because an external
5046       // function entry point is a csect with XTY_ER property.
5047       const auto getExternalFunctionEntryPointSymbol = [&](StringRef SymName) {
5048         auto &Context = DAG.getMachineFunction().getMMI().getContext();
5049         MCSectionXCOFF *Sec = Context.getXCOFFSection(
5050             (Twine(".") + Twine(SymName)).str(), XCOFF::XMC_PR, XCOFF::XTY_ER,
5051             SectionKind::getMetadata());
5052         return Sec->getQualNameSymbol();
5053       };
5054 
5055       SymName = getExternalFunctionEntryPointSymbol(SymName)->getName().data();
5056     }
5057     return DAG.getTargetExternalSymbol(SymName, Callee.getValueType(),
5058                                        UsePlt ? PPCII::MO_PLT : 0);
5059   }
5060 
5061   // No transformation needed.
5062   assert(Callee.getNode() && "What no callee?");
5063   return Callee;
5064 }
5065 
5066 static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart) {
5067   assert(CallSeqStart.getOpcode() == ISD::CALLSEQ_START &&
5068          "Expected a CALLSEQ_STARTSDNode.");
5069 
5070   // The last operand is the chain, except when the node has glue. If the node
5071   // has glue, then the last operand is the glue, and the chain is the second
5072   // last operand.
5073   SDValue LastValue = CallSeqStart.getValue(CallSeqStart->getNumValues() - 1);
5074   if (LastValue.getValueType() != MVT::Glue)
5075     return LastValue;
5076 
5077   return CallSeqStart.getValue(CallSeqStart->getNumValues() - 2);
5078 }
5079 
5080 // Creates the node that moves a functions address into the count register
5081 // to prepare for an indirect call instruction.
5082 static void prepareIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5083                                 SDValue &Glue, SDValue &Chain,
5084                                 const SDLoc &dl) {
5085   SDValue MTCTROps[] = {Chain, Callee, Glue};
5086   EVT ReturnTypes[] = {MVT::Other, MVT::Glue};
5087   Chain = DAG.getNode(PPCISD::MTCTR, dl, makeArrayRef(ReturnTypes, 2),
5088                       makeArrayRef(MTCTROps, Glue.getNode() ? 3 : 2));
5089   // The glue is the second value produced.
5090   Glue = Chain.getValue(1);
5091 }
5092 
5093 static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5094                                           SDValue &Glue, SDValue &Chain,
5095                                           SDValue CallSeqStart,
5096                                           const CallBase *CB, const SDLoc &dl,
5097                                           bool hasNest,
5098                                           const PPCSubtarget &Subtarget) {
5099   // Function pointers in the 64-bit SVR4 ABI do not point to the function
5100   // entry point, but to the function descriptor (the function entry point
5101   // address is part of the function descriptor though).
5102   // The function descriptor is a three doubleword structure with the
5103   // following fields: function entry point, TOC base address and
5104   // environment pointer.
5105   // Thus for a call through a function pointer, the following actions need
5106   // to be performed:
5107   //   1. Save the TOC of the caller in the TOC save area of its stack
5108   //      frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
5109   //   2. Load the address of the function entry point from the function
5110   //      descriptor.
5111   //   3. Load the TOC of the callee from the function descriptor into r2.
5112   //   4. Load the environment pointer from the function descriptor into
5113   //      r11.
5114   //   5. Branch to the function entry point address.
5115   //   6. On return of the callee, the TOC of the caller needs to be
5116   //      restored (this is done in FinishCall()).
5117   //
5118   // The loads are scheduled at the beginning of the call sequence, and the
5119   // register copies are flagged together to ensure that no other
5120   // operations can be scheduled in between. E.g. without flagging the
5121   // copies together, a TOC access in the caller could be scheduled between
5122   // the assignment of the callee TOC and the branch to the callee, which leads
5123   // to incorrect code.
5124 
5125   // Start by loading the function address from the descriptor.
5126   SDValue LDChain = getOutputChainFromCallSeq(CallSeqStart);
5127   auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
5128                       ? (MachineMemOperand::MODereferenceable |
5129                          MachineMemOperand::MOInvariant)
5130                       : MachineMemOperand::MONone;
5131 
5132   MachinePointerInfo MPI(CB ? CB->getCalledOperand() : nullptr);
5133 
5134   // Registers used in building the DAG.
5135   const MCRegister EnvPtrReg = Subtarget.getEnvironmentPointerRegister();
5136   const MCRegister TOCReg = Subtarget.getTOCPointerRegister();
5137 
5138   // Offsets of descriptor members.
5139   const unsigned TOCAnchorOffset = Subtarget.descriptorTOCAnchorOffset();
5140   const unsigned EnvPtrOffset = Subtarget.descriptorEnvironmentPointerOffset();
5141 
5142   const MVT RegVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
5143   const unsigned Alignment = Subtarget.isPPC64() ? 8 : 4;
5144 
5145   // One load for the functions entry point address.
5146   SDValue LoadFuncPtr = DAG.getLoad(RegVT, dl, LDChain, Callee, MPI,
5147                                     Alignment, MMOFlags);
5148 
5149   // One for loading the TOC anchor for the module that contains the called
5150   // function.
5151   SDValue TOCOff = DAG.getIntPtrConstant(TOCAnchorOffset, dl);
5152   SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, Callee, TOCOff);
5153   SDValue TOCPtr =
5154       DAG.getLoad(RegVT, dl, LDChain, AddTOC,
5155                   MPI.getWithOffset(TOCAnchorOffset), Alignment, MMOFlags);
5156 
5157   // One for loading the environment pointer.
5158   SDValue PtrOff = DAG.getIntPtrConstant(EnvPtrOffset, dl);
5159   SDValue AddPtr = DAG.getNode(ISD::ADD, dl, RegVT, Callee, PtrOff);
5160   SDValue LoadEnvPtr =
5161       DAG.getLoad(RegVT, dl, LDChain, AddPtr,
5162                   MPI.getWithOffset(EnvPtrOffset), Alignment, MMOFlags);
5163 
5164 
5165   // Then copy the newly loaded TOC anchor to the TOC pointer.
5166   SDValue TOCVal = DAG.getCopyToReg(Chain, dl, TOCReg, TOCPtr, Glue);
5167   Chain = TOCVal.getValue(0);
5168   Glue = TOCVal.getValue(1);
5169 
5170   // If the function call has an explicit 'nest' parameter, it takes the
5171   // place of the environment pointer.
5172   assert((!hasNest || !Subtarget.isAIXABI()) &&
5173          "Nest parameter is not supported on AIX.");
5174   if (!hasNest) {
5175     SDValue EnvVal = DAG.getCopyToReg(Chain, dl, EnvPtrReg, LoadEnvPtr, Glue);
5176     Chain = EnvVal.getValue(0);
5177     Glue = EnvVal.getValue(1);
5178   }
5179 
5180   // The rest of the indirect call sequence is the same as the non-descriptor
5181   // DAG.
5182   prepareIndirectCall(DAG, LoadFuncPtr, Glue, Chain, dl);
5183 }
5184 
5185 static void
5186 buildCallOperands(SmallVectorImpl<SDValue> &Ops,
5187                   PPCTargetLowering::CallFlags CFlags, const SDLoc &dl,
5188                   SelectionDAG &DAG,
5189                   SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
5190                   SDValue Glue, SDValue Chain, SDValue &Callee, int SPDiff,
5191                   const PPCSubtarget &Subtarget) {
5192   const bool IsPPC64 = Subtarget.isPPC64();
5193   // MVT for a general purpose register.
5194   const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
5195 
5196   // First operand is always the chain.
5197   Ops.push_back(Chain);
5198 
5199   // If it's a direct call pass the callee as the second operand.
5200   if (!CFlags.IsIndirect)
5201     Ops.push_back(Callee);
5202   else {
5203     assert(!CFlags.IsPatchPoint && "Patch point calls are not indirect.");
5204 
5205     // For the TOC based ABIs, we have saved the TOC pointer to the linkage area
5206     // on the stack (this would have been done in `LowerCall_64SVR4` or
5207     // `LowerCall_AIX`). The call instruction is a pseudo instruction that
5208     // represents both the indirect branch and a load that restores the TOC
5209     // pointer from the linkage area. The operand for the TOC restore is an add
5210     // of the TOC save offset to the stack pointer. This must be the second
5211     // operand: after the chain input but before any other variadic arguments.
5212     // For 64-bit ELFv2 ABI with PCRel, do not restore the TOC as it is not
5213     // saved or used.
5214     if (isTOCSaveRestoreRequired(Subtarget)) {
5215       const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
5216 
5217       SDValue StackPtr = DAG.getRegister(StackPtrReg, RegVT);
5218       unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
5219       SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
5220       SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, StackPtr, TOCOff);
5221       Ops.push_back(AddTOC);
5222     }
5223 
5224     // Add the register used for the environment pointer.
5225     if (Subtarget.usesFunctionDescriptors() && !CFlags.HasNest)
5226       Ops.push_back(DAG.getRegister(Subtarget.getEnvironmentPointerRegister(),
5227                                     RegVT));
5228 
5229 
5230     // Add CTR register as callee so a bctr can be emitted later.
5231     if (CFlags.IsTailCall)
5232       Ops.push_back(DAG.getRegister(IsPPC64 ? PPC::CTR8 : PPC::CTR, RegVT));
5233   }
5234 
5235   // If this is a tail call add stack pointer delta.
5236   if (CFlags.IsTailCall)
5237     Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32));
5238 
5239   // Add argument registers to the end of the list so that they are known live
5240   // into the call.
5241   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
5242     Ops.push_back(DAG.getRegister(RegsToPass[i].first,
5243                                   RegsToPass[i].second.getValueType()));
5244 
5245   // We cannot add R2/X2 as an operand here for PATCHPOINT, because there is
5246   // no way to mark dependencies as implicit here.
5247   // We will add the R2/X2 dependency in EmitInstrWithCustomInserter.
5248   if ((Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) &&
5249        !CFlags.IsPatchPoint && !Subtarget.isUsingPCRelativeCalls())
5250     Ops.push_back(DAG.getRegister(Subtarget.getTOCPointerRegister(), RegVT));
5251 
5252   // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls
5253   if (CFlags.IsVarArg && Subtarget.is32BitELFABI())
5254     Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));
5255 
5256   // Add a register mask operand representing the call-preserved registers.
5257   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
5258   const uint32_t *Mask =
5259       TRI->getCallPreservedMask(DAG.getMachineFunction(), CFlags.CallConv);
5260   assert(Mask && "Missing call preserved mask for calling convention");
5261   Ops.push_back(DAG.getRegisterMask(Mask));
5262 
5263   // If the glue is valid, it is the last operand.
5264   if (Glue.getNode())
5265     Ops.push_back(Glue);
5266 }
5267 
5268 SDValue PPCTargetLowering::FinishCall(
5269     CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG,
5270     SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue Glue,
5271     SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
5272     unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
5273     SmallVectorImpl<SDValue> &InVals, const CallBase *CB) const {
5274 
5275   if ((Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls()) ||
5276       Subtarget.isAIXABI())
5277     setUsesTOCBasePtr(DAG);
5278 
5279   unsigned CallOpc =
5280       getCallOpcode(CFlags, DAG.getMachineFunction().getFunction(), Callee,
5281                     Subtarget, DAG.getTarget());
5282 
5283   if (!CFlags.IsIndirect)
5284     Callee = transformCallee(Callee, DAG, dl, Subtarget);
5285   else if (Subtarget.usesFunctionDescriptors())
5286     prepareDescriptorIndirectCall(DAG, Callee, Glue, Chain, CallSeqStart, CB,
5287                                   dl, CFlags.HasNest, Subtarget);
5288   else
5289     prepareIndirectCall(DAG, Callee, Glue, Chain, dl);
5290 
5291   // Build the operand list for the call instruction.
5292   SmallVector<SDValue, 8> Ops;
5293   buildCallOperands(Ops, CFlags, dl, DAG, RegsToPass, Glue, Chain, Callee,
5294                     SPDiff, Subtarget);
5295 
5296   // Emit tail call.
5297   if (CFlags.IsTailCall) {
5298     // Indirect tail call when using PC Relative calls do not have the same
5299     // constraints.
5300     assert(((Callee.getOpcode() == ISD::Register &&
5301              cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
5302             Callee.getOpcode() == ISD::TargetExternalSymbol ||
5303             Callee.getOpcode() == ISD::TargetGlobalAddress ||
5304             isa<ConstantSDNode>(Callee) ||
5305             (CFlags.IsIndirect && Subtarget.isUsingPCRelativeCalls())) &&
5306            "Expecting a global address, external symbol, absolute value, "
5307            "register or an indirect tail call when PC Relative calls are "
5308            "used.");
5309     // PC Relative calls also use TC_RETURN as the way to mark tail calls.
5310     assert(CallOpc == PPCISD::TC_RETURN &&
5311            "Unexpected call opcode for a tail call.");
5312     DAG.getMachineFunction().getFrameInfo().setHasTailCall();
5313     return DAG.getNode(CallOpc, dl, MVT::Other, Ops);
5314   }
5315 
5316   std::array<EVT, 2> ReturnTypes = {{MVT::Other, MVT::Glue}};
5317   Chain = DAG.getNode(CallOpc, dl, ReturnTypes, Ops);
5318   DAG.addNoMergeSiteInfo(Chain.getNode(), CFlags.NoMerge);
5319   Glue = Chain.getValue(1);
5320 
5321   // When performing tail call optimization the callee pops its arguments off
5322   // the stack. Account for this here so these bytes can be pushed back on in
5323   // PPCFrameLowering::eliminateCallFramePseudoInstr.
5324   int BytesCalleePops = (CFlags.CallConv == CallingConv::Fast &&
5325                          getTargetMachine().Options.GuaranteedTailCallOpt)
5326                             ? NumBytes
5327                             : 0;
5328 
5329   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
5330                              DAG.getIntPtrConstant(BytesCalleePops, dl, true),
5331                              Glue, dl);
5332   Glue = Chain.getValue(1);
5333 
5334   return LowerCallResult(Chain, Glue, CFlags.CallConv, CFlags.IsVarArg, Ins, dl,
5335                          DAG, InVals);
5336 }
5337 
5338 SDValue
5339 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
5340                              SmallVectorImpl<SDValue> &InVals) const {
5341   SelectionDAG &DAG                     = CLI.DAG;
5342   SDLoc &dl                             = CLI.DL;
5343   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
5344   SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
5345   SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
5346   SDValue Chain                         = CLI.Chain;
5347   SDValue Callee                        = CLI.Callee;
5348   bool &isTailCall                      = CLI.IsTailCall;
5349   CallingConv::ID CallConv              = CLI.CallConv;
5350   bool isVarArg                         = CLI.IsVarArg;
5351   bool isPatchPoint                     = CLI.IsPatchPoint;
5352   const CallBase *CB                    = CLI.CB;
5353 
5354   if (isTailCall) {
5355     if (Subtarget.useLongCalls() && !(CB && CB->isMustTailCall()))
5356       isTailCall = false;
5357     else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
5358       isTailCall = IsEligibleForTailCallOptimization_64SVR4(
5359           Callee, CallConv, CB, isVarArg, Outs, Ins, DAG);
5360     else
5361       isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
5362                                                      Ins, DAG);
5363     if (isTailCall) {
5364       ++NumTailCalls;
5365       if (!getTargetMachine().Options.GuaranteedTailCallOpt)
5366         ++NumSiblingCalls;
5367 
5368       // PC Relative calls no longer guarantee that the callee is a Global
5369       // Address Node. The callee could be an indirect tail call in which
5370       // case the SDValue for the callee could be a load (to load the address
5371       // of a function pointer) or it may be a register copy (to move the
5372       // address of the callee from a function parameter into a virtual
5373       // register). It may also be an ExternalSymbolSDNode (ex memcopy).
5374       assert((Subtarget.isUsingPCRelativeCalls() ||
5375               isa<GlobalAddressSDNode>(Callee)) &&
5376              "Callee should be an llvm::Function object.");
5377 
5378       LLVM_DEBUG(dbgs() << "TCO caller: " << DAG.getMachineFunction().getName()
5379                         << "\nTCO callee: ");
5380       LLVM_DEBUG(Callee.dump());
5381     }
5382   }
5383 
5384   if (!isTailCall && CB && CB->isMustTailCall())
5385     report_fatal_error("failed to perform tail call elimination on a call "
5386                        "site marked musttail");
5387 
5388   // When long calls (i.e. indirect calls) are always used, calls are always
5389   // made via function pointer. If we have a function name, first translate it
5390   // into a pointer.
5391   if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
5392       !isTailCall)
5393     Callee = LowerGlobalAddress(Callee, DAG);
5394 
5395   CallFlags CFlags(
5396       CallConv, isTailCall, isVarArg, isPatchPoint,
5397       isIndirectCall(Callee, DAG, Subtarget, isPatchPoint),
5398       // hasNest
5399       Subtarget.is64BitELFABI() &&
5400           any_of(Outs, [](ISD::OutputArg Arg) { return Arg.Flags.isNest(); }),
5401       CLI.NoMerge);
5402 
5403   if (Subtarget.isAIXABI())
5404     return LowerCall_AIX(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5405                          InVals, CB);
5406 
5407   assert(Subtarget.isSVR4ABI());
5408   if (Subtarget.isPPC64())
5409     return LowerCall_64SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5410                             InVals, CB);
5411   return LowerCall_32SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5412                           InVals, CB);
5413 }
5414 
5415 SDValue PPCTargetLowering::LowerCall_32SVR4(
5416     SDValue Chain, SDValue Callee, CallFlags CFlags,
5417     const SmallVectorImpl<ISD::OutputArg> &Outs,
5418     const SmallVectorImpl<SDValue> &OutVals,
5419     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5420     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5421     const CallBase *CB) const {
5422   // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
5423   // of the 32-bit SVR4 ABI stack frame layout.
5424 
5425   const CallingConv::ID CallConv = CFlags.CallConv;
5426   const bool IsVarArg = CFlags.IsVarArg;
5427   const bool IsTailCall = CFlags.IsTailCall;
5428 
5429   assert((CallConv == CallingConv::C ||
5430           CallConv == CallingConv::Cold ||
5431           CallConv == CallingConv::Fast) && "Unknown calling convention!");
5432 
5433   const Align PtrAlign(4);
5434 
5435   MachineFunction &MF = DAG.getMachineFunction();
5436 
5437   // Mark this function as potentially containing a function that contains a
5438   // tail call. As a consequence the frame pointer will be used for dynamicalloc
5439   // and restoring the callers stack pointer in this functions epilog. This is
5440   // done because by tail calling the called function might overwrite the value
5441   // in this function's (MF) stack pointer stack slot 0(SP).
5442   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5443       CallConv == CallingConv::Fast)
5444     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5445 
5446   // Count how many bytes are to be pushed on the stack, including the linkage
5447   // area, parameter list area and the part of the local variable space which
5448   // contains copies of aggregates which are passed by value.
5449 
5450   // Assign locations to all of the outgoing arguments.
5451   SmallVector<CCValAssign, 16> ArgLocs;
5452   PPCCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
5453 
5454   // Reserve space for the linkage area on the stack.
5455   CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
5456                        PtrAlign);
5457   if (useSoftFloat())
5458     CCInfo.PreAnalyzeCallOperands(Outs);
5459 
5460   if (IsVarArg) {
5461     // Handle fixed and variable vector arguments differently.
5462     // Fixed vector arguments go into registers as long as registers are
5463     // available. Variable vector arguments always go into memory.
5464     unsigned NumArgs = Outs.size();
5465 
5466     for (unsigned i = 0; i != NumArgs; ++i) {
5467       MVT ArgVT = Outs[i].VT;
5468       ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
5469       bool Result;
5470 
5471       if (Outs[i].IsFixed) {
5472         Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
5473                                CCInfo);
5474       } else {
5475         Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
5476                                       ArgFlags, CCInfo);
5477       }
5478 
5479       if (Result) {
5480 #ifndef NDEBUG
5481         errs() << "Call operand #" << i << " has unhandled type "
5482              << EVT(ArgVT).getEVTString() << "\n";
5483 #endif
5484         llvm_unreachable(nullptr);
5485       }
5486     }
5487   } else {
5488     // All arguments are treated the same.
5489     CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
5490   }
5491   CCInfo.clearWasPPCF128();
5492 
5493   // Assign locations to all of the outgoing aggregate by value arguments.
5494   SmallVector<CCValAssign, 16> ByValArgLocs;
5495   CCState CCByValInfo(CallConv, IsVarArg, MF, ByValArgLocs, *DAG.getContext());
5496 
5497   // Reserve stack space for the allocations in CCInfo.
5498   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
5499 
5500   CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
5501 
5502   // Size of the linkage area, parameter list area and the part of the local
5503   // space variable where copies of aggregates which are passed by value are
5504   // stored.
5505   unsigned NumBytes = CCByValInfo.getNextStackOffset();
5506 
5507   // Calculate by how many bytes the stack has to be adjusted in case of tail
5508   // call optimization.
5509   int SPDiff = CalculateTailCallSPDiff(DAG, IsTailCall, NumBytes);
5510 
5511   // Adjust the stack pointer for the new arguments...
5512   // These operations are automatically eliminated by the prolog/epilog pass
5513   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5514   SDValue CallSeqStart = Chain;
5515 
5516   // Load the return address and frame pointer so it can be moved somewhere else
5517   // later.
5518   SDValue LROp, FPOp;
5519   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5520 
5521   // Set up a copy of the stack pointer for use loading and storing any
5522   // arguments that may not fit in the registers available for argument
5523   // passing.
5524   SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5525 
5526   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5527   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5528   SmallVector<SDValue, 8> MemOpChains;
5529 
5530   bool seenFloatArg = false;
5531   // Walk the register/memloc assignments, inserting copies/loads.
5532   // i - Tracks the index into the list of registers allocated for the call
5533   // RealArgIdx - Tracks the index into the list of actual function arguments
5534   // j - Tracks the index into the list of byval arguments
5535   for (unsigned i = 0, RealArgIdx = 0, j = 0, e = ArgLocs.size();
5536        i != e;
5537        ++i, ++RealArgIdx) {
5538     CCValAssign &VA = ArgLocs[i];
5539     SDValue Arg = OutVals[RealArgIdx];
5540     ISD::ArgFlagsTy Flags = Outs[RealArgIdx].Flags;
5541 
5542     if (Flags.isByVal()) {
5543       // Argument is an aggregate which is passed by value, thus we need to
5544       // create a copy of it in the local variable space of the current stack
5545       // frame (which is the stack frame of the caller) and pass the address of
5546       // this copy to the callee.
5547       assert((j < ByValArgLocs.size()) && "Index out of bounds!");
5548       CCValAssign &ByValVA = ByValArgLocs[j++];
5549       assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
5550 
5551       // Memory reserved in the local variable space of the callers stack frame.
5552       unsigned LocMemOffset = ByValVA.getLocMemOffset();
5553 
5554       SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5555       PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5556                            StackPtr, PtrOff);
5557 
5558       // Create a copy of the argument in the local area of the current
5559       // stack frame.
5560       SDValue MemcpyCall =
5561         CreateCopyOfByValArgument(Arg, PtrOff,
5562                                   CallSeqStart.getNode()->getOperand(0),
5563                                   Flags, DAG, dl);
5564 
5565       // This must go outside the CALLSEQ_START..END.
5566       SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0,
5567                                                      SDLoc(MemcpyCall));
5568       DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5569                              NewCallSeqStart.getNode());
5570       Chain = CallSeqStart = NewCallSeqStart;
5571 
5572       // Pass the address of the aggregate copy on the stack either in a
5573       // physical register or in the parameter list area of the current stack
5574       // frame to the callee.
5575       Arg = PtrOff;
5576     }
5577 
5578     // When useCRBits() is true, there can be i1 arguments.
5579     // It is because getRegisterType(MVT::i1) => MVT::i1,
5580     // and for other integer types getRegisterType() => MVT::i32.
5581     // Extend i1 and ensure callee will get i32.
5582     if (Arg.getValueType() == MVT::i1)
5583       Arg = DAG.getNode(Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
5584                         dl, MVT::i32, Arg);
5585 
5586     if (VA.isRegLoc()) {
5587       seenFloatArg |= VA.getLocVT().isFloatingPoint();
5588       // Put argument in a physical register.
5589       if (Subtarget.hasSPE() && Arg.getValueType() == MVT::f64) {
5590         bool IsLE = Subtarget.isLittleEndian();
5591         SDValue SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5592                         DAG.getIntPtrConstant(IsLE ? 0 : 1, dl));
5593         RegsToPass.push_back(std::make_pair(VA.getLocReg(), SVal.getValue(0)));
5594         SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5595                            DAG.getIntPtrConstant(IsLE ? 1 : 0, dl));
5596         RegsToPass.push_back(std::make_pair(ArgLocs[++i].getLocReg(),
5597                              SVal.getValue(0)));
5598       } else
5599         RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
5600     } else {
5601       // Put argument in the parameter list area of the current stack frame.
5602       assert(VA.isMemLoc());
5603       unsigned LocMemOffset = VA.getLocMemOffset();
5604 
5605       if (!IsTailCall) {
5606         SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5607         PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5608                              StackPtr, PtrOff);
5609 
5610         MemOpChains.push_back(
5611             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
5612       } else {
5613         // Calculate and remember argument location.
5614         CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
5615                                  TailCallArguments);
5616       }
5617     }
5618   }
5619 
5620   if (!MemOpChains.empty())
5621     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
5622 
5623   // Build a sequence of copy-to-reg nodes chained together with token chain
5624   // and flag operands which copy the outgoing args into the appropriate regs.
5625   SDValue InFlag;
5626   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
5627     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
5628                              RegsToPass[i].second, InFlag);
5629     InFlag = Chain.getValue(1);
5630   }
5631 
5632   // Set CR bit 6 to true if this is a vararg call with floating args passed in
5633   // registers.
5634   if (IsVarArg) {
5635     SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
5636     SDValue Ops[] = { Chain, InFlag };
5637 
5638     Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
5639                         dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));
5640 
5641     InFlag = Chain.getValue(1);
5642   }
5643 
5644   if (IsTailCall)
5645     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
5646                     TailCallArguments);
5647 
5648   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
5649                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
5650 }
5651 
5652 // Copy an argument into memory, being careful to do this outside the
5653 // call sequence for the call to which the argument belongs.
5654 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
5655     SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
5656     SelectionDAG &DAG, const SDLoc &dl) const {
5657   SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
5658                         CallSeqStart.getNode()->getOperand(0),
5659                         Flags, DAG, dl);
5660   // The MEMCPY must go outside the CALLSEQ_START..END.
5661   int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
5662   SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
5663                                                  SDLoc(MemcpyCall));
5664   DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5665                          NewCallSeqStart.getNode());
5666   return NewCallSeqStart;
5667 }
5668 
5669 SDValue PPCTargetLowering::LowerCall_64SVR4(
5670     SDValue Chain, SDValue Callee, CallFlags CFlags,
5671     const SmallVectorImpl<ISD::OutputArg> &Outs,
5672     const SmallVectorImpl<SDValue> &OutVals,
5673     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5674     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5675     const CallBase *CB) const {
5676   bool isELFv2ABI = Subtarget.isELFv2ABI();
5677   bool isLittleEndian = Subtarget.isLittleEndian();
5678   unsigned NumOps = Outs.size();
5679   bool IsSibCall = false;
5680   bool IsFastCall = CFlags.CallConv == CallingConv::Fast;
5681 
5682   EVT PtrVT = getPointerTy(DAG.getDataLayout());
5683   unsigned PtrByteSize = 8;
5684 
5685   MachineFunction &MF = DAG.getMachineFunction();
5686 
5687   if (CFlags.IsTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
5688     IsSibCall = true;
5689 
5690   // Mark this function as potentially containing a function that contains a
5691   // tail call. As a consequence the frame pointer will be used for dynamicalloc
5692   // and restoring the callers stack pointer in this functions epilog. This is
5693   // done because by tail calling the called function might overwrite the value
5694   // in this function's (MF) stack pointer stack slot 0(SP).
5695   if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
5696     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5697 
5698   assert(!(IsFastCall && CFlags.IsVarArg) &&
5699          "fastcc not supported on varargs functions");
5700 
5701   // Count how many bytes are to be pushed on the stack, including the linkage
5702   // area, and parameter passing area.  On ELFv1, the linkage area is 48 bytes
5703   // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
5704   // area is 32 bytes reserved space for [SP][CR][LR][TOC].
5705   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
5706   unsigned NumBytes = LinkageSize;
5707   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
5708 
5709   static const MCPhysReg GPR[] = {
5710     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
5711     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
5712   };
5713   static const MCPhysReg VR[] = {
5714     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
5715     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
5716   };
5717 
5718   const unsigned NumGPRs = array_lengthof(GPR);
5719   const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
5720   const unsigned NumVRs  = array_lengthof(VR);
5721 
5722   // On ELFv2, we can avoid allocating the parameter area if all the arguments
5723   // can be passed to the callee in registers.
5724   // For the fast calling convention, there is another check below.
5725   // Note: We should keep consistent with LowerFormalArguments_64SVR4()
5726   bool HasParameterArea = !isELFv2ABI || CFlags.IsVarArg || IsFastCall;
5727   if (!HasParameterArea) {
5728     unsigned ParamAreaSize = NumGPRs * PtrByteSize;
5729     unsigned AvailableFPRs = NumFPRs;
5730     unsigned AvailableVRs = NumVRs;
5731     unsigned NumBytesTmp = NumBytes;
5732     for (unsigned i = 0; i != NumOps; ++i) {
5733       if (Outs[i].Flags.isNest()) continue;
5734       if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags,
5735                                  PtrByteSize, LinkageSize, ParamAreaSize,
5736                                  NumBytesTmp, AvailableFPRs, AvailableVRs))
5737         HasParameterArea = true;
5738     }
5739   }
5740 
5741   // When using the fast calling convention, we don't provide backing for
5742   // arguments that will be in registers.
5743   unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
5744 
5745   // Avoid allocating parameter area for fastcc functions if all the arguments
5746   // can be passed in the registers.
5747   if (IsFastCall)
5748     HasParameterArea = false;
5749 
5750   // Add up all the space actually used.
5751   for (unsigned i = 0; i != NumOps; ++i) {
5752     ISD::ArgFlagsTy Flags = Outs[i].Flags;
5753     EVT ArgVT = Outs[i].VT;
5754     EVT OrigVT = Outs[i].ArgVT;
5755 
5756     if (Flags.isNest())
5757       continue;
5758 
5759     if (IsFastCall) {
5760       if (Flags.isByVal()) {
5761         NumGPRsUsed += (Flags.getByValSize()+7)/8;
5762         if (NumGPRsUsed > NumGPRs)
5763           HasParameterArea = true;
5764       } else {
5765         switch (ArgVT.getSimpleVT().SimpleTy) {
5766         default: llvm_unreachable("Unexpected ValueType for argument!");
5767         case MVT::i1:
5768         case MVT::i32:
5769         case MVT::i64:
5770           if (++NumGPRsUsed <= NumGPRs)
5771             continue;
5772           break;
5773         case MVT::v4i32:
5774         case MVT::v8i16:
5775         case MVT::v16i8:
5776         case MVT::v2f64:
5777         case MVT::v2i64:
5778         case MVT::v1i128:
5779         case MVT::f128:
5780           if (++NumVRsUsed <= NumVRs)
5781             continue;
5782           break;
5783         case MVT::v4f32:
5784           if (++NumVRsUsed <= NumVRs)
5785             continue;
5786           break;
5787         case MVT::f32:
5788         case MVT::f64:
5789           if (++NumFPRsUsed <= NumFPRs)
5790             continue;
5791           break;
5792         }
5793         HasParameterArea = true;
5794       }
5795     }
5796 
5797     /* Respect alignment of argument on the stack.  */
5798     auto Alignement =
5799         CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
5800     NumBytes = alignTo(NumBytes, Alignement);
5801 
5802     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
5803     if (Flags.isInConsecutiveRegsLast())
5804       NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
5805   }
5806 
5807   unsigned NumBytesActuallyUsed = NumBytes;
5808 
5809   // In the old ELFv1 ABI,
5810   // the prolog code of the callee may store up to 8 GPR argument registers to
5811   // the stack, allowing va_start to index over them in memory if its varargs.
5812   // Because we cannot tell if this is needed on the caller side, we have to
5813   // conservatively assume that it is needed.  As such, make sure we have at
5814   // least enough stack space for the caller to store the 8 GPRs.
5815   // In the ELFv2 ABI, we allocate the parameter area iff a callee
5816   // really requires memory operands, e.g. a vararg function.
5817   if (HasParameterArea)
5818     NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
5819   else
5820     NumBytes = LinkageSize;
5821 
5822   // Tail call needs the stack to be aligned.
5823   if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
5824     NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
5825 
5826   int SPDiff = 0;
5827 
5828   // Calculate by how many bytes the stack has to be adjusted in case of tail
5829   // call optimization.
5830   if (!IsSibCall)
5831     SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);
5832 
5833   // To protect arguments on the stack from being clobbered in a tail call,
5834   // force all the loads to happen before doing any other lowering.
5835   if (CFlags.IsTailCall)
5836     Chain = DAG.getStackArgumentTokenFactor(Chain);
5837 
5838   // Adjust the stack pointer for the new arguments...
5839   // These operations are automatically eliminated by the prolog/epilog pass
5840   if (!IsSibCall)
5841     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5842   SDValue CallSeqStart = Chain;
5843 
5844   // Load the return address and frame pointer so it can be move somewhere else
5845   // later.
5846   SDValue LROp, FPOp;
5847   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5848 
5849   // Set up a copy of the stack pointer for use loading and storing any
5850   // arguments that may not fit in the registers available for argument
5851   // passing.
5852   SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
5853 
5854   // Figure out which arguments are going to go in registers, and which in
5855   // memory.  Also, if this is a vararg function, floating point operations
5856   // must be stored to our stack, and loaded into integer regs as well, if
5857   // any integer regs are available for argument passing.
5858   unsigned ArgOffset = LinkageSize;
5859 
5860   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5861   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5862 
5863   SmallVector<SDValue, 8> MemOpChains;
5864   for (unsigned i = 0; i != NumOps; ++i) {
5865     SDValue Arg = OutVals[i];
5866     ISD::ArgFlagsTy Flags = Outs[i].Flags;
5867     EVT ArgVT = Outs[i].VT;
5868     EVT OrigVT = Outs[i].ArgVT;
5869 
5870     // PtrOff will be used to store the current argument to the stack if a
5871     // register cannot be found for it.
5872     SDValue PtrOff;
5873 
5874     // We re-align the argument offset for each argument, except when using the
5875     // fast calling convention, when we need to make sure we do that only when
5876     // we'll actually use a stack slot.
5877     auto ComputePtrOff = [&]() {
5878       /* Respect alignment of argument on the stack.  */
5879       auto Alignment =
5880           CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
5881       ArgOffset = alignTo(ArgOffset, Alignment);
5882 
5883       PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
5884 
5885       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
5886     };
5887 
5888     if (!IsFastCall) {
5889       ComputePtrOff();
5890 
5891       /* Compute GPR index associated with argument offset.  */
5892       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
5893       GPR_idx = std::min(GPR_idx, NumGPRs);
5894     }
5895 
5896     // Promote integers to 64-bit values.
5897     if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
5898       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
5899       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
5900       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
5901     }
5902 
5903     // FIXME memcpy is used way more than necessary.  Correctness first.
5904     // Note: "by value" is code for passing a structure by value, not
5905     // basic types.
5906     if (Flags.isByVal()) {
5907       // Note: Size includes alignment padding, so
5908       //   struct x { short a; char b; }
5909       // will have Size = 4.  With #pragma pack(1), it will have Size = 3.
5910       // These are the proper values we need for right-justifying the
5911       // aggregate in a parameter register.
5912       unsigned Size = Flags.getByValSize();
5913 
5914       // An empty aggregate parameter takes up no storage and no
5915       // registers.
5916       if (Size == 0)
5917         continue;
5918 
5919       if (IsFastCall)
5920         ComputePtrOff();
5921 
5922       // All aggregates smaller than 8 bytes must be passed right-justified.
5923       if (Size==1 || Size==2 || Size==4) {
5924         EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
5925         if (GPR_idx != NumGPRs) {
5926           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
5927                                         MachinePointerInfo(), VT);
5928           MemOpChains.push_back(Load.getValue(1));
5929           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5930 
5931           ArgOffset += PtrByteSize;
5932           continue;
5933         }
5934       }
5935 
5936       if (GPR_idx == NumGPRs && Size < 8) {
5937         SDValue AddPtr = PtrOff;
5938         if (!isLittleEndian) {
5939           SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
5940                                           PtrOff.getValueType());
5941           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
5942         }
5943         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
5944                                                           CallSeqStart,
5945                                                           Flags, DAG, dl);
5946         ArgOffset += PtrByteSize;
5947         continue;
5948       }
5949       // Copy entire object into memory.  There are cases where gcc-generated
5950       // code assumes it is there, even if it could be put entirely into
5951       // registers.  (This is not what the doc says.)
5952 
5953       // FIXME: The above statement is likely due to a misunderstanding of the
5954       // documents.  All arguments must be copied into the parameter area BY
5955       // THE CALLEE in the event that the callee takes the address of any
5956       // formal argument.  That has not yet been implemented.  However, it is
5957       // reasonable to use the stack area as a staging area for the register
5958       // load.
5959 
5960       // Skip this for small aggregates, as we will use the same slot for a
5961       // right-justified copy, below.
5962       if (Size >= 8)
5963         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
5964                                                           CallSeqStart,
5965                                                           Flags, DAG, dl);
5966 
5967       // When a register is available, pass a small aggregate right-justified.
5968       if (Size < 8 && GPR_idx != NumGPRs) {
5969         // The easiest way to get this right-justified in a register
5970         // is to copy the structure into the rightmost portion of a
5971         // local variable slot, then load the whole slot into the
5972         // register.
5973         // FIXME: The memcpy seems to produce pretty awful code for
5974         // small aggregates, particularly for packed ones.
5975         // FIXME: It would be preferable to use the slot in the
5976         // parameter save area instead of a new local variable.
5977         SDValue AddPtr = PtrOff;
5978         if (!isLittleEndian) {
5979           SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType());
5980           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
5981         }
5982         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
5983                                                           CallSeqStart,
5984                                                           Flags, DAG, dl);
5985 
5986         // Load the slot into the register.
5987         SDValue Load =
5988             DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo());
5989         MemOpChains.push_back(Load.getValue(1));
5990         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5991 
5992         // Done with this argument.
5993         ArgOffset += PtrByteSize;
5994         continue;
5995       }
5996 
5997       // For aggregates larger than PtrByteSize, copy the pieces of the
5998       // object that fit into registers from the parameter save area.
5999       for (unsigned j=0; j<Size; j+=PtrByteSize) {
6000         SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6001         SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6002         if (GPR_idx != NumGPRs) {
6003           SDValue Load =
6004               DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
6005           MemOpChains.push_back(Load.getValue(1));
6006           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6007           ArgOffset += PtrByteSize;
6008         } else {
6009           ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
6010           break;
6011         }
6012       }
6013       continue;
6014     }
6015 
6016     switch (Arg.getSimpleValueType().SimpleTy) {
6017     default: llvm_unreachable("Unexpected ValueType for argument!");
6018     case MVT::i1:
6019     case MVT::i32:
6020     case MVT::i64:
6021       if (Flags.isNest()) {
6022         // The 'nest' parameter, if any, is passed in R11.
6023         RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
6024         break;
6025       }
6026 
6027       // These can be scalar arguments or elements of an integer array type
6028       // passed directly.  Clang may use those instead of "byval" aggregate
6029       // types to avoid forcing arguments to memory unnecessarily.
6030       if (GPR_idx != NumGPRs) {
6031         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
6032       } else {
6033         if (IsFastCall)
6034           ComputePtrOff();
6035 
6036         assert(HasParameterArea &&
6037                "Parameter area must exist to pass an argument in memory.");
6038         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6039                          true, CFlags.IsTailCall, false, MemOpChains,
6040                          TailCallArguments, dl);
6041         if (IsFastCall)
6042           ArgOffset += PtrByteSize;
6043       }
6044       if (!IsFastCall)
6045         ArgOffset += PtrByteSize;
6046       break;
6047     case MVT::f32:
6048     case MVT::f64: {
6049       // These can be scalar arguments or elements of a float array type
6050       // passed directly.  The latter are used to implement ELFv2 homogenous
6051       // float aggregates.
6052 
6053       // Named arguments go into FPRs first, and once they overflow, the
6054       // remaining arguments go into GPRs and then the parameter save area.
6055       // Unnamed arguments for vararg functions always go to GPRs and
6056       // then the parameter save area.  For now, put all arguments to vararg
6057       // routines always in both locations (FPR *and* GPR or stack slot).
6058       bool NeedGPROrStack = CFlags.IsVarArg || FPR_idx == NumFPRs;
6059       bool NeededLoad = false;
6060 
6061       // First load the argument into the next available FPR.
6062       if (FPR_idx != NumFPRs)
6063         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6064 
6065       // Next, load the argument into GPR or stack slot if needed.
6066       if (!NeedGPROrStack)
6067         ;
6068       else if (GPR_idx != NumGPRs && !IsFastCall) {
6069         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
6070         // once we support fp <-> gpr moves.
6071 
6072         // In the non-vararg case, this can only ever happen in the
6073         // presence of f32 array types, since otherwise we never run
6074         // out of FPRs before running out of GPRs.
6075         SDValue ArgVal;
6076 
6077         // Double values are always passed in a single GPR.
6078         if (Arg.getValueType() != MVT::f32) {
6079           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
6080 
6081         // Non-array float values are extended and passed in a GPR.
6082         } else if (!Flags.isInConsecutiveRegs()) {
6083           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6084           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6085 
6086         // If we have an array of floats, we collect every odd element
6087         // together with its predecessor into one GPR.
6088         } else if (ArgOffset % PtrByteSize != 0) {
6089           SDValue Lo, Hi;
6090           Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
6091           Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6092           if (!isLittleEndian)
6093             std::swap(Lo, Hi);
6094           ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
6095 
6096         // The final element, if even, goes into the first half of a GPR.
6097         } else if (Flags.isInConsecutiveRegsLast()) {
6098           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6099           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6100           if (!isLittleEndian)
6101             ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
6102                                  DAG.getConstant(32, dl, MVT::i32));
6103 
6104         // Non-final even elements are skipped; they will be handled
6105         // together the with subsequent argument on the next go-around.
6106         } else
6107           ArgVal = SDValue();
6108 
6109         if (ArgVal.getNode())
6110           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
6111       } else {
6112         if (IsFastCall)
6113           ComputePtrOff();
6114 
6115         // Single-precision floating-point values are mapped to the
6116         // second (rightmost) word of the stack doubleword.
6117         if (Arg.getValueType() == MVT::f32 &&
6118             !isLittleEndian && !Flags.isInConsecutiveRegs()) {
6119           SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6120           PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6121         }
6122 
6123         assert(HasParameterArea &&
6124                "Parameter area must exist to pass an argument in memory.");
6125         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6126                          true, CFlags.IsTailCall, false, MemOpChains,
6127                          TailCallArguments, dl);
6128 
6129         NeededLoad = true;
6130       }
6131       // When passing an array of floats, the array occupies consecutive
6132       // space in the argument area; only round up to the next doubleword
6133       // at the end of the array.  Otherwise, each float takes 8 bytes.
6134       if (!IsFastCall || NeededLoad) {
6135         ArgOffset += (Arg.getValueType() == MVT::f32 &&
6136                       Flags.isInConsecutiveRegs()) ? 4 : 8;
6137         if (Flags.isInConsecutiveRegsLast())
6138           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
6139       }
6140       break;
6141     }
6142     case MVT::v4f32:
6143     case MVT::v4i32:
6144     case MVT::v8i16:
6145     case MVT::v16i8:
6146     case MVT::v2f64:
6147     case MVT::v2i64:
6148     case MVT::v1i128:
6149     case MVT::f128:
6150       // These can be scalar arguments or elements of a vector array type
6151       // passed directly.  The latter are used to implement ELFv2 homogenous
6152       // vector aggregates.
6153 
6154       // For a varargs call, named arguments go into VRs or on the stack as
6155       // usual; unnamed arguments always go to the stack or the corresponding
6156       // GPRs when within range.  For now, we always put the value in both
6157       // locations (or even all three).
6158       if (CFlags.IsVarArg) {
6159         assert(HasParameterArea &&
6160                "Parameter area must exist if we have a varargs call.");
6161         // We could elide this store in the case where the object fits
6162         // entirely in R registers.  Maybe later.
6163         SDValue Store =
6164             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6165         MemOpChains.push_back(Store);
6166         if (VR_idx != NumVRs) {
6167           SDValue Load =
6168               DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6169           MemOpChains.push_back(Load.getValue(1));
6170           RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6171         }
6172         ArgOffset += 16;
6173         for (unsigned i=0; i<16; i+=PtrByteSize) {
6174           if (GPR_idx == NumGPRs)
6175             break;
6176           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6177                                    DAG.getConstant(i, dl, PtrVT));
6178           SDValue Load =
6179               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6180           MemOpChains.push_back(Load.getValue(1));
6181           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6182         }
6183         break;
6184       }
6185 
6186       // Non-varargs Altivec params go into VRs or on the stack.
6187       if (VR_idx != NumVRs) {
6188         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6189       } else {
6190         if (IsFastCall)
6191           ComputePtrOff();
6192 
6193         assert(HasParameterArea &&
6194                "Parameter area must exist to pass an argument in memory.");
6195         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6196                          true, CFlags.IsTailCall, true, MemOpChains,
6197                          TailCallArguments, dl);
6198         if (IsFastCall)
6199           ArgOffset += 16;
6200       }
6201 
6202       if (!IsFastCall)
6203         ArgOffset += 16;
6204       break;
6205     }
6206   }
6207 
6208   assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
6209          "mismatch in size of parameter area");
6210   (void)NumBytesActuallyUsed;
6211 
6212   if (!MemOpChains.empty())
6213     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6214 
6215   // Check if this is an indirect call (MTCTR/BCTRL).
6216   // See prepareDescriptorIndirectCall and buildCallOperands for more
6217   // information about calls through function pointers in the 64-bit SVR4 ABI.
6218   if (CFlags.IsIndirect) {
6219     // For 64-bit ELFv2 ABI with PCRel, do not save the TOC of the
6220     // caller in the TOC save area.
6221     if (isTOCSaveRestoreRequired(Subtarget)) {
6222       assert(!CFlags.IsTailCall && "Indirect tails calls not supported");
6223       // Load r2 into a virtual register and store it to the TOC save area.
6224       setUsesTOCBasePtr(DAG);
6225       SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
6226       // TOC save area offset.
6227       unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
6228       SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
6229       SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6230       Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr,
6231                            MachinePointerInfo::getStack(
6232                                DAG.getMachineFunction(), TOCSaveOffset));
6233     }
6234     // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
6235     // This does not mean the MTCTR instruction must use R12; it's easier
6236     // to model this as an extra parameter, so do that.
6237     if (isELFv2ABI && !CFlags.IsPatchPoint)
6238       RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
6239   }
6240 
6241   // Build a sequence of copy-to-reg nodes chained together with token chain
6242   // and flag operands which copy the outgoing args into the appropriate regs.
6243   SDValue InFlag;
6244   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6245     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6246                              RegsToPass[i].second, InFlag);
6247     InFlag = Chain.getValue(1);
6248   }
6249 
6250   if (CFlags.IsTailCall && !IsSibCall)
6251     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6252                     TailCallArguments);
6253 
6254   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
6255                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
6256 }
6257 
6258 static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
6259                    CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
6260                    CCState &State) {
6261 
6262   const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>(
6263       State.getMachineFunction().getSubtarget());
6264   const bool IsPPC64 = Subtarget.isPPC64();
6265   const Align PtrAlign = IsPPC64 ? Align(8) : Align(4);
6266   const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
6267 
6268   if (ValVT.isVector() && !State.getMachineFunction()
6269                                .getTarget()
6270                                .Options.EnableAIXExtendedAltivecABI)
6271     report_fatal_error("the default Altivec AIX ABI is not yet supported");
6272 
6273   if (ValVT == MVT::f128)
6274     report_fatal_error("f128 is unimplemented on AIX.");
6275 
6276   if (ArgFlags.isNest())
6277     report_fatal_error("Nest arguments are unimplemented.");
6278 
6279   static const MCPhysReg GPR_32[] = {// 32-bit registers.
6280                                      PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6281                                      PPC::R7, PPC::R8, PPC::R9, PPC::R10};
6282   static const MCPhysReg GPR_64[] = {// 64-bit registers.
6283                                      PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6284                                      PPC::X7, PPC::X8, PPC::X9, PPC::X10};
6285 
6286   static const MCPhysReg VR[] = {// Vector registers.
6287                                  PPC::V2,  PPC::V3,  PPC::V4,  PPC::V5,
6288                                  PPC::V6,  PPC::V7,  PPC::V8,  PPC::V9,
6289                                  PPC::V10, PPC::V11, PPC::V12, PPC::V13};
6290 
6291   if (ArgFlags.isByVal()) {
6292     if (ArgFlags.getNonZeroByValAlign() > PtrAlign)
6293       report_fatal_error("Pass-by-value arguments with alignment greater than "
6294                          "register width are not supported.");
6295 
6296     const unsigned ByValSize = ArgFlags.getByValSize();
6297 
6298     // An empty aggregate parameter takes up no storage and no registers,
6299     // but needs a MemLoc for a stack slot for the formal arguments side.
6300     if (ByValSize == 0) {
6301       State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
6302                                        State.getNextStackOffset(), RegVT,
6303                                        LocInfo));
6304       return false;
6305     }
6306 
6307     const unsigned StackSize = alignTo(ByValSize, PtrAlign);
6308     unsigned Offset = State.AllocateStack(StackSize, PtrAlign);
6309     for (const unsigned E = Offset + StackSize; Offset < E;
6310          Offset += PtrAlign.value()) {
6311       if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
6312         State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6313       else {
6314         State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
6315                                          Offset, MVT::INVALID_SIMPLE_VALUE_TYPE,
6316                                          LocInfo));
6317         break;
6318       }
6319     }
6320     return false;
6321   }
6322 
6323   // Arguments always reserve parameter save area.
6324   switch (ValVT.SimpleTy) {
6325   default:
6326     report_fatal_error("Unhandled value type for argument.");
6327   case MVT::i64:
6328     // i64 arguments should have been split to i32 for PPC32.
6329     assert(IsPPC64 && "PPC32 should have split i64 values.");
6330     LLVM_FALLTHROUGH;
6331   case MVT::i1:
6332   case MVT::i32: {
6333     const unsigned Offset = State.AllocateStack(PtrAlign.value(), PtrAlign);
6334     // AIX integer arguments are always passed in register width.
6335     if (ValVT.getFixedSizeInBits() < RegVT.getFixedSizeInBits())
6336       LocInfo = ArgFlags.isSExt() ? CCValAssign::LocInfo::SExt
6337                                   : CCValAssign::LocInfo::ZExt;
6338     if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
6339       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6340     else
6341       State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, RegVT, LocInfo));
6342 
6343     return false;
6344   }
6345   case MVT::f32:
6346   case MVT::f64: {
6347     // Parameter save area (PSA) is reserved even if the float passes in fpr.
6348     const unsigned StoreSize = LocVT.getStoreSize();
6349     // Floats are always 4-byte aligned in the PSA on AIX.
6350     // This includes f64 in 64-bit mode for ABI compatibility.
6351     const unsigned Offset =
6352         State.AllocateStack(IsPPC64 ? 8 : StoreSize, Align(4));
6353     unsigned FReg = State.AllocateReg(FPR);
6354     if (FReg)
6355       State.addLoc(CCValAssign::getReg(ValNo, ValVT, FReg, LocVT, LocInfo));
6356 
6357     // Reserve and initialize GPRs or initialize the PSA as required.
6358     for (unsigned I = 0; I < StoreSize; I += PtrAlign.value()) {
6359       if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) {
6360         assert(FReg && "An FPR should be available when a GPR is reserved.");
6361         if (State.isVarArg()) {
6362           // Successfully reserved GPRs are only initialized for vararg calls.
6363           // Custom handling is required for:
6364           //   f64 in PPC32 needs to be split into 2 GPRs.
6365           //   f32 in PPC64 needs to occupy only lower 32 bits of 64-bit GPR.
6366           State.addLoc(
6367               CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6368         }
6369       } else {
6370         // If there are insufficient GPRs, the PSA needs to be initialized.
6371         // Initialization occurs even if an FPR was initialized for
6372         // compatibility with the AIX XL compiler. The full memory for the
6373         // argument will be initialized even if a prior word is saved in GPR.
6374         // A custom memLoc is used when the argument also passes in FPR so
6375         // that the callee handling can skip over it easily.
6376         State.addLoc(
6377             FReg ? CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT,
6378                                              LocInfo)
6379                  : CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6380         break;
6381       }
6382     }
6383 
6384     return false;
6385   }
6386   case MVT::v4f32:
6387   case MVT::v4i32:
6388   case MVT::v8i16:
6389   case MVT::v16i8:
6390   case MVT::v2i64:
6391   case MVT::v2f64:
6392   case MVT::v1i128: {
6393     if (State.isVarArg())
6394       report_fatal_error(
6395           "variadic arguments for vector types are unimplemented for AIX");
6396 
6397     if (unsigned VReg = State.AllocateReg(VR))
6398       State.addLoc(CCValAssign::getReg(ValNo, ValVT, VReg, LocVT, LocInfo));
6399     else {
6400       report_fatal_error(
6401           "passing vector parameters to the stack is unimplemented for AIX");
6402     }
6403     return false;
6404   }
6405   }
6406   return true;
6407 }
6408 
6409 static const TargetRegisterClass *getRegClassForSVT(MVT::SimpleValueType SVT,
6410                                                     bool IsPPC64) {
6411   assert((IsPPC64 || SVT != MVT::i64) &&
6412          "i64 should have been split for 32-bit codegen.");
6413 
6414   switch (SVT) {
6415   default:
6416     report_fatal_error("Unexpected value type for formal argument");
6417   case MVT::i1:
6418   case MVT::i32:
6419   case MVT::i64:
6420     return IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
6421   case MVT::f32:
6422     return &PPC::F4RCRegClass;
6423   case MVT::f64:
6424     return &PPC::F8RCRegClass;
6425   case MVT::v4f32:
6426   case MVT::v4i32:
6427   case MVT::v8i16:
6428   case MVT::v16i8:
6429   case MVT::v2i64:
6430   case MVT::v2f64:
6431   case MVT::v1i128:
6432     return &PPC::VRRCRegClass;
6433   }
6434 }
6435 
6436 static SDValue truncateScalarIntegerArg(ISD::ArgFlagsTy Flags, EVT ValVT,
6437                                         SelectionDAG &DAG, SDValue ArgValue,
6438                                         MVT LocVT, const SDLoc &dl) {
6439   assert(ValVT.isScalarInteger() && LocVT.isScalarInteger());
6440   assert(ValVT.getFixedSizeInBits() < LocVT.getFixedSizeInBits());
6441 
6442   if (Flags.isSExt())
6443     ArgValue = DAG.getNode(ISD::AssertSext, dl, LocVT, ArgValue,
6444                            DAG.getValueType(ValVT));
6445   else if (Flags.isZExt())
6446     ArgValue = DAG.getNode(ISD::AssertZext, dl, LocVT, ArgValue,
6447                            DAG.getValueType(ValVT));
6448 
6449   return DAG.getNode(ISD::TRUNCATE, dl, ValVT, ArgValue);
6450 }
6451 
6452 static unsigned mapArgRegToOffsetAIX(unsigned Reg, const PPCFrameLowering *FL) {
6453   const unsigned LASize = FL->getLinkageSize();
6454 
6455   if (PPC::GPRCRegClass.contains(Reg)) {
6456     assert(Reg >= PPC::R3 && Reg <= PPC::R10 &&
6457            "Reg must be a valid argument register!");
6458     return LASize + 4 * (Reg - PPC::R3);
6459   }
6460 
6461   if (PPC::G8RCRegClass.contains(Reg)) {
6462     assert(Reg >= PPC::X3 && Reg <= PPC::X10 &&
6463            "Reg must be a valid argument register!");
6464     return LASize + 8 * (Reg - PPC::X3);
6465   }
6466 
6467   llvm_unreachable("Only general purpose registers expected.");
6468 }
6469 
6470 //   AIX ABI Stack Frame Layout:
6471 //
6472 //   Low Memory +--------------------------------------------+
6473 //   SP   +---> | Back chain                                 | ---+
6474 //        |     +--------------------------------------------+    |
6475 //        |     | Saved Condition Register                   |    |
6476 //        |     +--------------------------------------------+    |
6477 //        |     | Saved Linkage Register                     |    |
6478 //        |     +--------------------------------------------+    | Linkage Area
6479 //        |     | Reserved for compilers                     |    |
6480 //        |     +--------------------------------------------+    |
6481 //        |     | Reserved for binders                       |    |
6482 //        |     +--------------------------------------------+    |
6483 //        |     | Saved TOC pointer                          | ---+
6484 //        |     +--------------------------------------------+
6485 //        |     | Parameter save area                        |
6486 //        |     +--------------------------------------------+
6487 //        |     | Alloca space                               |
6488 //        |     +--------------------------------------------+
6489 //        |     | Local variable space                       |
6490 //        |     +--------------------------------------------+
6491 //        |     | Float/int conversion temporary             |
6492 //        |     +--------------------------------------------+
6493 //        |     | Save area for AltiVec registers            |
6494 //        |     +--------------------------------------------+
6495 //        |     | AltiVec alignment padding                  |
6496 //        |     +--------------------------------------------+
6497 //        |     | Save area for VRSAVE register              |
6498 //        |     +--------------------------------------------+
6499 //        |     | Save area for General Purpose registers    |
6500 //        |     +--------------------------------------------+
6501 //        |     | Save area for Floating Point registers     |
6502 //        |     +--------------------------------------------+
6503 //        +---- | Back chain                                 |
6504 // High Memory  +--------------------------------------------+
6505 //
6506 //  Specifications:
6507 //  AIX 7.2 Assembler Language Reference
6508 //  Subroutine linkage convention
6509 
6510 SDValue PPCTargetLowering::LowerFormalArguments_AIX(
6511     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
6512     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6513     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
6514 
6515   assert((CallConv == CallingConv::C || CallConv == CallingConv::Cold ||
6516           CallConv == CallingConv::Fast) &&
6517          "Unexpected calling convention!");
6518 
6519   if (getTargetMachine().Options.GuaranteedTailCallOpt)
6520     report_fatal_error("Tail call support is unimplemented on AIX.");
6521 
6522   if (useSoftFloat())
6523     report_fatal_error("Soft float support is unimplemented on AIX.");
6524 
6525   const PPCSubtarget &Subtarget =
6526       static_cast<const PPCSubtarget &>(DAG.getSubtarget());
6527 
6528   const bool IsPPC64 = Subtarget.isPPC64();
6529   const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
6530 
6531   // Assign locations to all of the incoming arguments.
6532   SmallVector<CCValAssign, 16> ArgLocs;
6533   MachineFunction &MF = DAG.getMachineFunction();
6534   MachineFrameInfo &MFI = MF.getFrameInfo();
6535   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
6536   CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
6537 
6538   const EVT PtrVT = getPointerTy(MF.getDataLayout());
6539   // Reserve space for the linkage area on the stack.
6540   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6541   CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
6542   CCInfo.AnalyzeFormalArguments(Ins, CC_AIX);
6543 
6544   SmallVector<SDValue, 8> MemOps;
6545 
6546   for (size_t I = 0, End = ArgLocs.size(); I != End; /* No increment here */) {
6547     CCValAssign &VA = ArgLocs[I++];
6548     MVT LocVT = VA.getLocVT();
6549     ISD::ArgFlagsTy Flags = Ins[VA.getValNo()].Flags;
6550     if (VA.isMemLoc() && VA.getValVT().isVector())
6551       report_fatal_error(
6552           "passing vector parameters to the stack is unimplemented for AIX");
6553 
6554     // For compatibility with the AIX XL compiler, the float args in the
6555     // parameter save area are initialized even if the argument is available
6556     // in register.  The caller is required to initialize both the register
6557     // and memory, however, the callee can choose to expect it in either.
6558     // The memloc is dismissed here because the argument is retrieved from
6559     // the register.
6560     if (VA.isMemLoc() && VA.needsCustom())
6561       continue;
6562 
6563     if (VA.isRegLoc()) {
6564       if (VA.getValVT().isScalarInteger())
6565         FuncInfo->appendParameterType(PPCFunctionInfo::FixedType);
6566       else if (VA.getValVT().isFloatingPoint() && !VA.getValVT().isVector())
6567         FuncInfo->appendParameterType(VA.getValVT().SimpleTy == MVT::f32
6568                                           ? PPCFunctionInfo::ShortFloatPoint
6569                                           : PPCFunctionInfo::LongFloatPoint);
6570     }
6571 
6572     if (Flags.isByVal() && VA.isMemLoc()) {
6573       const unsigned Size =
6574           alignTo(Flags.getByValSize() ? Flags.getByValSize() : PtrByteSize,
6575                   PtrByteSize);
6576       const int FI = MF.getFrameInfo().CreateFixedObject(
6577           Size, VA.getLocMemOffset(), /* IsImmutable */ false,
6578           /* IsAliased */ true);
6579       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
6580       InVals.push_back(FIN);
6581 
6582       continue;
6583     }
6584 
6585     if (Flags.isByVal()) {
6586       assert(VA.isRegLoc() && "MemLocs should already be handled.");
6587 
6588       const MCPhysReg ArgReg = VA.getLocReg();
6589       const PPCFrameLowering *FL = Subtarget.getFrameLowering();
6590 
6591       if (Flags.getNonZeroByValAlign() > PtrByteSize)
6592         report_fatal_error("Over aligned byvals not supported yet.");
6593 
6594       const unsigned StackSize = alignTo(Flags.getByValSize(), PtrByteSize);
6595       const int FI = MF.getFrameInfo().CreateFixedObject(
6596           StackSize, mapArgRegToOffsetAIX(ArgReg, FL), /* IsImmutable */ false,
6597           /* IsAliased */ true);
6598       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
6599       InVals.push_back(FIN);
6600 
6601       // Add live ins for all the RegLocs for the same ByVal.
6602       const TargetRegisterClass *RegClass =
6603           IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
6604 
6605       auto HandleRegLoc = [&, RegClass, LocVT](const MCPhysReg PhysReg,
6606                                                unsigned Offset) {
6607         const unsigned VReg = MF.addLiveIn(PhysReg, RegClass);
6608         // Since the callers side has left justified the aggregate in the
6609         // register, we can simply store the entire register into the stack
6610         // slot.
6611         SDValue CopyFrom = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
6612         // The store to the fixedstack object is needed becuase accessing a
6613         // field of the ByVal will use a gep and load. Ideally we will optimize
6614         // to extracting the value from the register directly, and elide the
6615         // stores when the arguments address is not taken, but that will need to
6616         // be future work.
6617         SDValue Store = DAG.getStore(
6618             CopyFrom.getValue(1), dl, CopyFrom,
6619             DAG.getObjectPtrOffset(dl, FIN, TypeSize::Fixed(Offset)),
6620             MachinePointerInfo::getFixedStack(MF, FI, Offset));
6621 
6622         MemOps.push_back(Store);
6623       };
6624 
6625       unsigned Offset = 0;
6626       HandleRegLoc(VA.getLocReg(), Offset);
6627       Offset += PtrByteSize;
6628       for (; Offset != StackSize && ArgLocs[I].isRegLoc();
6629            Offset += PtrByteSize) {
6630         assert(ArgLocs[I].getValNo() == VA.getValNo() &&
6631                "RegLocs should be for ByVal argument.");
6632 
6633         const CCValAssign RL = ArgLocs[I++];
6634         HandleRegLoc(RL.getLocReg(), Offset);
6635         FuncInfo->appendParameterType(PPCFunctionInfo::FixedType);
6636       }
6637 
6638       if (Offset != StackSize) {
6639         assert(ArgLocs[I].getValNo() == VA.getValNo() &&
6640                "Expected MemLoc for remaining bytes.");
6641         assert(ArgLocs[I].isMemLoc() && "Expected MemLoc for remaining bytes.");
6642         // Consume the MemLoc.The InVal has already been emitted, so nothing
6643         // more needs to be done.
6644         ++I;
6645       }
6646 
6647       continue;
6648     }
6649 
6650     EVT ValVT = VA.getValVT();
6651     if (VA.isRegLoc() && !VA.needsCustom()) {
6652       MVT::SimpleValueType SVT = ValVT.getSimpleVT().SimpleTy;
6653       unsigned VReg =
6654           MF.addLiveIn(VA.getLocReg(), getRegClassForSVT(SVT, IsPPC64));
6655       SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
6656       if (ValVT.isScalarInteger() &&
6657           (ValVT.getFixedSizeInBits() < LocVT.getFixedSizeInBits())) {
6658         ArgValue =
6659             truncateScalarIntegerArg(Flags, ValVT, DAG, ArgValue, LocVT, dl);
6660       }
6661       InVals.push_back(ArgValue);
6662       continue;
6663     }
6664     if (VA.isMemLoc()) {
6665       const unsigned LocSize = LocVT.getStoreSize();
6666       const unsigned ValSize = ValVT.getStoreSize();
6667       assert((ValSize <= LocSize) &&
6668              "Object size is larger than size of MemLoc");
6669       int CurArgOffset = VA.getLocMemOffset();
6670       // Objects are right-justified because AIX is big-endian.
6671       if (LocSize > ValSize)
6672         CurArgOffset += LocSize - ValSize;
6673       // Potential tail calls could cause overwriting of argument stack slots.
6674       const bool IsImmutable =
6675           !(getTargetMachine().Options.GuaranteedTailCallOpt &&
6676             (CallConv == CallingConv::Fast));
6677       int FI = MFI.CreateFixedObject(ValSize, CurArgOffset, IsImmutable);
6678       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
6679       SDValue ArgValue =
6680           DAG.getLoad(ValVT, dl, Chain, FIN, MachinePointerInfo());
6681       InVals.push_back(ArgValue);
6682       continue;
6683     }
6684   }
6685 
6686   // On AIX a minimum of 8 words is saved to the parameter save area.
6687   const unsigned MinParameterSaveArea = 8 * PtrByteSize;
6688   // Area that is at least reserved in the caller of this function.
6689   unsigned CallerReservedArea =
6690       std::max(CCInfo.getNextStackOffset(), LinkageSize + MinParameterSaveArea);
6691 
6692   // Set the size that is at least reserved in caller of this function. Tail
6693   // call optimized function's reserved stack space needs to be aligned so
6694   // that taking the difference between two stack areas will result in an
6695   // aligned stack.
6696   CallerReservedArea =
6697       EnsureStackAlignment(Subtarget.getFrameLowering(), CallerReservedArea);
6698   FuncInfo->setMinReservedArea(CallerReservedArea);
6699 
6700   if (isVarArg) {
6701     FuncInfo->setVarArgsFrameIndex(
6702         MFI.CreateFixedObject(PtrByteSize, CCInfo.getNextStackOffset(), true));
6703     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
6704 
6705     static const MCPhysReg GPR_32[] = {PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6706                                        PPC::R7, PPC::R8, PPC::R9, PPC::R10};
6707 
6708     static const MCPhysReg GPR_64[] = {PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6709                                        PPC::X7, PPC::X8, PPC::X9, PPC::X10};
6710     const unsigned NumGPArgRegs = array_lengthof(IsPPC64 ? GPR_64 : GPR_32);
6711 
6712     // The fixed integer arguments of a variadic function are stored to the
6713     // VarArgsFrameIndex on the stack so that they may be loaded by
6714     // dereferencing the result of va_next.
6715     for (unsigned GPRIndex =
6716              (CCInfo.getNextStackOffset() - LinkageSize) / PtrByteSize;
6717          GPRIndex < NumGPArgRegs; ++GPRIndex) {
6718 
6719       const unsigned VReg =
6720           IsPPC64 ? MF.addLiveIn(GPR_64[GPRIndex], &PPC::G8RCRegClass)
6721                   : MF.addLiveIn(GPR_32[GPRIndex], &PPC::GPRCRegClass);
6722 
6723       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
6724       SDValue Store =
6725           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
6726       MemOps.push_back(Store);
6727       // Increment the address for the next argument to store.
6728       SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
6729       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
6730     }
6731   }
6732 
6733   if (!MemOps.empty())
6734     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
6735 
6736   return Chain;
6737 }
6738 
6739 SDValue PPCTargetLowering::LowerCall_AIX(
6740     SDValue Chain, SDValue Callee, CallFlags CFlags,
6741     const SmallVectorImpl<ISD::OutputArg> &Outs,
6742     const SmallVectorImpl<SDValue> &OutVals,
6743     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6744     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
6745     const CallBase *CB) const {
6746   // See PPCTargetLowering::LowerFormalArguments_AIX() for a description of the
6747   // AIX ABI stack frame layout.
6748 
6749   assert((CFlags.CallConv == CallingConv::C ||
6750           CFlags.CallConv == CallingConv::Cold ||
6751           CFlags.CallConv == CallingConv::Fast) &&
6752          "Unexpected calling convention!");
6753 
6754   if (CFlags.IsPatchPoint)
6755     report_fatal_error("This call type is unimplemented on AIX.");
6756 
6757   const PPCSubtarget& Subtarget =
6758       static_cast<const PPCSubtarget&>(DAG.getSubtarget());
6759 
6760   MachineFunction &MF = DAG.getMachineFunction();
6761   SmallVector<CCValAssign, 16> ArgLocs;
6762   CCState CCInfo(CFlags.CallConv, CFlags.IsVarArg, MF, ArgLocs,
6763                  *DAG.getContext());
6764 
6765   // Reserve space for the linkage save area (LSA) on the stack.
6766   // In both PPC32 and PPC64 there are 6 reserved slots in the LSA:
6767   //   [SP][CR][LR][2 x reserved][TOC].
6768   // The LSA is 24 bytes (6x4) in PPC32 and 48 bytes (6x8) in PPC64.
6769   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6770   const bool IsPPC64 = Subtarget.isPPC64();
6771   const EVT PtrVT = getPointerTy(DAG.getDataLayout());
6772   const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
6773   CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
6774   CCInfo.AnalyzeCallOperands(Outs, CC_AIX);
6775 
6776   // The prolog code of the callee may store up to 8 GPR argument registers to
6777   // the stack, allowing va_start to index over them in memory if the callee
6778   // is variadic.
6779   // Because we cannot tell if this is needed on the caller side, we have to
6780   // conservatively assume that it is needed.  As such, make sure we have at
6781   // least enough stack space for the caller to store the 8 GPRs.
6782   const unsigned MinParameterSaveAreaSize = 8 * PtrByteSize;
6783   const unsigned NumBytes = std::max(LinkageSize + MinParameterSaveAreaSize,
6784                                      CCInfo.getNextStackOffset());
6785 
6786   // Adjust the stack pointer for the new arguments...
6787   // These operations are automatically eliminated by the prolog/epilog pass.
6788   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6789   SDValue CallSeqStart = Chain;
6790 
6791   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6792   SmallVector<SDValue, 8> MemOpChains;
6793 
6794   // Set up a copy of the stack pointer for loading and storing any
6795   // arguments that may not fit in the registers available for argument
6796   // passing.
6797   const SDValue StackPtr = IsPPC64 ? DAG.getRegister(PPC::X1, MVT::i64)
6798                                    : DAG.getRegister(PPC::R1, MVT::i32);
6799 
6800   for (unsigned I = 0, E = ArgLocs.size(); I != E;) {
6801     const unsigned ValNo = ArgLocs[I].getValNo();
6802     SDValue Arg = OutVals[ValNo];
6803     ISD::ArgFlagsTy Flags = Outs[ValNo].Flags;
6804 
6805     if (Flags.isByVal()) {
6806       const unsigned ByValSize = Flags.getByValSize();
6807 
6808       // Nothing to do for zero-sized ByVals on the caller side.
6809       if (!ByValSize) {
6810         ++I;
6811         continue;
6812       }
6813 
6814       auto GetLoad = [&](EVT VT, unsigned LoadOffset) {
6815         return DAG.getExtLoad(
6816             ISD::ZEXTLOAD, dl, PtrVT, Chain,
6817             (LoadOffset != 0)
6818                 ? DAG.getObjectPtrOffset(dl, Arg, TypeSize::Fixed(LoadOffset))
6819                 : Arg,
6820             MachinePointerInfo(), VT);
6821       };
6822 
6823       unsigned LoadOffset = 0;
6824 
6825       // Initialize registers, which are fully occupied by the by-val argument.
6826       while (LoadOffset + PtrByteSize <= ByValSize && ArgLocs[I].isRegLoc()) {
6827         SDValue Load = GetLoad(PtrVT, LoadOffset);
6828         MemOpChains.push_back(Load.getValue(1));
6829         LoadOffset += PtrByteSize;
6830         const CCValAssign &ByValVA = ArgLocs[I++];
6831         assert(ByValVA.getValNo() == ValNo &&
6832                "Unexpected location for pass-by-value argument.");
6833         RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), Load));
6834       }
6835 
6836       if (LoadOffset == ByValSize)
6837         continue;
6838 
6839       // There must be one more loc to handle the remainder.
6840       assert(ArgLocs[I].getValNo() == ValNo &&
6841              "Expected additional location for by-value argument.");
6842 
6843       if (ArgLocs[I].isMemLoc()) {
6844         assert(LoadOffset < ByValSize && "Unexpected memloc for by-val arg.");
6845         const CCValAssign &ByValVA = ArgLocs[I++];
6846         ISD::ArgFlagsTy MemcpyFlags = Flags;
6847         // Only memcpy the bytes that don't pass in register.
6848         MemcpyFlags.setByValSize(ByValSize - LoadOffset);
6849         Chain = CallSeqStart = createMemcpyOutsideCallSeq(
6850             (LoadOffset != 0)
6851                 ? DAG.getObjectPtrOffset(dl, Arg, TypeSize::Fixed(LoadOffset))
6852                 : Arg,
6853             DAG.getObjectPtrOffset(dl, StackPtr,
6854                                    TypeSize::Fixed(ByValVA.getLocMemOffset())),
6855             CallSeqStart, MemcpyFlags, DAG, dl);
6856         continue;
6857       }
6858 
6859       // Initialize the final register residue.
6860       // Any residue that occupies the final by-val arg register must be
6861       // left-justified on AIX. Loads must be a power-of-2 size and cannot be
6862       // larger than the ByValSize. For example: a 7 byte by-val arg requires 4,
6863       // 2 and 1 byte loads.
6864       const unsigned ResidueBytes = ByValSize % PtrByteSize;
6865       assert(ResidueBytes != 0 && LoadOffset + PtrByteSize > ByValSize &&
6866              "Unexpected register residue for by-value argument.");
6867       SDValue ResidueVal;
6868       for (unsigned Bytes = 0; Bytes != ResidueBytes;) {
6869         const unsigned N = PowerOf2Floor(ResidueBytes - Bytes);
6870         const MVT VT =
6871             N == 1 ? MVT::i8
6872                    : ((N == 2) ? MVT::i16 : (N == 4 ? MVT::i32 : MVT::i64));
6873         SDValue Load = GetLoad(VT, LoadOffset);
6874         MemOpChains.push_back(Load.getValue(1));
6875         LoadOffset += N;
6876         Bytes += N;
6877 
6878         // By-val arguments are passed left-justfied in register.
6879         // Every load here needs to be shifted, otherwise a full register load
6880         // should have been used.
6881         assert(PtrVT.getSimpleVT().getSizeInBits() > (Bytes * 8) &&
6882                "Unexpected load emitted during handling of pass-by-value "
6883                "argument.");
6884         unsigned NumSHLBits = PtrVT.getSimpleVT().getSizeInBits() - (Bytes * 8);
6885         EVT ShiftAmountTy =
6886             getShiftAmountTy(Load->getValueType(0), DAG.getDataLayout());
6887         SDValue SHLAmt = DAG.getConstant(NumSHLBits, dl, ShiftAmountTy);
6888         SDValue ShiftedLoad =
6889             DAG.getNode(ISD::SHL, dl, Load.getValueType(), Load, SHLAmt);
6890         ResidueVal = ResidueVal ? DAG.getNode(ISD::OR, dl, PtrVT, ResidueVal,
6891                                               ShiftedLoad)
6892                                 : ShiftedLoad;
6893       }
6894 
6895       const CCValAssign &ByValVA = ArgLocs[I++];
6896       RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), ResidueVal));
6897       continue;
6898     }
6899 
6900     CCValAssign &VA = ArgLocs[I++];
6901     const MVT LocVT = VA.getLocVT();
6902     const MVT ValVT = VA.getValVT();
6903 
6904     if (VA.isMemLoc() && VA.getValVT().isVector())
6905       report_fatal_error(
6906           "passing vector parameters to the stack is unimplemented for AIX");
6907 
6908     switch (VA.getLocInfo()) {
6909     default:
6910       report_fatal_error("Unexpected argument extension type.");
6911     case CCValAssign::Full:
6912       break;
6913     case CCValAssign::ZExt:
6914       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
6915       break;
6916     case CCValAssign::SExt:
6917       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
6918       break;
6919     }
6920 
6921     if (VA.isRegLoc() && !VA.needsCustom()) {
6922       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
6923       continue;
6924     }
6925 
6926     if (VA.isMemLoc()) {
6927       SDValue PtrOff =
6928           DAG.getConstant(VA.getLocMemOffset(), dl, StackPtr.getValueType());
6929       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6930       MemOpChains.push_back(
6931           DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
6932 
6933       continue;
6934     }
6935 
6936     // Custom handling is used for GPR initializations for vararg float
6937     // arguments.
6938     assert(VA.isRegLoc() && VA.needsCustom() && CFlags.IsVarArg &&
6939            ValVT.isFloatingPoint() && LocVT.isInteger() &&
6940            "Unexpected register handling for calling convention.");
6941 
6942     SDValue ArgAsInt =
6943         DAG.getBitcast(MVT::getIntegerVT(ValVT.getSizeInBits()), Arg);
6944 
6945     if (Arg.getValueType().getStoreSize() == LocVT.getStoreSize())
6946       // f32 in 32-bit GPR
6947       // f64 in 64-bit GPR
6948       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgAsInt));
6949     else if (Arg.getValueType().getFixedSizeInBits() <
6950              LocVT.getFixedSizeInBits())
6951       // f32 in 64-bit GPR.
6952       RegsToPass.push_back(std::make_pair(
6953           VA.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, LocVT)));
6954     else {
6955       // f64 in two 32-bit GPRs
6956       // The 2 GPRs are marked custom and expected to be adjacent in ArgLocs.
6957       assert(Arg.getValueType() == MVT::f64 && CFlags.IsVarArg && !IsPPC64 &&
6958              "Unexpected custom register for argument!");
6959       CCValAssign &GPR1 = VA;
6960       SDValue MSWAsI64 = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgAsInt,
6961                                      DAG.getConstant(32, dl, MVT::i8));
6962       RegsToPass.push_back(std::make_pair(
6963           GPR1.getLocReg(), DAG.getZExtOrTrunc(MSWAsI64, dl, MVT::i32)));
6964 
6965       if (I != E) {
6966         // If only 1 GPR was available, there will only be one custom GPR and
6967         // the argument will also pass in memory.
6968         CCValAssign &PeekArg = ArgLocs[I];
6969         if (PeekArg.isRegLoc() && PeekArg.getValNo() == PeekArg.getValNo()) {
6970           assert(PeekArg.needsCustom() && "A second custom GPR is expected.");
6971           CCValAssign &GPR2 = ArgLocs[I++];
6972           RegsToPass.push_back(std::make_pair(
6973               GPR2.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, MVT::i32)));
6974         }
6975       }
6976     }
6977   }
6978 
6979   if (!MemOpChains.empty())
6980     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6981 
6982   // For indirect calls, we need to save the TOC base to the stack for
6983   // restoration after the call.
6984   if (CFlags.IsIndirect) {
6985     assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
6986     const MCRegister TOCBaseReg = Subtarget.getTOCPointerRegister();
6987     const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
6988     const MVT PtrVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
6989     const unsigned TOCSaveOffset =
6990         Subtarget.getFrameLowering()->getTOCSaveOffset();
6991 
6992     setUsesTOCBasePtr(DAG);
6993     SDValue Val = DAG.getCopyFromReg(Chain, dl, TOCBaseReg, PtrVT);
6994     SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
6995     SDValue StackPtr = DAG.getRegister(StackPtrReg, PtrVT);
6996     SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6997     Chain = DAG.getStore(
6998         Val.getValue(1), dl, Val, AddPtr,
6999         MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
7000   }
7001 
7002   // Build a sequence of copy-to-reg nodes chained together with token chain
7003   // and flag operands which copy the outgoing args into the appropriate regs.
7004   SDValue InFlag;
7005   for (auto Reg : RegsToPass) {
7006     Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, InFlag);
7007     InFlag = Chain.getValue(1);
7008   }
7009 
7010   const int SPDiff = 0;
7011   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
7012                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
7013 }
7014 
7015 bool
7016 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
7017                                   MachineFunction &MF, bool isVarArg,
7018                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
7019                                   LLVMContext &Context) const {
7020   SmallVector<CCValAssign, 16> RVLocs;
7021   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
7022   return CCInfo.CheckReturn(
7023       Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7024                 ? RetCC_PPC_Cold
7025                 : RetCC_PPC);
7026 }
7027 
7028 SDValue
7029 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
7030                                bool isVarArg,
7031                                const SmallVectorImpl<ISD::OutputArg> &Outs,
7032                                const SmallVectorImpl<SDValue> &OutVals,
7033                                const SDLoc &dl, SelectionDAG &DAG) const {
7034   SmallVector<CCValAssign, 16> RVLocs;
7035   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
7036                  *DAG.getContext());
7037   CCInfo.AnalyzeReturn(Outs,
7038                        (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7039                            ? RetCC_PPC_Cold
7040                            : RetCC_PPC);
7041 
7042   SDValue Flag;
7043   SmallVector<SDValue, 4> RetOps(1, Chain);
7044 
7045   // Copy the result values into the output registers.
7046   for (unsigned i = 0, RealResIdx = 0; i != RVLocs.size(); ++i, ++RealResIdx) {
7047     CCValAssign &VA = RVLocs[i];
7048     assert(VA.isRegLoc() && "Can only return in registers!");
7049 
7050     SDValue Arg = OutVals[RealResIdx];
7051 
7052     switch (VA.getLocInfo()) {
7053     default: llvm_unreachable("Unknown loc info!");
7054     case CCValAssign::Full: break;
7055     case CCValAssign::AExt:
7056       Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
7057       break;
7058     case CCValAssign::ZExt:
7059       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
7060       break;
7061     case CCValAssign::SExt:
7062       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
7063       break;
7064     }
7065     if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
7066       bool isLittleEndian = Subtarget.isLittleEndian();
7067       // Legalize ret f64 -> ret 2 x i32.
7068       SDValue SVal =
7069           DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7070                       DAG.getIntPtrConstant(isLittleEndian ? 0 : 1, dl));
7071       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7072       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7073       SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7074                          DAG.getIntPtrConstant(isLittleEndian ? 1 : 0, dl));
7075       Flag = Chain.getValue(1);
7076       VA = RVLocs[++i]; // skip ahead to next loc
7077       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7078     } else
7079       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
7080     Flag = Chain.getValue(1);
7081     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7082   }
7083 
7084   RetOps[0] = Chain;  // Update chain.
7085 
7086   // Add the flag if we have it.
7087   if (Flag.getNode())
7088     RetOps.push_back(Flag);
7089 
7090   return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
7091 }
7092 
7093 SDValue
7094 PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
7095                                                 SelectionDAG &DAG) const {
7096   SDLoc dl(Op);
7097 
7098   // Get the correct type for integers.
7099   EVT IntVT = Op.getValueType();
7100 
7101   // Get the inputs.
7102   SDValue Chain = Op.getOperand(0);
7103   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7104   // Build a DYNAREAOFFSET node.
7105   SDValue Ops[2] = {Chain, FPSIdx};
7106   SDVTList VTs = DAG.getVTList(IntVT);
7107   return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
7108 }
7109 
7110 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
7111                                              SelectionDAG &DAG) const {
7112   // When we pop the dynamic allocation we need to restore the SP link.
7113   SDLoc dl(Op);
7114 
7115   // Get the correct type for pointers.
7116   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7117 
7118   // Construct the stack pointer operand.
7119   bool isPPC64 = Subtarget.isPPC64();
7120   unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
7121   SDValue StackPtr = DAG.getRegister(SP, PtrVT);
7122 
7123   // Get the operands for the STACKRESTORE.
7124   SDValue Chain = Op.getOperand(0);
7125   SDValue SaveSP = Op.getOperand(1);
7126 
7127   // Load the old link SP.
7128   SDValue LoadLinkSP =
7129       DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());
7130 
7131   // Restore the stack pointer.
7132   Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
7133 
7134   // Store the old link SP.
7135   return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
7136 }
7137 
7138 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
7139   MachineFunction &MF = DAG.getMachineFunction();
7140   bool isPPC64 = Subtarget.isPPC64();
7141   EVT PtrVT = getPointerTy(MF.getDataLayout());
7142 
7143   // Get current frame pointer save index.  The users of this index will be
7144   // primarily DYNALLOC instructions.
7145   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7146   int RASI = FI->getReturnAddrSaveIndex();
7147 
7148   // If the frame pointer save index hasn't been defined yet.
7149   if (!RASI) {
7150     // Find out what the fix offset of the frame pointer save area.
7151     int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
7152     // Allocate the frame index for frame pointer save area.
7153     RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
7154     // Save the result.
7155     FI->setReturnAddrSaveIndex(RASI);
7156   }
7157   return DAG.getFrameIndex(RASI, PtrVT);
7158 }
7159 
7160 SDValue
7161 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
7162   MachineFunction &MF = DAG.getMachineFunction();
7163   bool isPPC64 = Subtarget.isPPC64();
7164   EVT PtrVT = getPointerTy(MF.getDataLayout());
7165 
7166   // Get current frame pointer save index.  The users of this index will be
7167   // primarily DYNALLOC instructions.
7168   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7169   int FPSI = FI->getFramePointerSaveIndex();
7170 
7171   // If the frame pointer save index hasn't been defined yet.
7172   if (!FPSI) {
7173     // Find out what the fix offset of the frame pointer save area.
7174     int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
7175     // Allocate the frame index for frame pointer save area.
7176     FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
7177     // Save the result.
7178     FI->setFramePointerSaveIndex(FPSI);
7179   }
7180   return DAG.getFrameIndex(FPSI, PtrVT);
7181 }
7182 
7183 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
7184                                                    SelectionDAG &DAG) const {
7185   MachineFunction &MF = DAG.getMachineFunction();
7186   // Get the inputs.
7187   SDValue Chain = Op.getOperand(0);
7188   SDValue Size  = Op.getOperand(1);
7189   SDLoc dl(Op);
7190 
7191   // Get the correct type for pointers.
7192   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7193   // Negate the size.
7194   SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
7195                                 DAG.getConstant(0, dl, PtrVT), Size);
7196   // Construct a node for the frame pointer save index.
7197   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7198   SDValue Ops[3] = { Chain, NegSize, FPSIdx };
7199   SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
7200   if (hasInlineStackProbe(MF))
7201     return DAG.getNode(PPCISD::PROBED_ALLOCA, dl, VTs, Ops);
7202   return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
7203 }
7204 
7205 SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
7206                                                      SelectionDAG &DAG) const {
7207   MachineFunction &MF = DAG.getMachineFunction();
7208 
7209   bool isPPC64 = Subtarget.isPPC64();
7210   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7211 
7212   int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false);
7213   return DAG.getFrameIndex(FI, PtrVT);
7214 }
7215 
7216 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
7217                                                SelectionDAG &DAG) const {
7218   SDLoc DL(Op);
7219   return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
7220                      DAG.getVTList(MVT::i32, MVT::Other),
7221                      Op.getOperand(0), Op.getOperand(1));
7222 }
7223 
7224 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
7225                                                 SelectionDAG &DAG) const {
7226   SDLoc DL(Op);
7227   return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
7228                      Op.getOperand(0), Op.getOperand(1));
7229 }
7230 
7231 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
7232   if (Op.getValueType().isVector())
7233     return LowerVectorLoad(Op, DAG);
7234 
7235   assert(Op.getValueType() == MVT::i1 &&
7236          "Custom lowering only for i1 loads");
7237 
7238   // First, load 8 bits into 32 bits, then truncate to 1 bit.
7239 
7240   SDLoc dl(Op);
7241   LoadSDNode *LD = cast<LoadSDNode>(Op);
7242 
7243   SDValue Chain = LD->getChain();
7244   SDValue BasePtr = LD->getBasePtr();
7245   MachineMemOperand *MMO = LD->getMemOperand();
7246 
7247   SDValue NewLD =
7248       DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain,
7249                      BasePtr, MVT::i8, MMO);
7250   SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);
7251 
7252   SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
7253   return DAG.getMergeValues(Ops, dl);
7254 }
7255 
7256 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
7257   if (Op.getOperand(1).getValueType().isVector())
7258     return LowerVectorStore(Op, DAG);
7259 
7260   assert(Op.getOperand(1).getValueType() == MVT::i1 &&
7261          "Custom lowering only for i1 stores");
7262 
7263   // First, zero extend to 32 bits, then use a truncating store to 8 bits.
7264 
7265   SDLoc dl(Op);
7266   StoreSDNode *ST = cast<StoreSDNode>(Op);
7267 
7268   SDValue Chain = ST->getChain();
7269   SDValue BasePtr = ST->getBasePtr();
7270   SDValue Value = ST->getValue();
7271   MachineMemOperand *MMO = ST->getMemOperand();
7272 
7273   Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()),
7274                       Value);
7275   return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
7276 }
7277 
7278 // FIXME: Remove this once the ANDI glue bug is fixed:
7279 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
7280   assert(Op.getValueType() == MVT::i1 &&
7281          "Custom lowering only for i1 results");
7282 
7283   SDLoc DL(Op);
7284   return DAG.getNode(PPCISD::ANDI_rec_1_GT_BIT, DL, MVT::i1, Op.getOperand(0));
7285 }
7286 
7287 SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op,
7288                                                SelectionDAG &DAG) const {
7289 
7290   // Implements a vector truncate that fits in a vector register as a shuffle.
7291   // We want to legalize vector truncates down to where the source fits in
7292   // a vector register (and target is therefore smaller than vector register
7293   // size).  At that point legalization will try to custom lower the sub-legal
7294   // result and get here - where we can contain the truncate as a single target
7295   // operation.
7296 
7297   // For example a trunc <2 x i16> to <2 x i8> could be visualized as follows:
7298   //   <MSB1|LSB1, MSB2|LSB2> to <LSB1, LSB2>
7299   //
7300   // We will implement it for big-endian ordering as this (where x denotes
7301   // undefined):
7302   //   < MSB1|LSB1, MSB2|LSB2, uu, uu, uu, uu, uu, uu> to
7303   //   < LSB1, LSB2, u, u, u, u, u, u, u, u, u, u, u, u, u, u>
7304   //
7305   // The same operation in little-endian ordering will be:
7306   //   <uu, uu, uu, uu, uu, uu, LSB2|MSB2, LSB1|MSB1> to
7307   //   <u, u, u, u, u, u, u, u, u, u, u, u, u, u, LSB2, LSB1>
7308 
7309   EVT TrgVT = Op.getValueType();
7310   assert(TrgVT.isVector() && "Vector type expected.");
7311   unsigned TrgNumElts = TrgVT.getVectorNumElements();
7312   EVT EltVT = TrgVT.getVectorElementType();
7313   if (!isOperationCustom(Op.getOpcode(), TrgVT) ||
7314       TrgVT.getSizeInBits() > 128 || !isPowerOf2_32(TrgNumElts) ||
7315       !isPowerOf2_32(EltVT.getSizeInBits()))
7316     return SDValue();
7317 
7318   SDValue N1 = Op.getOperand(0);
7319   EVT SrcVT = N1.getValueType();
7320   unsigned SrcSize = SrcVT.getSizeInBits();
7321   if (SrcSize > 256 ||
7322       !isPowerOf2_32(SrcVT.getVectorNumElements()) ||
7323       !isPowerOf2_32(SrcVT.getVectorElementType().getSizeInBits()))
7324     return SDValue();
7325   if (SrcSize == 256 && SrcVT.getVectorNumElements() < 2)
7326     return SDValue();
7327 
7328   unsigned WideNumElts = 128 / EltVT.getSizeInBits();
7329   EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
7330 
7331   SDLoc DL(Op);
7332   SDValue Op1, Op2;
7333   if (SrcSize == 256) {
7334     EVT VecIdxTy = getVectorIdxTy(DAG.getDataLayout());
7335     EVT SplitVT =
7336         N1.getValueType().getHalfNumVectorElementsVT(*DAG.getContext());
7337     unsigned SplitNumElts = SplitVT.getVectorNumElements();
7338     Op1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, N1,
7339                       DAG.getConstant(0, DL, VecIdxTy));
7340     Op2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, N1,
7341                       DAG.getConstant(SplitNumElts, DL, VecIdxTy));
7342   }
7343   else {
7344     Op1 = SrcSize == 128 ? N1 : widenVec(DAG, N1, DL);
7345     Op2 = DAG.getUNDEF(WideVT);
7346   }
7347 
7348   // First list the elements we want to keep.
7349   unsigned SizeMult = SrcSize / TrgVT.getSizeInBits();
7350   SmallVector<int, 16> ShuffV;
7351   if (Subtarget.isLittleEndian())
7352     for (unsigned i = 0; i < TrgNumElts; ++i)
7353       ShuffV.push_back(i * SizeMult);
7354   else
7355     for (unsigned i = 1; i <= TrgNumElts; ++i)
7356       ShuffV.push_back(i * SizeMult - 1);
7357 
7358   // Populate the remaining elements with undefs.
7359   for (unsigned i = TrgNumElts; i < WideNumElts; ++i)
7360     // ShuffV.push_back(i + WideNumElts);
7361     ShuffV.push_back(WideNumElts + 1);
7362 
7363   Op1 = DAG.getNode(ISD::BITCAST, DL, WideVT, Op1);
7364   Op2 = DAG.getNode(ISD::BITCAST, DL, WideVT, Op2);
7365   return DAG.getVectorShuffle(WideVT, DL, Op1, Op2, ShuffV);
7366 }
7367 
7368 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when
7369 /// possible.
7370 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
7371   // Not FP, or using SPE? Not a fsel.
7372   if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
7373       !Op.getOperand(2).getValueType().isFloatingPoint() || Subtarget.hasSPE())
7374     return Op;
7375 
7376   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
7377 
7378   EVT ResVT = Op.getValueType();
7379   EVT CmpVT = Op.getOperand(0).getValueType();
7380   SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
7381   SDValue TV  = Op.getOperand(2), FV  = Op.getOperand(3);
7382   SDLoc dl(Op);
7383   SDNodeFlags Flags = Op.getNode()->getFlags();
7384 
7385   // We have xsmaxcdp/xsmincdp which are OK to emit even in the
7386   // presence of infinities.
7387   if (Subtarget.hasP9Vector() && LHS == TV && RHS == FV) {
7388     switch (CC) {
7389     default:
7390       break;
7391     case ISD::SETOGT:
7392     case ISD::SETGT:
7393       return DAG.getNode(PPCISD::XSMAXCDP, dl, Op.getValueType(), LHS, RHS);
7394     case ISD::SETOLT:
7395     case ISD::SETLT:
7396       return DAG.getNode(PPCISD::XSMINCDP, dl, Op.getValueType(), LHS, RHS);
7397     }
7398   }
7399 
7400   // We might be able to do better than this under some circumstances, but in
7401   // general, fsel-based lowering of select is a finite-math-only optimization.
7402   // For more information, see section F.3 of the 2.06 ISA specification.
7403   // With ISA 3.0
7404   if ((!DAG.getTarget().Options.NoInfsFPMath && !Flags.hasNoInfs()) ||
7405       (!DAG.getTarget().Options.NoNaNsFPMath && !Flags.hasNoNaNs()))
7406     return Op;
7407 
7408   // If the RHS of the comparison is a 0.0, we don't need to do the
7409   // subtraction at all.
7410   SDValue Sel1;
7411   if (isFloatingPointZero(RHS))
7412     switch (CC) {
7413     default: break;       // SETUO etc aren't handled by fsel.
7414     case ISD::SETNE:
7415       std::swap(TV, FV);
7416       LLVM_FALLTHROUGH;
7417     case ISD::SETEQ:
7418       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
7419         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7420       Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
7421       if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
7422         Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
7423       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7424                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
7425     case ISD::SETULT:
7426     case ISD::SETLT:
7427       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
7428       LLVM_FALLTHROUGH;
7429     case ISD::SETOGE:
7430     case ISD::SETGE:
7431       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
7432         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7433       return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
7434     case ISD::SETUGT:
7435     case ISD::SETGT:
7436       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
7437       LLVM_FALLTHROUGH;
7438     case ISD::SETOLE:
7439     case ISD::SETLE:
7440       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
7441         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7442       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7443                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
7444     }
7445 
7446   SDValue Cmp;
7447   switch (CC) {
7448   default: break;       // SETUO etc aren't handled by fsel.
7449   case ISD::SETNE:
7450     std::swap(TV, FV);
7451     LLVM_FALLTHROUGH;
7452   case ISD::SETEQ:
7453     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
7454     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7455       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7456     Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
7457     if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
7458       Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
7459     return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7460                        DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
7461   case ISD::SETULT:
7462   case ISD::SETLT:
7463     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
7464     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7465       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7466     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
7467   case ISD::SETOGE:
7468   case ISD::SETGE:
7469     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
7470     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7471       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7472     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
7473   case ISD::SETUGT:
7474   case ISD::SETGT:
7475     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
7476     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7477       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7478     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
7479   case ISD::SETOLE:
7480   case ISD::SETLE:
7481     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
7482     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7483       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7484     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
7485   }
7486   return Op;
7487 }
7488 
7489 static unsigned getPPCStrictOpcode(unsigned Opc) {
7490   switch (Opc) {
7491   default:
7492     llvm_unreachable("No strict version of this opcode!");
7493   case PPCISD::FCTIDZ:
7494     return PPCISD::STRICT_FCTIDZ;
7495   case PPCISD::FCTIWZ:
7496     return PPCISD::STRICT_FCTIWZ;
7497   case PPCISD::FCTIDUZ:
7498     return PPCISD::STRICT_FCTIDUZ;
7499   case PPCISD::FCTIWUZ:
7500     return PPCISD::STRICT_FCTIWUZ;
7501   case PPCISD::FCFID:
7502     return PPCISD::STRICT_FCFID;
7503   case PPCISD::FCFIDU:
7504     return PPCISD::STRICT_FCFIDU;
7505   case PPCISD::FCFIDS:
7506     return PPCISD::STRICT_FCFIDS;
7507   case PPCISD::FCFIDUS:
7508     return PPCISD::STRICT_FCFIDUS;
7509   }
7510 }
7511 
7512 static SDValue convertFPToInt(SDValue Op, SelectionDAG &DAG,
7513                               const PPCSubtarget &Subtarget) {
7514   SDLoc dl(Op);
7515   bool IsStrict = Op->isStrictFPOpcode();
7516   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
7517                   Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
7518 
7519   // TODO: Any other flags to propagate?
7520   SDNodeFlags Flags;
7521   Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
7522 
7523   // For strict nodes, source is the second operand.
7524   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
7525   SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
7526   assert(Src.getValueType().isFloatingPoint());
7527   if (Src.getValueType() == MVT::f32) {
7528     if (IsStrict) {
7529       Src =
7530           DAG.getNode(ISD::STRICT_FP_EXTEND, dl,
7531                       DAG.getVTList(MVT::f64, MVT::Other), {Chain, Src}, Flags);
7532       Chain = Src.getValue(1);
7533     } else
7534       Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
7535   }
7536   SDValue Conv;
7537   unsigned Opc = ISD::DELETED_NODE;
7538   switch (Op.getSimpleValueType().SimpleTy) {
7539   default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
7540   case MVT::i32:
7541     Opc = IsSigned ? PPCISD::FCTIWZ
7542                    : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ);
7543     break;
7544   case MVT::i64:
7545     assert((IsSigned || Subtarget.hasFPCVT()) &&
7546            "i64 FP_TO_UINT is supported only with FPCVT");
7547     Opc = IsSigned ? PPCISD::FCTIDZ : PPCISD::FCTIDUZ;
7548   }
7549   if (IsStrict) {
7550     Opc = getPPCStrictOpcode(Opc);
7551     Conv = DAG.getNode(Opc, dl, DAG.getVTList(MVT::f64, MVT::Other),
7552                        {Chain, Src}, Flags);
7553   } else {
7554     Conv = DAG.getNode(Opc, dl, MVT::f64, Src);
7555   }
7556   return Conv;
7557 }
7558 
7559 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
7560                                                SelectionDAG &DAG,
7561                                                const SDLoc &dl) const {
7562   SDValue Tmp = convertFPToInt(Op, DAG, Subtarget);
7563   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
7564                   Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
7565   bool IsStrict = Op->isStrictFPOpcode();
7566 
7567   // Convert the FP value to an int value through memory.
7568   bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() &&
7569                   (IsSigned || Subtarget.hasFPCVT());
7570   SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64);
7571   int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
7572   MachinePointerInfo MPI =
7573       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
7574 
7575   // Emit a store to the stack slot.
7576   SDValue Chain = IsStrict ? Tmp.getValue(1) : DAG.getEntryNode();
7577   Align Alignment(DAG.getEVTAlign(Tmp.getValueType()));
7578   if (i32Stack) {
7579     MachineFunction &MF = DAG.getMachineFunction();
7580     Alignment = Align(4);
7581     MachineMemOperand *MMO =
7582         MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Alignment);
7583     SDValue Ops[] = { Chain, Tmp, FIPtr };
7584     Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
7585               DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
7586   } else
7587     Chain = DAG.getStore(Chain, dl, Tmp, FIPtr, MPI, Alignment);
7588 
7589   // Result is a load from the stack slot.  If loading 4 bytes, make sure to
7590   // add in a bias on big endian.
7591   if (Op.getValueType() == MVT::i32 && !i32Stack) {
7592     FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
7593                         DAG.getConstant(4, dl, FIPtr.getValueType()));
7594     MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4);
7595   }
7596 
7597   RLI.Chain = Chain;
7598   RLI.Ptr = FIPtr;
7599   RLI.MPI = MPI;
7600   RLI.Alignment = Alignment;
7601 }
7602 
7603 /// Custom lowers floating point to integer conversions to use
7604 /// the direct move instructions available in ISA 2.07 to avoid the
7605 /// need for load/store combinations.
7606 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op,
7607                                                     SelectionDAG &DAG,
7608                                                     const SDLoc &dl) const {
7609   SDValue Conv = convertFPToInt(Op, DAG, Subtarget);
7610   SDValue Mov = DAG.getNode(PPCISD::MFVSR, dl, Op.getValueType(), Conv);
7611   if (Op->isStrictFPOpcode())
7612     return DAG.getMergeValues({Mov, Conv.getValue(1)}, dl);
7613   else
7614     return Mov;
7615 }
7616 
7617 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
7618                                           const SDLoc &dl) const {
7619   bool IsStrict = Op->isStrictFPOpcode();
7620   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
7621                   Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
7622   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
7623   EVT SrcVT = Src.getValueType();
7624   EVT DstVT = Op.getValueType();
7625 
7626   // FP to INT conversions are legal for f128.
7627   if (SrcVT == MVT::f128)
7628     return Subtarget.hasP9Vector() ? Op : SDValue();
7629 
7630   // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
7631   // PPC (the libcall is not available).
7632   if (SrcVT == MVT::ppcf128) {
7633     if (DstVT == MVT::i32) {
7634       // TODO: Conservatively pass only nofpexcept flag here. Need to check and
7635       // set other fast-math flags to FP operations in both strict and
7636       // non-strict cases. (FP_TO_SINT, FSUB)
7637       SDNodeFlags Flags;
7638       Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
7639 
7640       if (IsSigned) {
7641         SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Src,
7642                                  DAG.getIntPtrConstant(0, dl));
7643         SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Src,
7644                                  DAG.getIntPtrConstant(1, dl));
7645 
7646         // Add the two halves of the long double in round-to-zero mode, and use
7647         // a smaller FP_TO_SINT.
7648         if (IsStrict) {
7649           SDValue Res = DAG.getNode(PPCISD::STRICT_FADDRTZ, dl,
7650                                     DAG.getVTList(MVT::f64, MVT::Other),
7651                                     {Op.getOperand(0), Lo, Hi}, Flags);
7652           return DAG.getNode(ISD::STRICT_FP_TO_SINT, dl,
7653                              DAG.getVTList(MVT::i32, MVT::Other),
7654                              {Res.getValue(1), Res}, Flags);
7655         } else {
7656           SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi);
7657           return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res);
7658         }
7659       } else {
7660         const uint64_t TwoE31[] = {0x41e0000000000000LL, 0};
7661         APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31));
7662         SDValue Cst = DAG.getConstantFP(APF, dl, SrcVT);
7663         SDValue SignMask = DAG.getConstant(0x80000000, dl, DstVT);
7664         if (IsStrict) {
7665           // Sel = Src < 0x80000000
7666           // FltOfs = select Sel, 0.0, 0x80000000
7667           // IntOfs = select Sel, 0, 0x80000000
7668           // Result = fp_to_sint(Src - FltOfs) ^ IntOfs
7669           SDValue Chain = Op.getOperand(0);
7670           EVT SetCCVT =
7671               getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), SrcVT);
7672           EVT DstSetCCVT =
7673               getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), DstVT);
7674           SDValue Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT,
7675                                      Chain, true);
7676           Chain = Sel.getValue(1);
7677 
7678           SDValue FltOfs = DAG.getSelect(
7679               dl, SrcVT, Sel, DAG.getConstantFP(0.0, dl, SrcVT), Cst);
7680           Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT);
7681 
7682           SDValue Val = DAG.getNode(ISD::STRICT_FSUB, dl,
7683                                     DAG.getVTList(SrcVT, MVT::Other),
7684                                     {Chain, Src, FltOfs}, Flags);
7685           Chain = Val.getValue(1);
7686           SDValue SInt = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl,
7687                                      DAG.getVTList(DstVT, MVT::Other),
7688                                      {Chain, Val}, Flags);
7689           Chain = SInt.getValue(1);
7690           SDValue IntOfs = DAG.getSelect(
7691               dl, DstVT, Sel, DAG.getConstant(0, dl, DstVT), SignMask);
7692           SDValue Result = DAG.getNode(ISD::XOR, dl, DstVT, SInt, IntOfs);
7693           return DAG.getMergeValues({Result, Chain}, dl);
7694         } else {
7695           // X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X
7696           // FIXME: generated code sucks.
7697           SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128, Src, Cst);
7698           True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True);
7699           True = DAG.getNode(ISD::ADD, dl, MVT::i32, True, SignMask);
7700           SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
7701           return DAG.getSelectCC(dl, Src, Cst, True, False, ISD::SETGE);
7702         }
7703       }
7704     }
7705 
7706     return SDValue();
7707   }
7708 
7709   if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
7710     return LowerFP_TO_INTDirectMove(Op, DAG, dl);
7711 
7712   ReuseLoadInfo RLI;
7713   LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
7714 
7715   return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI,
7716                      RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
7717 }
7718 
7719 // We're trying to insert a regular store, S, and then a load, L. If the
7720 // incoming value, O, is a load, we might just be able to have our load use the
7721 // address used by O. However, we don't know if anything else will store to
7722 // that address before we can load from it. To prevent this situation, we need
7723 // to insert our load, L, into the chain as a peer of O. To do this, we give L
7724 // the same chain operand as O, we create a token factor from the chain results
7725 // of O and L, and we replace all uses of O's chain result with that token
7726 // factor (see spliceIntoChain below for this last part).
7727 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
7728                                             ReuseLoadInfo &RLI,
7729                                             SelectionDAG &DAG,
7730                                             ISD::LoadExtType ET) const {
7731   // Conservatively skip reusing for constrained FP nodes.
7732   if (Op->isStrictFPOpcode())
7733     return false;
7734 
7735   SDLoc dl(Op);
7736   bool ValidFPToUint = Op.getOpcode() == ISD::FP_TO_UINT &&
7737                        (Subtarget.hasFPCVT() || Op.getValueType() == MVT::i32);
7738   if (ET == ISD::NON_EXTLOAD &&
7739       (ValidFPToUint || Op.getOpcode() == ISD::FP_TO_SINT) &&
7740       isOperationLegalOrCustom(Op.getOpcode(),
7741                                Op.getOperand(0).getValueType())) {
7742 
7743     LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
7744     return true;
7745   }
7746 
7747   LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
7748   if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
7749       LD->isNonTemporal())
7750     return false;
7751   if (LD->getMemoryVT() != MemVT)
7752     return false;
7753 
7754   // If the result of the load is an illegal type, then we can't build a
7755   // valid chain for reuse since the legalised loads and token factor node that
7756   // ties the legalised loads together uses a different output chain then the
7757   // illegal load.
7758   if (!isTypeLegal(LD->getValueType(0)))
7759     return false;
7760 
7761   RLI.Ptr = LD->getBasePtr();
7762   if (LD->isIndexed() && !LD->getOffset().isUndef()) {
7763     assert(LD->getAddressingMode() == ISD::PRE_INC &&
7764            "Non-pre-inc AM on PPC?");
7765     RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
7766                           LD->getOffset());
7767   }
7768 
7769   RLI.Chain = LD->getChain();
7770   RLI.MPI = LD->getPointerInfo();
7771   RLI.IsDereferenceable = LD->isDereferenceable();
7772   RLI.IsInvariant = LD->isInvariant();
7773   RLI.Alignment = LD->getAlign();
7774   RLI.AAInfo = LD->getAAInfo();
7775   RLI.Ranges = LD->getRanges();
7776 
7777   RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
7778   return true;
7779 }
7780 
7781 // Given the head of the old chain, ResChain, insert a token factor containing
7782 // it and NewResChain, and make users of ResChain now be users of that token
7783 // factor.
7784 // TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead.
7785 void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
7786                                         SDValue NewResChain,
7787                                         SelectionDAG &DAG) const {
7788   if (!ResChain)
7789     return;
7790 
7791   SDLoc dl(NewResChain);
7792 
7793   SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
7794                            NewResChain, DAG.getUNDEF(MVT::Other));
7795   assert(TF.getNode() != NewResChain.getNode() &&
7796          "A new TF really is required here");
7797 
7798   DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
7799   DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
7800 }
7801 
7802 /// Analyze profitability of direct move
7803 /// prefer float load to int load plus direct move
7804 /// when there is no integer use of int load
7805 bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
7806   SDNode *Origin = Op.getOperand(0).getNode();
7807   if (Origin->getOpcode() != ISD::LOAD)
7808     return true;
7809 
7810   // If there is no LXSIBZX/LXSIHZX, like Power8,
7811   // prefer direct move if the memory size is 1 or 2 bytes.
7812   MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand();
7813   if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2)
7814     return true;
7815 
7816   for (SDNode::use_iterator UI = Origin->use_begin(),
7817                             UE = Origin->use_end();
7818        UI != UE; ++UI) {
7819 
7820     // Only look at the users of the loaded value.
7821     if (UI.getUse().get().getResNo() != 0)
7822       continue;
7823 
7824     if (UI->getOpcode() != ISD::SINT_TO_FP &&
7825         UI->getOpcode() != ISD::UINT_TO_FP &&
7826         UI->getOpcode() != ISD::STRICT_SINT_TO_FP &&
7827         UI->getOpcode() != ISD::STRICT_UINT_TO_FP)
7828       return true;
7829   }
7830 
7831   return false;
7832 }
7833 
7834 static SDValue convertIntToFP(SDValue Op, SDValue Src, SelectionDAG &DAG,
7835                               const PPCSubtarget &Subtarget,
7836                               SDValue Chain = SDValue()) {
7837   bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP ||
7838                   Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
7839   SDLoc dl(Op);
7840 
7841   // TODO: Any other flags to propagate?
7842   SDNodeFlags Flags;
7843   Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
7844 
7845   // If we have FCFIDS, then use it when converting to single-precision.
7846   // Otherwise, convert to double-precision and then round.
7847   bool IsSingle = Op.getValueType() == MVT::f32 && Subtarget.hasFPCVT();
7848   unsigned ConvOpc = IsSingle ? (IsSigned ? PPCISD::FCFIDS : PPCISD::FCFIDUS)
7849                               : (IsSigned ? PPCISD::FCFID : PPCISD::FCFIDU);
7850   EVT ConvTy = IsSingle ? MVT::f32 : MVT::f64;
7851   if (Op->isStrictFPOpcode()) {
7852     if (!Chain)
7853       Chain = Op.getOperand(0);
7854     return DAG.getNode(getPPCStrictOpcode(ConvOpc), dl,
7855                        DAG.getVTList(ConvTy, MVT::Other), {Chain, Src}, Flags);
7856   } else
7857     return DAG.getNode(ConvOpc, dl, ConvTy, Src);
7858 }
7859 
7860 /// Custom lowers integer to floating point conversions to use
7861 /// the direct move instructions available in ISA 2.07 to avoid the
7862 /// need for load/store combinations.
7863 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
7864                                                     SelectionDAG &DAG,
7865                                                     const SDLoc &dl) const {
7866   assert((Op.getValueType() == MVT::f32 ||
7867           Op.getValueType() == MVT::f64) &&
7868          "Invalid floating point type as target of conversion");
7869   assert(Subtarget.hasFPCVT() &&
7870          "Int to FP conversions with direct moves require FPCVT");
7871   SDValue Src = Op.getOperand(Op->isStrictFPOpcode() ? 1 : 0);
7872   bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
7873   bool Signed = Op.getOpcode() == ISD::SINT_TO_FP ||
7874                 Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
7875   unsigned MovOpc = (WordInt && !Signed) ? PPCISD::MTVSRZ : PPCISD::MTVSRA;
7876   SDValue Mov = DAG.getNode(MovOpc, dl, MVT::f64, Src);
7877   return convertIntToFP(Op, Mov, DAG, Subtarget);
7878 }
7879 
7880 static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl) {
7881 
7882   EVT VecVT = Vec.getValueType();
7883   assert(VecVT.isVector() && "Expected a vector type.");
7884   assert(VecVT.getSizeInBits() < 128 && "Vector is already full width.");
7885 
7886   EVT EltVT = VecVT.getVectorElementType();
7887   unsigned WideNumElts = 128 / EltVT.getSizeInBits();
7888   EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
7889 
7890   unsigned NumConcat = WideNumElts / VecVT.getVectorNumElements();
7891   SmallVector<SDValue, 16> Ops(NumConcat);
7892   Ops[0] = Vec;
7893   SDValue UndefVec = DAG.getUNDEF(VecVT);
7894   for (unsigned i = 1; i < NumConcat; ++i)
7895     Ops[i] = UndefVec;
7896 
7897   return DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Ops);
7898 }
7899 
7900 SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
7901                                                 const SDLoc &dl) const {
7902   bool IsStrict = Op->isStrictFPOpcode();
7903   unsigned Opc = Op.getOpcode();
7904   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
7905   assert((Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP ||
7906           Opc == ISD::STRICT_UINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP) &&
7907          "Unexpected conversion type");
7908   assert((Op.getValueType() == MVT::v2f64 || Op.getValueType() == MVT::v4f32) &&
7909          "Supports conversions to v2f64/v4f32 only.");
7910 
7911   // TODO: Any other flags to propagate?
7912   SDNodeFlags Flags;
7913   Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
7914 
7915   bool SignedConv = Opc == ISD::SINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP;
7916   bool FourEltRes = Op.getValueType() == MVT::v4f32;
7917 
7918   SDValue Wide = widenVec(DAG, Src, dl);
7919   EVT WideVT = Wide.getValueType();
7920   unsigned WideNumElts = WideVT.getVectorNumElements();
7921   MVT IntermediateVT = FourEltRes ? MVT::v4i32 : MVT::v2i64;
7922 
7923   SmallVector<int, 16> ShuffV;
7924   for (unsigned i = 0; i < WideNumElts; ++i)
7925     ShuffV.push_back(i + WideNumElts);
7926 
7927   int Stride = FourEltRes ? WideNumElts / 4 : WideNumElts / 2;
7928   int SaveElts = FourEltRes ? 4 : 2;
7929   if (Subtarget.isLittleEndian())
7930     for (int i = 0; i < SaveElts; i++)
7931       ShuffV[i * Stride] = i;
7932   else
7933     for (int i = 1; i <= SaveElts; i++)
7934       ShuffV[i * Stride - 1] = i - 1;
7935 
7936   SDValue ShuffleSrc2 =
7937       SignedConv ? DAG.getUNDEF(WideVT) : DAG.getConstant(0, dl, WideVT);
7938   SDValue Arrange = DAG.getVectorShuffle(WideVT, dl, Wide, ShuffleSrc2, ShuffV);
7939 
7940   SDValue Extend;
7941   if (SignedConv) {
7942     Arrange = DAG.getBitcast(IntermediateVT, Arrange);
7943     EVT ExtVT = Src.getValueType();
7944     if (Subtarget.hasP9Altivec())
7945       ExtVT = EVT::getVectorVT(*DAG.getContext(), WideVT.getVectorElementType(),
7946                                IntermediateVT.getVectorNumElements());
7947 
7948     Extend = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, IntermediateVT, Arrange,
7949                          DAG.getValueType(ExtVT));
7950   } else
7951     Extend = DAG.getNode(ISD::BITCAST, dl, IntermediateVT, Arrange);
7952 
7953   if (IsStrict)
7954     return DAG.getNode(Opc, dl, DAG.getVTList(Op.getValueType(), MVT::Other),
7955                        {Op.getOperand(0), Extend}, Flags);
7956 
7957   return DAG.getNode(Opc, dl, Op.getValueType(), Extend);
7958 }
7959 
7960 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
7961                                           SelectionDAG &DAG) const {
7962   SDLoc dl(Op);
7963   bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP ||
7964                   Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
7965   bool IsStrict = Op->isStrictFPOpcode();
7966   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
7967   SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
7968 
7969   // TODO: Any other flags to propagate?
7970   SDNodeFlags Flags;
7971   Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
7972 
7973   EVT InVT = Src.getValueType();
7974   EVT OutVT = Op.getValueType();
7975   if (OutVT.isVector() && OutVT.isFloatingPoint() &&
7976       isOperationCustom(Op.getOpcode(), InVT))
7977     return LowerINT_TO_FPVector(Op, DAG, dl);
7978 
7979   // Conversions to f128 are legal.
7980   if (Op.getValueType() == MVT::f128)
7981     return Subtarget.hasP9Vector() ? Op : SDValue();
7982 
7983   // Don't handle ppc_fp128 here; let it be lowered to a libcall.
7984   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
7985     return SDValue();
7986 
7987   if (Src.getValueType() == MVT::i1) {
7988     SDValue Sel = DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Src,
7989                               DAG.getConstantFP(1.0, dl, Op.getValueType()),
7990                               DAG.getConstantFP(0.0, dl, Op.getValueType()));
7991     if (IsStrict)
7992       return DAG.getMergeValues({Sel, Chain}, dl);
7993     else
7994       return Sel;
7995   }
7996 
7997   // If we have direct moves, we can do all the conversion, skip the store/load
7998   // however, without FPCVT we can't do most conversions.
7999   if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
8000       Subtarget.isPPC64() && Subtarget.hasFPCVT())
8001     return LowerINT_TO_FPDirectMove(Op, DAG, dl);
8002 
8003   assert((IsSigned || Subtarget.hasFPCVT()) &&
8004          "UINT_TO_FP is supported only with FPCVT");
8005 
8006   if (Src.getValueType() == MVT::i64) {
8007     SDValue SINT = Src;
8008     // When converting to single-precision, we actually need to convert
8009     // to double-precision first and then round to single-precision.
8010     // To avoid double-rounding effects during that operation, we have
8011     // to prepare the input operand.  Bits that might be truncated when
8012     // converting to double-precision are replaced by a bit that won't
8013     // be lost at this stage, but is below the single-precision rounding
8014     // position.
8015     //
8016     // However, if -enable-unsafe-fp-math is in effect, accept double
8017     // rounding to avoid the extra overhead.
8018     if (Op.getValueType() == MVT::f32 &&
8019         !Subtarget.hasFPCVT() &&
8020         !DAG.getTarget().Options.UnsafeFPMath) {
8021 
8022       // Twiddle input to make sure the low 11 bits are zero.  (If this
8023       // is the case, we are guaranteed the value will fit into the 53 bit
8024       // mantissa of an IEEE double-precision value without rounding.)
8025       // If any of those low 11 bits were not zero originally, make sure
8026       // bit 12 (value 2048) is set instead, so that the final rounding
8027       // to single-precision gets the correct result.
8028       SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64,
8029                                   SINT, DAG.getConstant(2047, dl, MVT::i64));
8030       Round = DAG.getNode(ISD::ADD, dl, MVT::i64,
8031                           Round, DAG.getConstant(2047, dl, MVT::i64));
8032       Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT);
8033       Round = DAG.getNode(ISD::AND, dl, MVT::i64,
8034                           Round, DAG.getConstant(-2048, dl, MVT::i64));
8035 
8036       // However, we cannot use that value unconditionally: if the magnitude
8037       // of the input value is small, the bit-twiddling we did above might
8038       // end up visibly changing the output.  Fortunately, in that case, we
8039       // don't need to twiddle bits since the original input will convert
8040       // exactly to double-precision floating-point already.  Therefore,
8041       // construct a conditional to use the original value if the top 11
8042       // bits are all sign-bit copies, and use the rounded value computed
8043       // above otherwise.
8044       SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64,
8045                                  SINT, DAG.getConstant(53, dl, MVT::i32));
8046       Cond = DAG.getNode(ISD::ADD, dl, MVT::i64,
8047                          Cond, DAG.getConstant(1, dl, MVT::i64));
8048       Cond = DAG.getSetCC(
8049           dl,
8050           getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
8051           Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT);
8052 
8053       SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT);
8054     }
8055 
8056     ReuseLoadInfo RLI;
8057     SDValue Bits;
8058 
8059     MachineFunction &MF = DAG.getMachineFunction();
8060     if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
8061       Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI,
8062                          RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
8063       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8064     } else if (Subtarget.hasLFIWAX() &&
8065                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) {
8066       MachineMemOperand *MMO =
8067         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8068                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8069       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8070       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl,
8071                                      DAG.getVTList(MVT::f64, MVT::Other),
8072                                      Ops, MVT::i32, MMO);
8073       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8074     } else if (Subtarget.hasFPCVT() &&
8075                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) {
8076       MachineMemOperand *MMO =
8077         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8078                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8079       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8080       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl,
8081                                      DAG.getVTList(MVT::f64, MVT::Other),
8082                                      Ops, MVT::i32, MMO);
8083       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8084     } else if (((Subtarget.hasLFIWAX() &&
8085                  SINT.getOpcode() == ISD::SIGN_EXTEND) ||
8086                 (Subtarget.hasFPCVT() &&
8087                  SINT.getOpcode() == ISD::ZERO_EXTEND)) &&
8088                SINT.getOperand(0).getValueType() == MVT::i32) {
8089       MachineFrameInfo &MFI = MF.getFrameInfo();
8090       EVT PtrVT = getPointerTy(DAG.getDataLayout());
8091 
8092       int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
8093       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8094 
8095       SDValue Store = DAG.getStore(Chain, dl, SINT.getOperand(0), FIdx,
8096                                    MachinePointerInfo::getFixedStack(
8097                                        DAG.getMachineFunction(), FrameIdx));
8098       Chain = Store;
8099 
8100       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8101              "Expected an i32 store");
8102 
8103       RLI.Ptr = FIdx;
8104       RLI.Chain = Chain;
8105       RLI.MPI =
8106           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8107       RLI.Alignment = Align(4);
8108 
8109       MachineMemOperand *MMO =
8110         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8111                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8112       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8113       Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ?
8114                                      PPCISD::LFIWZX : PPCISD::LFIWAX,
8115                                      dl, DAG.getVTList(MVT::f64, MVT::Other),
8116                                      Ops, MVT::i32, MMO);
8117       Chain = Bits.getValue(1);
8118     } else
8119       Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);
8120 
8121     SDValue FP = convertIntToFP(Op, Bits, DAG, Subtarget, Chain);
8122     if (IsStrict)
8123       Chain = FP.getValue(1);
8124 
8125     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
8126       if (IsStrict)
8127         FP = DAG.getNode(ISD::STRICT_FP_ROUND, dl,
8128                          DAG.getVTList(MVT::f32, MVT::Other),
8129                          {Chain, FP, DAG.getIntPtrConstant(0, dl)}, Flags);
8130       else
8131         FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
8132                          DAG.getIntPtrConstant(0, dl));
8133     }
8134     return FP;
8135   }
8136 
8137   assert(Src.getValueType() == MVT::i32 &&
8138          "Unhandled INT_TO_FP type in custom expander!");
8139   // Since we only generate this in 64-bit mode, we can take advantage of
8140   // 64-bit registers.  In particular, sign extend the input value into the
8141   // 64-bit register with extsw, store the WHOLE 64-bit value into the stack
8142   // then lfd it and fcfid it.
8143   MachineFunction &MF = DAG.getMachineFunction();
8144   MachineFrameInfo &MFI = MF.getFrameInfo();
8145   EVT PtrVT = getPointerTy(MF.getDataLayout());
8146 
8147   SDValue Ld;
8148   if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
8149     ReuseLoadInfo RLI;
8150     bool ReusingLoad;
8151     if (!(ReusingLoad = canReuseLoadAddress(Src, MVT::i32, RLI, DAG))) {
8152       int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
8153       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8154 
8155       SDValue Store = DAG.getStore(Chain, dl, Src, FIdx,
8156                                    MachinePointerInfo::getFixedStack(
8157                                        DAG.getMachineFunction(), FrameIdx));
8158       Chain = Store;
8159 
8160       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8161              "Expected an i32 store");
8162 
8163       RLI.Ptr = FIdx;
8164       RLI.Chain = Chain;
8165       RLI.MPI =
8166           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8167       RLI.Alignment = Align(4);
8168     }
8169 
8170     MachineMemOperand *MMO =
8171       MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8172                               RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8173     SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8174     Ld = DAG.getMemIntrinsicNode(IsSigned ? PPCISD::LFIWAX : PPCISD::LFIWZX, dl,
8175                                  DAG.getVTList(MVT::f64, MVT::Other), Ops,
8176                                  MVT::i32, MMO);
8177     Chain = Ld.getValue(1);
8178     if (ReusingLoad)
8179       spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
8180   } else {
8181     assert(Subtarget.isPPC64() &&
8182            "i32->FP without LFIWAX supported only on PPC64");
8183 
8184     int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
8185     SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8186 
8187     SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, Src);
8188 
8189     // STD the extended value into the stack slot.
8190     SDValue Store = DAG.getStore(
8191         Chain, dl, Ext64, FIdx,
8192         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8193     Chain = Store;
8194 
8195     // Load the value as a double.
8196     Ld = DAG.getLoad(
8197         MVT::f64, dl, Chain, FIdx,
8198         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8199     Chain = Ld.getValue(1);
8200   }
8201 
8202   // FCFID it and return it.
8203   SDValue FP = convertIntToFP(Op, Ld, DAG, Subtarget, Chain);
8204   if (IsStrict)
8205     Chain = FP.getValue(1);
8206   if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
8207     if (IsStrict)
8208       FP = DAG.getNode(ISD::STRICT_FP_ROUND, dl,
8209                        DAG.getVTList(MVT::f32, MVT::Other),
8210                        {Chain, FP, DAG.getIntPtrConstant(0, dl)}, Flags);
8211     else
8212       FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
8213                        DAG.getIntPtrConstant(0, dl));
8214   }
8215   return FP;
8216 }
8217 
8218 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
8219                                             SelectionDAG &DAG) const {
8220   SDLoc dl(Op);
8221   /*
8222    The rounding mode is in bits 30:31 of FPSR, and has the following
8223    settings:
8224      00 Round to nearest
8225      01 Round to 0
8226      10 Round to +inf
8227      11 Round to -inf
8228 
8229   FLT_ROUNDS, on the other hand, expects the following:
8230     -1 Undefined
8231      0 Round to 0
8232      1 Round to nearest
8233      2 Round to +inf
8234      3 Round to -inf
8235 
8236   To perform the conversion, we do:
8237     ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
8238   */
8239 
8240   MachineFunction &MF = DAG.getMachineFunction();
8241   EVT VT = Op.getValueType();
8242   EVT PtrVT = getPointerTy(MF.getDataLayout());
8243 
8244   // Save FP Control Word to register
8245   SDValue Chain = Op.getOperand(0);
8246   SDValue MFFS = DAG.getNode(PPCISD::MFFS, dl, {MVT::f64, MVT::Other}, Chain);
8247   Chain = MFFS.getValue(1);
8248 
8249   SDValue CWD;
8250   if (isTypeLegal(MVT::i64)) {
8251     CWD = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
8252                       DAG.getNode(ISD::BITCAST, dl, MVT::i64, MFFS));
8253   } else {
8254     // Save FP register to stack slot
8255     int SSFI = MF.getFrameInfo().CreateStackObject(8, Align(8), false);
8256     SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
8257     Chain = DAG.getStore(Chain, dl, MFFS, StackSlot, MachinePointerInfo());
8258 
8259     // Load FP Control Word from low 32 bits of stack slot.
8260     assert(hasBigEndianPartOrdering(MVT::i64, MF.getDataLayout()) &&
8261            "Stack slot adjustment is valid only on big endian subtargets!");
8262     SDValue Four = DAG.getConstant(4, dl, PtrVT);
8263     SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
8264     CWD = DAG.getLoad(MVT::i32, dl, Chain, Addr, MachinePointerInfo());
8265     Chain = CWD.getValue(1);
8266   }
8267 
8268   // Transform as necessary
8269   SDValue CWD1 =
8270     DAG.getNode(ISD::AND, dl, MVT::i32,
8271                 CWD, DAG.getConstant(3, dl, MVT::i32));
8272   SDValue CWD2 =
8273     DAG.getNode(ISD::SRL, dl, MVT::i32,
8274                 DAG.getNode(ISD::AND, dl, MVT::i32,
8275                             DAG.getNode(ISD::XOR, dl, MVT::i32,
8276                                         CWD, DAG.getConstant(3, dl, MVT::i32)),
8277                             DAG.getConstant(3, dl, MVT::i32)),
8278                 DAG.getConstant(1, dl, MVT::i32));
8279 
8280   SDValue RetVal =
8281     DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
8282 
8283   RetVal =
8284       DAG.getNode((VT.getSizeInBits() < 16 ? ISD::TRUNCATE : ISD::ZERO_EXTEND),
8285                   dl, VT, RetVal);
8286 
8287   return DAG.getMergeValues({RetVal, Chain}, dl);
8288 }
8289 
8290 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
8291   EVT VT = Op.getValueType();
8292   unsigned BitWidth = VT.getSizeInBits();
8293   SDLoc dl(Op);
8294   assert(Op.getNumOperands() == 3 &&
8295          VT == Op.getOperand(1).getValueType() &&
8296          "Unexpected SHL!");
8297 
8298   // Expand into a bunch of logical ops.  Note that these ops
8299   // depend on the PPC behavior for oversized shift amounts.
8300   SDValue Lo = Op.getOperand(0);
8301   SDValue Hi = Op.getOperand(1);
8302   SDValue Amt = Op.getOperand(2);
8303   EVT AmtVT = Amt.getValueType();
8304 
8305   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8306                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8307   SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
8308   SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
8309   SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3);
8310   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8311                              DAG.getConstant(-BitWidth, dl, AmtVT));
8312   SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
8313   SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
8314   SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
8315   SDValue OutOps[] = { OutLo, OutHi };
8316   return DAG.getMergeValues(OutOps, dl);
8317 }
8318 
8319 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
8320   EVT VT = Op.getValueType();
8321   SDLoc dl(Op);
8322   unsigned BitWidth = VT.getSizeInBits();
8323   assert(Op.getNumOperands() == 3 &&
8324          VT == Op.getOperand(1).getValueType() &&
8325          "Unexpected SRL!");
8326 
8327   // Expand into a bunch of logical ops.  Note that these ops
8328   // depend on the PPC behavior for oversized shift amounts.
8329   SDValue Lo = Op.getOperand(0);
8330   SDValue Hi = Op.getOperand(1);
8331   SDValue Amt = Op.getOperand(2);
8332   EVT AmtVT = Amt.getValueType();
8333 
8334   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8335                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8336   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
8337   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
8338   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
8339   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8340                              DAG.getConstant(-BitWidth, dl, AmtVT));
8341   SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
8342   SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
8343   SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
8344   SDValue OutOps[] = { OutLo, OutHi };
8345   return DAG.getMergeValues(OutOps, dl);
8346 }
8347 
8348 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
8349   SDLoc dl(Op);
8350   EVT VT = Op.getValueType();
8351   unsigned BitWidth = VT.getSizeInBits();
8352   assert(Op.getNumOperands() == 3 &&
8353          VT == Op.getOperand(1).getValueType() &&
8354          "Unexpected SRA!");
8355 
8356   // Expand into a bunch of logical ops, followed by a select_cc.
8357   SDValue Lo = Op.getOperand(0);
8358   SDValue Hi = Op.getOperand(1);
8359   SDValue Amt = Op.getOperand(2);
8360   EVT AmtVT = Amt.getValueType();
8361 
8362   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8363                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8364   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
8365   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
8366   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
8367   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8368                              DAG.getConstant(-BitWidth, dl, AmtVT));
8369   SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
8370   SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
8371   SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT),
8372                                   Tmp4, Tmp6, ISD::SETLE);
8373   SDValue OutOps[] = { OutLo, OutHi };
8374   return DAG.getMergeValues(OutOps, dl);
8375 }
8376 
8377 SDValue PPCTargetLowering::LowerFunnelShift(SDValue Op,
8378                                             SelectionDAG &DAG) const {
8379   SDLoc dl(Op);
8380   EVT VT = Op.getValueType();
8381   unsigned BitWidth = VT.getSizeInBits();
8382 
8383   bool IsFSHL = Op.getOpcode() == ISD::FSHL;
8384   SDValue X = Op.getOperand(0);
8385   SDValue Y = Op.getOperand(1);
8386   SDValue Z = Op.getOperand(2);
8387   EVT AmtVT = Z.getValueType();
8388 
8389   // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
8390   // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
8391   // This is simpler than TargetLowering::expandFunnelShift because we can rely
8392   // on PowerPC shift by BW being well defined.
8393   Z = DAG.getNode(ISD::AND, dl, AmtVT, Z,
8394                   DAG.getConstant(BitWidth - 1, dl, AmtVT));
8395   SDValue SubZ =
8396       DAG.getNode(ISD::SUB, dl, AmtVT, DAG.getConstant(BitWidth, dl, AmtVT), Z);
8397   X = DAG.getNode(PPCISD::SHL, dl, VT, X, IsFSHL ? Z : SubZ);
8398   Y = DAG.getNode(PPCISD::SRL, dl, VT, Y, IsFSHL ? SubZ : Z);
8399   return DAG.getNode(ISD::OR, dl, VT, X, Y);
8400 }
8401 
8402 //===----------------------------------------------------------------------===//
8403 // Vector related lowering.
8404 //
8405 
8406 /// getCanonicalConstSplat - Build a canonical splat immediate of Val with an
8407 /// element size of SplatSize. Cast the result to VT.
8408 static SDValue getCanonicalConstSplat(uint64_t Val, unsigned SplatSize, EVT VT,
8409                                       SelectionDAG &DAG, const SDLoc &dl) {
8410   static const MVT VTys[] = { // canonical VT to use for each size.
8411     MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
8412   };
8413 
8414   EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
8415 
8416   // For a splat with all ones, turn it to vspltisb 0xFF to canonicalize.
8417   if (Val == ((1LLU << (SplatSize * 8)) - 1)) {
8418     SplatSize = 1;
8419     Val = 0xFF;
8420   }
8421 
8422   EVT CanonicalVT = VTys[SplatSize-1];
8423 
8424   // Build a canonical splat for this value.
8425   return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT));
8426 }
8427 
8428 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the
8429 /// specified intrinsic ID.
8430 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG,
8431                                 const SDLoc &dl, EVT DestVT = MVT::Other) {
8432   if (DestVT == MVT::Other) DestVT = Op.getValueType();
8433   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8434                      DAG.getConstant(IID, dl, MVT::i32), Op);
8435 }
8436 
8437 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the
8438 /// specified intrinsic ID.
8439 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
8440                                 SelectionDAG &DAG, const SDLoc &dl,
8441                                 EVT DestVT = MVT::Other) {
8442   if (DestVT == MVT::Other) DestVT = LHS.getValueType();
8443   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8444                      DAG.getConstant(IID, dl, MVT::i32), LHS, RHS);
8445 }
8446 
8447 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
8448 /// specified intrinsic ID.
8449 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
8450                                 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl,
8451                                 EVT DestVT = MVT::Other) {
8452   if (DestVT == MVT::Other) DestVT = Op0.getValueType();
8453   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8454                      DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
8455 }
8456 
8457 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
8458 /// amount.  The result has the specified value type.
8459 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT,
8460                            SelectionDAG &DAG, const SDLoc &dl) {
8461   // Force LHS/RHS to be the right type.
8462   LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
8463   RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);
8464 
8465   int Ops[16];
8466   for (unsigned i = 0; i != 16; ++i)
8467     Ops[i] = i + Amt;
8468   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
8469   return DAG.getNode(ISD::BITCAST, dl, VT, T);
8470 }
8471 
8472 /// Do we have an efficient pattern in a .td file for this node?
8473 ///
8474 /// \param V - pointer to the BuildVectorSDNode being matched
8475 /// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves?
8476 ///
8477 /// There are some patterns where it is beneficial to keep a BUILD_VECTOR
8478 /// node as a BUILD_VECTOR node rather than expanding it. The patterns where
8479 /// the opposite is true (expansion is beneficial) are:
8480 /// - The node builds a vector out of integers that are not 32 or 64-bits
8481 /// - The node builds a vector out of constants
8482 /// - The node is a "load-and-splat"
8483 /// In all other cases, we will choose to keep the BUILD_VECTOR.
8484 static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V,
8485                                             bool HasDirectMove,
8486                                             bool HasP8Vector) {
8487   EVT VecVT = V->getValueType(0);
8488   bool RightType = VecVT == MVT::v2f64 ||
8489     (HasP8Vector && VecVT == MVT::v4f32) ||
8490     (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32));
8491   if (!RightType)
8492     return false;
8493 
8494   bool IsSplat = true;
8495   bool IsLoad = false;
8496   SDValue Op0 = V->getOperand(0);
8497 
8498   // This function is called in a block that confirms the node is not a constant
8499   // splat. So a constant BUILD_VECTOR here means the vector is built out of
8500   // different constants.
8501   if (V->isConstant())
8502     return false;
8503   for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
8504     if (V->getOperand(i).isUndef())
8505       return false;
8506     // We want to expand nodes that represent load-and-splat even if the
8507     // loaded value is a floating point truncation or conversion to int.
8508     if (V->getOperand(i).getOpcode() == ISD::LOAD ||
8509         (V->getOperand(i).getOpcode() == ISD::FP_ROUND &&
8510          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
8511         (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT &&
8512          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
8513         (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT &&
8514          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD))
8515       IsLoad = true;
8516     // If the operands are different or the input is not a load and has more
8517     // uses than just this BV node, then it isn't a splat.
8518     if (V->getOperand(i) != Op0 ||
8519         (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode())))
8520       IsSplat = false;
8521   }
8522   return !(IsSplat && IsLoad);
8523 }
8524 
8525 // Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128.
8526 SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
8527 
8528   SDLoc dl(Op);
8529   SDValue Op0 = Op->getOperand(0);
8530 
8531   if ((Op.getValueType() != MVT::f128) ||
8532       (Op0.getOpcode() != ISD::BUILD_PAIR) ||
8533       (Op0.getOperand(0).getValueType() != MVT::i64) ||
8534       (Op0.getOperand(1).getValueType() != MVT::i64))
8535     return SDValue();
8536 
8537   return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0),
8538                      Op0.getOperand(1));
8539 }
8540 
8541 static const SDValue *getNormalLoadInput(const SDValue &Op, bool &IsPermuted) {
8542   const SDValue *InputLoad = &Op;
8543   if (InputLoad->getOpcode() == ISD::BITCAST)
8544     InputLoad = &InputLoad->getOperand(0);
8545   if (InputLoad->getOpcode() == ISD::SCALAR_TO_VECTOR ||
8546       InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED) {
8547     IsPermuted = InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED;
8548     InputLoad = &InputLoad->getOperand(0);
8549   }
8550   if (InputLoad->getOpcode() != ISD::LOAD)
8551     return nullptr;
8552   LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
8553   return ISD::isNormalLoad(LD) ? InputLoad : nullptr;
8554 }
8555 
8556 // Convert the argument APFloat to a single precision APFloat if there is no
8557 // loss in information during the conversion to single precision APFloat and the
8558 // resulting number is not a denormal number. Return true if successful.
8559 bool llvm::convertToNonDenormSingle(APFloat &ArgAPFloat) {
8560   APFloat APFloatToConvert = ArgAPFloat;
8561   bool LosesInfo = true;
8562   APFloatToConvert.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
8563                            &LosesInfo);
8564   bool Success = (!LosesInfo && !APFloatToConvert.isDenormal());
8565   if (Success)
8566     ArgAPFloat = APFloatToConvert;
8567   return Success;
8568 }
8569 
8570 // Bitcast the argument APInt to a double and convert it to a single precision
8571 // APFloat, bitcast the APFloat to an APInt and assign it to the original
8572 // argument if there is no loss in information during the conversion from
8573 // double to single precision APFloat and the resulting number is not a denormal
8574 // number. Return true if successful.
8575 bool llvm::convertToNonDenormSingle(APInt &ArgAPInt) {
8576   double DpValue = ArgAPInt.bitsToDouble();
8577   APFloat APFloatDp(DpValue);
8578   bool Success = convertToNonDenormSingle(APFloatDp);
8579   if (Success)
8580     ArgAPInt = APFloatDp.bitcastToAPInt();
8581   return Success;
8582 }
8583 
8584 // If this is a case we can't handle, return null and let the default
8585 // expansion code take care of it.  If we CAN select this case, and if it
8586 // selects to a single instruction, return Op.  Otherwise, if we can codegen
8587 // this case more efficiently than a constant pool load, lower it to the
8588 // sequence of ops that should be used.
8589 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
8590                                              SelectionDAG &DAG) const {
8591   SDLoc dl(Op);
8592   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
8593   assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
8594 
8595   // Check if this is a splat of a constant value.
8596   APInt APSplatBits, APSplatUndef;
8597   unsigned SplatBitSize;
8598   bool HasAnyUndefs;
8599   bool BVNIsConstantSplat =
8600       BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
8601                            HasAnyUndefs, 0, !Subtarget.isLittleEndian());
8602 
8603   // If it is a splat of a double, check if we can shrink it to a 32 bit
8604   // non-denormal float which when converted back to double gives us the same
8605   // double. This is to exploit the XXSPLTIDP instruction.
8606   if (BVNIsConstantSplat && Subtarget.hasPrefixInstrs() &&
8607       (SplatBitSize == 64) && (Op->getValueType(0) == MVT::v2f64) &&
8608       convertToNonDenormSingle(APSplatBits)) {
8609     SDValue SplatNode = DAG.getNode(
8610         PPCISD::XXSPLTI_SP_TO_DP, dl, MVT::v2f64,
8611         DAG.getTargetConstant(APSplatBits.getZExtValue(), dl, MVT::i32));
8612     return DAG.getBitcast(Op.getValueType(), SplatNode);
8613   }
8614 
8615   if (!BVNIsConstantSplat || SplatBitSize > 32) {
8616 
8617     bool IsPermutedLoad = false;
8618     const SDValue *InputLoad =
8619         getNormalLoadInput(Op.getOperand(0), IsPermutedLoad);
8620     // Handle load-and-splat patterns as we have instructions that will do this
8621     // in one go.
8622     if (InputLoad && DAG.isSplatValue(Op, true)) {
8623       LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
8624 
8625       // We have handling for 4 and 8 byte elements.
8626       unsigned ElementSize = LD->getMemoryVT().getScalarSizeInBits();
8627 
8628       // Checking for a single use of this load, we have to check for vector
8629       // width (128 bits) / ElementSize uses (since each operand of the
8630       // BUILD_VECTOR is a separate use of the value.
8631       unsigned NumUsesOfInputLD = 128 / ElementSize;
8632       for (SDValue BVInOp : Op->ops())
8633         if (BVInOp.isUndef())
8634           NumUsesOfInputLD--;
8635       assert(NumUsesOfInputLD > 0 && "No uses of input LD of a build_vector?");
8636       if (InputLoad->getNode()->hasNUsesOfValue(NumUsesOfInputLD, 0) &&
8637           ((Subtarget.hasVSX() && ElementSize == 64) ||
8638            (Subtarget.hasP9Vector() && ElementSize == 32))) {
8639         SDValue Ops[] = {
8640           LD->getChain(),    // Chain
8641           LD->getBasePtr(),  // Ptr
8642           DAG.getValueType(Op.getValueType()) // VT
8643         };
8644         SDValue LdSplt = DAG.getMemIntrinsicNode(
8645             PPCISD::LD_SPLAT, dl, DAG.getVTList(Op.getValueType(), MVT::Other),
8646             Ops, LD->getMemoryVT(), LD->getMemOperand());
8647         // Replace all uses of the output chain of the original load with the
8648         // output chain of the new load.
8649         DAG.ReplaceAllUsesOfValueWith(InputLoad->getValue(1),
8650                                       LdSplt.getValue(1));
8651         return LdSplt;
8652       }
8653     }
8654 
8655     // In 64BIT mode BUILD_VECTOR nodes that are not constant splats of up to
8656     // 32-bits can be lowered to VSX instructions under certain conditions.
8657     // Without VSX, there is no pattern more efficient than expanding the node.
8658     if (Subtarget.hasVSX() && Subtarget.isPPC64() &&
8659         haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
8660                                         Subtarget.hasP8Vector()))
8661       return Op;
8662     return SDValue();
8663   }
8664 
8665   uint64_t SplatBits = APSplatBits.getZExtValue();
8666   uint64_t SplatUndef = APSplatUndef.getZExtValue();
8667   unsigned SplatSize = SplatBitSize / 8;
8668 
8669   // First, handle single instruction cases.
8670 
8671   // All zeros?
8672   if (SplatBits == 0) {
8673     // Canonicalize all zero vectors to be v4i32.
8674     if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
8675       SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
8676       Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
8677     }
8678     return Op;
8679   }
8680 
8681   // We have XXSPLTIW for constant splats four bytes wide.
8682   // Given vector length is a multiple of 4, 2-byte splats can be replaced
8683   // with 4-byte splats. We replicate the SplatBits in case of 2-byte splat to
8684   // make a 4-byte splat element. For example: 2-byte splat of 0xABAB can be
8685   // turned into a 4-byte splat of 0xABABABAB.
8686   if (Subtarget.hasPrefixInstrs() && SplatSize == 2)
8687     return getCanonicalConstSplat(SplatBits | (SplatBits << 16), SplatSize * 2,
8688                                   Op.getValueType(), DAG, dl);
8689 
8690   if (Subtarget.hasPrefixInstrs() && SplatSize == 4)
8691     return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
8692                                   dl);
8693 
8694   // We have XXSPLTIB for constant splats one byte wide.
8695   if (Subtarget.hasP9Vector() && SplatSize == 1)
8696     return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
8697                                   dl);
8698 
8699   // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
8700   int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >>
8701                     (32-SplatBitSize));
8702   if (SextVal >= -16 && SextVal <= 15)
8703     return getCanonicalConstSplat(SextVal, SplatSize, Op.getValueType(), DAG,
8704                                   dl);
8705 
8706   // Two instruction sequences.
8707 
8708   // If this value is in the range [-32,30] and is even, use:
8709   //     VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
8710   // If this value is in the range [17,31] and is odd, use:
8711   //     VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
8712   // If this value is in the range [-31,-17] and is odd, use:
8713   //     VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
8714   // Note the last two are three-instruction sequences.
8715   if (SextVal >= -32 && SextVal <= 31) {
8716     // To avoid having these optimizations undone by constant folding,
8717     // we convert to a pseudo that will be expanded later into one of
8718     // the above forms.
8719     SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
8720     EVT VT = (SplatSize == 1 ? MVT::v16i8 :
8721               (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
8722     SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
8723     SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
8724     if (VT == Op.getValueType())
8725       return RetVal;
8726     else
8727       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
8728   }
8729 
8730   // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
8731   // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
8732   // for fneg/fabs.
8733   if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
8734     // Make -1 and vspltisw -1:
8735     SDValue OnesV = getCanonicalConstSplat(-1, 4, MVT::v4i32, DAG, dl);
8736 
8737     // Make the VSLW intrinsic, computing 0x8000_0000.
8738     SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
8739                                    OnesV, DAG, dl);
8740 
8741     // xor by OnesV to invert it.
8742     Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
8743     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8744   }
8745 
8746   // Check to see if this is a wide variety of vsplti*, binop self cases.
8747   static const signed char SplatCsts[] = {
8748     -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
8749     -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
8750   };
8751 
8752   for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
8753     // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
8754     // cases which are ambiguous (e.g. formation of 0x8000_0000).  'vsplti -1'
8755     int i = SplatCsts[idx];
8756 
8757     // Figure out what shift amount will be used by altivec if shifted by i in
8758     // this splat size.
8759     unsigned TypeShiftAmt = i & (SplatBitSize-1);
8760 
8761     // vsplti + shl self.
8762     if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
8763       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
8764       static const unsigned IIDs[] = { // Intrinsic to use for each size.
8765         Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
8766         Intrinsic::ppc_altivec_vslw
8767       };
8768       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
8769       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8770     }
8771 
8772     // vsplti + srl self.
8773     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
8774       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
8775       static const unsigned IIDs[] = { // Intrinsic to use for each size.
8776         Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
8777         Intrinsic::ppc_altivec_vsrw
8778       };
8779       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
8780       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8781     }
8782 
8783     // vsplti + rol self.
8784     if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
8785                          ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
8786       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
8787       static const unsigned IIDs[] = { // Intrinsic to use for each size.
8788         Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
8789         Intrinsic::ppc_altivec_vrlw
8790       };
8791       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
8792       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8793     }
8794 
8795     // t = vsplti c, result = vsldoi t, t, 1
8796     if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
8797       SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
8798       unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
8799       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
8800     }
8801     // t = vsplti c, result = vsldoi t, t, 2
8802     if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
8803       SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
8804       unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
8805       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
8806     }
8807     // t = vsplti c, result = vsldoi t, t, 3
8808     if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
8809       SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
8810       unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
8811       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
8812     }
8813   }
8814 
8815   return SDValue();
8816 }
8817 
8818 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
8819 /// the specified operations to build the shuffle.
8820 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
8821                                       SDValue RHS, SelectionDAG &DAG,
8822                                       const SDLoc &dl) {
8823   unsigned OpNum = (PFEntry >> 26) & 0x0F;
8824   unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
8825   unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
8826 
8827   enum {
8828     OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
8829     OP_VMRGHW,
8830     OP_VMRGLW,
8831     OP_VSPLTISW0,
8832     OP_VSPLTISW1,
8833     OP_VSPLTISW2,
8834     OP_VSPLTISW3,
8835     OP_VSLDOI4,
8836     OP_VSLDOI8,
8837     OP_VSLDOI12
8838   };
8839 
8840   if (OpNum == OP_COPY) {
8841     if (LHSID == (1*9+2)*9+3) return LHS;
8842     assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
8843     return RHS;
8844   }
8845 
8846   SDValue OpLHS, OpRHS;
8847   OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
8848   OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
8849 
8850   int ShufIdxs[16];
8851   switch (OpNum) {
8852   default: llvm_unreachable("Unknown i32 permute!");
8853   case OP_VMRGHW:
8854     ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
8855     ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
8856     ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
8857     ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
8858     break;
8859   case OP_VMRGLW:
8860     ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
8861     ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
8862     ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
8863     ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
8864     break;
8865   case OP_VSPLTISW0:
8866     for (unsigned i = 0; i != 16; ++i)
8867       ShufIdxs[i] = (i&3)+0;
8868     break;
8869   case OP_VSPLTISW1:
8870     for (unsigned i = 0; i != 16; ++i)
8871       ShufIdxs[i] = (i&3)+4;
8872     break;
8873   case OP_VSPLTISW2:
8874     for (unsigned i = 0; i != 16; ++i)
8875       ShufIdxs[i] = (i&3)+8;
8876     break;
8877   case OP_VSPLTISW3:
8878     for (unsigned i = 0; i != 16; ++i)
8879       ShufIdxs[i] = (i&3)+12;
8880     break;
8881   case OP_VSLDOI4:
8882     return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
8883   case OP_VSLDOI8:
8884     return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
8885   case OP_VSLDOI12:
8886     return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
8887   }
8888   EVT VT = OpLHS.getValueType();
8889   OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
8890   OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
8891   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
8892   return DAG.getNode(ISD::BITCAST, dl, VT, T);
8893 }
8894 
8895 /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
8896 /// by the VINSERTB instruction introduced in ISA 3.0, else just return default
8897 /// SDValue.
8898 SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
8899                                            SelectionDAG &DAG) const {
8900   const unsigned BytesInVector = 16;
8901   bool IsLE = Subtarget.isLittleEndian();
8902   SDLoc dl(N);
8903   SDValue V1 = N->getOperand(0);
8904   SDValue V2 = N->getOperand(1);
8905   unsigned ShiftElts = 0, InsertAtByte = 0;
8906   bool Swap = false;
8907 
8908   // Shifts required to get the byte we want at element 7.
8909   unsigned LittleEndianShifts[] = {8, 7,  6,  5,  4,  3,  2,  1,
8910                                    0, 15, 14, 13, 12, 11, 10, 9};
8911   unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
8912                                 1, 2,  3,  4,  5,  6,  7,  8};
8913 
8914   ArrayRef<int> Mask = N->getMask();
8915   int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
8916 
8917   // For each mask element, find out if we're just inserting something
8918   // from V2 into V1 or vice versa.
8919   // Possible permutations inserting an element from V2 into V1:
8920   //   X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
8921   //   0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
8922   //   ...
8923   //   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
8924   // Inserting from V1 into V2 will be similar, except mask range will be
8925   // [16,31].
8926 
8927   bool FoundCandidate = false;
8928   // If both vector operands for the shuffle are the same vector, the mask
8929   // will contain only elements from the first one and the second one will be
8930   // undef.
8931   unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
8932   // Go through the mask of half-words to find an element that's being moved
8933   // from one vector to the other.
8934   for (unsigned i = 0; i < BytesInVector; ++i) {
8935     unsigned CurrentElement = Mask[i];
8936     // If 2nd operand is undefined, we should only look for element 7 in the
8937     // Mask.
8938     if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
8939       continue;
8940 
8941     bool OtherElementsInOrder = true;
8942     // Examine the other elements in the Mask to see if they're in original
8943     // order.
8944     for (unsigned j = 0; j < BytesInVector; ++j) {
8945       if (j == i)
8946         continue;
8947       // If CurrentElement is from V1 [0,15], then we the rest of the Mask to be
8948       // from V2 [16,31] and vice versa.  Unless the 2nd operand is undefined,
8949       // in which we always assume we're always picking from the 1st operand.
8950       int MaskOffset =
8951           (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
8952       if (Mask[j] != OriginalOrder[j] + MaskOffset) {
8953         OtherElementsInOrder = false;
8954         break;
8955       }
8956     }
8957     // If other elements are in original order, we record the number of shifts
8958     // we need to get the element we want into element 7. Also record which byte
8959     // in the vector we should insert into.
8960     if (OtherElementsInOrder) {
8961       // If 2nd operand is undefined, we assume no shifts and no swapping.
8962       if (V2.isUndef()) {
8963         ShiftElts = 0;
8964         Swap = false;
8965       } else {
8966         // Only need the last 4-bits for shifts because operands will be swapped if CurrentElement is >= 2^4.
8967         ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
8968                          : BigEndianShifts[CurrentElement & 0xF];
8969         Swap = CurrentElement < BytesInVector;
8970       }
8971       InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
8972       FoundCandidate = true;
8973       break;
8974     }
8975   }
8976 
8977   if (!FoundCandidate)
8978     return SDValue();
8979 
8980   // Candidate found, construct the proper SDAG sequence with VINSERTB,
8981   // optionally with VECSHL if shift is required.
8982   if (Swap)
8983     std::swap(V1, V2);
8984   if (V2.isUndef())
8985     V2 = V1;
8986   if (ShiftElts) {
8987     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
8988                               DAG.getConstant(ShiftElts, dl, MVT::i32));
8989     return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl,
8990                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
8991   }
8992   return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2,
8993                      DAG.getConstant(InsertAtByte, dl, MVT::i32));
8994 }
8995 
8996 /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled
8997 /// by the VINSERTH instruction introduced in ISA 3.0, else just return default
8998 /// SDValue.
8999 SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
9000                                            SelectionDAG &DAG) const {
9001   const unsigned NumHalfWords = 8;
9002   const unsigned BytesInVector = NumHalfWords * 2;
9003   // Check that the shuffle is on half-words.
9004   if (!isNByteElemShuffleMask(N, 2, 1))
9005     return SDValue();
9006 
9007   bool IsLE = Subtarget.isLittleEndian();
9008   SDLoc dl(N);
9009   SDValue V1 = N->getOperand(0);
9010   SDValue V2 = N->getOperand(1);
9011   unsigned ShiftElts = 0, InsertAtByte = 0;
9012   bool Swap = false;
9013 
9014   // Shifts required to get the half-word we want at element 3.
9015   unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
9016   unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};
9017 
9018   uint32_t Mask = 0;
9019   uint32_t OriginalOrderLow = 0x1234567;
9020   uint32_t OriginalOrderHigh = 0x89ABCDEF;
9021   // Now we look at mask elements 0,2,4,6,8,10,12,14.  Pack the mask into a
9022   // 32-bit space, only need 4-bit nibbles per element.
9023   for (unsigned i = 0; i < NumHalfWords; ++i) {
9024     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9025     Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift);
9026   }
9027 
9028   // For each mask element, find out if we're just inserting something
9029   // from V2 into V1 or vice versa.  Possible permutations inserting an element
9030   // from V2 into V1:
9031   //   X, 1, 2, 3, 4, 5, 6, 7
9032   //   0, X, 2, 3, 4, 5, 6, 7
9033   //   0, 1, X, 3, 4, 5, 6, 7
9034   //   0, 1, 2, X, 4, 5, 6, 7
9035   //   0, 1, 2, 3, X, 5, 6, 7
9036   //   0, 1, 2, 3, 4, X, 6, 7
9037   //   0, 1, 2, 3, 4, 5, X, 7
9038   //   0, 1, 2, 3, 4, 5, 6, X
9039   // Inserting from V1 into V2 will be similar, except mask range will be [8,15].
9040 
9041   bool FoundCandidate = false;
9042   // Go through the mask of half-words to find an element that's being moved
9043   // from one vector to the other.
9044   for (unsigned i = 0; i < NumHalfWords; ++i) {
9045     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9046     uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF;
9047     uint32_t MaskOtherElts = ~(0xF << MaskShift);
9048     uint32_t TargetOrder = 0x0;
9049 
9050     // If both vector operands for the shuffle are the same vector, the mask
9051     // will contain only elements from the first one and the second one will be
9052     // undef.
9053     if (V2.isUndef()) {
9054       ShiftElts = 0;
9055       unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
9056       TargetOrder = OriginalOrderLow;
9057       Swap = false;
9058       // Skip if not the correct element or mask of other elements don't equal
9059       // to our expected order.
9060       if (MaskOneElt == VINSERTHSrcElem &&
9061           (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9062         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9063         FoundCandidate = true;
9064         break;
9065       }
9066     } else { // If both operands are defined.
9067       // Target order is [8,15] if the current mask is between [0,7].
9068       TargetOrder =
9069           (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
9070       // Skip if mask of other elements don't equal our expected order.
9071       if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9072         // We only need the last 3 bits for the number of shifts.
9073         ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
9074                          : BigEndianShifts[MaskOneElt & 0x7];
9075         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9076         Swap = MaskOneElt < NumHalfWords;
9077         FoundCandidate = true;
9078         break;
9079       }
9080     }
9081   }
9082 
9083   if (!FoundCandidate)
9084     return SDValue();
9085 
9086   // Candidate found, construct the proper SDAG sequence with VINSERTH,
9087   // optionally with VECSHL if shift is required.
9088   if (Swap)
9089     std::swap(V1, V2);
9090   if (V2.isUndef())
9091     V2 = V1;
9092   SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
9093   if (ShiftElts) {
9094     // Double ShiftElts because we're left shifting on v16i8 type.
9095     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9096                               DAG.getConstant(2 * ShiftElts, dl, MVT::i32));
9097     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl);
9098     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9099                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
9100     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9101   }
9102   SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
9103   SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9104                             DAG.getConstant(InsertAtByte, dl, MVT::i32));
9105   return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9106 }
9107 
9108 /// lowerToXXSPLTI32DX - Return the SDValue if this VECTOR_SHUFFLE can be
9109 /// handled by the XXSPLTI32DX instruction introduced in ISA 3.1, otherwise
9110 /// return the default SDValue.
9111 SDValue PPCTargetLowering::lowerToXXSPLTI32DX(ShuffleVectorSDNode *SVN,
9112                                               SelectionDAG &DAG) const {
9113   // The LHS and RHS may be bitcasts to v16i8 as we canonicalize shuffles
9114   // to v16i8. Peek through the bitcasts to get the actual operands.
9115   SDValue LHS = peekThroughBitcasts(SVN->getOperand(0));
9116   SDValue RHS = peekThroughBitcasts(SVN->getOperand(1));
9117 
9118   auto ShuffleMask = SVN->getMask();
9119   SDValue VecShuffle(SVN, 0);
9120   SDLoc DL(SVN);
9121 
9122   // Check that we have a four byte shuffle.
9123   if (!isNByteElemShuffleMask(SVN, 4, 1))
9124     return SDValue();
9125 
9126   // Canonicalize the RHS being a BUILD_VECTOR when lowering to xxsplti32dx.
9127   if (RHS->getOpcode() != ISD::BUILD_VECTOR) {
9128     std::swap(LHS, RHS);
9129     VecShuffle = DAG.getCommutedVectorShuffle(*SVN);
9130     ShuffleMask = cast<ShuffleVectorSDNode>(VecShuffle)->getMask();
9131   }
9132 
9133   // Ensure that the RHS is a vector of constants.
9134   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
9135   if (!BVN)
9136     return SDValue();
9137 
9138   // Check if RHS is a splat of 4-bytes (or smaller).
9139   APInt APSplatValue, APSplatUndef;
9140   unsigned SplatBitSize;
9141   bool HasAnyUndefs;
9142   if (!BVN->isConstantSplat(APSplatValue, APSplatUndef, SplatBitSize,
9143                             HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
9144       SplatBitSize > 32)
9145     return SDValue();
9146 
9147   // Check that the shuffle mask matches the semantics of XXSPLTI32DX.
9148   // The instruction splats a constant C into two words of the source vector
9149   // producing { C, Unchanged, C, Unchanged } or { Unchanged, C, Unchanged, C }.
9150   // Thus we check that the shuffle mask is the equivalent  of
9151   // <0, [4-7], 2, [4-7]> or <[4-7], 1, [4-7], 3> respectively.
9152   // Note: the check above of isNByteElemShuffleMask() ensures that the bytes
9153   // within each word are consecutive, so we only need to check the first byte.
9154   SDValue Index;
9155   bool IsLE = Subtarget.isLittleEndian();
9156   if ((ShuffleMask[0] == 0 && ShuffleMask[8] == 8) &&
9157       (ShuffleMask[4] % 4 == 0 && ShuffleMask[12] % 4 == 0 &&
9158        ShuffleMask[4] > 15 && ShuffleMask[12] > 15))
9159     Index = DAG.getTargetConstant(IsLE ? 0 : 1, DL, MVT::i32);
9160   else if ((ShuffleMask[4] == 4 && ShuffleMask[12] == 12) &&
9161            (ShuffleMask[0] % 4 == 0 && ShuffleMask[8] % 4 == 0 &&
9162             ShuffleMask[0] > 15 && ShuffleMask[8] > 15))
9163     Index = DAG.getTargetConstant(IsLE ? 1 : 0, DL, MVT::i32);
9164   else
9165     return SDValue();
9166 
9167   // If the splat is narrower than 32-bits, we need to get the 32-bit value
9168   // for XXSPLTI32DX.
9169   unsigned SplatVal = APSplatValue.getZExtValue();
9170   for (; SplatBitSize < 32; SplatBitSize <<= 1)
9171     SplatVal |= (SplatVal << SplatBitSize);
9172 
9173   SDValue SplatNode = DAG.getNode(
9174       PPCISD::XXSPLTI32DX, DL, MVT::v2i64, DAG.getBitcast(MVT::v2i64, LHS),
9175       Index, DAG.getTargetConstant(SplatVal, DL, MVT::i32));
9176   return DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, SplatNode);
9177 }
9178 
9179 /// LowerROTL - Custom lowering for ROTL(v1i128) to vector_shuffle(v16i8).
9180 /// We lower ROTL(v1i128) to vector_shuffle(v16i8) only if shift amount is
9181 /// a multiple of 8. Otherwise convert it to a scalar rotation(i128)
9182 /// i.e (or (shl x, C1), (srl x, 128-C1)).
9183 SDValue PPCTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const {
9184   assert(Op.getOpcode() == ISD::ROTL && "Should only be called for ISD::ROTL");
9185   assert(Op.getValueType() == MVT::v1i128 &&
9186          "Only set v1i128 as custom, other type shouldn't reach here!");
9187   SDLoc dl(Op);
9188   SDValue N0 = peekThroughBitcasts(Op.getOperand(0));
9189   SDValue N1 = peekThroughBitcasts(Op.getOperand(1));
9190   unsigned SHLAmt = N1.getConstantOperandVal(0);
9191   if (SHLAmt % 8 == 0) {
9192     SmallVector<int, 16> Mask(16, 0);
9193     std::iota(Mask.begin(), Mask.end(), 0);
9194     std::rotate(Mask.begin(), Mask.begin() + SHLAmt / 8, Mask.end());
9195     if (SDValue Shuffle =
9196             DAG.getVectorShuffle(MVT::v16i8, dl, DAG.getBitcast(MVT::v16i8, N0),
9197                                  DAG.getUNDEF(MVT::v16i8), Mask))
9198       return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, Shuffle);
9199   }
9200   SDValue ArgVal = DAG.getBitcast(MVT::i128, N0);
9201   SDValue SHLOp = DAG.getNode(ISD::SHL, dl, MVT::i128, ArgVal,
9202                               DAG.getConstant(SHLAmt, dl, MVT::i32));
9203   SDValue SRLOp = DAG.getNode(ISD::SRL, dl, MVT::i128, ArgVal,
9204                               DAG.getConstant(128 - SHLAmt, dl, MVT::i32));
9205   SDValue OROp = DAG.getNode(ISD::OR, dl, MVT::i128, SHLOp, SRLOp);
9206   return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, OROp);
9207 }
9208 
9209 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
9210 /// is a shuffle we can handle in a single instruction, return it.  Otherwise,
9211 /// return the code it can be lowered into.  Worst case, it can always be
9212 /// lowered into a vperm.
9213 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
9214                                                SelectionDAG &DAG) const {
9215   SDLoc dl(Op);
9216   SDValue V1 = Op.getOperand(0);
9217   SDValue V2 = Op.getOperand(1);
9218   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9219 
9220   // Any nodes that were combined in the target-independent combiner prior
9221   // to vector legalization will not be sent to the target combine. Try to
9222   // combine it here.
9223   if (SDValue NewShuffle = combineVectorShuffle(SVOp, DAG)) {
9224     if (!isa<ShuffleVectorSDNode>(NewShuffle))
9225       return NewShuffle;
9226     Op = NewShuffle;
9227     SVOp = cast<ShuffleVectorSDNode>(Op);
9228     V1 = Op.getOperand(0);
9229     V2 = Op.getOperand(1);
9230   }
9231   EVT VT = Op.getValueType();
9232   bool isLittleEndian = Subtarget.isLittleEndian();
9233 
9234   unsigned ShiftElts, InsertAtByte;
9235   bool Swap = false;
9236 
9237   // If this is a load-and-splat, we can do that with a single instruction
9238   // in some cases. However if the load has multiple uses, we don't want to
9239   // combine it because that will just produce multiple loads.
9240   bool IsPermutedLoad = false;
9241   const SDValue *InputLoad = getNormalLoadInput(V1, IsPermutedLoad);
9242   if (InputLoad && Subtarget.hasVSX() && V2.isUndef() &&
9243       (PPC::isSplatShuffleMask(SVOp, 4) || PPC::isSplatShuffleMask(SVOp, 8)) &&
9244       InputLoad->hasOneUse()) {
9245     bool IsFourByte = PPC::isSplatShuffleMask(SVOp, 4);
9246     int SplatIdx =
9247       PPC::getSplatIdxForPPCMnemonics(SVOp, IsFourByte ? 4 : 8, DAG);
9248 
9249     // The splat index for permuted loads will be in the left half of the vector
9250     // which is strictly wider than the loaded value by 8 bytes. So we need to
9251     // adjust the splat index to point to the correct address in memory.
9252     if (IsPermutedLoad) {
9253       assert(isLittleEndian && "Unexpected permuted load on big endian target");
9254       SplatIdx += IsFourByte ? 2 : 1;
9255       assert((SplatIdx < (IsFourByte ? 4 : 2)) &&
9256              "Splat of a value outside of the loaded memory");
9257     }
9258 
9259     LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9260     // For 4-byte load-and-splat, we need Power9.
9261     if ((IsFourByte && Subtarget.hasP9Vector()) || !IsFourByte) {
9262       uint64_t Offset = 0;
9263       if (IsFourByte)
9264         Offset = isLittleEndian ? (3 - SplatIdx) * 4 : SplatIdx * 4;
9265       else
9266         Offset = isLittleEndian ? (1 - SplatIdx) * 8 : SplatIdx * 8;
9267 
9268       SDValue BasePtr = LD->getBasePtr();
9269       if (Offset != 0)
9270         BasePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
9271                               BasePtr, DAG.getIntPtrConstant(Offset, dl));
9272       SDValue Ops[] = {
9273         LD->getChain(),    // Chain
9274         BasePtr,           // BasePtr
9275         DAG.getValueType(Op.getValueType()) // VT
9276       };
9277       SDVTList VTL =
9278         DAG.getVTList(IsFourByte ? MVT::v4i32 : MVT::v2i64, MVT::Other);
9279       SDValue LdSplt =
9280         DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl, VTL,
9281                                 Ops, LD->getMemoryVT(), LD->getMemOperand());
9282       DAG.ReplaceAllUsesOfValueWith(InputLoad->getValue(1), LdSplt.getValue(1));
9283       if (LdSplt.getValueType() != SVOp->getValueType(0))
9284         LdSplt = DAG.getBitcast(SVOp->getValueType(0), LdSplt);
9285       return LdSplt;
9286     }
9287   }
9288   if (Subtarget.hasP9Vector() &&
9289       PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
9290                            isLittleEndian)) {
9291     if (Swap)
9292       std::swap(V1, V2);
9293     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9294     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2);
9295     if (ShiftElts) {
9296       SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2,
9297                                 DAG.getConstant(ShiftElts, dl, MVT::i32));
9298       SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl,
9299                                 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9300       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9301     }
9302     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2,
9303                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
9304     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9305   }
9306 
9307   if (Subtarget.hasPrefixInstrs()) {
9308     SDValue SplatInsertNode;
9309     if ((SplatInsertNode = lowerToXXSPLTI32DX(SVOp, DAG)))
9310       return SplatInsertNode;
9311   }
9312 
9313   if (Subtarget.hasP9Altivec()) {
9314     SDValue NewISDNode;
9315     if ((NewISDNode = lowerToVINSERTH(SVOp, DAG)))
9316       return NewISDNode;
9317 
9318     if ((NewISDNode = lowerToVINSERTB(SVOp, DAG)))
9319       return NewISDNode;
9320   }
9321 
9322   if (Subtarget.hasVSX() &&
9323       PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
9324     if (Swap)
9325       std::swap(V1, V2);
9326     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9327     SDValue Conv2 =
9328         DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2);
9329 
9330     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2,
9331                               DAG.getConstant(ShiftElts, dl, MVT::i32));
9332     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl);
9333   }
9334 
9335   if (Subtarget.hasVSX() &&
9336     PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
9337     if (Swap)
9338       std::swap(V1, V2);
9339     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
9340     SDValue Conv2 =
9341         DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2);
9342 
9343     SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2,
9344                               DAG.getConstant(ShiftElts, dl, MVT::i32));
9345     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI);
9346   }
9347 
9348   if (Subtarget.hasP9Vector()) {
9349      if (PPC::isXXBRHShuffleMask(SVOp)) {
9350       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
9351       SDValue ReveHWord = DAG.getNode(ISD::BSWAP, dl, MVT::v8i16, Conv);
9352       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord);
9353     } else if (PPC::isXXBRWShuffleMask(SVOp)) {
9354       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9355       SDValue ReveWord = DAG.getNode(ISD::BSWAP, dl, MVT::v4i32, Conv);
9356       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord);
9357     } else if (PPC::isXXBRDShuffleMask(SVOp)) {
9358       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
9359       SDValue ReveDWord = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Conv);
9360       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord);
9361     } else if (PPC::isXXBRQShuffleMask(SVOp)) {
9362       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1);
9363       SDValue ReveQWord = DAG.getNode(ISD::BSWAP, dl, MVT::v1i128, Conv);
9364       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord);
9365     }
9366   }
9367 
9368   if (Subtarget.hasVSX()) {
9369     if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
9370       int SplatIdx = PPC::getSplatIdxForPPCMnemonics(SVOp, 4, DAG);
9371 
9372       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9373       SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
9374                                   DAG.getConstant(SplatIdx, dl, MVT::i32));
9375       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
9376     }
9377 
9378     // Left shifts of 8 bytes are actually swaps. Convert accordingly.
9379     if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
9380       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
9381       SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
9382       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
9383     }
9384   }
9385 
9386   // Cases that are handled by instructions that take permute immediates
9387   // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
9388   // selected by the instruction selector.
9389   if (V2.isUndef()) {
9390     if (PPC::isSplatShuffleMask(SVOp, 1) ||
9391         PPC::isSplatShuffleMask(SVOp, 2) ||
9392         PPC::isSplatShuffleMask(SVOp, 4) ||
9393         PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
9394         PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
9395         PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
9396         PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
9397         PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
9398         PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
9399         PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
9400         PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
9401         PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) ||
9402         (Subtarget.hasP8Altivec() && (
9403          PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) ||
9404          PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) ||
9405          PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) {
9406       return Op;
9407     }
9408   }
9409 
9410   // Altivec has a variety of "shuffle immediates" that take two vector inputs
9411   // and produce a fixed permutation.  If any of these match, do not lower to
9412   // VPERM.
9413   unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
9414   if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9415       PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9416       PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
9417       PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
9418       PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
9419       PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
9420       PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
9421       PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
9422       PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
9423       (Subtarget.hasP8Altivec() && (
9424        PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9425        PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) ||
9426        PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG))))
9427     return Op;
9428 
9429   // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
9430   // perfect shuffle table to emit an optimal matching sequence.
9431   ArrayRef<int> PermMask = SVOp->getMask();
9432 
9433   unsigned PFIndexes[4];
9434   bool isFourElementShuffle = true;
9435   for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
9436     unsigned EltNo = 8;   // Start out undef.
9437     for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
9438       if (PermMask[i*4+j] < 0)
9439         continue;   // Undef, ignore it.
9440 
9441       unsigned ByteSource = PermMask[i*4+j];
9442       if ((ByteSource & 3) != j) {
9443         isFourElementShuffle = false;
9444         break;
9445       }
9446 
9447       if (EltNo == 8) {
9448         EltNo = ByteSource/4;
9449       } else if (EltNo != ByteSource/4) {
9450         isFourElementShuffle = false;
9451         break;
9452       }
9453     }
9454     PFIndexes[i] = EltNo;
9455   }
9456 
9457   // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
9458   // perfect shuffle vector to determine if it is cost effective to do this as
9459   // discrete instructions, or whether we should use a vperm.
9460   // For now, we skip this for little endian until such time as we have a
9461   // little-endian perfect shuffle table.
9462   if (isFourElementShuffle && !isLittleEndian) {
9463     // Compute the index in the perfect shuffle table.
9464     unsigned PFTableIndex =
9465       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
9466 
9467     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
9468     unsigned Cost  = (PFEntry >> 30);
9469 
9470     // Determining when to avoid vperm is tricky.  Many things affect the cost
9471     // of vperm, particularly how many times the perm mask needs to be computed.
9472     // For example, if the perm mask can be hoisted out of a loop or is already
9473     // used (perhaps because there are multiple permutes with the same shuffle
9474     // mask?) the vperm has a cost of 1.  OTOH, hoisting the permute mask out of
9475     // the loop requires an extra register.
9476     //
9477     // As a compromise, we only emit discrete instructions if the shuffle can be
9478     // generated in 3 or fewer operations.  When we have loop information
9479     // available, if this block is within a loop, we should avoid using vperm
9480     // for 3-operation perms and use a constant pool load instead.
9481     if (Cost < 3)
9482       return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
9483   }
9484 
9485   // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
9486   // vector that will get spilled to the constant pool.
9487   if (V2.isUndef()) V2 = V1;
9488 
9489   // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
9490   // that it is in input element units, not in bytes.  Convert now.
9491 
9492   // For little endian, the order of the input vectors is reversed, and
9493   // the permutation mask is complemented with respect to 31.  This is
9494   // necessary to produce proper semantics with the big-endian-biased vperm
9495   // instruction.
9496   EVT EltVT = V1.getValueType().getVectorElementType();
9497   unsigned BytesPerElement = EltVT.getSizeInBits()/8;
9498 
9499   SmallVector<SDValue, 16> ResultMask;
9500   for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
9501     unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
9502 
9503     for (unsigned j = 0; j != BytesPerElement; ++j)
9504       if (isLittleEndian)
9505         ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
9506                                              dl, MVT::i32));
9507       else
9508         ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
9509                                              MVT::i32));
9510   }
9511 
9512   ShufflesHandledWithVPERM++;
9513   SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
9514   LLVM_DEBUG(dbgs() << "Emitting a VPERM for the following shuffle:\n");
9515   LLVM_DEBUG(SVOp->dump());
9516   LLVM_DEBUG(dbgs() << "With the following permute control vector:\n");
9517   LLVM_DEBUG(VPermMask.dump());
9518 
9519   if (isLittleEndian)
9520     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
9521                        V2, V1, VPermMask);
9522   else
9523     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
9524                        V1, V2, VPermMask);
9525 }
9526 
9527 /// getVectorCompareInfo - Given an intrinsic, return false if it is not a
9528 /// vector comparison.  If it is, return true and fill in Opc/isDot with
9529 /// information about the intrinsic.
9530 static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
9531                                  bool &isDot, const PPCSubtarget &Subtarget) {
9532   unsigned IntrinsicID =
9533       cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
9534   CompareOpc = -1;
9535   isDot = false;
9536   switch (IntrinsicID) {
9537   default:
9538     return false;
9539   // Comparison predicates.
9540   case Intrinsic::ppc_altivec_vcmpbfp_p:
9541     CompareOpc = 966;
9542     isDot = true;
9543     break;
9544   case Intrinsic::ppc_altivec_vcmpeqfp_p:
9545     CompareOpc = 198;
9546     isDot = true;
9547     break;
9548   case Intrinsic::ppc_altivec_vcmpequb_p:
9549     CompareOpc = 6;
9550     isDot = true;
9551     break;
9552   case Intrinsic::ppc_altivec_vcmpequh_p:
9553     CompareOpc = 70;
9554     isDot = true;
9555     break;
9556   case Intrinsic::ppc_altivec_vcmpequw_p:
9557     CompareOpc = 134;
9558     isDot = true;
9559     break;
9560   case Intrinsic::ppc_altivec_vcmpequd_p:
9561     if (Subtarget.hasP8Altivec()) {
9562       CompareOpc = 199;
9563       isDot = true;
9564     } else
9565       return false;
9566     break;
9567   case Intrinsic::ppc_altivec_vcmpneb_p:
9568   case Intrinsic::ppc_altivec_vcmpneh_p:
9569   case Intrinsic::ppc_altivec_vcmpnew_p:
9570   case Intrinsic::ppc_altivec_vcmpnezb_p:
9571   case Intrinsic::ppc_altivec_vcmpnezh_p:
9572   case Intrinsic::ppc_altivec_vcmpnezw_p:
9573     if (Subtarget.hasP9Altivec()) {
9574       switch (IntrinsicID) {
9575       default:
9576         llvm_unreachable("Unknown comparison intrinsic.");
9577       case Intrinsic::ppc_altivec_vcmpneb_p:
9578         CompareOpc = 7;
9579         break;
9580       case Intrinsic::ppc_altivec_vcmpneh_p:
9581         CompareOpc = 71;
9582         break;
9583       case Intrinsic::ppc_altivec_vcmpnew_p:
9584         CompareOpc = 135;
9585         break;
9586       case Intrinsic::ppc_altivec_vcmpnezb_p:
9587         CompareOpc = 263;
9588         break;
9589       case Intrinsic::ppc_altivec_vcmpnezh_p:
9590         CompareOpc = 327;
9591         break;
9592       case Intrinsic::ppc_altivec_vcmpnezw_p:
9593         CompareOpc = 391;
9594         break;
9595       }
9596       isDot = true;
9597     } else
9598       return false;
9599     break;
9600   case Intrinsic::ppc_altivec_vcmpgefp_p:
9601     CompareOpc = 454;
9602     isDot = true;
9603     break;
9604   case Intrinsic::ppc_altivec_vcmpgtfp_p:
9605     CompareOpc = 710;
9606     isDot = true;
9607     break;
9608   case Intrinsic::ppc_altivec_vcmpgtsb_p:
9609     CompareOpc = 774;
9610     isDot = true;
9611     break;
9612   case Intrinsic::ppc_altivec_vcmpgtsh_p:
9613     CompareOpc = 838;
9614     isDot = true;
9615     break;
9616   case Intrinsic::ppc_altivec_vcmpgtsw_p:
9617     CompareOpc = 902;
9618     isDot = true;
9619     break;
9620   case Intrinsic::ppc_altivec_vcmpgtsd_p:
9621     if (Subtarget.hasP8Altivec()) {
9622       CompareOpc = 967;
9623       isDot = true;
9624     } else
9625       return false;
9626     break;
9627   case Intrinsic::ppc_altivec_vcmpgtub_p:
9628     CompareOpc = 518;
9629     isDot = true;
9630     break;
9631   case Intrinsic::ppc_altivec_vcmpgtuh_p:
9632     CompareOpc = 582;
9633     isDot = true;
9634     break;
9635   case Intrinsic::ppc_altivec_vcmpgtuw_p:
9636     CompareOpc = 646;
9637     isDot = true;
9638     break;
9639   case Intrinsic::ppc_altivec_vcmpgtud_p:
9640     if (Subtarget.hasP8Altivec()) {
9641       CompareOpc = 711;
9642       isDot = true;
9643     } else
9644       return false;
9645     break;
9646 
9647   case Intrinsic::ppc_altivec_vcmpequq:
9648   case Intrinsic::ppc_altivec_vcmpgtsq:
9649   case Intrinsic::ppc_altivec_vcmpgtuq:
9650     if (!Subtarget.isISA3_1())
9651       return false;
9652     switch (IntrinsicID) {
9653     default:
9654       llvm_unreachable("Unknown comparison intrinsic.");
9655     case Intrinsic::ppc_altivec_vcmpequq:
9656       CompareOpc = 455;
9657       break;
9658     case Intrinsic::ppc_altivec_vcmpgtsq:
9659       CompareOpc = 903;
9660       break;
9661     case Intrinsic::ppc_altivec_vcmpgtuq:
9662       CompareOpc = 647;
9663       break;
9664     }
9665     break;
9666 
9667   // VSX predicate comparisons use the same infrastructure
9668   case Intrinsic::ppc_vsx_xvcmpeqdp_p:
9669   case Intrinsic::ppc_vsx_xvcmpgedp_p:
9670   case Intrinsic::ppc_vsx_xvcmpgtdp_p:
9671   case Intrinsic::ppc_vsx_xvcmpeqsp_p:
9672   case Intrinsic::ppc_vsx_xvcmpgesp_p:
9673   case Intrinsic::ppc_vsx_xvcmpgtsp_p:
9674     if (Subtarget.hasVSX()) {
9675       switch (IntrinsicID) {
9676       case Intrinsic::ppc_vsx_xvcmpeqdp_p:
9677         CompareOpc = 99;
9678         break;
9679       case Intrinsic::ppc_vsx_xvcmpgedp_p:
9680         CompareOpc = 115;
9681         break;
9682       case Intrinsic::ppc_vsx_xvcmpgtdp_p:
9683         CompareOpc = 107;
9684         break;
9685       case Intrinsic::ppc_vsx_xvcmpeqsp_p:
9686         CompareOpc = 67;
9687         break;
9688       case Intrinsic::ppc_vsx_xvcmpgesp_p:
9689         CompareOpc = 83;
9690         break;
9691       case Intrinsic::ppc_vsx_xvcmpgtsp_p:
9692         CompareOpc = 75;
9693         break;
9694       }
9695       isDot = true;
9696     } else
9697       return false;
9698     break;
9699 
9700   // Normal Comparisons.
9701   case Intrinsic::ppc_altivec_vcmpbfp:
9702     CompareOpc = 966;
9703     break;
9704   case Intrinsic::ppc_altivec_vcmpeqfp:
9705     CompareOpc = 198;
9706     break;
9707   case Intrinsic::ppc_altivec_vcmpequb:
9708     CompareOpc = 6;
9709     break;
9710   case Intrinsic::ppc_altivec_vcmpequh:
9711     CompareOpc = 70;
9712     break;
9713   case Intrinsic::ppc_altivec_vcmpequw:
9714     CompareOpc = 134;
9715     break;
9716   case Intrinsic::ppc_altivec_vcmpequd:
9717     if (Subtarget.hasP8Altivec())
9718       CompareOpc = 199;
9719     else
9720       return false;
9721     break;
9722   case Intrinsic::ppc_altivec_vcmpneb:
9723   case Intrinsic::ppc_altivec_vcmpneh:
9724   case Intrinsic::ppc_altivec_vcmpnew:
9725   case Intrinsic::ppc_altivec_vcmpnezb:
9726   case Intrinsic::ppc_altivec_vcmpnezh:
9727   case Intrinsic::ppc_altivec_vcmpnezw:
9728     if (Subtarget.hasP9Altivec())
9729       switch (IntrinsicID) {
9730       default:
9731         llvm_unreachable("Unknown comparison intrinsic.");
9732       case Intrinsic::ppc_altivec_vcmpneb:
9733         CompareOpc = 7;
9734         break;
9735       case Intrinsic::ppc_altivec_vcmpneh:
9736         CompareOpc = 71;
9737         break;
9738       case Intrinsic::ppc_altivec_vcmpnew:
9739         CompareOpc = 135;
9740         break;
9741       case Intrinsic::ppc_altivec_vcmpnezb:
9742         CompareOpc = 263;
9743         break;
9744       case Intrinsic::ppc_altivec_vcmpnezh:
9745         CompareOpc = 327;
9746         break;
9747       case Intrinsic::ppc_altivec_vcmpnezw:
9748         CompareOpc = 391;
9749         break;
9750       }
9751     else
9752       return false;
9753     break;
9754   case Intrinsic::ppc_altivec_vcmpgefp:
9755     CompareOpc = 454;
9756     break;
9757   case Intrinsic::ppc_altivec_vcmpgtfp:
9758     CompareOpc = 710;
9759     break;
9760   case Intrinsic::ppc_altivec_vcmpgtsb:
9761     CompareOpc = 774;
9762     break;
9763   case Intrinsic::ppc_altivec_vcmpgtsh:
9764     CompareOpc = 838;
9765     break;
9766   case Intrinsic::ppc_altivec_vcmpgtsw:
9767     CompareOpc = 902;
9768     break;
9769   case Intrinsic::ppc_altivec_vcmpgtsd:
9770     if (Subtarget.hasP8Altivec())
9771       CompareOpc = 967;
9772     else
9773       return false;
9774     break;
9775   case Intrinsic::ppc_altivec_vcmpgtub:
9776     CompareOpc = 518;
9777     break;
9778   case Intrinsic::ppc_altivec_vcmpgtuh:
9779     CompareOpc = 582;
9780     break;
9781   case Intrinsic::ppc_altivec_vcmpgtuw:
9782     CompareOpc = 646;
9783     break;
9784   case Intrinsic::ppc_altivec_vcmpgtud:
9785     if (Subtarget.hasP8Altivec())
9786       CompareOpc = 711;
9787     else
9788       return false;
9789     break;
9790   case Intrinsic::ppc_altivec_vcmpequq_p:
9791   case Intrinsic::ppc_altivec_vcmpgtsq_p:
9792   case Intrinsic::ppc_altivec_vcmpgtuq_p:
9793     if (!Subtarget.isISA3_1())
9794       return false;
9795     switch (IntrinsicID) {
9796     default:
9797       llvm_unreachable("Unknown comparison intrinsic.");
9798     case Intrinsic::ppc_altivec_vcmpequq_p:
9799       CompareOpc = 455;
9800       break;
9801     case Intrinsic::ppc_altivec_vcmpgtsq_p:
9802       CompareOpc = 903;
9803       break;
9804     case Intrinsic::ppc_altivec_vcmpgtuq_p:
9805       CompareOpc = 647;
9806       break;
9807     }
9808     isDot = true;
9809     break;
9810   }
9811   return true;
9812 }
9813 
9814 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
9815 /// lower, do it, otherwise return null.
9816 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
9817                                                    SelectionDAG &DAG) const {
9818   unsigned IntrinsicID =
9819     cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
9820 
9821   SDLoc dl(Op);
9822 
9823   switch (IntrinsicID) {
9824   case Intrinsic::thread_pointer:
9825     // Reads the thread pointer register, used for __builtin_thread_pointer.
9826     if (Subtarget.isPPC64())
9827       return DAG.getRegister(PPC::X13, MVT::i64);
9828     return DAG.getRegister(PPC::R2, MVT::i32);
9829 
9830   case Intrinsic::ppc_mma_disassemble_acc:
9831   case Intrinsic::ppc_vsx_disassemble_pair: {
9832     int NumVecs = 2;
9833     SDValue WideVec = Op.getOperand(1);
9834     if (IntrinsicID == Intrinsic::ppc_mma_disassemble_acc) {
9835       NumVecs = 4;
9836       WideVec = DAG.getNode(PPCISD::XXMFACC, dl, MVT::v512i1, WideVec);
9837     }
9838     SmallVector<SDValue, 4> RetOps;
9839     for (int VecNo = 0; VecNo < NumVecs; VecNo++) {
9840       SDValue Extract = DAG.getNode(
9841           PPCISD::EXTRACT_VSX_REG, dl, MVT::v16i8, WideVec,
9842           DAG.getConstant(Subtarget.isLittleEndian() ? NumVecs - 1 - VecNo
9843                                                      : VecNo,
9844                           dl, MVT::i64));
9845       RetOps.push_back(Extract);
9846     }
9847     return DAG.getMergeValues(RetOps, dl);
9848   }
9849   }
9850 
9851   // If this is a lowered altivec predicate compare, CompareOpc is set to the
9852   // opcode number of the comparison.
9853   int CompareOpc;
9854   bool isDot;
9855   if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget))
9856     return SDValue();    // Don't custom lower most intrinsics.
9857 
9858   // If this is a non-dot comparison, make the VCMP node and we are done.
9859   if (!isDot) {
9860     SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
9861                               Op.getOperand(1), Op.getOperand(2),
9862                               DAG.getConstant(CompareOpc, dl, MVT::i32));
9863     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
9864   }
9865 
9866   // Create the PPCISD altivec 'dot' comparison node.
9867   SDValue Ops[] = {
9868     Op.getOperand(2),  // LHS
9869     Op.getOperand(3),  // RHS
9870     DAG.getConstant(CompareOpc, dl, MVT::i32)
9871   };
9872   EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
9873   SDValue CompNode = DAG.getNode(PPCISD::VCMP_rec, dl, VTs, Ops);
9874 
9875   // Now that we have the comparison, emit a copy from the CR to a GPR.
9876   // This is flagged to the above dot comparison.
9877   SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
9878                                 DAG.getRegister(PPC::CR6, MVT::i32),
9879                                 CompNode.getValue(1));
9880 
9881   // Unpack the result based on how the target uses it.
9882   unsigned BitNo;   // Bit # of CR6.
9883   bool InvertBit;   // Invert result?
9884   switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
9885   default:  // Can't happen, don't crash on invalid number though.
9886   case 0:   // Return the value of the EQ bit of CR6.
9887     BitNo = 0; InvertBit = false;
9888     break;
9889   case 1:   // Return the inverted value of the EQ bit of CR6.
9890     BitNo = 0; InvertBit = true;
9891     break;
9892   case 2:   // Return the value of the LT bit of CR6.
9893     BitNo = 2; InvertBit = false;
9894     break;
9895   case 3:   // Return the inverted value of the LT bit of CR6.
9896     BitNo = 2; InvertBit = true;
9897     break;
9898   }
9899 
9900   // Shift the bit into the low position.
9901   Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
9902                       DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
9903   // Isolate the bit.
9904   Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
9905                       DAG.getConstant(1, dl, MVT::i32));
9906 
9907   // If we are supposed to, toggle the bit.
9908   if (InvertBit)
9909     Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
9910                         DAG.getConstant(1, dl, MVT::i32));
9911   return Flags;
9912 }
9913 
9914 SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
9915                                                SelectionDAG &DAG) const {
9916   // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to
9917   // the beginning of the argument list.
9918   int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
9919   SDLoc DL(Op);
9920   switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) {
9921   case Intrinsic::ppc_cfence: {
9922     assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
9923     assert(Subtarget.isPPC64() && "Only 64-bit is supported for now.");
9924     return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other,
9925                                       DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
9926                                                   Op.getOperand(ArgStart + 1)),
9927                                       Op.getOperand(0)),
9928                    0);
9929   }
9930   default:
9931     break;
9932   }
9933   return SDValue();
9934 }
9935 
9936 // Lower scalar BSWAP64 to xxbrd.
9937 SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const {
9938   SDLoc dl(Op);
9939   // MTVSRDD
9940   Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0),
9941                    Op.getOperand(0));
9942   // XXBRD
9943   Op = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Op);
9944   // MFVSRD
9945   int VectorIndex = 0;
9946   if (Subtarget.isLittleEndian())
9947     VectorIndex = 1;
9948   Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op,
9949                    DAG.getTargetConstant(VectorIndex, dl, MVT::i32));
9950   return Op;
9951 }
9952 
9953 // ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be
9954 // compared to a value that is atomically loaded (atomic loads zero-extend).
9955 SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op,
9956                                                 SelectionDAG &DAG) const {
9957   assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP &&
9958          "Expecting an atomic compare-and-swap here.");
9959   SDLoc dl(Op);
9960   auto *AtomicNode = cast<AtomicSDNode>(Op.getNode());
9961   EVT MemVT = AtomicNode->getMemoryVT();
9962   if (MemVT.getSizeInBits() >= 32)
9963     return Op;
9964 
9965   SDValue CmpOp = Op.getOperand(2);
9966   // If this is already correctly zero-extended, leave it alone.
9967   auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits());
9968   if (DAG.MaskedValueIsZero(CmpOp, HighBits))
9969     return Op;
9970 
9971   // Clear the high bits of the compare operand.
9972   unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1;
9973   SDValue NewCmpOp =
9974     DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp,
9975                 DAG.getConstant(MaskVal, dl, MVT::i32));
9976 
9977   // Replace the existing compare operand with the properly zero-extended one.
9978   SmallVector<SDValue, 4> Ops;
9979   for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
9980     Ops.push_back(AtomicNode->getOperand(i));
9981   Ops[2] = NewCmpOp;
9982   MachineMemOperand *MMO = AtomicNode->getMemOperand();
9983   SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other);
9984   auto NodeTy =
9985     (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16;
9986   return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO);
9987 }
9988 
9989 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
9990                                                  SelectionDAG &DAG) const {
9991   SDLoc dl(Op);
9992   // Create a stack slot that is 16-byte aligned.
9993   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
9994   int FrameIdx = MFI.CreateStackObject(16, Align(16), false);
9995   EVT PtrVT = getPointerTy(DAG.getDataLayout());
9996   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
9997 
9998   // Store the input value into Value#0 of the stack slot.
9999   SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
10000                                MachinePointerInfo());
10001   // Load it out.
10002   return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
10003 }
10004 
10005 SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
10006                                                   SelectionDAG &DAG) const {
10007   assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
10008          "Should only be called for ISD::INSERT_VECTOR_ELT");
10009 
10010   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));
10011   // We have legal lowering for constant indices but not for variable ones.
10012   if (!C)
10013     return SDValue();
10014 
10015   EVT VT = Op.getValueType();
10016   SDLoc dl(Op);
10017   SDValue V1 = Op.getOperand(0);
10018   SDValue V2 = Op.getOperand(1);
10019   // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types.
10020   if (VT == MVT::v8i16 || VT == MVT::v16i8) {
10021     SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2);
10022     unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8;
10023     unsigned InsertAtElement = C->getZExtValue();
10024     unsigned InsertAtByte = InsertAtElement * BytesInEachElement;
10025     if (Subtarget.isLittleEndian()) {
10026       InsertAtByte = (16 - BytesInEachElement) - InsertAtByte;
10027     }
10028     return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz,
10029                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
10030   }
10031   return Op;
10032 }
10033 
10034 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
10035                                            SelectionDAG &DAG) const {
10036   SDLoc dl(Op);
10037   LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
10038   SDValue LoadChain = LN->getChain();
10039   SDValue BasePtr = LN->getBasePtr();
10040   EVT VT = Op.getValueType();
10041 
10042   if (VT != MVT::v256i1 && VT != MVT::v512i1)
10043     return Op;
10044 
10045   // Type v256i1 is used for pairs and v512i1 is used for accumulators.
10046   // Here we create 2 or 4 v16i8 loads to load the pair or accumulator value in
10047   // 2 or 4 vsx registers.
10048   assert((VT != MVT::v512i1 || Subtarget.hasMMA()) &&
10049          "Type unsupported without MMA");
10050   assert((VT != MVT::v256i1 || Subtarget.pairedVectorMemops()) &&
10051          "Type unsupported without paired vector support");
10052   Align Alignment = LN->getAlign();
10053   SmallVector<SDValue, 4> Loads;
10054   SmallVector<SDValue, 4> LoadChains;
10055   unsigned NumVecs = VT.getSizeInBits() / 128;
10056   for (unsigned Idx = 0; Idx < NumVecs; ++Idx) {
10057     SDValue Load =
10058         DAG.getLoad(MVT::v16i8, dl, LoadChain, BasePtr,
10059                     LN->getPointerInfo().getWithOffset(Idx * 16),
10060                     commonAlignment(Alignment, Idx * 16),
10061                     LN->getMemOperand()->getFlags(), LN->getAAInfo());
10062     BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
10063                           DAG.getConstant(16, dl, BasePtr.getValueType()));
10064     Loads.push_back(Load);
10065     LoadChains.push_back(Load.getValue(1));
10066   }
10067   if (Subtarget.isLittleEndian()) {
10068     std::reverse(Loads.begin(), Loads.end());
10069     std::reverse(LoadChains.begin(), LoadChains.end());
10070   }
10071   SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
10072   SDValue Value =
10073       DAG.getNode(VT == MVT::v512i1 ? PPCISD::ACC_BUILD : PPCISD::PAIR_BUILD,
10074                   dl, VT, Loads);
10075   SDValue RetOps[] = {Value, TF};
10076   return DAG.getMergeValues(RetOps, dl);
10077 }
10078 
10079 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
10080                                             SelectionDAG &DAG) const {
10081   SDLoc dl(Op);
10082   StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
10083   SDValue StoreChain = SN->getChain();
10084   SDValue BasePtr = SN->getBasePtr();
10085   SDValue Value = SN->getValue();
10086   EVT StoreVT = Value.getValueType();
10087 
10088   if (StoreVT != MVT::v256i1 && StoreVT != MVT::v512i1)
10089     return Op;
10090 
10091   // Type v256i1 is used for pairs and v512i1 is used for accumulators.
10092   // Here we create 2 or 4 v16i8 stores to store the pair or accumulator
10093   // underlying registers individually.
10094   assert((StoreVT != MVT::v512i1 || Subtarget.hasMMA()) &&
10095          "Type unsupported without MMA");
10096   assert((StoreVT != MVT::v256i1 || Subtarget.pairedVectorMemops()) &&
10097          "Type unsupported without paired vector support");
10098   Align Alignment = SN->getAlign();
10099   SmallVector<SDValue, 4> Stores;
10100   unsigned NumVecs = 2;
10101   if (StoreVT == MVT::v512i1) {
10102     Value = DAG.getNode(PPCISD::XXMFACC, dl, MVT::v512i1, Value);
10103     NumVecs = 4;
10104   }
10105   for (unsigned Idx = 0; Idx < NumVecs; ++Idx) {
10106     unsigned VecNum = Subtarget.isLittleEndian() ? NumVecs - 1 - Idx : Idx;
10107     SDValue Elt = DAG.getNode(PPCISD::EXTRACT_VSX_REG, dl, MVT::v16i8, Value,
10108                               DAG.getConstant(VecNum, dl, MVT::i64));
10109     SDValue Store =
10110         DAG.getStore(StoreChain, dl, Elt, BasePtr,
10111                      SN->getPointerInfo().getWithOffset(Idx * 16),
10112                      commonAlignment(Alignment, Idx * 16),
10113                      SN->getMemOperand()->getFlags(), SN->getAAInfo());
10114     BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
10115                           DAG.getConstant(16, dl, BasePtr.getValueType()));
10116     Stores.push_back(Store);
10117   }
10118   SDValue TF = DAG.getTokenFactor(dl, Stores);
10119   return TF;
10120 }
10121 
10122 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
10123   SDLoc dl(Op);
10124   if (Op.getValueType() == MVT::v4i32) {
10125     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
10126 
10127     SDValue Zero = getCanonicalConstSplat(0, 1, MVT::v4i32, DAG, dl);
10128     // +16 as shift amt.
10129     SDValue Neg16 = getCanonicalConstSplat(-16, 4, MVT::v4i32, DAG, dl);
10130     SDValue RHSSwap =   // = vrlw RHS, 16
10131       BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
10132 
10133     // Shrinkify inputs to v8i16.
10134     LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
10135     RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
10136     RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);
10137 
10138     // Low parts multiplied together, generating 32-bit results (we ignore the
10139     // top parts).
10140     SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
10141                                         LHS, RHS, DAG, dl, MVT::v4i32);
10142 
10143     SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
10144                                       LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
10145     // Shift the high parts up 16 bits.
10146     HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
10147                               Neg16, DAG, dl);
10148     return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
10149   } else if (Op.getValueType() == MVT::v16i8) {
10150     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
10151     bool isLittleEndian = Subtarget.isLittleEndian();
10152 
10153     // Multiply the even 8-bit parts, producing 16-bit sums.
10154     SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
10155                                            LHS, RHS, DAG, dl, MVT::v8i16);
10156     EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);
10157 
10158     // Multiply the odd 8-bit parts, producing 16-bit sums.
10159     SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
10160                                           LHS, RHS, DAG, dl, MVT::v8i16);
10161     OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);
10162 
10163     // Merge the results together.  Because vmuleub and vmuloub are
10164     // instructions with a big-endian bias, we must reverse the
10165     // element numbering and reverse the meaning of "odd" and "even"
10166     // when generating little endian code.
10167     int Ops[16];
10168     for (unsigned i = 0; i != 8; ++i) {
10169       if (isLittleEndian) {
10170         Ops[i*2  ] = 2*i;
10171         Ops[i*2+1] = 2*i+16;
10172       } else {
10173         Ops[i*2  ] = 2*i+1;
10174         Ops[i*2+1] = 2*i+1+16;
10175       }
10176     }
10177     if (isLittleEndian)
10178       return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
10179     else
10180       return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
10181   } else {
10182     llvm_unreachable("Unknown mul to lower!");
10183   }
10184 }
10185 
10186 SDValue PPCTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
10187   bool IsStrict = Op->isStrictFPOpcode();
10188   if (Op.getOperand(IsStrict ? 1 : 0).getValueType() == MVT::f128 &&
10189       !Subtarget.hasP9Vector())
10190     return SDValue();
10191 
10192   return Op;
10193 }
10194 
10195 // Custom lowering for fpext vf32 to v2f64
10196 SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
10197 
10198   assert(Op.getOpcode() == ISD::FP_EXTEND &&
10199          "Should only be called for ISD::FP_EXTEND");
10200 
10201   // FIXME: handle extends from half precision float vectors on P9.
10202   // We only want to custom lower an extend from v2f32 to v2f64.
10203   if (Op.getValueType() != MVT::v2f64 ||
10204       Op.getOperand(0).getValueType() != MVT::v2f32)
10205     return SDValue();
10206 
10207   SDLoc dl(Op);
10208   SDValue Op0 = Op.getOperand(0);
10209 
10210   switch (Op0.getOpcode()) {
10211   default:
10212     return SDValue();
10213   case ISD::EXTRACT_SUBVECTOR: {
10214     assert(Op0.getNumOperands() == 2 &&
10215            isa<ConstantSDNode>(Op0->getOperand(1)) &&
10216            "Node should have 2 operands with second one being a constant!");
10217 
10218     if (Op0.getOperand(0).getValueType() != MVT::v4f32)
10219       return SDValue();
10220 
10221     // Custom lower is only done for high or low doubleword.
10222     int Idx = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
10223     if (Idx % 2 != 0)
10224       return SDValue();
10225 
10226     // Since input is v4f32, at this point Idx is either 0 or 2.
10227     // Shift to get the doubleword position we want.
10228     int DWord = Idx >> 1;
10229 
10230     // High and low word positions are different on little endian.
10231     if (Subtarget.isLittleEndian())
10232       DWord ^= 0x1;
10233 
10234     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64,
10235                        Op0.getOperand(0), DAG.getConstant(DWord, dl, MVT::i32));
10236   }
10237   case ISD::FADD:
10238   case ISD::FMUL:
10239   case ISD::FSUB: {
10240     SDValue NewLoad[2];
10241     for (unsigned i = 0, ie = Op0.getNumOperands(); i != ie; ++i) {
10242       // Ensure both input are loads.
10243       SDValue LdOp = Op0.getOperand(i);
10244       if (LdOp.getOpcode() != ISD::LOAD)
10245         return SDValue();
10246       // Generate new load node.
10247       LoadSDNode *LD = cast<LoadSDNode>(LdOp);
10248       SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
10249       NewLoad[i] = DAG.getMemIntrinsicNode(
10250           PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
10251           LD->getMemoryVT(), LD->getMemOperand());
10252     }
10253     SDValue NewOp =
10254         DAG.getNode(Op0.getOpcode(), SDLoc(Op0), MVT::v4f32, NewLoad[0],
10255                     NewLoad[1], Op0.getNode()->getFlags());
10256     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewOp,
10257                        DAG.getConstant(0, dl, MVT::i32));
10258   }
10259   case ISD::LOAD: {
10260     LoadSDNode *LD = cast<LoadSDNode>(Op0);
10261     SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
10262     SDValue NewLd = DAG.getMemIntrinsicNode(
10263         PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
10264         LD->getMemoryVT(), LD->getMemOperand());
10265     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewLd,
10266                        DAG.getConstant(0, dl, MVT::i32));
10267   }
10268   }
10269   llvm_unreachable("ERROR:Should return for all cases within swtich.");
10270 }
10271 
10272 /// LowerOperation - Provide custom lowering hooks for some operations.
10273 ///
10274 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
10275   switch (Op.getOpcode()) {
10276   default: llvm_unreachable("Wasn't expecting to be able to lower this!");
10277   case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
10278   case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
10279   case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
10280   case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
10281   case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
10282   case ISD::SETCC:              return LowerSETCC(Op, DAG);
10283   case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
10284   case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
10285 
10286   // Variable argument lowering.
10287   case ISD::VASTART:            return LowerVASTART(Op, DAG);
10288   case ISD::VAARG:              return LowerVAARG(Op, DAG);
10289   case ISD::VACOPY:             return LowerVACOPY(Op, DAG);
10290 
10291   case ISD::STACKRESTORE:       return LowerSTACKRESTORE(Op, DAG);
10292   case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
10293   case ISD::GET_DYNAMIC_AREA_OFFSET:
10294     return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
10295 
10296   // Exception handling lowering.
10297   case ISD::EH_DWARF_CFA:       return LowerEH_DWARF_CFA(Op, DAG);
10298   case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
10299   case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);
10300 
10301   case ISD::LOAD:               return LowerLOAD(Op, DAG);
10302   case ISD::STORE:              return LowerSTORE(Op, DAG);
10303   case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
10304   case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
10305   case ISD::STRICT_FP_TO_UINT:
10306   case ISD::STRICT_FP_TO_SINT:
10307   case ISD::FP_TO_UINT:
10308   case ISD::FP_TO_SINT:         return LowerFP_TO_INT(Op, DAG, SDLoc(Op));
10309   case ISD::STRICT_UINT_TO_FP:
10310   case ISD::STRICT_SINT_TO_FP:
10311   case ISD::UINT_TO_FP:
10312   case ISD::SINT_TO_FP:         return LowerINT_TO_FP(Op, DAG);
10313   case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);
10314 
10315   // Lower 64-bit shifts.
10316   case ISD::SHL_PARTS:          return LowerSHL_PARTS(Op, DAG);
10317   case ISD::SRL_PARTS:          return LowerSRL_PARTS(Op, DAG);
10318   case ISD::SRA_PARTS:          return LowerSRA_PARTS(Op, DAG);
10319 
10320   case ISD::FSHL:               return LowerFunnelShift(Op, DAG);
10321   case ISD::FSHR:               return LowerFunnelShift(Op, DAG);
10322 
10323   // Vector-related lowering.
10324   case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
10325   case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
10326   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
10327   case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
10328   case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
10329   case ISD::MUL:                return LowerMUL(Op, DAG);
10330   case ISD::FP_EXTEND:          return LowerFP_EXTEND(Op, DAG);
10331   case ISD::STRICT_FP_ROUND:
10332   case ISD::FP_ROUND:
10333     return LowerFP_ROUND(Op, DAG);
10334   case ISD::ROTL:               return LowerROTL(Op, DAG);
10335 
10336   // For counter-based loop handling.
10337   case ISD::INTRINSIC_W_CHAIN:  return SDValue();
10338 
10339   case ISD::BITCAST:            return LowerBITCAST(Op, DAG);
10340 
10341   // Frame & Return address.
10342   case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
10343   case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
10344 
10345   case ISD::INTRINSIC_VOID:
10346     return LowerINTRINSIC_VOID(Op, DAG);
10347   case ISD::BSWAP:
10348     return LowerBSWAP(Op, DAG);
10349   case ISD::ATOMIC_CMP_SWAP:
10350     return LowerATOMIC_CMP_SWAP(Op, DAG);
10351   }
10352 }
10353 
10354 void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
10355                                            SmallVectorImpl<SDValue>&Results,
10356                                            SelectionDAG &DAG) const {
10357   SDLoc dl(N);
10358   switch (N->getOpcode()) {
10359   default:
10360     llvm_unreachable("Do not know how to custom type legalize this operation!");
10361   case ISD::READCYCLECOUNTER: {
10362     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
10363     SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0));
10364 
10365     Results.push_back(
10366         DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, RTB, RTB.getValue(1)));
10367     Results.push_back(RTB.getValue(2));
10368     break;
10369   }
10370   case ISD::INTRINSIC_W_CHAIN: {
10371     if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
10372         Intrinsic::loop_decrement)
10373       break;
10374 
10375     assert(N->getValueType(0) == MVT::i1 &&
10376            "Unexpected result type for CTR decrement intrinsic");
10377     EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
10378                                  N->getValueType(0));
10379     SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
10380     SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
10381                                  N->getOperand(1));
10382 
10383     Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt));
10384     Results.push_back(NewInt.getValue(1));
10385     break;
10386   }
10387   case ISD::VAARG: {
10388     if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
10389       return;
10390 
10391     EVT VT = N->getValueType(0);
10392 
10393     if (VT == MVT::i64) {
10394       SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);
10395 
10396       Results.push_back(NewNode);
10397       Results.push_back(NewNode.getValue(1));
10398     }
10399     return;
10400   }
10401   case ISD::STRICT_FP_TO_SINT:
10402   case ISD::STRICT_FP_TO_UINT:
10403   case ISD::FP_TO_SINT:
10404   case ISD::FP_TO_UINT:
10405     // LowerFP_TO_INT() can only handle f32 and f64.
10406     if (N->getOperand(N->isStrictFPOpcode() ? 1 : 0).getValueType() ==
10407         MVT::ppcf128)
10408       return;
10409     Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
10410     return;
10411   case ISD::TRUNCATE: {
10412     if (!N->getValueType(0).isVector())
10413       return;
10414     SDValue Lowered = LowerTRUNCATEVector(SDValue(N, 0), DAG);
10415     if (Lowered)
10416       Results.push_back(Lowered);
10417     return;
10418   }
10419   case ISD::FSHL:
10420   case ISD::FSHR:
10421     // Don't handle funnel shifts here.
10422     return;
10423   case ISD::BITCAST:
10424     // Don't handle bitcast here.
10425     return;
10426   case ISD::FP_EXTEND:
10427     SDValue Lowered = LowerFP_EXTEND(SDValue(N, 0), DAG);
10428     if (Lowered)
10429       Results.push_back(Lowered);
10430     return;
10431   }
10432 }
10433 
10434 //===----------------------------------------------------------------------===//
10435 //  Other Lowering Code
10436 //===----------------------------------------------------------------------===//
10437 
10438 static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
10439   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
10440   Function *Func = Intrinsic::getDeclaration(M, Id);
10441   return Builder.CreateCall(Func, {});
10442 }
10443 
10444 // The mappings for emitLeading/TrailingFence is taken from
10445 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
10446 Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
10447                                                  Instruction *Inst,
10448                                                  AtomicOrdering Ord) const {
10449   if (Ord == AtomicOrdering::SequentiallyConsistent)
10450     return callIntrinsic(Builder, Intrinsic::ppc_sync);
10451   if (isReleaseOrStronger(Ord))
10452     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
10453   return nullptr;
10454 }
10455 
10456 Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
10457                                                   Instruction *Inst,
10458                                                   AtomicOrdering Ord) const {
10459   if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
10460     // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
10461     // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
10462     // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
10463     if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
10464       return Builder.CreateCall(
10465           Intrinsic::getDeclaration(
10466               Builder.GetInsertBlock()->getParent()->getParent(),
10467               Intrinsic::ppc_cfence, {Inst->getType()}),
10468           {Inst});
10469     // FIXME: Can use isync for rmw operation.
10470     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
10471   }
10472   return nullptr;
10473 }
10474 
10475 MachineBasicBlock *
10476 PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
10477                                     unsigned AtomicSize,
10478                                     unsigned BinOpcode,
10479                                     unsigned CmpOpcode,
10480                                     unsigned CmpPred) const {
10481   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
10482   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10483 
10484   auto LoadMnemonic = PPC::LDARX;
10485   auto StoreMnemonic = PPC::STDCX;
10486   switch (AtomicSize) {
10487   default:
10488     llvm_unreachable("Unexpected size of atomic entity");
10489   case 1:
10490     LoadMnemonic = PPC::LBARX;
10491     StoreMnemonic = PPC::STBCX;
10492     assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
10493     break;
10494   case 2:
10495     LoadMnemonic = PPC::LHARX;
10496     StoreMnemonic = PPC::STHCX;
10497     assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
10498     break;
10499   case 4:
10500     LoadMnemonic = PPC::LWARX;
10501     StoreMnemonic = PPC::STWCX;
10502     break;
10503   case 8:
10504     LoadMnemonic = PPC::LDARX;
10505     StoreMnemonic = PPC::STDCX;
10506     break;
10507   }
10508 
10509   const BasicBlock *LLVM_BB = BB->getBasicBlock();
10510   MachineFunction *F = BB->getParent();
10511   MachineFunction::iterator It = ++BB->getIterator();
10512 
10513   Register dest = MI.getOperand(0).getReg();
10514   Register ptrA = MI.getOperand(1).getReg();
10515   Register ptrB = MI.getOperand(2).getReg();
10516   Register incr = MI.getOperand(3).getReg();
10517   DebugLoc dl = MI.getDebugLoc();
10518 
10519   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
10520   MachineBasicBlock *loop2MBB =
10521     CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
10522   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
10523   F->insert(It, loopMBB);
10524   if (CmpOpcode)
10525     F->insert(It, loop2MBB);
10526   F->insert(It, exitMBB);
10527   exitMBB->splice(exitMBB->begin(), BB,
10528                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
10529   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
10530 
10531   MachineRegisterInfo &RegInfo = F->getRegInfo();
10532   Register TmpReg = (!BinOpcode) ? incr :
10533     RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass
10534                                            : &PPC::GPRCRegClass);
10535 
10536   //  thisMBB:
10537   //   ...
10538   //   fallthrough --> loopMBB
10539   BB->addSuccessor(loopMBB);
10540 
10541   //  loopMBB:
10542   //   l[wd]arx dest, ptr
10543   //   add r0, dest, incr
10544   //   st[wd]cx. r0, ptr
10545   //   bne- loopMBB
10546   //   fallthrough --> exitMBB
10547 
10548   // For max/min...
10549   //  loopMBB:
10550   //   l[wd]arx dest, ptr
10551   //   cmpl?[wd] incr, dest
10552   //   bgt exitMBB
10553   //  loop2MBB:
10554   //   st[wd]cx. dest, ptr
10555   //   bne- loopMBB
10556   //   fallthrough --> exitMBB
10557 
10558   BB = loopMBB;
10559   BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
10560     .addReg(ptrA).addReg(ptrB);
10561   if (BinOpcode)
10562     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
10563   if (CmpOpcode) {
10564     // Signed comparisons of byte or halfword values must be sign-extended.
10565     if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
10566       Register ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
10567       BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
10568               ExtReg).addReg(dest);
10569       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
10570         .addReg(incr).addReg(ExtReg);
10571     } else
10572       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
10573         .addReg(incr).addReg(dest);
10574 
10575     BuildMI(BB, dl, TII->get(PPC::BCC))
10576       .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
10577     BB->addSuccessor(loop2MBB);
10578     BB->addSuccessor(exitMBB);
10579     BB = loop2MBB;
10580   }
10581   BuildMI(BB, dl, TII->get(StoreMnemonic))
10582     .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
10583   BuildMI(BB, dl, TII->get(PPC::BCC))
10584     .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
10585   BB->addSuccessor(loopMBB);
10586   BB->addSuccessor(exitMBB);
10587 
10588   //  exitMBB:
10589   //   ...
10590   BB = exitMBB;
10591   return BB;
10592 }
10593 
10594 MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary(
10595     MachineInstr &MI, MachineBasicBlock *BB,
10596     bool is8bit, // operation
10597     unsigned BinOpcode, unsigned CmpOpcode, unsigned CmpPred) const {
10598   // If we support part-word atomic mnemonics, just use them
10599   if (Subtarget.hasPartwordAtomics())
10600     return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, CmpOpcode,
10601                             CmpPred);
10602 
10603   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
10604   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10605   // In 64 bit mode we have to use 64 bits for addresses, even though the
10606   // lwarx/stwcx are 32 bits.  With the 32-bit atomics we can use address
10607   // registers without caring whether they're 32 or 64, but here we're
10608   // doing actual arithmetic on the addresses.
10609   bool is64bit = Subtarget.isPPC64();
10610   bool isLittleEndian = Subtarget.isLittleEndian();
10611   unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
10612 
10613   const BasicBlock *LLVM_BB = BB->getBasicBlock();
10614   MachineFunction *F = BB->getParent();
10615   MachineFunction::iterator It = ++BB->getIterator();
10616 
10617   Register dest = MI.getOperand(0).getReg();
10618   Register ptrA = MI.getOperand(1).getReg();
10619   Register ptrB = MI.getOperand(2).getReg();
10620   Register incr = MI.getOperand(3).getReg();
10621   DebugLoc dl = MI.getDebugLoc();
10622 
10623   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
10624   MachineBasicBlock *loop2MBB =
10625       CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
10626   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
10627   F->insert(It, loopMBB);
10628   if (CmpOpcode)
10629     F->insert(It, loop2MBB);
10630   F->insert(It, exitMBB);
10631   exitMBB->splice(exitMBB->begin(), BB,
10632                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
10633   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
10634 
10635   MachineRegisterInfo &RegInfo = F->getRegInfo();
10636   const TargetRegisterClass *RC =
10637       is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
10638   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
10639 
10640   Register PtrReg = RegInfo.createVirtualRegister(RC);
10641   Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
10642   Register ShiftReg =
10643       isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
10644   Register Incr2Reg = RegInfo.createVirtualRegister(GPRC);
10645   Register MaskReg = RegInfo.createVirtualRegister(GPRC);
10646   Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
10647   Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
10648   Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
10649   Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
10650   Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
10651   Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
10652   Register Ptr1Reg;
10653   Register TmpReg =
10654       (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC);
10655 
10656   //  thisMBB:
10657   //   ...
10658   //   fallthrough --> loopMBB
10659   BB->addSuccessor(loopMBB);
10660 
10661   // The 4-byte load must be aligned, while a char or short may be
10662   // anywhere in the word.  Hence all this nasty bookkeeping code.
10663   //   add ptr1, ptrA, ptrB [copy if ptrA==0]
10664   //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
10665   //   xori shift, shift1, 24 [16]
10666   //   rlwinm ptr, ptr1, 0, 0, 29
10667   //   slw incr2, incr, shift
10668   //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
10669   //   slw mask, mask2, shift
10670   //  loopMBB:
10671   //   lwarx tmpDest, ptr
10672   //   add tmp, tmpDest, incr2
10673   //   andc tmp2, tmpDest, mask
10674   //   and tmp3, tmp, mask
10675   //   or tmp4, tmp3, tmp2
10676   //   stwcx. tmp4, ptr
10677   //   bne- loopMBB
10678   //   fallthrough --> exitMBB
10679   //   srw dest, tmpDest, shift
10680   if (ptrA != ZeroReg) {
10681     Ptr1Reg = RegInfo.createVirtualRegister(RC);
10682     BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
10683         .addReg(ptrA)
10684         .addReg(ptrB);
10685   } else {
10686     Ptr1Reg = ptrB;
10687   }
10688   // We need use 32-bit subregister to avoid mismatch register class in 64-bit
10689   // mode.
10690   BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
10691       .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
10692       .addImm(3)
10693       .addImm(27)
10694       .addImm(is8bit ? 28 : 27);
10695   if (!isLittleEndian)
10696     BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
10697         .addReg(Shift1Reg)
10698         .addImm(is8bit ? 24 : 16);
10699   if (is64bit)
10700     BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
10701         .addReg(Ptr1Reg)
10702         .addImm(0)
10703         .addImm(61);
10704   else
10705     BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
10706         .addReg(Ptr1Reg)
10707         .addImm(0)
10708         .addImm(0)
10709         .addImm(29);
10710   BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg).addReg(incr).addReg(ShiftReg);
10711   if (is8bit)
10712     BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
10713   else {
10714     BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
10715     BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
10716         .addReg(Mask3Reg)
10717         .addImm(65535);
10718   }
10719   BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
10720       .addReg(Mask2Reg)
10721       .addReg(ShiftReg);
10722 
10723   BB = loopMBB;
10724   BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
10725       .addReg(ZeroReg)
10726       .addReg(PtrReg);
10727   if (BinOpcode)
10728     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
10729         .addReg(Incr2Reg)
10730         .addReg(TmpDestReg);
10731   BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
10732       .addReg(TmpDestReg)
10733       .addReg(MaskReg);
10734   BuildMI(BB, dl, TII->get(PPC::AND), Tmp3Reg).addReg(TmpReg).addReg(MaskReg);
10735   if (CmpOpcode) {
10736     // For unsigned comparisons, we can directly compare the shifted values.
10737     // For signed comparisons we shift and sign extend.
10738     Register SReg = RegInfo.createVirtualRegister(GPRC);
10739     BuildMI(BB, dl, TII->get(PPC::AND), SReg)
10740         .addReg(TmpDestReg)
10741         .addReg(MaskReg);
10742     unsigned ValueReg = SReg;
10743     unsigned CmpReg = Incr2Reg;
10744     if (CmpOpcode == PPC::CMPW) {
10745       ValueReg = RegInfo.createVirtualRegister(GPRC);
10746       BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
10747           .addReg(SReg)
10748           .addReg(ShiftReg);
10749       Register ValueSReg = RegInfo.createVirtualRegister(GPRC);
10750       BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
10751           .addReg(ValueReg);
10752       ValueReg = ValueSReg;
10753       CmpReg = incr;
10754     }
10755     BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
10756         .addReg(CmpReg)
10757         .addReg(ValueReg);
10758     BuildMI(BB, dl, TII->get(PPC::BCC))
10759         .addImm(CmpPred)
10760         .addReg(PPC::CR0)
10761         .addMBB(exitMBB);
10762     BB->addSuccessor(loop2MBB);
10763     BB->addSuccessor(exitMBB);
10764     BB = loop2MBB;
10765   }
10766   BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg).addReg(Tmp3Reg).addReg(Tmp2Reg);
10767   BuildMI(BB, dl, TII->get(PPC::STWCX))
10768       .addReg(Tmp4Reg)
10769       .addReg(ZeroReg)
10770       .addReg(PtrReg);
10771   BuildMI(BB, dl, TII->get(PPC::BCC))
10772       .addImm(PPC::PRED_NE)
10773       .addReg(PPC::CR0)
10774       .addMBB(loopMBB);
10775   BB->addSuccessor(loopMBB);
10776   BB->addSuccessor(exitMBB);
10777 
10778   //  exitMBB:
10779   //   ...
10780   BB = exitMBB;
10781   BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
10782       .addReg(TmpDestReg)
10783       .addReg(ShiftReg);
10784   return BB;
10785 }
10786 
10787 llvm::MachineBasicBlock *
10788 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
10789                                     MachineBasicBlock *MBB) const {
10790   DebugLoc DL = MI.getDebugLoc();
10791   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10792   const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
10793 
10794   MachineFunction *MF = MBB->getParent();
10795   MachineRegisterInfo &MRI = MF->getRegInfo();
10796 
10797   const BasicBlock *BB = MBB->getBasicBlock();
10798   MachineFunction::iterator I = ++MBB->getIterator();
10799 
10800   Register DstReg = MI.getOperand(0).getReg();
10801   const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
10802   assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
10803   Register mainDstReg = MRI.createVirtualRegister(RC);
10804   Register restoreDstReg = MRI.createVirtualRegister(RC);
10805 
10806   MVT PVT = getPointerTy(MF->getDataLayout());
10807   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
10808          "Invalid Pointer Size!");
10809   // For v = setjmp(buf), we generate
10810   //
10811   // thisMBB:
10812   //  SjLjSetup mainMBB
10813   //  bl mainMBB
10814   //  v_restore = 1
10815   //  b sinkMBB
10816   //
10817   // mainMBB:
10818   //  buf[LabelOffset] = LR
10819   //  v_main = 0
10820   //
10821   // sinkMBB:
10822   //  v = phi(main, restore)
10823   //
10824 
10825   MachineBasicBlock *thisMBB = MBB;
10826   MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
10827   MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
10828   MF->insert(I, mainMBB);
10829   MF->insert(I, sinkMBB);
10830 
10831   MachineInstrBuilder MIB;
10832 
10833   // Transfer the remainder of BB and its successor edges to sinkMBB.
10834   sinkMBB->splice(sinkMBB->begin(), MBB,
10835                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
10836   sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
10837 
10838   // Note that the structure of the jmp_buf used here is not compatible
10839   // with that used by libc, and is not designed to be. Specifically, it
10840   // stores only those 'reserved' registers that LLVM does not otherwise
10841   // understand how to spill. Also, by convention, by the time this
10842   // intrinsic is called, Clang has already stored the frame address in the
10843   // first slot of the buffer and stack address in the third. Following the
10844   // X86 target code, we'll store the jump address in the second slot. We also
10845   // need to save the TOC pointer (R2) to handle jumps between shared
10846   // libraries, and that will be stored in the fourth slot. The thread
10847   // identifier (R13) is not affected.
10848 
10849   // thisMBB:
10850   const int64_t LabelOffset = 1 * PVT.getStoreSize();
10851   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
10852   const int64_t BPOffset    = 4 * PVT.getStoreSize();
10853 
10854   // Prepare IP either in reg.
10855   const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
10856   Register LabelReg = MRI.createVirtualRegister(PtrRC);
10857   Register BufReg = MI.getOperand(1).getReg();
10858 
10859   if (Subtarget.is64BitELFABI()) {
10860     setUsesTOCBasePtr(*MBB->getParent());
10861     MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
10862               .addReg(PPC::X2)
10863               .addImm(TOCOffset)
10864               .addReg(BufReg)
10865               .cloneMemRefs(MI);
10866   }
10867 
10868   // Naked functions never have a base pointer, and so we use r1. For all
10869   // other functions, this decision must be delayed until during PEI.
10870   unsigned BaseReg;
10871   if (MF->getFunction().hasFnAttribute(Attribute::Naked))
10872     BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
10873   else
10874     BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;
10875 
10876   MIB = BuildMI(*thisMBB, MI, DL,
10877                 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
10878             .addReg(BaseReg)
10879             .addImm(BPOffset)
10880             .addReg(BufReg)
10881             .cloneMemRefs(MI);
10882 
10883   // Setup
10884   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
10885   MIB.addRegMask(TRI->getNoPreservedMask());
10886 
10887   BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);
10888 
10889   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
10890           .addMBB(mainMBB);
10891   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);
10892 
10893   thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
10894   thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());
10895 
10896   // mainMBB:
10897   //  mainDstReg = 0
10898   MIB =
10899       BuildMI(mainMBB, DL,
10900               TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
10901 
10902   // Store IP
10903   if (Subtarget.isPPC64()) {
10904     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
10905             .addReg(LabelReg)
10906             .addImm(LabelOffset)
10907             .addReg(BufReg);
10908   } else {
10909     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
10910             .addReg(LabelReg)
10911             .addImm(LabelOffset)
10912             .addReg(BufReg);
10913   }
10914   MIB.cloneMemRefs(MI);
10915 
10916   BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
10917   mainMBB->addSuccessor(sinkMBB);
10918 
10919   // sinkMBB:
10920   BuildMI(*sinkMBB, sinkMBB->begin(), DL,
10921           TII->get(PPC::PHI), DstReg)
10922     .addReg(mainDstReg).addMBB(mainMBB)
10923     .addReg(restoreDstReg).addMBB(thisMBB);
10924 
10925   MI.eraseFromParent();
10926   return sinkMBB;
10927 }
10928 
10929 MachineBasicBlock *
10930 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
10931                                      MachineBasicBlock *MBB) const {
10932   DebugLoc DL = MI.getDebugLoc();
10933   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10934 
10935   MachineFunction *MF = MBB->getParent();
10936   MachineRegisterInfo &MRI = MF->getRegInfo();
10937 
10938   MVT PVT = getPointerTy(MF->getDataLayout());
10939   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
10940          "Invalid Pointer Size!");
10941 
10942   const TargetRegisterClass *RC =
10943     (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
10944   Register Tmp = MRI.createVirtualRegister(RC);
10945   // Since FP is only updated here but NOT referenced, it's treated as GPR.
10946   unsigned FP  = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
10947   unsigned SP  = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
10948   unsigned BP =
10949       (PVT == MVT::i64)
10950           ? PPC::X30
10951           : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
10952                                                               : PPC::R30);
10953 
10954   MachineInstrBuilder MIB;
10955 
10956   const int64_t LabelOffset = 1 * PVT.getStoreSize();
10957   const int64_t SPOffset    = 2 * PVT.getStoreSize();
10958   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
10959   const int64_t BPOffset    = 4 * PVT.getStoreSize();
10960 
10961   Register BufReg = MI.getOperand(0).getReg();
10962 
10963   // Reload FP (the jumped-to function may not have had a
10964   // frame pointer, and if so, then its r31 will be restored
10965   // as necessary).
10966   if (PVT == MVT::i64) {
10967     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
10968             .addImm(0)
10969             .addReg(BufReg);
10970   } else {
10971     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
10972             .addImm(0)
10973             .addReg(BufReg);
10974   }
10975   MIB.cloneMemRefs(MI);
10976 
10977   // Reload IP
10978   if (PVT == MVT::i64) {
10979     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
10980             .addImm(LabelOffset)
10981             .addReg(BufReg);
10982   } else {
10983     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
10984             .addImm(LabelOffset)
10985             .addReg(BufReg);
10986   }
10987   MIB.cloneMemRefs(MI);
10988 
10989   // Reload SP
10990   if (PVT == MVT::i64) {
10991     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
10992             .addImm(SPOffset)
10993             .addReg(BufReg);
10994   } else {
10995     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
10996             .addImm(SPOffset)
10997             .addReg(BufReg);
10998   }
10999   MIB.cloneMemRefs(MI);
11000 
11001   // Reload BP
11002   if (PVT == MVT::i64) {
11003     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
11004             .addImm(BPOffset)
11005             .addReg(BufReg);
11006   } else {
11007     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
11008             .addImm(BPOffset)
11009             .addReg(BufReg);
11010   }
11011   MIB.cloneMemRefs(MI);
11012 
11013   // Reload TOC
11014   if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
11015     setUsesTOCBasePtr(*MBB->getParent());
11016     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
11017               .addImm(TOCOffset)
11018               .addReg(BufReg)
11019               .cloneMemRefs(MI);
11020   }
11021 
11022   // Jump
11023   BuildMI(*MBB, MI, DL,
11024           TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
11025   BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));
11026 
11027   MI.eraseFromParent();
11028   return MBB;
11029 }
11030 
11031 bool PPCTargetLowering::hasInlineStackProbe(MachineFunction &MF) const {
11032   // If the function specifically requests inline stack probes, emit them.
11033   if (MF.getFunction().hasFnAttribute("probe-stack"))
11034     return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
11035            "inline-asm";
11036   return false;
11037 }
11038 
11039 unsigned PPCTargetLowering::getStackProbeSize(MachineFunction &MF) const {
11040   const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
11041   unsigned StackAlign = TFI->getStackAlignment();
11042   assert(StackAlign >= 1 && isPowerOf2_32(StackAlign) &&
11043          "Unexpected stack alignment");
11044   // The default stack probe size is 4096 if the function has no
11045   // stack-probe-size attribute.
11046   unsigned StackProbeSize = 4096;
11047   const Function &Fn = MF.getFunction();
11048   if (Fn.hasFnAttribute("stack-probe-size"))
11049     Fn.getFnAttribute("stack-probe-size")
11050         .getValueAsString()
11051         .getAsInteger(0, StackProbeSize);
11052   // Round down to the stack alignment.
11053   StackProbeSize &= ~(StackAlign - 1);
11054   return StackProbeSize ? StackProbeSize : StackAlign;
11055 }
11056 
11057 // Lower dynamic stack allocation with probing. `emitProbedAlloca` is splitted
11058 // into three phases. In the first phase, it uses pseudo instruction
11059 // PREPARE_PROBED_ALLOCA to get the future result of actual FramePointer and
11060 // FinalStackPtr. In the second phase, it generates a loop for probing blocks.
11061 // At last, it uses pseudo instruction DYNAREAOFFSET to get the future result of
11062 // MaxCallFrameSize so that it can calculate correct data area pointer.
11063 MachineBasicBlock *
11064 PPCTargetLowering::emitProbedAlloca(MachineInstr &MI,
11065                                     MachineBasicBlock *MBB) const {
11066   const bool isPPC64 = Subtarget.isPPC64();
11067   MachineFunction *MF = MBB->getParent();
11068   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11069   DebugLoc DL = MI.getDebugLoc();
11070   const unsigned ProbeSize = getStackProbeSize(*MF);
11071   const BasicBlock *ProbedBB = MBB->getBasicBlock();
11072   MachineRegisterInfo &MRI = MF->getRegInfo();
11073   // The CFG of probing stack looks as
11074   //         +-----+
11075   //         | MBB |
11076   //         +--+--+
11077   //            |
11078   //       +----v----+
11079   //  +--->+ TestMBB +---+
11080   //  |    +----+----+   |
11081   //  |         |        |
11082   //  |   +-----v----+   |
11083   //  +---+ BlockMBB |   |
11084   //      +----------+   |
11085   //                     |
11086   //       +---------+   |
11087   //       | TailMBB +<--+
11088   //       +---------+
11089   // In MBB, calculate previous frame pointer and final stack pointer.
11090   // In TestMBB, test if sp is equal to final stack pointer, if so, jump to
11091   // TailMBB. In BlockMBB, update the sp atomically and jump back to TestMBB.
11092   // TailMBB is spliced via \p MI.
11093   MachineBasicBlock *TestMBB = MF->CreateMachineBasicBlock(ProbedBB);
11094   MachineBasicBlock *TailMBB = MF->CreateMachineBasicBlock(ProbedBB);
11095   MachineBasicBlock *BlockMBB = MF->CreateMachineBasicBlock(ProbedBB);
11096 
11097   MachineFunction::iterator MBBIter = ++MBB->getIterator();
11098   MF->insert(MBBIter, TestMBB);
11099   MF->insert(MBBIter, BlockMBB);
11100   MF->insert(MBBIter, TailMBB);
11101 
11102   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
11103   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
11104 
11105   Register DstReg = MI.getOperand(0).getReg();
11106   Register NegSizeReg = MI.getOperand(1).getReg();
11107   Register SPReg = isPPC64 ? PPC::X1 : PPC::R1;
11108   Register FinalStackPtr = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11109   Register FramePointer = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11110   Register ActualNegSizeReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11111 
11112   // Since value of NegSizeReg might be realigned in prologepilog, insert a
11113   // PREPARE_PROBED_ALLOCA pseudo instruction to get actual FramePointer and
11114   // NegSize.
11115   unsigned ProbeOpc;
11116   if (!MRI.hasOneNonDBGUse(NegSizeReg))
11117     ProbeOpc =
11118         isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_64 : PPC::PREPARE_PROBED_ALLOCA_32;
11119   else
11120     // By introducing PREPARE_PROBED_ALLOCA_NEGSIZE_OPT, ActualNegSizeReg
11121     // and NegSizeReg will be allocated in the same phyreg to avoid
11122     // redundant copy when NegSizeReg has only one use which is current MI and
11123     // will be replaced by PREPARE_PROBED_ALLOCA then.
11124     ProbeOpc = isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_64
11125                        : PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_32;
11126   BuildMI(*MBB, {MI}, DL, TII->get(ProbeOpc), FramePointer)
11127       .addDef(ActualNegSizeReg)
11128       .addReg(NegSizeReg)
11129       .add(MI.getOperand(2))
11130       .add(MI.getOperand(3));
11131 
11132   // Calculate final stack pointer, which equals to SP + ActualNegSize.
11133   BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4),
11134           FinalStackPtr)
11135       .addReg(SPReg)
11136       .addReg(ActualNegSizeReg);
11137 
11138   // Materialize a scratch register for update.
11139   int64_t NegProbeSize = -(int64_t)ProbeSize;
11140   assert(isInt<32>(NegProbeSize) && "Unhandled probe size!");
11141   Register ScratchReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11142   if (!isInt<16>(NegProbeSize)) {
11143     Register TempReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11144     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LIS8 : PPC::LIS), TempReg)
11145         .addImm(NegProbeSize >> 16);
11146     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ORI8 : PPC::ORI),
11147             ScratchReg)
11148         .addReg(TempReg)
11149         .addImm(NegProbeSize & 0xFFFF);
11150   } else
11151     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LI8 : PPC::LI), ScratchReg)
11152         .addImm(NegProbeSize);
11153 
11154   {
11155     // Probing leading residual part.
11156     Register Div = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11157     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::DIVD : PPC::DIVW), Div)
11158         .addReg(ActualNegSizeReg)
11159         .addReg(ScratchReg);
11160     Register Mul = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11161     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::MULLD : PPC::MULLW), Mul)
11162         .addReg(Div)
11163         .addReg(ScratchReg);
11164     Register NegMod = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11165     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::SUBF8 : PPC::SUBF), NegMod)
11166         .addReg(Mul)
11167         .addReg(ActualNegSizeReg);
11168     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
11169         .addReg(FramePointer)
11170         .addReg(SPReg)
11171         .addReg(NegMod);
11172   }
11173 
11174   {
11175     // Remaining part should be multiple of ProbeSize.
11176     Register CmpResult = MRI.createVirtualRegister(&PPC::CRRCRegClass);
11177     BuildMI(TestMBB, DL, TII->get(isPPC64 ? PPC::CMPD : PPC::CMPW), CmpResult)
11178         .addReg(SPReg)
11179         .addReg(FinalStackPtr);
11180     BuildMI(TestMBB, DL, TII->get(PPC::BCC))
11181         .addImm(PPC::PRED_EQ)
11182         .addReg(CmpResult)
11183         .addMBB(TailMBB);
11184     TestMBB->addSuccessor(BlockMBB);
11185     TestMBB->addSuccessor(TailMBB);
11186   }
11187 
11188   {
11189     // Touch the block.
11190     // |P...|P...|P...
11191     BuildMI(BlockMBB, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
11192         .addReg(FramePointer)
11193         .addReg(SPReg)
11194         .addReg(ScratchReg);
11195     BuildMI(BlockMBB, DL, TII->get(PPC::B)).addMBB(TestMBB);
11196     BlockMBB->addSuccessor(TestMBB);
11197   }
11198 
11199   // Calculation of MaxCallFrameSize is deferred to prologepilog, use
11200   // DYNAREAOFFSET pseudo instruction to get the future result.
11201   Register MaxCallFrameSizeReg =
11202       MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11203   BuildMI(TailMBB, DL,
11204           TII->get(isPPC64 ? PPC::DYNAREAOFFSET8 : PPC::DYNAREAOFFSET),
11205           MaxCallFrameSizeReg)
11206       .add(MI.getOperand(2))
11207       .add(MI.getOperand(3));
11208   BuildMI(TailMBB, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4), DstReg)
11209       .addReg(SPReg)
11210       .addReg(MaxCallFrameSizeReg);
11211 
11212   // Splice instructions after MI to TailMBB.
11213   TailMBB->splice(TailMBB->end(), MBB,
11214                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
11215   TailMBB->transferSuccessorsAndUpdatePHIs(MBB);
11216   MBB->addSuccessor(TestMBB);
11217 
11218   // Delete the pseudo instruction.
11219   MI.eraseFromParent();
11220 
11221   ++NumDynamicAllocaProbed;
11222   return TailMBB;
11223 }
11224 
11225 MachineBasicBlock *
11226 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
11227                                                MachineBasicBlock *BB) const {
11228   if (MI.getOpcode() == TargetOpcode::STACKMAP ||
11229       MI.getOpcode() == TargetOpcode::PATCHPOINT) {
11230     if (Subtarget.is64BitELFABI() &&
11231         MI.getOpcode() == TargetOpcode::PATCHPOINT &&
11232         !Subtarget.isUsingPCRelativeCalls()) {
11233       // Call lowering should have added an r2 operand to indicate a dependence
11234       // on the TOC base pointer value. It can't however, because there is no
11235       // way to mark the dependence as implicit there, and so the stackmap code
11236       // will confuse it with a regular operand. Instead, add the dependence
11237       // here.
11238       MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
11239     }
11240 
11241     return emitPatchPoint(MI, BB);
11242   }
11243 
11244   if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
11245       MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
11246     return emitEHSjLjSetJmp(MI, BB);
11247   } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
11248              MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
11249     return emitEHSjLjLongJmp(MI, BB);
11250   }
11251 
11252   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11253 
11254   // To "insert" these instructions we actually have to insert their
11255   // control-flow patterns.
11256   const BasicBlock *LLVM_BB = BB->getBasicBlock();
11257   MachineFunction::iterator It = ++BB->getIterator();
11258 
11259   MachineFunction *F = BB->getParent();
11260 
11261   if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
11262       MI.getOpcode() == PPC::SELECT_CC_I8 || MI.getOpcode() == PPC::SELECT_I4 ||
11263       MI.getOpcode() == PPC::SELECT_I8) {
11264     SmallVector<MachineOperand, 2> Cond;
11265     if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
11266         MI.getOpcode() == PPC::SELECT_CC_I8)
11267       Cond.push_back(MI.getOperand(4));
11268     else
11269       Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
11270     Cond.push_back(MI.getOperand(1));
11271 
11272     DebugLoc dl = MI.getDebugLoc();
11273     TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
11274                       MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
11275   } else if (MI.getOpcode() == PPC::SELECT_CC_F4 ||
11276              MI.getOpcode() == PPC::SELECT_CC_F8 ||
11277              MI.getOpcode() == PPC::SELECT_CC_F16 ||
11278              MI.getOpcode() == PPC::SELECT_CC_VRRC ||
11279              MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
11280              MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
11281              MI.getOpcode() == PPC::SELECT_CC_VSRC ||
11282              MI.getOpcode() == PPC::SELECT_CC_SPE4 ||
11283              MI.getOpcode() == PPC::SELECT_CC_SPE ||
11284              MI.getOpcode() == PPC::SELECT_F4 ||
11285              MI.getOpcode() == PPC::SELECT_F8 ||
11286              MI.getOpcode() == PPC::SELECT_F16 ||
11287              MI.getOpcode() == PPC::SELECT_SPE ||
11288              MI.getOpcode() == PPC::SELECT_SPE4 ||
11289              MI.getOpcode() == PPC::SELECT_VRRC ||
11290              MI.getOpcode() == PPC::SELECT_VSFRC ||
11291              MI.getOpcode() == PPC::SELECT_VSSRC ||
11292              MI.getOpcode() == PPC::SELECT_VSRC) {
11293     // The incoming instruction knows the destination vreg to set, the
11294     // condition code register to branch on, the true/false values to
11295     // select between, and a branch opcode to use.
11296 
11297     //  thisMBB:
11298     //  ...
11299     //   TrueVal = ...
11300     //   cmpTY ccX, r1, r2
11301     //   bCC copy1MBB
11302     //   fallthrough --> copy0MBB
11303     MachineBasicBlock *thisMBB = BB;
11304     MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
11305     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
11306     DebugLoc dl = MI.getDebugLoc();
11307     F->insert(It, copy0MBB);
11308     F->insert(It, sinkMBB);
11309 
11310     // Transfer the remainder of BB and its successor edges to sinkMBB.
11311     sinkMBB->splice(sinkMBB->begin(), BB,
11312                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11313     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
11314 
11315     // Next, add the true and fallthrough blocks as its successors.
11316     BB->addSuccessor(copy0MBB);
11317     BB->addSuccessor(sinkMBB);
11318 
11319     if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 ||
11320         MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 ||
11321         MI.getOpcode() == PPC::SELECT_F16 ||
11322         MI.getOpcode() == PPC::SELECT_SPE4 ||
11323         MI.getOpcode() == PPC::SELECT_SPE ||
11324         MI.getOpcode() == PPC::SELECT_VRRC ||
11325         MI.getOpcode() == PPC::SELECT_VSFRC ||
11326         MI.getOpcode() == PPC::SELECT_VSSRC ||
11327         MI.getOpcode() == PPC::SELECT_VSRC) {
11328       BuildMI(BB, dl, TII->get(PPC::BC))
11329           .addReg(MI.getOperand(1).getReg())
11330           .addMBB(sinkMBB);
11331     } else {
11332       unsigned SelectPred = MI.getOperand(4).getImm();
11333       BuildMI(BB, dl, TII->get(PPC::BCC))
11334           .addImm(SelectPred)
11335           .addReg(MI.getOperand(1).getReg())
11336           .addMBB(sinkMBB);
11337     }
11338 
11339     //  copy0MBB:
11340     //   %FalseValue = ...
11341     //   # fallthrough to sinkMBB
11342     BB = copy0MBB;
11343 
11344     // Update machine-CFG edges
11345     BB->addSuccessor(sinkMBB);
11346 
11347     //  sinkMBB:
11348     //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
11349     //  ...
11350     BB = sinkMBB;
11351     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg())
11352         .addReg(MI.getOperand(3).getReg())
11353         .addMBB(copy0MBB)
11354         .addReg(MI.getOperand(2).getReg())
11355         .addMBB(thisMBB);
11356   } else if (MI.getOpcode() == PPC::ReadTB) {
11357     // To read the 64-bit time-base register on a 32-bit target, we read the
11358     // two halves. Should the counter have wrapped while it was being read, we
11359     // need to try again.
11360     // ...
11361     // readLoop:
11362     // mfspr Rx,TBU # load from TBU
11363     // mfspr Ry,TB  # load from TB
11364     // mfspr Rz,TBU # load from TBU
11365     // cmpw crX,Rx,Rz # check if 'old'='new'
11366     // bne readLoop   # branch if they're not equal
11367     // ...
11368 
11369     MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
11370     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
11371     DebugLoc dl = MI.getDebugLoc();
11372     F->insert(It, readMBB);
11373     F->insert(It, sinkMBB);
11374 
11375     // Transfer the remainder of BB and its successor edges to sinkMBB.
11376     sinkMBB->splice(sinkMBB->begin(), BB,
11377                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11378     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
11379 
11380     BB->addSuccessor(readMBB);
11381     BB = readMBB;
11382 
11383     MachineRegisterInfo &RegInfo = F->getRegInfo();
11384     Register ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
11385     Register LoReg = MI.getOperand(0).getReg();
11386     Register HiReg = MI.getOperand(1).getReg();
11387 
11388     BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
11389     BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
11390     BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269);
11391 
11392     Register CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
11393 
11394     BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
11395         .addReg(HiReg)
11396         .addReg(ReadAgainReg);
11397     BuildMI(BB, dl, TII->get(PPC::BCC))
11398         .addImm(PPC::PRED_NE)
11399         .addReg(CmpReg)
11400         .addMBB(readMBB);
11401 
11402     BB->addSuccessor(readMBB);
11403     BB->addSuccessor(sinkMBB);
11404   } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
11405     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
11406   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
11407     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
11408   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
11409     BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4);
11410   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
11411     BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8);
11412 
11413   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
11414     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
11415   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
11416     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
11417   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
11418     BB = EmitAtomicBinary(MI, BB, 4, PPC::AND);
11419   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
11420     BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8);
11421 
11422   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
11423     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
11424   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
11425     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
11426   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
11427     BB = EmitAtomicBinary(MI, BB, 4, PPC::OR);
11428   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
11429     BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8);
11430 
11431   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
11432     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
11433   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
11434     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
11435   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
11436     BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR);
11437   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
11438     BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8);
11439 
11440   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
11441     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
11442   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
11443     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
11444   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
11445     BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND);
11446   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
11447     BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8);
11448 
11449   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
11450     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
11451   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
11452     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
11453   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
11454     BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
11455   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
11456     BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);
11457 
11458   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
11459     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE);
11460   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
11461     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE);
11462   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
11463     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE);
11464   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
11465     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE);
11466 
11467   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
11468     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE);
11469   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
11470     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE);
11471   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
11472     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE);
11473   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
11474     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE);
11475 
11476   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
11477     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE);
11478   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
11479     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE);
11480   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
11481     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE);
11482   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
11483     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE);
11484 
11485   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
11486     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE);
11487   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
11488     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE);
11489   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
11490     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE);
11491   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
11492     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE);
11493 
11494   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
11495     BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
11496   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
11497     BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
11498   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
11499     BB = EmitAtomicBinary(MI, BB, 4, 0);
11500   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
11501     BB = EmitAtomicBinary(MI, BB, 8, 0);
11502   else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
11503            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
11504            (Subtarget.hasPartwordAtomics() &&
11505             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
11506            (Subtarget.hasPartwordAtomics() &&
11507             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
11508     bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
11509 
11510     auto LoadMnemonic = PPC::LDARX;
11511     auto StoreMnemonic = PPC::STDCX;
11512     switch (MI.getOpcode()) {
11513     default:
11514       llvm_unreachable("Compare and swap of unknown size");
11515     case PPC::ATOMIC_CMP_SWAP_I8:
11516       LoadMnemonic = PPC::LBARX;
11517       StoreMnemonic = PPC::STBCX;
11518       assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
11519       break;
11520     case PPC::ATOMIC_CMP_SWAP_I16:
11521       LoadMnemonic = PPC::LHARX;
11522       StoreMnemonic = PPC::STHCX;
11523       assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
11524       break;
11525     case PPC::ATOMIC_CMP_SWAP_I32:
11526       LoadMnemonic = PPC::LWARX;
11527       StoreMnemonic = PPC::STWCX;
11528       break;
11529     case PPC::ATOMIC_CMP_SWAP_I64:
11530       LoadMnemonic = PPC::LDARX;
11531       StoreMnemonic = PPC::STDCX;
11532       break;
11533     }
11534     Register dest = MI.getOperand(0).getReg();
11535     Register ptrA = MI.getOperand(1).getReg();
11536     Register ptrB = MI.getOperand(2).getReg();
11537     Register oldval = MI.getOperand(3).getReg();
11538     Register newval = MI.getOperand(4).getReg();
11539     DebugLoc dl = MI.getDebugLoc();
11540 
11541     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
11542     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
11543     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
11544     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11545     F->insert(It, loop1MBB);
11546     F->insert(It, loop2MBB);
11547     F->insert(It, midMBB);
11548     F->insert(It, exitMBB);
11549     exitMBB->splice(exitMBB->begin(), BB,
11550                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11551     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11552 
11553     //  thisMBB:
11554     //   ...
11555     //   fallthrough --> loopMBB
11556     BB->addSuccessor(loop1MBB);
11557 
11558     // loop1MBB:
11559     //   l[bhwd]arx dest, ptr
11560     //   cmp[wd] dest, oldval
11561     //   bne- midMBB
11562     // loop2MBB:
11563     //   st[bhwd]cx. newval, ptr
11564     //   bne- loopMBB
11565     //   b exitBB
11566     // midMBB:
11567     //   st[bhwd]cx. dest, ptr
11568     // exitBB:
11569     BB = loop1MBB;
11570     BuildMI(BB, dl, TII->get(LoadMnemonic), dest).addReg(ptrA).addReg(ptrB);
11571     BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
11572         .addReg(oldval)
11573         .addReg(dest);
11574     BuildMI(BB, dl, TII->get(PPC::BCC))
11575         .addImm(PPC::PRED_NE)
11576         .addReg(PPC::CR0)
11577         .addMBB(midMBB);
11578     BB->addSuccessor(loop2MBB);
11579     BB->addSuccessor(midMBB);
11580 
11581     BB = loop2MBB;
11582     BuildMI(BB, dl, TII->get(StoreMnemonic))
11583         .addReg(newval)
11584         .addReg(ptrA)
11585         .addReg(ptrB);
11586     BuildMI(BB, dl, TII->get(PPC::BCC))
11587         .addImm(PPC::PRED_NE)
11588         .addReg(PPC::CR0)
11589         .addMBB(loop1MBB);
11590     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
11591     BB->addSuccessor(loop1MBB);
11592     BB->addSuccessor(exitMBB);
11593 
11594     BB = midMBB;
11595     BuildMI(BB, dl, TII->get(StoreMnemonic))
11596         .addReg(dest)
11597         .addReg(ptrA)
11598         .addReg(ptrB);
11599     BB->addSuccessor(exitMBB);
11600 
11601     //  exitMBB:
11602     //   ...
11603     BB = exitMBB;
11604   } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
11605              MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
11606     // We must use 64-bit registers for addresses when targeting 64-bit,
11607     // since we're actually doing arithmetic on them.  Other registers
11608     // can be 32-bit.
11609     bool is64bit = Subtarget.isPPC64();
11610     bool isLittleEndian = Subtarget.isLittleEndian();
11611     bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
11612 
11613     Register dest = MI.getOperand(0).getReg();
11614     Register ptrA = MI.getOperand(1).getReg();
11615     Register ptrB = MI.getOperand(2).getReg();
11616     Register oldval = MI.getOperand(3).getReg();
11617     Register newval = MI.getOperand(4).getReg();
11618     DebugLoc dl = MI.getDebugLoc();
11619 
11620     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
11621     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
11622     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
11623     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11624     F->insert(It, loop1MBB);
11625     F->insert(It, loop2MBB);
11626     F->insert(It, midMBB);
11627     F->insert(It, exitMBB);
11628     exitMBB->splice(exitMBB->begin(), BB,
11629                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11630     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11631 
11632     MachineRegisterInfo &RegInfo = F->getRegInfo();
11633     const TargetRegisterClass *RC =
11634         is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11635     const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
11636 
11637     Register PtrReg = RegInfo.createVirtualRegister(RC);
11638     Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
11639     Register ShiftReg =
11640         isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
11641     Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
11642     Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
11643     Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
11644     Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
11645     Register MaskReg = RegInfo.createVirtualRegister(GPRC);
11646     Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
11647     Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
11648     Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
11649     Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
11650     Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
11651     Register Ptr1Reg;
11652     Register TmpReg = RegInfo.createVirtualRegister(GPRC);
11653     Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
11654     //  thisMBB:
11655     //   ...
11656     //   fallthrough --> loopMBB
11657     BB->addSuccessor(loop1MBB);
11658 
11659     // The 4-byte load must be aligned, while a char or short may be
11660     // anywhere in the word.  Hence all this nasty bookkeeping code.
11661     //   add ptr1, ptrA, ptrB [copy if ptrA==0]
11662     //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
11663     //   xori shift, shift1, 24 [16]
11664     //   rlwinm ptr, ptr1, 0, 0, 29
11665     //   slw newval2, newval, shift
11666     //   slw oldval2, oldval,shift
11667     //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
11668     //   slw mask, mask2, shift
11669     //   and newval3, newval2, mask
11670     //   and oldval3, oldval2, mask
11671     // loop1MBB:
11672     //   lwarx tmpDest, ptr
11673     //   and tmp, tmpDest, mask
11674     //   cmpw tmp, oldval3
11675     //   bne- midMBB
11676     // loop2MBB:
11677     //   andc tmp2, tmpDest, mask
11678     //   or tmp4, tmp2, newval3
11679     //   stwcx. tmp4, ptr
11680     //   bne- loop1MBB
11681     //   b exitBB
11682     // midMBB:
11683     //   stwcx. tmpDest, ptr
11684     // exitBB:
11685     //   srw dest, tmpDest, shift
11686     if (ptrA != ZeroReg) {
11687       Ptr1Reg = RegInfo.createVirtualRegister(RC);
11688       BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
11689           .addReg(ptrA)
11690           .addReg(ptrB);
11691     } else {
11692       Ptr1Reg = ptrB;
11693     }
11694 
11695     // We need use 32-bit subregister to avoid mismatch register class in 64-bit
11696     // mode.
11697     BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
11698         .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
11699         .addImm(3)
11700         .addImm(27)
11701         .addImm(is8bit ? 28 : 27);
11702     if (!isLittleEndian)
11703       BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
11704           .addReg(Shift1Reg)
11705           .addImm(is8bit ? 24 : 16);
11706     if (is64bit)
11707       BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
11708           .addReg(Ptr1Reg)
11709           .addImm(0)
11710           .addImm(61);
11711     else
11712       BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
11713           .addReg(Ptr1Reg)
11714           .addImm(0)
11715           .addImm(0)
11716           .addImm(29);
11717     BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
11718         .addReg(newval)
11719         .addReg(ShiftReg);
11720     BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
11721         .addReg(oldval)
11722         .addReg(ShiftReg);
11723     if (is8bit)
11724       BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
11725     else {
11726       BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
11727       BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
11728           .addReg(Mask3Reg)
11729           .addImm(65535);
11730     }
11731     BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
11732         .addReg(Mask2Reg)
11733         .addReg(ShiftReg);
11734     BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
11735         .addReg(NewVal2Reg)
11736         .addReg(MaskReg);
11737     BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
11738         .addReg(OldVal2Reg)
11739         .addReg(MaskReg);
11740 
11741     BB = loop1MBB;
11742     BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
11743         .addReg(ZeroReg)
11744         .addReg(PtrReg);
11745     BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
11746         .addReg(TmpDestReg)
11747         .addReg(MaskReg);
11748     BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
11749         .addReg(TmpReg)
11750         .addReg(OldVal3Reg);
11751     BuildMI(BB, dl, TII->get(PPC::BCC))
11752         .addImm(PPC::PRED_NE)
11753         .addReg(PPC::CR0)
11754         .addMBB(midMBB);
11755     BB->addSuccessor(loop2MBB);
11756     BB->addSuccessor(midMBB);
11757 
11758     BB = loop2MBB;
11759     BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
11760         .addReg(TmpDestReg)
11761         .addReg(MaskReg);
11762     BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
11763         .addReg(Tmp2Reg)
11764         .addReg(NewVal3Reg);
11765     BuildMI(BB, dl, TII->get(PPC::STWCX))
11766         .addReg(Tmp4Reg)
11767         .addReg(ZeroReg)
11768         .addReg(PtrReg);
11769     BuildMI(BB, dl, TII->get(PPC::BCC))
11770         .addImm(PPC::PRED_NE)
11771         .addReg(PPC::CR0)
11772         .addMBB(loop1MBB);
11773     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
11774     BB->addSuccessor(loop1MBB);
11775     BB->addSuccessor(exitMBB);
11776 
11777     BB = midMBB;
11778     BuildMI(BB, dl, TII->get(PPC::STWCX))
11779         .addReg(TmpDestReg)
11780         .addReg(ZeroReg)
11781         .addReg(PtrReg);
11782     BB->addSuccessor(exitMBB);
11783 
11784     //  exitMBB:
11785     //   ...
11786     BB = exitMBB;
11787     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
11788         .addReg(TmpReg)
11789         .addReg(ShiftReg);
11790   } else if (MI.getOpcode() == PPC::FADDrtz) {
11791     // This pseudo performs an FADD with rounding mode temporarily forced
11792     // to round-to-zero.  We emit this via custom inserter since the FPSCR
11793     // is not modeled at the SelectionDAG level.
11794     Register Dest = MI.getOperand(0).getReg();
11795     Register Src1 = MI.getOperand(1).getReg();
11796     Register Src2 = MI.getOperand(2).getReg();
11797     DebugLoc dl = MI.getDebugLoc();
11798 
11799     MachineRegisterInfo &RegInfo = F->getRegInfo();
11800     Register MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
11801 
11802     // Save FPSCR value.
11803     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);
11804 
11805     // Set rounding mode to round-to-zero.
11806     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1))
11807         .addImm(31)
11808         .addReg(PPC::RM, RegState::ImplicitDefine);
11809 
11810     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0))
11811         .addImm(30)
11812         .addReg(PPC::RM, RegState::ImplicitDefine);
11813 
11814     // Perform addition.
11815     auto MIB = BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest)
11816                    .addReg(Src1)
11817                    .addReg(Src2);
11818     if (MI.getFlag(MachineInstr::NoFPExcept))
11819       MIB.setMIFlag(MachineInstr::NoFPExcept);
11820 
11821     // Restore FPSCR value.
11822     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
11823   } else if (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
11824              MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT ||
11825              MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
11826              MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) {
11827     unsigned Opcode = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
11828                        MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8)
11829                           ? PPC::ANDI8_rec
11830                           : PPC::ANDI_rec;
11831     bool IsEQ = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
11832                  MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8);
11833 
11834     MachineRegisterInfo &RegInfo = F->getRegInfo();
11835     Register Dest = RegInfo.createVirtualRegister(
11836         Opcode == PPC::ANDI_rec ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);
11837 
11838     DebugLoc Dl = MI.getDebugLoc();
11839     BuildMI(*BB, MI, Dl, TII->get(Opcode), Dest)
11840         .addReg(MI.getOperand(1).getReg())
11841         .addImm(1);
11842     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
11843             MI.getOperand(0).getReg())
11844         .addReg(IsEQ ? PPC::CR0EQ : PPC::CR0GT);
11845   } else if (MI.getOpcode() == PPC::TCHECK_RET) {
11846     DebugLoc Dl = MI.getDebugLoc();
11847     MachineRegisterInfo &RegInfo = F->getRegInfo();
11848     Register CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
11849     BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
11850     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
11851             MI.getOperand(0).getReg())
11852         .addReg(CRReg);
11853   } else if (MI.getOpcode() == PPC::TBEGIN_RET) {
11854     DebugLoc Dl = MI.getDebugLoc();
11855     unsigned Imm = MI.getOperand(1).getImm();
11856     BuildMI(*BB, MI, Dl, TII->get(PPC::TBEGIN)).addImm(Imm);
11857     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
11858             MI.getOperand(0).getReg())
11859         .addReg(PPC::CR0EQ);
11860   } else if (MI.getOpcode() == PPC::SETRNDi) {
11861     DebugLoc dl = MI.getDebugLoc();
11862     Register OldFPSCRReg = MI.getOperand(0).getReg();
11863 
11864     // Save FPSCR value.
11865     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
11866 
11867     // The floating point rounding mode is in the bits 62:63 of FPCSR, and has
11868     // the following settings:
11869     //   00 Round to nearest
11870     //   01 Round to 0
11871     //   10 Round to +inf
11872     //   11 Round to -inf
11873 
11874     // When the operand is immediate, using the two least significant bits of
11875     // the immediate to set the bits 62:63 of FPSCR.
11876     unsigned Mode = MI.getOperand(1).getImm();
11877     BuildMI(*BB, MI, dl, TII->get((Mode & 1) ? PPC::MTFSB1 : PPC::MTFSB0))
11878         .addImm(31)
11879         .addReg(PPC::RM, RegState::ImplicitDefine);
11880 
11881     BuildMI(*BB, MI, dl, TII->get((Mode & 2) ? PPC::MTFSB1 : PPC::MTFSB0))
11882         .addImm(30)
11883         .addReg(PPC::RM, RegState::ImplicitDefine);
11884   } else if (MI.getOpcode() == PPC::SETRND) {
11885     DebugLoc dl = MI.getDebugLoc();
11886 
11887     // Copy register from F8RCRegClass::SrcReg to G8RCRegClass::DestReg
11888     // or copy register from G8RCRegClass::SrcReg to F8RCRegClass::DestReg.
11889     // If the target doesn't have DirectMove, we should use stack to do the
11890     // conversion, because the target doesn't have the instructions like mtvsrd
11891     // or mfvsrd to do this conversion directly.
11892     auto copyRegFromG8RCOrF8RC = [&] (unsigned DestReg, unsigned SrcReg) {
11893       if (Subtarget.hasDirectMove()) {
11894         BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), DestReg)
11895           .addReg(SrcReg);
11896       } else {
11897         // Use stack to do the register copy.
11898         unsigned StoreOp = PPC::STD, LoadOp = PPC::LFD;
11899         MachineRegisterInfo &RegInfo = F->getRegInfo();
11900         const TargetRegisterClass *RC = RegInfo.getRegClass(SrcReg);
11901         if (RC == &PPC::F8RCRegClass) {
11902           // Copy register from F8RCRegClass to G8RCRegclass.
11903           assert((RegInfo.getRegClass(DestReg) == &PPC::G8RCRegClass) &&
11904                  "Unsupported RegClass.");
11905 
11906           StoreOp = PPC::STFD;
11907           LoadOp = PPC::LD;
11908         } else {
11909           // Copy register from G8RCRegClass to F8RCRegclass.
11910           assert((RegInfo.getRegClass(SrcReg) == &PPC::G8RCRegClass) &&
11911                  (RegInfo.getRegClass(DestReg) == &PPC::F8RCRegClass) &&
11912                  "Unsupported RegClass.");
11913         }
11914 
11915         MachineFrameInfo &MFI = F->getFrameInfo();
11916         int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
11917 
11918         MachineMemOperand *MMOStore = F->getMachineMemOperand(
11919             MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
11920             MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx),
11921             MFI.getObjectAlign(FrameIdx));
11922 
11923         // Store the SrcReg into the stack.
11924         BuildMI(*BB, MI, dl, TII->get(StoreOp))
11925           .addReg(SrcReg)
11926           .addImm(0)
11927           .addFrameIndex(FrameIdx)
11928           .addMemOperand(MMOStore);
11929 
11930         MachineMemOperand *MMOLoad = F->getMachineMemOperand(
11931             MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
11932             MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
11933             MFI.getObjectAlign(FrameIdx));
11934 
11935         // Load from the stack where SrcReg is stored, and save to DestReg,
11936         // so we have done the RegClass conversion from RegClass::SrcReg to
11937         // RegClass::DestReg.
11938         BuildMI(*BB, MI, dl, TII->get(LoadOp), DestReg)
11939           .addImm(0)
11940           .addFrameIndex(FrameIdx)
11941           .addMemOperand(MMOLoad);
11942       }
11943     };
11944 
11945     Register OldFPSCRReg = MI.getOperand(0).getReg();
11946 
11947     // Save FPSCR value.
11948     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
11949 
11950     // When the operand is gprc register, use two least significant bits of the
11951     // register and mtfsf instruction to set the bits 62:63 of FPSCR.
11952     //
11953     // copy OldFPSCRTmpReg, OldFPSCRReg
11954     // (INSERT_SUBREG ExtSrcReg, (IMPLICIT_DEF ImDefReg), SrcOp, 1)
11955     // rldimi NewFPSCRTmpReg, ExtSrcReg, OldFPSCRReg, 0, 62
11956     // copy NewFPSCRReg, NewFPSCRTmpReg
11957     // mtfsf 255, NewFPSCRReg
11958     MachineOperand SrcOp = MI.getOperand(1);
11959     MachineRegisterInfo &RegInfo = F->getRegInfo();
11960     Register OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
11961 
11962     copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg);
11963 
11964     Register ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
11965     Register ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
11966 
11967     // The first operand of INSERT_SUBREG should be a register which has
11968     // subregisters, we only care about its RegClass, so we should use an
11969     // IMPLICIT_DEF register.
11970     BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), ImDefReg);
11971     BuildMI(*BB, MI, dl, TII->get(PPC::INSERT_SUBREG), ExtSrcReg)
11972       .addReg(ImDefReg)
11973       .add(SrcOp)
11974       .addImm(1);
11975 
11976     Register NewFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
11977     BuildMI(*BB, MI, dl, TII->get(PPC::RLDIMI), NewFPSCRTmpReg)
11978       .addReg(OldFPSCRTmpReg)
11979       .addReg(ExtSrcReg)
11980       .addImm(0)
11981       .addImm(62);
11982 
11983     Register NewFPSCRReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
11984     copyRegFromG8RCOrF8RC(NewFPSCRReg, NewFPSCRTmpReg);
11985 
11986     // The mask 255 means that put the 32:63 bits of NewFPSCRReg to the 32:63
11987     // bits of FPSCR.
11988     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF))
11989       .addImm(255)
11990       .addReg(NewFPSCRReg)
11991       .addImm(0)
11992       .addImm(0);
11993   } else if (MI.getOpcode() == PPC::SETFLM) {
11994     DebugLoc Dl = MI.getDebugLoc();
11995 
11996     // Result of setflm is previous FPSCR content, so we need to save it first.
11997     Register OldFPSCRReg = MI.getOperand(0).getReg();
11998     BuildMI(*BB, MI, Dl, TII->get(PPC::MFFS), OldFPSCRReg);
11999 
12000     // Put bits in 32:63 to FPSCR.
12001     Register NewFPSCRReg = MI.getOperand(1).getReg();
12002     BuildMI(*BB, MI, Dl, TII->get(PPC::MTFSF))
12003         .addImm(255)
12004         .addReg(NewFPSCRReg)
12005         .addImm(0)
12006         .addImm(0);
12007   } else if (MI.getOpcode() == PPC::PROBED_ALLOCA_32 ||
12008              MI.getOpcode() == PPC::PROBED_ALLOCA_64) {
12009     return emitProbedAlloca(MI, BB);
12010   } else {
12011     llvm_unreachable("Unexpected instr type to insert");
12012   }
12013 
12014   MI.eraseFromParent(); // The pseudo instruction is gone now.
12015   return BB;
12016 }
12017 
12018 //===----------------------------------------------------------------------===//
12019 // Target Optimization Hooks
12020 //===----------------------------------------------------------------------===//
12021 
12022 static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {
12023   // For the estimates, convergence is quadratic, so we essentially double the
12024   // number of digits correct after every iteration. For both FRE and FRSQRTE,
12025   // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(),
12026   // this is 2^-14. IEEE float has 23 digits and double has 52 digits.
12027   int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
12028   if (VT.getScalarType() == MVT::f64)
12029     RefinementSteps++;
12030   return RefinementSteps;
12031 }
12032 
12033 SDValue PPCTargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG,
12034                                             const DenormalMode &Mode) const {
12035   // We only have VSX Vector Test for software Square Root.
12036   EVT VT = Op.getValueType();
12037   if (!isTypeLegal(MVT::i1) ||
12038       (VT != MVT::f64 &&
12039        ((VT != MVT::v2f64 && VT != MVT::v4f32) || !Subtarget.hasVSX())))
12040     return SDValue();
12041 
12042   SDLoc DL(Op);
12043   // The output register of FTSQRT is CR field.
12044   SDValue FTSQRT = DAG.getNode(PPCISD::FTSQRT, DL, MVT::i32, Op);
12045   // ftsqrt BF,FRB
12046   // Let e_b be the unbiased exponent of the double-precision
12047   // floating-point operand in register FRB.
12048   // fe_flag is set to 1 if either of the following conditions occurs.
12049   //   - The double-precision floating-point operand in register FRB is a zero,
12050   //     a NaN, or an infinity, or a negative value.
12051   //   - e_b is less than or equal to -970.
12052   // Otherwise fe_flag is set to 0.
12053   // Both VSX and non-VSX versions would set EQ bit in the CR if the number is
12054   // not eligible for iteration. (zero/negative/infinity/nan or unbiased
12055   // exponent is less than -970)
12056   SDValue SRIdxVal = DAG.getTargetConstant(PPC::sub_eq, DL, MVT::i32);
12057   return SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, MVT::i1,
12058                                     FTSQRT, SRIdxVal),
12059                  0);
12060 }
12061 
12062 SDValue
12063 PPCTargetLowering::getSqrtResultForDenormInput(SDValue Op,
12064                                                SelectionDAG &DAG) const {
12065   // We only have VSX Vector Square Root.
12066   EVT VT = Op.getValueType();
12067   if (VT != MVT::f64 &&
12068       ((VT != MVT::v2f64 && VT != MVT::v4f32) || !Subtarget.hasVSX()))
12069     return TargetLowering::getSqrtResultForDenormInput(Op, DAG);
12070 
12071   return DAG.getNode(PPCISD::FSQRT, SDLoc(Op), VT, Op);
12072 }
12073 
12074 SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
12075                                            int Enabled, int &RefinementSteps,
12076                                            bool &UseOneConstNR,
12077                                            bool Reciprocal) const {
12078   EVT VT = Operand.getValueType();
12079   if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
12080       (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
12081       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
12082       (VT == MVT::v2f64 && Subtarget.hasVSX())) {
12083     if (RefinementSteps == ReciprocalEstimate::Unspecified)
12084       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
12085 
12086     // The Newton-Raphson computation with a single constant does not provide
12087     // enough accuracy on some CPUs.
12088     UseOneConstNR = !Subtarget.needsTwoConstNR();
12089     return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
12090   }
12091   return SDValue();
12092 }
12093 
12094 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
12095                                             int Enabled,
12096                                             int &RefinementSteps) const {
12097   EVT VT = Operand.getValueType();
12098   if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
12099       (VT == MVT::f64 && Subtarget.hasFRE()) ||
12100       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
12101       (VT == MVT::v2f64 && Subtarget.hasVSX())) {
12102     if (RefinementSteps == ReciprocalEstimate::Unspecified)
12103       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
12104     return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
12105   }
12106   return SDValue();
12107 }
12108 
12109 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
12110   // Note: This functionality is used only when unsafe-fp-math is enabled, and
12111   // on cores with reciprocal estimates (which are used when unsafe-fp-math is
12112   // enabled for division), this functionality is redundant with the default
12113   // combiner logic (once the division -> reciprocal/multiply transformation
12114   // has taken place). As a result, this matters more for older cores than for
12115   // newer ones.
12116 
12117   // Combine multiple FDIVs with the same divisor into multiple FMULs by the
12118   // reciprocal if there are two or more FDIVs (for embedded cores with only
12119   // one FP pipeline) for three or more FDIVs (for generic OOO cores).
12120   switch (Subtarget.getCPUDirective()) {
12121   default:
12122     return 3;
12123   case PPC::DIR_440:
12124   case PPC::DIR_A2:
12125   case PPC::DIR_E500:
12126   case PPC::DIR_E500mc:
12127   case PPC::DIR_E5500:
12128     return 2;
12129   }
12130 }
12131 
12132 // isConsecutiveLSLoc needs to work even if all adds have not yet been
12133 // collapsed, and so we need to look through chains of them.
12134 static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
12135                                      int64_t& Offset, SelectionDAG &DAG) {
12136   if (DAG.isBaseWithConstantOffset(Loc)) {
12137     Base = Loc.getOperand(0);
12138     Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
12139 
12140     // The base might itself be a base plus an offset, and if so, accumulate
12141     // that as well.
12142     getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG);
12143   }
12144 }
12145 
12146 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
12147                             unsigned Bytes, int Dist,
12148                             SelectionDAG &DAG) {
12149   if (VT.getSizeInBits() / 8 != Bytes)
12150     return false;
12151 
12152   SDValue BaseLoc = Base->getBasePtr();
12153   if (Loc.getOpcode() == ISD::FrameIndex) {
12154     if (BaseLoc.getOpcode() != ISD::FrameIndex)
12155       return false;
12156     const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
12157     int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
12158     int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
12159     int FS  = MFI.getObjectSize(FI);
12160     int BFS = MFI.getObjectSize(BFI);
12161     if (FS != BFS || FS != (int)Bytes) return false;
12162     return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
12163   }
12164 
12165   SDValue Base1 = Loc, Base2 = BaseLoc;
12166   int64_t Offset1 = 0, Offset2 = 0;
12167   getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);
12168   getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);
12169   if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
12170     return true;
12171 
12172   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12173   const GlobalValue *GV1 = nullptr;
12174   const GlobalValue *GV2 = nullptr;
12175   Offset1 = 0;
12176   Offset2 = 0;
12177   bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
12178   bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
12179   if (isGA1 && isGA2 && GV1 == GV2)
12180     return Offset1 == (Offset2 + Dist*Bytes);
12181   return false;
12182 }
12183 
12184 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
12185 // not enforce equality of the chain operands.
12186 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
12187                             unsigned Bytes, int Dist,
12188                             SelectionDAG &DAG) {
12189   if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
12190     EVT VT = LS->getMemoryVT();
12191     SDValue Loc = LS->getBasePtr();
12192     return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
12193   }
12194 
12195   if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
12196     EVT VT;
12197     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12198     default: return false;
12199     case Intrinsic::ppc_altivec_lvx:
12200     case Intrinsic::ppc_altivec_lvxl:
12201     case Intrinsic::ppc_vsx_lxvw4x:
12202     case Intrinsic::ppc_vsx_lxvw4x_be:
12203       VT = MVT::v4i32;
12204       break;
12205     case Intrinsic::ppc_vsx_lxvd2x:
12206     case Intrinsic::ppc_vsx_lxvd2x_be:
12207       VT = MVT::v2f64;
12208       break;
12209     case Intrinsic::ppc_altivec_lvebx:
12210       VT = MVT::i8;
12211       break;
12212     case Intrinsic::ppc_altivec_lvehx:
12213       VT = MVT::i16;
12214       break;
12215     case Intrinsic::ppc_altivec_lvewx:
12216       VT = MVT::i32;
12217       break;
12218     }
12219 
12220     return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
12221   }
12222 
12223   if (N->getOpcode() == ISD::INTRINSIC_VOID) {
12224     EVT VT;
12225     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12226     default: return false;
12227     case Intrinsic::ppc_altivec_stvx:
12228     case Intrinsic::ppc_altivec_stvxl:
12229     case Intrinsic::ppc_vsx_stxvw4x:
12230       VT = MVT::v4i32;
12231       break;
12232     case Intrinsic::ppc_vsx_stxvd2x:
12233       VT = MVT::v2f64;
12234       break;
12235     case Intrinsic::ppc_vsx_stxvw4x_be:
12236       VT = MVT::v4i32;
12237       break;
12238     case Intrinsic::ppc_vsx_stxvd2x_be:
12239       VT = MVT::v2f64;
12240       break;
12241     case Intrinsic::ppc_altivec_stvebx:
12242       VT = MVT::i8;
12243       break;
12244     case Intrinsic::ppc_altivec_stvehx:
12245       VT = MVT::i16;
12246       break;
12247     case Intrinsic::ppc_altivec_stvewx:
12248       VT = MVT::i32;
12249       break;
12250     }
12251 
12252     return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
12253   }
12254 
12255   return false;
12256 }
12257 
12258 // Return true is there is a nearyby consecutive load to the one provided
12259 // (regardless of alignment). We search up and down the chain, looking though
12260 // token factors and other loads (but nothing else). As a result, a true result
12261 // indicates that it is safe to create a new consecutive load adjacent to the
12262 // load provided.
12263 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
12264   SDValue Chain = LD->getChain();
12265   EVT VT = LD->getMemoryVT();
12266 
12267   SmallSet<SDNode *, 16> LoadRoots;
12268   SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
12269   SmallSet<SDNode *, 16> Visited;
12270 
12271   // First, search up the chain, branching to follow all token-factor operands.
12272   // If we find a consecutive load, then we're done, otherwise, record all
12273   // nodes just above the top-level loads and token factors.
12274   while (!Queue.empty()) {
12275     SDNode *ChainNext = Queue.pop_back_val();
12276     if (!Visited.insert(ChainNext).second)
12277       continue;
12278 
12279     if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
12280       if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
12281         return true;
12282 
12283       if (!Visited.count(ChainLD->getChain().getNode()))
12284         Queue.push_back(ChainLD->getChain().getNode());
12285     } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
12286       for (const SDUse &O : ChainNext->ops())
12287         if (!Visited.count(O.getNode()))
12288           Queue.push_back(O.getNode());
12289     } else
12290       LoadRoots.insert(ChainNext);
12291   }
12292 
12293   // Second, search down the chain, starting from the top-level nodes recorded
12294   // in the first phase. These top-level nodes are the nodes just above all
12295   // loads and token factors. Starting with their uses, recursively look though
12296   // all loads (just the chain uses) and token factors to find a consecutive
12297   // load.
12298   Visited.clear();
12299   Queue.clear();
12300 
12301   for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
12302        IE = LoadRoots.end(); I != IE; ++I) {
12303     Queue.push_back(*I);
12304 
12305     while (!Queue.empty()) {
12306       SDNode *LoadRoot = Queue.pop_back_val();
12307       if (!Visited.insert(LoadRoot).second)
12308         continue;
12309 
12310       if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
12311         if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
12312           return true;
12313 
12314       for (SDNode::use_iterator UI = LoadRoot->use_begin(),
12315            UE = LoadRoot->use_end(); UI != UE; ++UI)
12316         if (((isa<MemSDNode>(*UI) &&
12317             cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
12318             UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
12319           Queue.push_back(*UI);
12320     }
12321   }
12322 
12323   return false;
12324 }
12325 
12326 /// This function is called when we have proved that a SETCC node can be replaced
12327 /// by subtraction (and other supporting instructions) so that the result of
12328 /// comparison is kept in a GPR instead of CR. This function is purely for
12329 /// codegen purposes and has some flags to guide the codegen process.
12330 static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement,
12331                                      bool Swap, SDLoc &DL, SelectionDAG &DAG) {
12332   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
12333 
12334   // Zero extend the operands to the largest legal integer. Originally, they
12335   // must be of a strictly smaller size.
12336   auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0),
12337                          DAG.getConstant(Size, DL, MVT::i32));
12338   auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1),
12339                          DAG.getConstant(Size, DL, MVT::i32));
12340 
12341   // Swap if needed. Depends on the condition code.
12342   if (Swap)
12343     std::swap(Op0, Op1);
12344 
12345   // Subtract extended integers.
12346   auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1);
12347 
12348   // Move the sign bit to the least significant position and zero out the rest.
12349   // Now the least significant bit carries the result of original comparison.
12350   auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode,
12351                              DAG.getConstant(Size - 1, DL, MVT::i32));
12352   auto Final = Shifted;
12353 
12354   // Complement the result if needed. Based on the condition code.
12355   if (Complement)
12356     Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted,
12357                         DAG.getConstant(1, DL, MVT::i64));
12358 
12359   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final);
12360 }
12361 
12362 SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
12363                                                   DAGCombinerInfo &DCI) const {
12364   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
12365 
12366   SelectionDAG &DAG = DCI.DAG;
12367   SDLoc DL(N);
12368 
12369   // Size of integers being compared has a critical role in the following
12370   // analysis, so we prefer to do this when all types are legal.
12371   if (!DCI.isAfterLegalizeDAG())
12372     return SDValue();
12373 
12374   // If all users of SETCC extend its value to a legal integer type
12375   // then we replace SETCC with a subtraction
12376   for (SDNode::use_iterator UI = N->use_begin(),
12377        UE = N->use_end(); UI != UE; ++UI) {
12378     if (UI->getOpcode() != ISD::ZERO_EXTEND)
12379       return SDValue();
12380   }
12381 
12382   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
12383   auto OpSize = N->getOperand(0).getValueSizeInBits();
12384 
12385   unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();
12386 
12387   if (OpSize < Size) {
12388     switch (CC) {
12389     default: break;
12390     case ISD::SETULT:
12391       return generateEquivalentSub(N, Size, false, false, DL, DAG);
12392     case ISD::SETULE:
12393       return generateEquivalentSub(N, Size, true, true, DL, DAG);
12394     case ISD::SETUGT:
12395       return generateEquivalentSub(N, Size, false, true, DL, DAG);
12396     case ISD::SETUGE:
12397       return generateEquivalentSub(N, Size, true, false, DL, DAG);
12398     }
12399   }
12400 
12401   return SDValue();
12402 }
12403 
12404 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
12405                                                   DAGCombinerInfo &DCI) const {
12406   SelectionDAG &DAG = DCI.DAG;
12407   SDLoc dl(N);
12408 
12409   assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
12410   // If we're tracking CR bits, we need to be careful that we don't have:
12411   //   trunc(binary-ops(zext(x), zext(y)))
12412   // or
12413   //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
12414   // such that we're unnecessarily moving things into GPRs when it would be
12415   // better to keep them in CR bits.
12416 
12417   // Note that trunc here can be an actual i1 trunc, or can be the effective
12418   // truncation that comes from a setcc or select_cc.
12419   if (N->getOpcode() == ISD::TRUNCATE &&
12420       N->getValueType(0) != MVT::i1)
12421     return SDValue();
12422 
12423   if (N->getOperand(0).getValueType() != MVT::i32 &&
12424       N->getOperand(0).getValueType() != MVT::i64)
12425     return SDValue();
12426 
12427   if (N->getOpcode() == ISD::SETCC ||
12428       N->getOpcode() == ISD::SELECT_CC) {
12429     // If we're looking at a comparison, then we need to make sure that the
12430     // high bits (all except for the first) don't matter the result.
12431     ISD::CondCode CC =
12432       cast<CondCodeSDNode>(N->getOperand(
12433         N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
12434     unsigned OpBits = N->getOperand(0).getValueSizeInBits();
12435 
12436     if (ISD::isSignedIntSetCC(CC)) {
12437       if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
12438           DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
12439         return SDValue();
12440     } else if (ISD::isUnsignedIntSetCC(CC)) {
12441       if (!DAG.MaskedValueIsZero(N->getOperand(0),
12442                                  APInt::getHighBitsSet(OpBits, OpBits-1)) ||
12443           !DAG.MaskedValueIsZero(N->getOperand(1),
12444                                  APInt::getHighBitsSet(OpBits, OpBits-1)))
12445         return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
12446                                              : SDValue());
12447     } else {
12448       // This is neither a signed nor an unsigned comparison, just make sure
12449       // that the high bits are equal.
12450       KnownBits Op1Known = DAG.computeKnownBits(N->getOperand(0));
12451       KnownBits Op2Known = DAG.computeKnownBits(N->getOperand(1));
12452 
12453       // We don't really care about what is known about the first bit (if
12454       // anything), so pretend that it is known zero for both to ensure they can
12455       // be compared as constants.
12456       Op1Known.Zero.setBit(0); Op1Known.One.clearBit(0);
12457       Op2Known.Zero.setBit(0); Op2Known.One.clearBit(0);
12458 
12459       if (!Op1Known.isConstant() || !Op2Known.isConstant() ||
12460           Op1Known.getConstant() != Op2Known.getConstant())
12461         return SDValue();
12462     }
12463   }
12464 
12465   // We now know that the higher-order bits are irrelevant, we just need to
12466   // make sure that all of the intermediate operations are bit operations, and
12467   // all inputs are extensions.
12468   if (N->getOperand(0).getOpcode() != ISD::AND &&
12469       N->getOperand(0).getOpcode() != ISD::OR  &&
12470       N->getOperand(0).getOpcode() != ISD::XOR &&
12471       N->getOperand(0).getOpcode() != ISD::SELECT &&
12472       N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
12473       N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
12474       N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
12475       N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
12476       N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
12477     return SDValue();
12478 
12479   if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
12480       N->getOperand(1).getOpcode() != ISD::AND &&
12481       N->getOperand(1).getOpcode() != ISD::OR  &&
12482       N->getOperand(1).getOpcode() != ISD::XOR &&
12483       N->getOperand(1).getOpcode() != ISD::SELECT &&
12484       N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
12485       N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
12486       N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
12487       N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
12488       N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
12489     return SDValue();
12490 
12491   SmallVector<SDValue, 4> Inputs;
12492   SmallVector<SDValue, 8> BinOps, PromOps;
12493   SmallPtrSet<SDNode *, 16> Visited;
12494 
12495   for (unsigned i = 0; i < 2; ++i) {
12496     if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
12497           N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
12498           N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
12499           N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
12500         isa<ConstantSDNode>(N->getOperand(i)))
12501       Inputs.push_back(N->getOperand(i));
12502     else
12503       BinOps.push_back(N->getOperand(i));
12504 
12505     if (N->getOpcode() == ISD::TRUNCATE)
12506       break;
12507   }
12508 
12509   // Visit all inputs, collect all binary operations (and, or, xor and
12510   // select) that are all fed by extensions.
12511   while (!BinOps.empty()) {
12512     SDValue BinOp = BinOps.back();
12513     BinOps.pop_back();
12514 
12515     if (!Visited.insert(BinOp.getNode()).second)
12516       continue;
12517 
12518     PromOps.push_back(BinOp);
12519 
12520     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
12521       // The condition of the select is not promoted.
12522       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
12523         continue;
12524       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
12525         continue;
12526 
12527       if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
12528             BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
12529             BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
12530            BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
12531           isa<ConstantSDNode>(BinOp.getOperand(i))) {
12532         Inputs.push_back(BinOp.getOperand(i));
12533       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
12534                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
12535                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
12536                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
12537                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
12538                  BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
12539                  BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
12540                  BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
12541                  BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
12542         BinOps.push_back(BinOp.getOperand(i));
12543       } else {
12544         // We have an input that is not an extension or another binary
12545         // operation; we'll abort this transformation.
12546         return SDValue();
12547       }
12548     }
12549   }
12550 
12551   // Make sure that this is a self-contained cluster of operations (which
12552   // is not quite the same thing as saying that everything has only one
12553   // use).
12554   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12555     if (isa<ConstantSDNode>(Inputs[i]))
12556       continue;
12557 
12558     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
12559                               UE = Inputs[i].getNode()->use_end();
12560          UI != UE; ++UI) {
12561       SDNode *User = *UI;
12562       if (User != N && !Visited.count(User))
12563         return SDValue();
12564 
12565       // Make sure that we're not going to promote the non-output-value
12566       // operand(s) or SELECT or SELECT_CC.
12567       // FIXME: Although we could sometimes handle this, and it does occur in
12568       // practice that one of the condition inputs to the select is also one of
12569       // the outputs, we currently can't deal with this.
12570       if (User->getOpcode() == ISD::SELECT) {
12571         if (User->getOperand(0) == Inputs[i])
12572           return SDValue();
12573       } else if (User->getOpcode() == ISD::SELECT_CC) {
12574         if (User->getOperand(0) == Inputs[i] ||
12575             User->getOperand(1) == Inputs[i])
12576           return SDValue();
12577       }
12578     }
12579   }
12580 
12581   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
12582     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
12583                               UE = PromOps[i].getNode()->use_end();
12584          UI != UE; ++UI) {
12585       SDNode *User = *UI;
12586       if (User != N && !Visited.count(User))
12587         return SDValue();
12588 
12589       // Make sure that we're not going to promote the non-output-value
12590       // operand(s) or SELECT or SELECT_CC.
12591       // FIXME: Although we could sometimes handle this, and it does occur in
12592       // practice that one of the condition inputs to the select is also one of
12593       // the outputs, we currently can't deal with this.
12594       if (User->getOpcode() == ISD::SELECT) {
12595         if (User->getOperand(0) == PromOps[i])
12596           return SDValue();
12597       } else if (User->getOpcode() == ISD::SELECT_CC) {
12598         if (User->getOperand(0) == PromOps[i] ||
12599             User->getOperand(1) == PromOps[i])
12600           return SDValue();
12601       }
12602     }
12603   }
12604 
12605   // Replace all inputs with the extension operand.
12606   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12607     // Constants may have users outside the cluster of to-be-promoted nodes,
12608     // and so we need to replace those as we do the promotions.
12609     if (isa<ConstantSDNode>(Inputs[i]))
12610       continue;
12611     else
12612       DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
12613   }
12614 
12615   std::list<HandleSDNode> PromOpHandles;
12616   for (auto &PromOp : PromOps)
12617     PromOpHandles.emplace_back(PromOp);
12618 
12619   // Replace all operations (these are all the same, but have a different
12620   // (i1) return type). DAG.getNode will validate that the types of
12621   // a binary operator match, so go through the list in reverse so that
12622   // we've likely promoted both operands first. Any intermediate truncations or
12623   // extensions disappear.
12624   while (!PromOpHandles.empty()) {
12625     SDValue PromOp = PromOpHandles.back().getValue();
12626     PromOpHandles.pop_back();
12627 
12628     if (PromOp.getOpcode() == ISD::TRUNCATE ||
12629         PromOp.getOpcode() == ISD::SIGN_EXTEND ||
12630         PromOp.getOpcode() == ISD::ZERO_EXTEND ||
12631         PromOp.getOpcode() == ISD::ANY_EXTEND) {
12632       if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
12633           PromOp.getOperand(0).getValueType() != MVT::i1) {
12634         // The operand is not yet ready (see comment below).
12635         PromOpHandles.emplace_front(PromOp);
12636         continue;
12637       }
12638 
12639       SDValue RepValue = PromOp.getOperand(0);
12640       if (isa<ConstantSDNode>(RepValue))
12641         RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);
12642 
12643       DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
12644       continue;
12645     }
12646 
12647     unsigned C;
12648     switch (PromOp.getOpcode()) {
12649     default:             C = 0; break;
12650     case ISD::SELECT:    C = 1; break;
12651     case ISD::SELECT_CC: C = 2; break;
12652     }
12653 
12654     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
12655          PromOp.getOperand(C).getValueType() != MVT::i1) ||
12656         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
12657          PromOp.getOperand(C+1).getValueType() != MVT::i1)) {
12658       // The to-be-promoted operands of this node have not yet been
12659       // promoted (this should be rare because we're going through the
12660       // list backward, but if one of the operands has several users in
12661       // this cluster of to-be-promoted nodes, it is possible).
12662       PromOpHandles.emplace_front(PromOp);
12663       continue;
12664     }
12665 
12666     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
12667                                 PromOp.getNode()->op_end());
12668 
12669     // If there are any constant inputs, make sure they're replaced now.
12670     for (unsigned i = 0; i < 2; ++i)
12671       if (isa<ConstantSDNode>(Ops[C+i]))
12672         Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]);
12673 
12674     DAG.ReplaceAllUsesOfValueWith(PromOp,
12675       DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
12676   }
12677 
12678   // Now we're left with the initial truncation itself.
12679   if (N->getOpcode() == ISD::TRUNCATE)
12680     return N->getOperand(0);
12681 
12682   // Otherwise, this is a comparison. The operands to be compared have just
12683   // changed type (to i1), but everything else is the same.
12684   return SDValue(N, 0);
12685 }
12686 
12687 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
12688                                                   DAGCombinerInfo &DCI) const {
12689   SelectionDAG &DAG = DCI.DAG;
12690   SDLoc dl(N);
12691 
12692   // If we're tracking CR bits, we need to be careful that we don't have:
12693   //   zext(binary-ops(trunc(x), trunc(y)))
12694   // or
12695   //   zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
12696   // such that we're unnecessarily moving things into CR bits that can more
12697   // efficiently stay in GPRs. Note that if we're not certain that the high
12698   // bits are set as required by the final extension, we still may need to do
12699   // some masking to get the proper behavior.
12700 
12701   // This same functionality is important on PPC64 when dealing with
12702   // 32-to-64-bit extensions; these occur often when 32-bit values are used as
12703   // the return values of functions. Because it is so similar, it is handled
12704   // here as well.
12705 
12706   if (N->getValueType(0) != MVT::i32 &&
12707       N->getValueType(0) != MVT::i64)
12708     return SDValue();
12709 
12710   if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
12711         (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
12712     return SDValue();
12713 
12714   if (N->getOperand(0).getOpcode() != ISD::AND &&
12715       N->getOperand(0).getOpcode() != ISD::OR  &&
12716       N->getOperand(0).getOpcode() != ISD::XOR &&
12717       N->getOperand(0).getOpcode() != ISD::SELECT &&
12718       N->getOperand(0).getOpcode() != ISD::SELECT_CC)
12719     return SDValue();
12720 
12721   SmallVector<SDValue, 4> Inputs;
12722   SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
12723   SmallPtrSet<SDNode *, 16> Visited;
12724 
12725   // Visit all inputs, collect all binary operations (and, or, xor and
12726   // select) that are all fed by truncations.
12727   while (!BinOps.empty()) {
12728     SDValue BinOp = BinOps.back();
12729     BinOps.pop_back();
12730 
12731     if (!Visited.insert(BinOp.getNode()).second)
12732       continue;
12733 
12734     PromOps.push_back(BinOp);
12735 
12736     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
12737       // The condition of the select is not promoted.
12738       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
12739         continue;
12740       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
12741         continue;
12742 
12743       if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
12744           isa<ConstantSDNode>(BinOp.getOperand(i))) {
12745         Inputs.push_back(BinOp.getOperand(i));
12746       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
12747                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
12748                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
12749                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
12750                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
12751         BinOps.push_back(BinOp.getOperand(i));
12752       } else {
12753         // We have an input that is not a truncation or another binary
12754         // operation; we'll abort this transformation.
12755         return SDValue();
12756       }
12757     }
12758   }
12759 
12760   // The operands of a select that must be truncated when the select is
12761   // promoted because the operand is actually part of the to-be-promoted set.
12762   DenseMap<SDNode *, EVT> SelectTruncOp[2];
12763 
12764   // Make sure that this is a self-contained cluster of operations (which
12765   // is not quite the same thing as saying that everything has only one
12766   // use).
12767   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12768     if (isa<ConstantSDNode>(Inputs[i]))
12769       continue;
12770 
12771     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
12772                               UE = Inputs[i].getNode()->use_end();
12773          UI != UE; ++UI) {
12774       SDNode *User = *UI;
12775       if (User != N && !Visited.count(User))
12776         return SDValue();
12777 
12778       // If we're going to promote the non-output-value operand(s) or SELECT or
12779       // SELECT_CC, record them for truncation.
12780       if (User->getOpcode() == ISD::SELECT) {
12781         if (User->getOperand(0) == Inputs[i])
12782           SelectTruncOp[0].insert(std::make_pair(User,
12783                                     User->getOperand(0).getValueType()));
12784       } else if (User->getOpcode() == ISD::SELECT_CC) {
12785         if (User->getOperand(0) == Inputs[i])
12786           SelectTruncOp[0].insert(std::make_pair(User,
12787                                     User->getOperand(0).getValueType()));
12788         if (User->getOperand(1) == Inputs[i])
12789           SelectTruncOp[1].insert(std::make_pair(User,
12790                                     User->getOperand(1).getValueType()));
12791       }
12792     }
12793   }
12794 
12795   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
12796     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
12797                               UE = PromOps[i].getNode()->use_end();
12798          UI != UE; ++UI) {
12799       SDNode *User = *UI;
12800       if (User != N && !Visited.count(User))
12801         return SDValue();
12802 
12803       // If we're going to promote the non-output-value operand(s) or SELECT or
12804       // SELECT_CC, record them for truncation.
12805       if (User->getOpcode() == ISD::SELECT) {
12806         if (User->getOperand(0) == PromOps[i])
12807           SelectTruncOp[0].insert(std::make_pair(User,
12808                                     User->getOperand(0).getValueType()));
12809       } else if (User->getOpcode() == ISD::SELECT_CC) {
12810         if (User->getOperand(0) == PromOps[i])
12811           SelectTruncOp[0].insert(std::make_pair(User,
12812                                     User->getOperand(0).getValueType()));
12813         if (User->getOperand(1) == PromOps[i])
12814           SelectTruncOp[1].insert(std::make_pair(User,
12815                                     User->getOperand(1).getValueType()));
12816       }
12817     }
12818   }
12819 
12820   unsigned PromBits = N->getOperand(0).getValueSizeInBits();
12821   bool ReallyNeedsExt = false;
12822   if (N->getOpcode() != ISD::ANY_EXTEND) {
12823     // If all of the inputs are not already sign/zero extended, then
12824     // we'll still need to do that at the end.
12825     for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12826       if (isa<ConstantSDNode>(Inputs[i]))
12827         continue;
12828 
12829       unsigned OpBits =
12830         Inputs[i].getOperand(0).getValueSizeInBits();
12831       assert(PromBits < OpBits && "Truncation not to a smaller bit count?");
12832 
12833       if ((N->getOpcode() == ISD::ZERO_EXTEND &&
12834            !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
12835                                   APInt::getHighBitsSet(OpBits,
12836                                                         OpBits-PromBits))) ||
12837           (N->getOpcode() == ISD::SIGN_EXTEND &&
12838            DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
12839              (OpBits-(PromBits-1)))) {
12840         ReallyNeedsExt = true;
12841         break;
12842       }
12843     }
12844   }
12845 
12846   // Replace all inputs, either with the truncation operand, or a
12847   // truncation or extension to the final output type.
12848   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12849     // Constant inputs need to be replaced with the to-be-promoted nodes that
12850     // use them because they might have users outside of the cluster of
12851     // promoted nodes.
12852     if (isa<ConstantSDNode>(Inputs[i]))
12853       continue;
12854 
12855     SDValue InSrc = Inputs[i].getOperand(0);
12856     if (Inputs[i].getValueType() == N->getValueType(0))
12857       DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
12858     else if (N->getOpcode() == ISD::SIGN_EXTEND)
12859       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
12860         DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
12861     else if (N->getOpcode() == ISD::ZERO_EXTEND)
12862       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
12863         DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
12864     else
12865       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
12866         DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
12867   }
12868 
12869   std::list<HandleSDNode> PromOpHandles;
12870   for (auto &PromOp : PromOps)
12871     PromOpHandles.emplace_back(PromOp);
12872 
12873   // Replace all operations (these are all the same, but have a different
12874   // (promoted) return type). DAG.getNode will validate that the types of
12875   // a binary operator match, so go through the list in reverse so that
12876   // we've likely promoted both operands first.
12877   while (!PromOpHandles.empty()) {
12878     SDValue PromOp = PromOpHandles.back().getValue();
12879     PromOpHandles.pop_back();
12880 
12881     unsigned C;
12882     switch (PromOp.getOpcode()) {
12883     default:             C = 0; break;
12884     case ISD::SELECT:    C = 1; break;
12885     case ISD::SELECT_CC: C = 2; break;
12886     }
12887 
12888     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
12889          PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
12890         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
12891          PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) {
12892       // The to-be-promoted operands of this node have not yet been
12893       // promoted (this should be rare because we're going through the
12894       // list backward, but if one of the operands has several users in
12895       // this cluster of to-be-promoted nodes, it is possible).
12896       PromOpHandles.emplace_front(PromOp);
12897       continue;
12898     }
12899 
12900     // For SELECT and SELECT_CC nodes, we do a similar check for any
12901     // to-be-promoted comparison inputs.
12902     if (PromOp.getOpcode() == ISD::SELECT ||
12903         PromOp.getOpcode() == ISD::SELECT_CC) {
12904       if ((SelectTruncOp[0].count(PromOp.getNode()) &&
12905            PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
12906           (SelectTruncOp[1].count(PromOp.getNode()) &&
12907            PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
12908         PromOpHandles.emplace_front(PromOp);
12909         continue;
12910       }
12911     }
12912 
12913     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
12914                                 PromOp.getNode()->op_end());
12915 
12916     // If this node has constant inputs, then they'll need to be promoted here.
12917     for (unsigned i = 0; i < 2; ++i) {
12918       if (!isa<ConstantSDNode>(Ops[C+i]))
12919         continue;
12920       if (Ops[C+i].getValueType() == N->getValueType(0))
12921         continue;
12922 
12923       if (N->getOpcode() == ISD::SIGN_EXTEND)
12924         Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
12925       else if (N->getOpcode() == ISD::ZERO_EXTEND)
12926         Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
12927       else
12928         Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
12929     }
12930 
12931     // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
12932     // truncate them again to the original value type.
12933     if (PromOp.getOpcode() == ISD::SELECT ||
12934         PromOp.getOpcode() == ISD::SELECT_CC) {
12935       auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
12936       if (SI0 != SelectTruncOp[0].end())
12937         Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
12938       auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
12939       if (SI1 != SelectTruncOp[1].end())
12940         Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
12941     }
12942 
12943     DAG.ReplaceAllUsesOfValueWith(PromOp,
12944       DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
12945   }
12946 
12947   // Now we're left with the initial extension itself.
12948   if (!ReallyNeedsExt)
12949     return N->getOperand(0);
12950 
12951   // To zero extend, just mask off everything except for the first bit (in the
12952   // i1 case).
12953   if (N->getOpcode() == ISD::ZERO_EXTEND)
12954     return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
12955                        DAG.getConstant(APInt::getLowBitsSet(
12956                                          N->getValueSizeInBits(0), PromBits),
12957                                        dl, N->getValueType(0)));
12958 
12959   assert(N->getOpcode() == ISD::SIGN_EXTEND &&
12960          "Invalid extension type");
12961   EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
12962   SDValue ShiftCst =
12963       DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
12964   return DAG.getNode(
12965       ISD::SRA, dl, N->getValueType(0),
12966       DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
12967       ShiftCst);
12968 }
12969 
12970 SDValue PPCTargetLowering::combineSetCC(SDNode *N,
12971                                         DAGCombinerInfo &DCI) const {
12972   assert(N->getOpcode() == ISD::SETCC &&
12973          "Should be called with a SETCC node");
12974 
12975   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
12976   if (CC == ISD::SETNE || CC == ISD::SETEQ) {
12977     SDValue LHS = N->getOperand(0);
12978     SDValue RHS = N->getOperand(1);
12979 
12980     // If there is a '0 - y' pattern, canonicalize the pattern to the RHS.
12981     if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
12982         LHS.hasOneUse())
12983       std::swap(LHS, RHS);
12984 
12985     // x == 0-y --> x+y == 0
12986     // x != 0-y --> x+y != 0
12987     if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
12988         RHS.hasOneUse()) {
12989       SDLoc DL(N);
12990       SelectionDAG &DAG = DCI.DAG;
12991       EVT VT = N->getValueType(0);
12992       EVT OpVT = LHS.getValueType();
12993       SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
12994       return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
12995     }
12996   }
12997 
12998   return DAGCombineTruncBoolExt(N, DCI);
12999 }
13000 
13001 // Is this an extending load from an f32 to an f64?
13002 static bool isFPExtLoad(SDValue Op) {
13003   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()))
13004     return LD->getExtensionType() == ISD::EXTLOAD &&
13005       Op.getValueType() == MVT::f64;
13006   return false;
13007 }
13008 
13009 /// Reduces the number of fp-to-int conversion when building a vector.
13010 ///
13011 /// If this vector is built out of floating to integer conversions,
13012 /// transform it to a vector built out of floating point values followed by a
13013 /// single floating to integer conversion of the vector.
13014 /// Namely  (build_vector (fptosi $A), (fptosi $B), ...)
13015 /// becomes (fptosi (build_vector ($A, $B, ...)))
13016 SDValue PPCTargetLowering::
13017 combineElementTruncationToVectorTruncation(SDNode *N,
13018                                            DAGCombinerInfo &DCI) const {
13019   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13020          "Should be called with a BUILD_VECTOR node");
13021 
13022   SelectionDAG &DAG = DCI.DAG;
13023   SDLoc dl(N);
13024 
13025   SDValue FirstInput = N->getOperand(0);
13026   assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
13027          "The input operand must be an fp-to-int conversion.");
13028 
13029   // This combine happens after legalization so the fp_to_[su]i nodes are
13030   // already converted to PPCSISD nodes.
13031   unsigned FirstConversion = FirstInput.getOperand(0).getOpcode();
13032   if (FirstConversion == PPCISD::FCTIDZ ||
13033       FirstConversion == PPCISD::FCTIDUZ ||
13034       FirstConversion == PPCISD::FCTIWZ ||
13035       FirstConversion == PPCISD::FCTIWUZ) {
13036     bool IsSplat = true;
13037     bool Is32Bit = FirstConversion == PPCISD::FCTIWZ ||
13038       FirstConversion == PPCISD::FCTIWUZ;
13039     EVT SrcVT = FirstInput.getOperand(0).getValueType();
13040     SmallVector<SDValue, 4> Ops;
13041     EVT TargetVT = N->getValueType(0);
13042     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
13043       SDValue NextOp = N->getOperand(i);
13044       if (NextOp.getOpcode() != PPCISD::MFVSR)
13045         return SDValue();
13046       unsigned NextConversion = NextOp.getOperand(0).getOpcode();
13047       if (NextConversion != FirstConversion)
13048         return SDValue();
13049       // If we are converting to 32-bit integers, we need to add an FP_ROUND.
13050       // This is not valid if the input was originally double precision. It is
13051       // also not profitable to do unless this is an extending load in which
13052       // case doing this combine will allow us to combine consecutive loads.
13053       if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0)))
13054         return SDValue();
13055       if (N->getOperand(i) != FirstInput)
13056         IsSplat = false;
13057     }
13058 
13059     // If this is a splat, we leave it as-is since there will be only a single
13060     // fp-to-int conversion followed by a splat of the integer. This is better
13061     // for 32-bit and smaller ints and neutral for 64-bit ints.
13062     if (IsSplat)
13063       return SDValue();
13064 
13065     // Now that we know we have the right type of node, get its operands
13066     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
13067       SDValue In = N->getOperand(i).getOperand(0);
13068       if (Is32Bit) {
13069         // For 32-bit values, we need to add an FP_ROUND node (if we made it
13070         // here, we know that all inputs are extending loads so this is safe).
13071         if (In.isUndef())
13072           Ops.push_back(DAG.getUNDEF(SrcVT));
13073         else {
13074           SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl,
13075                                       MVT::f32, In.getOperand(0),
13076                                       DAG.getIntPtrConstant(1, dl));
13077           Ops.push_back(Trunc);
13078         }
13079       } else
13080         Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
13081     }
13082 
13083     unsigned Opcode;
13084     if (FirstConversion == PPCISD::FCTIDZ ||
13085         FirstConversion == PPCISD::FCTIWZ)
13086       Opcode = ISD::FP_TO_SINT;
13087     else
13088       Opcode = ISD::FP_TO_UINT;
13089 
13090     EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;
13091     SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
13092     return DAG.getNode(Opcode, dl, TargetVT, BV);
13093   }
13094   return SDValue();
13095 }
13096 
13097 /// Reduce the number of loads when building a vector.
13098 ///
13099 /// Building a vector out of multiple loads can be converted to a load
13100 /// of the vector type if the loads are consecutive. If the loads are
13101 /// consecutive but in descending order, a shuffle is added at the end
13102 /// to reorder the vector.
13103 static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) {
13104   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13105          "Should be called with a BUILD_VECTOR node");
13106 
13107   SDLoc dl(N);
13108 
13109   // Return early for non byte-sized type, as they can't be consecutive.
13110   if (!N->getValueType(0).getVectorElementType().isByteSized())
13111     return SDValue();
13112 
13113   bool InputsAreConsecutiveLoads = true;
13114   bool InputsAreReverseConsecutive = true;
13115   unsigned ElemSize = N->getValueType(0).getScalarType().getStoreSize();
13116   SDValue FirstInput = N->getOperand(0);
13117   bool IsRoundOfExtLoad = false;
13118 
13119   if (FirstInput.getOpcode() == ISD::FP_ROUND &&
13120       FirstInput.getOperand(0).getOpcode() == ISD::LOAD) {
13121     LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0));
13122     IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD;
13123   }
13124   // Not a build vector of (possibly fp_rounded) loads.
13125   if ((!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) ||
13126       N->getNumOperands() == 1)
13127     return SDValue();
13128 
13129   for (int i = 1, e = N->getNumOperands(); i < e; ++i) {
13130     // If any inputs are fp_round(extload), they all must be.
13131     if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND)
13132       return SDValue();
13133 
13134     SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) :
13135       N->getOperand(i);
13136     if (NextInput.getOpcode() != ISD::LOAD)
13137       return SDValue();
13138 
13139     SDValue PreviousInput =
13140       IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1);
13141     LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput);
13142     LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput);
13143 
13144     // If any inputs are fp_round(extload), they all must be.
13145     if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD)
13146       return SDValue();
13147 
13148     if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG))
13149       InputsAreConsecutiveLoads = false;
13150     if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG))
13151       InputsAreReverseConsecutive = false;
13152 
13153     // Exit early if the loads are neither consecutive nor reverse consecutive.
13154     if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
13155       return SDValue();
13156   }
13157 
13158   assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
13159          "The loads cannot be both consecutive and reverse consecutive.");
13160 
13161   SDValue FirstLoadOp =
13162     IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput;
13163   SDValue LastLoadOp =
13164     IsRoundOfExtLoad ? N->getOperand(N->getNumOperands()-1).getOperand(0) :
13165                        N->getOperand(N->getNumOperands()-1);
13166 
13167   LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
13168   LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
13169   if (InputsAreConsecutiveLoads) {
13170     assert(LD1 && "Input needs to be a LoadSDNode.");
13171     return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
13172                        LD1->getBasePtr(), LD1->getPointerInfo(),
13173                        LD1->getAlignment());
13174   }
13175   if (InputsAreReverseConsecutive) {
13176     assert(LDL && "Input needs to be a LoadSDNode.");
13177     SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
13178                                LDL->getBasePtr(), LDL->getPointerInfo(),
13179                                LDL->getAlignment());
13180     SmallVector<int, 16> Ops;
13181     for (int i = N->getNumOperands() - 1; i >= 0; i--)
13182       Ops.push_back(i);
13183 
13184     return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
13185                                 DAG.getUNDEF(N->getValueType(0)), Ops);
13186   }
13187   return SDValue();
13188 }
13189 
13190 // This function adds the required vector_shuffle needed to get
13191 // the elements of the vector extract in the correct position
13192 // as specified by the CorrectElems encoding.
13193 static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
13194                                       SDValue Input, uint64_t Elems,
13195                                       uint64_t CorrectElems) {
13196   SDLoc dl(N);
13197 
13198   unsigned NumElems = Input.getValueType().getVectorNumElements();
13199   SmallVector<int, 16> ShuffleMask(NumElems, -1);
13200 
13201   // Knowing the element indices being extracted from the original
13202   // vector and the order in which they're being inserted, just put
13203   // them at element indices required for the instruction.
13204   for (unsigned i = 0; i < N->getNumOperands(); i++) {
13205     if (DAG.getDataLayout().isLittleEndian())
13206       ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
13207     else
13208       ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
13209     CorrectElems = CorrectElems >> 8;
13210     Elems = Elems >> 8;
13211   }
13212 
13213   SDValue Shuffle =
13214       DAG.getVectorShuffle(Input.getValueType(), dl, Input,
13215                            DAG.getUNDEF(Input.getValueType()), ShuffleMask);
13216 
13217   EVT VT = N->getValueType(0);
13218   SDValue Conv = DAG.getBitcast(VT, Shuffle);
13219 
13220   EVT ExtVT = EVT::getVectorVT(*DAG.getContext(),
13221                                Input.getValueType().getVectorElementType(),
13222                                VT.getVectorNumElements());
13223   return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Conv,
13224                      DAG.getValueType(ExtVT));
13225 }
13226 
13227 // Look for build vector patterns where input operands come from sign
13228 // extended vector_extract elements of specific indices. If the correct indices
13229 // aren't used, add a vector shuffle to fix up the indices and create
13230 // SIGN_EXTEND_INREG node which selects the vector sign extend instructions
13231 // during instruction selection.
13232 static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
13233   // This array encodes the indices that the vector sign extend instructions
13234   // extract from when extending from one type to another for both BE and LE.
13235   // The right nibble of each byte corresponds to the LE incides.
13236   // and the left nibble of each byte corresponds to the BE incides.
13237   // For example: 0x3074B8FC  byte->word
13238   // For LE: the allowed indices are: 0x0,0x4,0x8,0xC
13239   // For BE: the allowed indices are: 0x3,0x7,0xB,0xF
13240   // For example: 0x000070F8  byte->double word
13241   // For LE: the allowed indices are: 0x0,0x8
13242   // For BE: the allowed indices are: 0x7,0xF
13243   uint64_t TargetElems[] = {
13244       0x3074B8FC, // b->w
13245       0x000070F8, // b->d
13246       0x10325476, // h->w
13247       0x00003074, // h->d
13248       0x00001032, // w->d
13249   };
13250 
13251   uint64_t Elems = 0;
13252   int Index;
13253   SDValue Input;
13254 
13255   auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
13256     if (!Op)
13257       return false;
13258     if (Op.getOpcode() != ISD::SIGN_EXTEND &&
13259         Op.getOpcode() != ISD::SIGN_EXTEND_INREG)
13260       return false;
13261 
13262     // A SIGN_EXTEND_INREG might be fed by an ANY_EXTEND to produce a value
13263     // of the right width.
13264     SDValue Extract = Op.getOperand(0);
13265     if (Extract.getOpcode() == ISD::ANY_EXTEND)
13266       Extract = Extract.getOperand(0);
13267     if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
13268       return false;
13269 
13270     ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
13271     if (!ExtOp)
13272       return false;
13273 
13274     Index = ExtOp->getZExtValue();
13275     if (Input && Input != Extract.getOperand(0))
13276       return false;
13277 
13278     if (!Input)
13279       Input = Extract.getOperand(0);
13280 
13281     Elems = Elems << 8;
13282     Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
13283     Elems |= Index;
13284 
13285     return true;
13286   };
13287 
13288   // If the build vector operands aren't sign extended vector extracts,
13289   // of the same input vector, then return.
13290   for (unsigned i = 0; i < N->getNumOperands(); i++) {
13291     if (!isSExtOfVecExtract(N->getOperand(i))) {
13292       return SDValue();
13293     }
13294   }
13295 
13296   // If the vector extract indicies are not correct, add the appropriate
13297   // vector_shuffle.
13298   int TgtElemArrayIdx;
13299   int InputSize = Input.getValueType().getScalarSizeInBits();
13300   int OutputSize = N->getValueType(0).getScalarSizeInBits();
13301   if (InputSize + OutputSize == 40)
13302     TgtElemArrayIdx = 0;
13303   else if (InputSize + OutputSize == 72)
13304     TgtElemArrayIdx = 1;
13305   else if (InputSize + OutputSize == 48)
13306     TgtElemArrayIdx = 2;
13307   else if (InputSize + OutputSize == 80)
13308     TgtElemArrayIdx = 3;
13309   else if (InputSize + OutputSize == 96)
13310     TgtElemArrayIdx = 4;
13311   else
13312     return SDValue();
13313 
13314   uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
13315   CorrectElems = DAG.getDataLayout().isLittleEndian()
13316                      ? CorrectElems & 0x0F0F0F0F0F0F0F0F
13317                      : CorrectElems & 0xF0F0F0F0F0F0F0F0;
13318   if (Elems != CorrectElems) {
13319     return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
13320   }
13321 
13322   // Regular lowering will catch cases where a shuffle is not needed.
13323   return SDValue();
13324 }
13325 
13326 // Look for the pattern of a load from a narrow width to i128, feeding
13327 // into a BUILD_VECTOR of v1i128. Replace this sequence with a PPCISD node
13328 // (LXVRZX). This node represents a zero extending load that will be matched
13329 // to the Load VSX Vector Rightmost instructions.
13330 static SDValue combineBVZEXTLOAD(SDNode *N, SelectionDAG &DAG) {
13331   SDLoc DL(N);
13332 
13333   // This combine is only eligible for a BUILD_VECTOR of v1i128.
13334   if (N->getValueType(0) != MVT::v1i128)
13335     return SDValue();
13336 
13337   SDValue Operand = N->getOperand(0);
13338   // Proceed with the transformation if the operand to the BUILD_VECTOR
13339   // is a load instruction.
13340   if (Operand.getOpcode() != ISD::LOAD)
13341     return SDValue();
13342 
13343   LoadSDNode *LD = dyn_cast<LoadSDNode>(Operand);
13344   EVT MemoryType = LD->getMemoryVT();
13345 
13346   // This transformation is only valid if the we are loading either a byte,
13347   // halfword, word, or doubleword.
13348   bool ValidLDType = MemoryType == MVT::i8 || MemoryType == MVT::i16 ||
13349                      MemoryType == MVT::i32 || MemoryType == MVT::i64;
13350 
13351   // Ensure that the load from the narrow width is being zero extended to i128.
13352   if (!ValidLDType ||
13353       (LD->getExtensionType() != ISD::ZEXTLOAD &&
13354        LD->getExtensionType() != ISD::EXTLOAD))
13355     return SDValue();
13356 
13357   SDValue LoadOps[] = {
13358       LD->getChain(), LD->getBasePtr(),
13359       DAG.getIntPtrConstant(MemoryType.getScalarSizeInBits(), DL)};
13360 
13361   return DAG.getMemIntrinsicNode(PPCISD::LXVRZX, DL,
13362                                  DAG.getVTList(MVT::v1i128, MVT::Other),
13363                                  LoadOps, MemoryType, LD->getMemOperand());
13364 }
13365 
13366 SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
13367                                                  DAGCombinerInfo &DCI) const {
13368   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13369          "Should be called with a BUILD_VECTOR node");
13370 
13371   SelectionDAG &DAG = DCI.DAG;
13372   SDLoc dl(N);
13373 
13374   if (!Subtarget.hasVSX())
13375     return SDValue();
13376 
13377   // The target independent DAG combiner will leave a build_vector of
13378   // float-to-int conversions intact. We can generate MUCH better code for
13379   // a float-to-int conversion of a vector of floats.
13380   SDValue FirstInput = N->getOperand(0);
13381   if (FirstInput.getOpcode() == PPCISD::MFVSR) {
13382     SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
13383     if (Reduced)
13384       return Reduced;
13385   }
13386 
13387   // If we're building a vector out of consecutive loads, just load that
13388   // vector type.
13389   SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG);
13390   if (Reduced)
13391     return Reduced;
13392 
13393   // If we're building a vector out of extended elements from another vector
13394   // we have P9 vector integer extend instructions. The code assumes legal
13395   // input types (i.e. it can't handle things like v4i16) so do not run before
13396   // legalization.
13397   if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) {
13398     Reduced = combineBVOfVecSExt(N, DAG);
13399     if (Reduced)
13400       return Reduced;
13401   }
13402 
13403   // On Power10, the Load VSX Vector Rightmost instructions can be utilized
13404   // if this is a BUILD_VECTOR of v1i128, and if the operand to the BUILD_VECTOR
13405   // is a load from <valid narrow width> to i128.
13406   if (Subtarget.isISA3_1()) {
13407     SDValue BVOfZLoad = combineBVZEXTLOAD(N, DAG);
13408     if (BVOfZLoad)
13409       return BVOfZLoad;
13410   }
13411 
13412   if (N->getValueType(0) != MVT::v2f64)
13413     return SDValue();
13414 
13415   // Looking for:
13416   // (build_vector ([su]int_to_fp (extractelt 0)), [su]int_to_fp (extractelt 1))
13417   if (FirstInput.getOpcode() != ISD::SINT_TO_FP &&
13418       FirstInput.getOpcode() != ISD::UINT_TO_FP)
13419     return SDValue();
13420   if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
13421       N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
13422     return SDValue();
13423   if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
13424     return SDValue();
13425 
13426   SDValue Ext1 = FirstInput.getOperand(0);
13427   SDValue Ext2 = N->getOperand(1).getOperand(0);
13428   if(Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
13429      Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
13430     return SDValue();
13431 
13432   ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
13433   ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
13434   if (!Ext1Op || !Ext2Op)
13435     return SDValue();
13436   if (Ext1.getOperand(0).getValueType() != MVT::v4i32 ||
13437       Ext1.getOperand(0) != Ext2.getOperand(0))
13438     return SDValue();
13439 
13440   int FirstElem = Ext1Op->getZExtValue();
13441   int SecondElem = Ext2Op->getZExtValue();
13442   int SubvecIdx;
13443   if (FirstElem == 0 && SecondElem == 1)
13444     SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
13445   else if (FirstElem == 2 && SecondElem == 3)
13446     SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
13447   else
13448     return SDValue();
13449 
13450   SDValue SrcVec = Ext1.getOperand(0);
13451   auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
13452     PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
13453   return DAG.getNode(NodeType, dl, MVT::v2f64,
13454                      SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
13455 }
13456 
13457 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
13458                                               DAGCombinerInfo &DCI) const {
13459   assert((N->getOpcode() == ISD::SINT_TO_FP ||
13460           N->getOpcode() == ISD::UINT_TO_FP) &&
13461          "Need an int -> FP conversion node here");
13462 
13463   if (useSoftFloat() || !Subtarget.has64BitSupport())
13464     return SDValue();
13465 
13466   SelectionDAG &DAG = DCI.DAG;
13467   SDLoc dl(N);
13468   SDValue Op(N, 0);
13469 
13470   // Don't handle ppc_fp128 here or conversions that are out-of-range capable
13471   // from the hardware.
13472   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
13473     return SDValue();
13474   if (!Op.getOperand(0).getValueType().isSimple())
13475     return SDValue();
13476   if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
13477       Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
13478     return SDValue();
13479 
13480   SDValue FirstOperand(Op.getOperand(0));
13481   bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
13482     (FirstOperand.getValueType() == MVT::i8 ||
13483      FirstOperand.getValueType() == MVT::i16);
13484   if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
13485     bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
13486     bool DstDouble = Op.getValueType() == MVT::f64;
13487     unsigned ConvOp = Signed ?
13488       (DstDouble ? PPCISD::FCFID  : PPCISD::FCFIDS) :
13489       (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
13490     SDValue WidthConst =
13491       DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
13492                             dl, false);
13493     LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
13494     SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
13495     SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
13496                                          DAG.getVTList(MVT::f64, MVT::Other),
13497                                          Ops, MVT::i8, LDN->getMemOperand());
13498 
13499     // For signed conversion, we need to sign-extend the value in the VSR
13500     if (Signed) {
13501       SDValue ExtOps[] = { Ld, WidthConst };
13502       SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
13503       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
13504     } else
13505       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
13506   }
13507 
13508 
13509   // For i32 intermediate values, unfortunately, the conversion functions
13510   // leave the upper 32 bits of the value are undefined. Within the set of
13511   // scalar instructions, we have no method for zero- or sign-extending the
13512   // value. Thus, we cannot handle i32 intermediate values here.
13513   if (Op.getOperand(0).getValueType() == MVT::i32)
13514     return SDValue();
13515 
13516   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
13517          "UINT_TO_FP is supported only with FPCVT");
13518 
13519   // If we have FCFIDS, then use it when converting to single-precision.
13520   // Otherwise, convert to double-precision and then round.
13521   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
13522                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
13523                                                             : PPCISD::FCFIDS)
13524                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
13525                                                             : PPCISD::FCFID);
13526   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
13527                   ? MVT::f32
13528                   : MVT::f64;
13529 
13530   // If we're converting from a float, to an int, and back to a float again,
13531   // then we don't need the store/load pair at all.
13532   if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
13533        Subtarget.hasFPCVT()) ||
13534       (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
13535     SDValue Src = Op.getOperand(0).getOperand(0);
13536     if (Src.getValueType() == MVT::f32) {
13537       Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
13538       DCI.AddToWorklist(Src.getNode());
13539     } else if (Src.getValueType() != MVT::f64) {
13540       // Make sure that we don't pick up a ppc_fp128 source value.
13541       return SDValue();
13542     }
13543 
13544     unsigned FCTOp =
13545       Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
13546                                                         PPCISD::FCTIDUZ;
13547 
13548     SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
13549     SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);
13550 
13551     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
13552       FP = DAG.getNode(ISD::FP_ROUND, dl,
13553                        MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
13554       DCI.AddToWorklist(FP.getNode());
13555     }
13556 
13557     return FP;
13558   }
13559 
13560   return SDValue();
13561 }
13562 
13563 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
13564 // builtins) into loads with swaps.
13565 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
13566                                               DAGCombinerInfo &DCI) const {
13567   SelectionDAG &DAG = DCI.DAG;
13568   SDLoc dl(N);
13569   SDValue Chain;
13570   SDValue Base;
13571   MachineMemOperand *MMO;
13572 
13573   switch (N->getOpcode()) {
13574   default:
13575     llvm_unreachable("Unexpected opcode for little endian VSX load");
13576   case ISD::LOAD: {
13577     LoadSDNode *LD = cast<LoadSDNode>(N);
13578     Chain = LD->getChain();
13579     Base = LD->getBasePtr();
13580     MMO = LD->getMemOperand();
13581     // If the MMO suggests this isn't a load of a full vector, leave
13582     // things alone.  For a built-in, we have to make the change for
13583     // correctness, so if there is a size problem that will be a bug.
13584     if (MMO->getSize() < 16)
13585       return SDValue();
13586     break;
13587   }
13588   case ISD::INTRINSIC_W_CHAIN: {
13589     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
13590     Chain = Intrin->getChain();
13591     // Similarly to the store case below, Intrin->getBasePtr() doesn't get
13592     // us what we want. Get operand 2 instead.
13593     Base = Intrin->getOperand(2);
13594     MMO = Intrin->getMemOperand();
13595     break;
13596   }
13597   }
13598 
13599   MVT VecTy = N->getValueType(0).getSimpleVT();
13600 
13601   // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is
13602   // aligned and the type is a vector with elements up to 4 bytes
13603   if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
13604       VecTy.getScalarSizeInBits() <= 32) {
13605     return SDValue();
13606   }
13607 
13608   SDValue LoadOps[] = { Chain, Base };
13609   SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
13610                                          DAG.getVTList(MVT::v2f64, MVT::Other),
13611                                          LoadOps, MVT::v2f64, MMO);
13612 
13613   DCI.AddToWorklist(Load.getNode());
13614   Chain = Load.getValue(1);
13615   SDValue Swap = DAG.getNode(
13616       PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load);
13617   DCI.AddToWorklist(Swap.getNode());
13618 
13619   // Add a bitcast if the resulting load type doesn't match v2f64.
13620   if (VecTy != MVT::v2f64) {
13621     SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap);
13622     DCI.AddToWorklist(N.getNode());
13623     // Package {bitcast value, swap's chain} to match Load's shape.
13624     return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other),
13625                        N, Swap.getValue(1));
13626   }
13627 
13628   return Swap;
13629 }
13630 
13631 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
13632 // builtins) into stores with swaps.
13633 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
13634                                                DAGCombinerInfo &DCI) const {
13635   SelectionDAG &DAG = DCI.DAG;
13636   SDLoc dl(N);
13637   SDValue Chain;
13638   SDValue Base;
13639   unsigned SrcOpnd;
13640   MachineMemOperand *MMO;
13641 
13642   switch (N->getOpcode()) {
13643   default:
13644     llvm_unreachable("Unexpected opcode for little endian VSX store");
13645   case ISD::STORE: {
13646     StoreSDNode *ST = cast<StoreSDNode>(N);
13647     Chain = ST->getChain();
13648     Base = ST->getBasePtr();
13649     MMO = ST->getMemOperand();
13650     SrcOpnd = 1;
13651     // If the MMO suggests this isn't a store of a full vector, leave
13652     // things alone.  For a built-in, we have to make the change for
13653     // correctness, so if there is a size problem that will be a bug.
13654     if (MMO->getSize() < 16)
13655       return SDValue();
13656     break;
13657   }
13658   case ISD::INTRINSIC_VOID: {
13659     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
13660     Chain = Intrin->getChain();
13661     // Intrin->getBasePtr() oddly does not get what we want.
13662     Base = Intrin->getOperand(3);
13663     MMO = Intrin->getMemOperand();
13664     SrcOpnd = 2;
13665     break;
13666   }
13667   }
13668 
13669   SDValue Src = N->getOperand(SrcOpnd);
13670   MVT VecTy = Src.getValueType().getSimpleVT();
13671 
13672   // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the load is
13673   // aligned and the type is a vector with elements up to 4 bytes
13674   if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
13675       VecTy.getScalarSizeInBits() <= 32) {
13676     return SDValue();
13677   }
13678 
13679   // All stores are done as v2f64 and possible bit cast.
13680   if (VecTy != MVT::v2f64) {
13681     Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
13682     DCI.AddToWorklist(Src.getNode());
13683   }
13684 
13685   SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
13686                              DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
13687   DCI.AddToWorklist(Swap.getNode());
13688   Chain = Swap.getValue(1);
13689   SDValue StoreOps[] = { Chain, Swap, Base };
13690   SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
13691                                           DAG.getVTList(MVT::Other),
13692                                           StoreOps, VecTy, MMO);
13693   DCI.AddToWorklist(Store.getNode());
13694   return Store;
13695 }
13696 
13697 // Handle DAG combine for STORE (FP_TO_INT F).
13698 SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
13699                                                DAGCombinerInfo &DCI) const {
13700 
13701   SelectionDAG &DAG = DCI.DAG;
13702   SDLoc dl(N);
13703   unsigned Opcode = N->getOperand(1).getOpcode();
13704 
13705   assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT)
13706          && "Not a FP_TO_INT Instruction!");
13707 
13708   SDValue Val = N->getOperand(1).getOperand(0);
13709   EVT Op1VT = N->getOperand(1).getValueType();
13710   EVT ResVT = Val.getValueType();
13711 
13712   if (!isTypeLegal(ResVT))
13713     return SDValue();
13714 
13715   // Only perform combine for conversion to i64/i32 or power9 i16/i8.
13716   bool ValidTypeForStoreFltAsInt =
13717         (Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
13718          (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));
13719 
13720   if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Vector() ||
13721       cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
13722     return SDValue();
13723 
13724   // Extend f32 values to f64
13725   if (ResVT.getScalarSizeInBits() == 32) {
13726     Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
13727     DCI.AddToWorklist(Val.getNode());
13728   }
13729 
13730   // Set signed or unsigned conversion opcode.
13731   unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT) ?
13732                           PPCISD::FP_TO_SINT_IN_VSR :
13733                           PPCISD::FP_TO_UINT_IN_VSR;
13734 
13735   Val = DAG.getNode(ConvOpcode,
13736                     dl, ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val);
13737   DCI.AddToWorklist(Val.getNode());
13738 
13739   // Set number of bytes being converted.
13740   unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8;
13741   SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2),
13742                     DAG.getIntPtrConstant(ByteSize, dl, false),
13743                     DAG.getValueType(Op1VT) };
13744 
13745   Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl,
13746           DAG.getVTList(MVT::Other), Ops,
13747           cast<StoreSDNode>(N)->getMemoryVT(),
13748           cast<StoreSDNode>(N)->getMemOperand());
13749 
13750   DCI.AddToWorklist(Val.getNode());
13751   return Val;
13752 }
13753 
13754 static bool isAlternatingShuffMask(const ArrayRef<int> &Mask, int NumElts) {
13755   // Check that the source of the element keeps flipping
13756   // (i.e. Mask[i] < NumElts -> Mask[i+i] >= NumElts).
13757   bool PrevElemFromFirstVec = Mask[0] < NumElts;
13758   for (int i = 1, e = Mask.size(); i < e; i++) {
13759     if (PrevElemFromFirstVec && Mask[i] < NumElts)
13760       return false;
13761     if (!PrevElemFromFirstVec && Mask[i] >= NumElts)
13762       return false;
13763     PrevElemFromFirstVec = !PrevElemFromFirstVec;
13764   }
13765   return true;
13766 }
13767 
13768 static bool isSplatBV(SDValue Op) {
13769   if (Op.getOpcode() != ISD::BUILD_VECTOR)
13770     return false;
13771   SDValue FirstOp;
13772 
13773   // Find first non-undef input.
13774   for (int i = 0, e = Op.getNumOperands(); i < e; i++) {
13775     FirstOp = Op.getOperand(i);
13776     if (!FirstOp.isUndef())
13777       break;
13778   }
13779 
13780   // All inputs are undef or the same as the first non-undef input.
13781   for (int i = 1, e = Op.getNumOperands(); i < e; i++)
13782     if (Op.getOperand(i) != FirstOp && !Op.getOperand(i).isUndef())
13783       return false;
13784   return true;
13785 }
13786 
13787 static SDValue isScalarToVec(SDValue Op) {
13788   if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
13789     return Op;
13790   if (Op.getOpcode() != ISD::BITCAST)
13791     return SDValue();
13792   Op = Op.getOperand(0);
13793   if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
13794     return Op;
13795   return SDValue();
13796 }
13797 
13798 static void fixupShuffleMaskForPermutedSToV(SmallVectorImpl<int> &ShuffV,
13799                                             int LHSMaxIdx, int RHSMinIdx,
13800                                             int RHSMaxIdx, int HalfVec) {
13801   for (int i = 0, e = ShuffV.size(); i < e; i++) {
13802     int Idx = ShuffV[i];
13803     if ((Idx >= 0 && Idx < LHSMaxIdx) || (Idx >= RHSMinIdx && Idx < RHSMaxIdx))
13804       ShuffV[i] += HalfVec;
13805   }
13806   return;
13807 }
13808 
13809 // Replace a SCALAR_TO_VECTOR with a SCALAR_TO_VECTOR_PERMUTED except if
13810 // the original is:
13811 // (<n x Ty> (scalar_to_vector (Ty (extract_elt <n x Ty> %a, C))))
13812 // In such a case, just change the shuffle mask to extract the element
13813 // from the permuted index.
13814 static SDValue getSToVPermuted(SDValue OrigSToV, SelectionDAG &DAG) {
13815   SDLoc dl(OrigSToV);
13816   EVT VT = OrigSToV.getValueType();
13817   assert(OrigSToV.getOpcode() == ISD::SCALAR_TO_VECTOR &&
13818          "Expecting a SCALAR_TO_VECTOR here");
13819   SDValue Input = OrigSToV.getOperand(0);
13820 
13821   if (Input.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
13822     ConstantSDNode *Idx = dyn_cast<ConstantSDNode>(Input.getOperand(1));
13823     SDValue OrigVector = Input.getOperand(0);
13824 
13825     // Can't handle non-const element indices or different vector types
13826     // for the input to the extract and the output of the scalar_to_vector.
13827     if (Idx && VT == OrigVector.getValueType()) {
13828       SmallVector<int, 16> NewMask(VT.getVectorNumElements(), -1);
13829       NewMask[VT.getVectorNumElements() / 2] = Idx->getZExtValue();
13830       return DAG.getVectorShuffle(VT, dl, OrigVector, OrigVector, NewMask);
13831     }
13832   }
13833   return DAG.getNode(PPCISD::SCALAR_TO_VECTOR_PERMUTED, dl, VT,
13834                      OrigSToV.getOperand(0));
13835 }
13836 
13837 // On little endian subtargets, combine shuffles such as:
13838 // vector_shuffle<16,1,17,3,18,5,19,7,20,9,21,11,22,13,23,15>, <zero>, %b
13839 // into:
13840 // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7>, <zero>, %b
13841 // because the latter can be matched to a single instruction merge.
13842 // Furthermore, SCALAR_TO_VECTOR on little endian always involves a permute
13843 // to put the value into element zero. Adjust the shuffle mask so that the
13844 // vector can remain in permuted form (to prevent a swap prior to a shuffle).
13845 SDValue PPCTargetLowering::combineVectorShuffle(ShuffleVectorSDNode *SVN,
13846                                                 SelectionDAG &DAG) const {
13847   SDValue LHS = SVN->getOperand(0);
13848   SDValue RHS = SVN->getOperand(1);
13849   auto Mask = SVN->getMask();
13850   int NumElts = LHS.getValueType().getVectorNumElements();
13851   SDValue Res(SVN, 0);
13852   SDLoc dl(SVN);
13853 
13854   // None of these combines are useful on big endian systems since the ISA
13855   // already has a big endian bias.
13856   if (!Subtarget.isLittleEndian() || !Subtarget.hasVSX())
13857     return Res;
13858 
13859   // If this is not a shuffle of a shuffle and the first element comes from
13860   // the second vector, canonicalize to the commuted form. This will make it
13861   // more likely to match one of the single instruction patterns.
13862   if (Mask[0] >= NumElts && LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
13863       RHS.getOpcode() != ISD::VECTOR_SHUFFLE) {
13864     std::swap(LHS, RHS);
13865     Res = DAG.getCommutedVectorShuffle(*SVN);
13866     Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
13867   }
13868 
13869   // Adjust the shuffle mask if either input vector comes from a
13870   // SCALAR_TO_VECTOR and keep the respective input vector in permuted
13871   // form (to prevent the need for a swap).
13872   SmallVector<int, 16> ShuffV(Mask.begin(), Mask.end());
13873   SDValue SToVLHS = isScalarToVec(LHS);
13874   SDValue SToVRHS = isScalarToVec(RHS);
13875   if (SToVLHS || SToVRHS) {
13876     int NumEltsIn = SToVLHS ? SToVLHS.getValueType().getVectorNumElements()
13877                             : SToVRHS.getValueType().getVectorNumElements();
13878     int NumEltsOut = ShuffV.size();
13879 
13880     // Initially assume that neither input is permuted. These will be adjusted
13881     // accordingly if either input is.
13882     int LHSMaxIdx = -1;
13883     int RHSMinIdx = -1;
13884     int RHSMaxIdx = -1;
13885     int HalfVec = LHS.getValueType().getVectorNumElements() / 2;
13886 
13887     // Get the permuted scalar to vector nodes for the source(s) that come from
13888     // ISD::SCALAR_TO_VECTOR.
13889     if (SToVLHS) {
13890       // Set up the values for the shuffle vector fixup.
13891       LHSMaxIdx = NumEltsOut / NumEltsIn;
13892       SToVLHS = getSToVPermuted(SToVLHS, DAG);
13893       if (SToVLHS.getValueType() != LHS.getValueType())
13894         SToVLHS = DAG.getBitcast(LHS.getValueType(), SToVLHS);
13895       LHS = SToVLHS;
13896     }
13897     if (SToVRHS) {
13898       RHSMinIdx = NumEltsOut;
13899       RHSMaxIdx = NumEltsOut / NumEltsIn + RHSMinIdx;
13900       SToVRHS = getSToVPermuted(SToVRHS, DAG);
13901       if (SToVRHS.getValueType() != RHS.getValueType())
13902         SToVRHS = DAG.getBitcast(RHS.getValueType(), SToVRHS);
13903       RHS = SToVRHS;
13904     }
13905 
13906     // Fix up the shuffle mask to reflect where the desired element actually is.
13907     // The minimum and maximum indices that correspond to element zero for both
13908     // the LHS and RHS are computed and will control which shuffle mask entries
13909     // are to be changed. For example, if the RHS is permuted, any shuffle mask
13910     // entries in the range [RHSMinIdx,RHSMaxIdx) will be incremented by
13911     // HalfVec to refer to the corresponding element in the permuted vector.
13912     fixupShuffleMaskForPermutedSToV(ShuffV, LHSMaxIdx, RHSMinIdx, RHSMaxIdx,
13913                                     HalfVec);
13914     Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
13915 
13916     // We may have simplified away the shuffle. We won't be able to do anything
13917     // further with it here.
13918     if (!isa<ShuffleVectorSDNode>(Res))
13919       return Res;
13920     Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
13921   }
13922 
13923   // The common case after we commuted the shuffle is that the RHS is a splat
13924   // and we have elements coming in from the splat at indices that are not
13925   // conducive to using a merge.
13926   // Example:
13927   // vector_shuffle<0,17,1,19,2,21,3,23,4,25,5,27,6,29,7,31> t1, <zero>
13928   if (!isSplatBV(RHS))
13929     return Res;
13930 
13931   // We are looking for a mask such that all even elements are from
13932   // one vector and all odd elements from the other.
13933   if (!isAlternatingShuffMask(Mask, NumElts))
13934     return Res;
13935 
13936   // Adjust the mask so we are pulling in the same index from the splat
13937   // as the index from the interesting vector in consecutive elements.
13938   // Example (even elements from first vector):
13939   // vector_shuffle<0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23> t1, <zero>
13940   if (Mask[0] < NumElts)
13941     for (int i = 1, e = Mask.size(); i < e; i += 2)
13942       ShuffV[i] = (ShuffV[i - 1] + NumElts);
13943   // Example (odd elements from first vector):
13944   // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7> t1, <zero>
13945   else
13946     for (int i = 0, e = Mask.size(); i < e; i += 2)
13947       ShuffV[i] = (ShuffV[i + 1] + NumElts);
13948 
13949   // If the RHS has undefs, we need to remove them since we may have created
13950   // a shuffle that adds those instead of the splat value.
13951   SDValue SplatVal = cast<BuildVectorSDNode>(RHS.getNode())->getSplatValue();
13952   RHS = DAG.getSplatBuildVector(RHS.getValueType(), dl, SplatVal);
13953 
13954   Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
13955   return Res;
13956 }
13957 
13958 SDValue PPCTargetLowering::combineVReverseMemOP(ShuffleVectorSDNode *SVN,
13959                                                 LSBaseSDNode *LSBase,
13960                                                 DAGCombinerInfo &DCI) const {
13961   assert((ISD::isNormalLoad(LSBase) || ISD::isNormalStore(LSBase)) &&
13962         "Not a reverse memop pattern!");
13963 
13964   auto IsElementReverse = [](const ShuffleVectorSDNode *SVN) -> bool {
13965     auto Mask = SVN->getMask();
13966     int i = 0;
13967     auto I = Mask.rbegin();
13968     auto E = Mask.rend();
13969 
13970     for (; I != E; ++I) {
13971       if (*I != i)
13972         return false;
13973       i++;
13974     }
13975     return true;
13976   };
13977 
13978   SelectionDAG &DAG = DCI.DAG;
13979   EVT VT = SVN->getValueType(0);
13980 
13981   if (!isTypeLegal(VT) || !Subtarget.isLittleEndian() || !Subtarget.hasVSX())
13982     return SDValue();
13983 
13984   // Before P9, we have PPCVSXSwapRemoval pass to hack the element order.
13985   // See comment in PPCVSXSwapRemoval.cpp.
13986   // It is conflict with PPCVSXSwapRemoval opt. So we don't do it.
13987   if (!Subtarget.hasP9Vector())
13988     return SDValue();
13989 
13990   if(!IsElementReverse(SVN))
13991     return SDValue();
13992 
13993   if (LSBase->getOpcode() == ISD::LOAD) {
13994     SDLoc dl(SVN);
13995     SDValue LoadOps[] = {LSBase->getChain(), LSBase->getBasePtr()};
13996     return DAG.getMemIntrinsicNode(
13997         PPCISD::LOAD_VEC_BE, dl, DAG.getVTList(VT, MVT::Other), LoadOps,
13998         LSBase->getMemoryVT(), LSBase->getMemOperand());
13999   }
14000 
14001   if (LSBase->getOpcode() == ISD::STORE) {
14002     SDLoc dl(LSBase);
14003     SDValue StoreOps[] = {LSBase->getChain(), SVN->getOperand(0),
14004                           LSBase->getBasePtr()};
14005     return DAG.getMemIntrinsicNode(
14006         PPCISD::STORE_VEC_BE, dl, DAG.getVTList(MVT::Other), StoreOps,
14007         LSBase->getMemoryVT(), LSBase->getMemOperand());
14008   }
14009 
14010   llvm_unreachable("Expected a load or store node here");
14011 }
14012 
14013 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
14014                                              DAGCombinerInfo &DCI) const {
14015   SelectionDAG &DAG = DCI.DAG;
14016   SDLoc dl(N);
14017   switch (N->getOpcode()) {
14018   default: break;
14019   case ISD::ADD:
14020     return combineADD(N, DCI);
14021   case ISD::SHL:
14022     return combineSHL(N, DCI);
14023   case ISD::SRA:
14024     return combineSRA(N, DCI);
14025   case ISD::SRL:
14026     return combineSRL(N, DCI);
14027   case ISD::MUL:
14028     return combineMUL(N, DCI);
14029   case ISD::FMA:
14030   case PPCISD::FNMSUB:
14031     return combineFMALike(N, DCI);
14032   case PPCISD::SHL:
14033     if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
14034         return N->getOperand(0);
14035     break;
14036   case PPCISD::SRL:
14037     if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
14038         return N->getOperand(0);
14039     break;
14040   case PPCISD::SRA:
14041     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
14042       if (C->isNullValue() ||   //  0 >>s V -> 0.
14043           C->isAllOnesValue())    // -1 >>s V -> -1.
14044         return N->getOperand(0);
14045     }
14046     break;
14047   case ISD::SIGN_EXTEND:
14048   case ISD::ZERO_EXTEND:
14049   case ISD::ANY_EXTEND:
14050     return DAGCombineExtBoolTrunc(N, DCI);
14051   case ISD::TRUNCATE:
14052     return combineTRUNCATE(N, DCI);
14053   case ISD::SETCC:
14054     if (SDValue CSCC = combineSetCC(N, DCI))
14055       return CSCC;
14056     LLVM_FALLTHROUGH;
14057   case ISD::SELECT_CC:
14058     return DAGCombineTruncBoolExt(N, DCI);
14059   case ISD::SINT_TO_FP:
14060   case ISD::UINT_TO_FP:
14061     return combineFPToIntToFP(N, DCI);
14062   case ISD::VECTOR_SHUFFLE:
14063     if (ISD::isNormalLoad(N->getOperand(0).getNode())) {
14064       LSBaseSDNode* LSBase = cast<LSBaseSDNode>(N->getOperand(0));
14065       return combineVReverseMemOP(cast<ShuffleVectorSDNode>(N), LSBase, DCI);
14066     }
14067     return combineVectorShuffle(cast<ShuffleVectorSDNode>(N), DCI.DAG);
14068   case ISD::STORE: {
14069 
14070     EVT Op1VT = N->getOperand(1).getValueType();
14071     unsigned Opcode = N->getOperand(1).getOpcode();
14072 
14073     if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) {
14074       SDValue Val= combineStoreFPToInt(N, DCI);
14075       if (Val)
14076         return Val;
14077     }
14078 
14079     if (Opcode == ISD::VECTOR_SHUFFLE && ISD::isNormalStore(N)) {
14080       ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N->getOperand(1));
14081       SDValue Val= combineVReverseMemOP(SVN, cast<LSBaseSDNode>(N), DCI);
14082       if (Val)
14083         return Val;
14084     }
14085 
14086     // Turn STORE (BSWAP) -> sthbrx/stwbrx.
14087     if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&
14088         N->getOperand(1).getNode()->hasOneUse() &&
14089         (Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
14090          (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {
14091 
14092       // STBRX can only handle simple types and it makes no sense to store less
14093       // two bytes in byte-reversed order.
14094       EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
14095       if (mVT.isExtended() || mVT.getSizeInBits() < 16)
14096         break;
14097 
14098       SDValue BSwapOp = N->getOperand(1).getOperand(0);
14099       // Do an any-extend to 32-bits if this is a half-word input.
14100       if (BSwapOp.getValueType() == MVT::i16)
14101         BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
14102 
14103       // If the type of BSWAP operand is wider than stored memory width
14104       // it need to be shifted to the right side before STBRX.
14105       if (Op1VT.bitsGT(mVT)) {
14106         int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
14107         BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
14108                               DAG.getConstant(Shift, dl, MVT::i32));
14109         // Need to truncate if this is a bswap of i64 stored as i32/i16.
14110         if (Op1VT == MVT::i64)
14111           BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
14112       }
14113 
14114       SDValue Ops[] = {
14115         N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
14116       };
14117       return
14118         DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
14119                                 Ops, cast<StoreSDNode>(N)->getMemoryVT(),
14120                                 cast<StoreSDNode>(N)->getMemOperand());
14121     }
14122 
14123     // STORE Constant:i32<0>  ->  STORE<trunc to i32> Constant:i64<0>
14124     // So it can increase the chance of CSE constant construction.
14125     if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
14126         isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
14127       // Need to sign-extended to 64-bits to handle negative values.
14128       EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
14129       uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
14130                                     MemVT.getSizeInBits());
14131       SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);
14132 
14133       // DAG.getTruncStore() can't be used here because it doesn't accept
14134       // the general (base + offset) addressing mode.
14135       // So we use UpdateNodeOperands and setTruncatingStore instead.
14136       DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
14137                              N->getOperand(3));
14138       cast<StoreSDNode>(N)->setTruncatingStore(true);
14139       return SDValue(N, 0);
14140     }
14141 
14142     // For little endian, VSX stores require generating xxswapd/lxvd2x.
14143     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
14144     if (Op1VT.isSimple()) {
14145       MVT StoreVT = Op1VT.getSimpleVT();
14146       if (Subtarget.needsSwapsForVSXMemOps() &&
14147           (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
14148            StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
14149         return expandVSXStoreForLE(N, DCI);
14150     }
14151     break;
14152   }
14153   case ISD::LOAD: {
14154     LoadSDNode *LD = cast<LoadSDNode>(N);
14155     EVT VT = LD->getValueType(0);
14156 
14157     // For little endian, VSX loads require generating lxvd2x/xxswapd.
14158     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
14159     if (VT.isSimple()) {
14160       MVT LoadVT = VT.getSimpleVT();
14161       if (Subtarget.needsSwapsForVSXMemOps() &&
14162           (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
14163            LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
14164         return expandVSXLoadForLE(N, DCI);
14165     }
14166 
14167     // We sometimes end up with a 64-bit integer load, from which we extract
14168     // two single-precision floating-point numbers. This happens with
14169     // std::complex<float>, and other similar structures, because of the way we
14170     // canonicalize structure copies. However, if we lack direct moves,
14171     // then the final bitcasts from the extracted integer values to the
14172     // floating-point numbers turn into store/load pairs. Even with direct moves,
14173     // just loading the two floating-point numbers is likely better.
14174     auto ReplaceTwoFloatLoad = [&]() {
14175       if (VT != MVT::i64)
14176         return false;
14177 
14178       if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
14179           LD->isVolatile())
14180         return false;
14181 
14182       //  We're looking for a sequence like this:
14183       //  t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64
14184       //      t16: i64 = srl t13, Constant:i32<32>
14185       //    t17: i32 = truncate t16
14186       //  t18: f32 = bitcast t17
14187       //    t19: i32 = truncate t13
14188       //  t20: f32 = bitcast t19
14189 
14190       if (!LD->hasNUsesOfValue(2, 0))
14191         return false;
14192 
14193       auto UI = LD->use_begin();
14194       while (UI.getUse().getResNo() != 0) ++UI;
14195       SDNode *Trunc = *UI++;
14196       while (UI.getUse().getResNo() != 0) ++UI;
14197       SDNode *RightShift = *UI;
14198       if (Trunc->getOpcode() != ISD::TRUNCATE)
14199         std::swap(Trunc, RightShift);
14200 
14201       if (Trunc->getOpcode() != ISD::TRUNCATE ||
14202           Trunc->getValueType(0) != MVT::i32 ||
14203           !Trunc->hasOneUse())
14204         return false;
14205       if (RightShift->getOpcode() != ISD::SRL ||
14206           !isa<ConstantSDNode>(RightShift->getOperand(1)) ||
14207           RightShift->getConstantOperandVal(1) != 32 ||
14208           !RightShift->hasOneUse())
14209         return false;
14210 
14211       SDNode *Trunc2 = *RightShift->use_begin();
14212       if (Trunc2->getOpcode() != ISD::TRUNCATE ||
14213           Trunc2->getValueType(0) != MVT::i32 ||
14214           !Trunc2->hasOneUse())
14215         return false;
14216 
14217       SDNode *Bitcast = *Trunc->use_begin();
14218       SDNode *Bitcast2 = *Trunc2->use_begin();
14219 
14220       if (Bitcast->getOpcode() != ISD::BITCAST ||
14221           Bitcast->getValueType(0) != MVT::f32)
14222         return false;
14223       if (Bitcast2->getOpcode() != ISD::BITCAST ||
14224           Bitcast2->getValueType(0) != MVT::f32)
14225         return false;
14226 
14227       if (Subtarget.isLittleEndian())
14228         std::swap(Bitcast, Bitcast2);
14229 
14230       // Bitcast has the second float (in memory-layout order) and Bitcast2
14231       // has the first one.
14232 
14233       SDValue BasePtr = LD->getBasePtr();
14234       if (LD->isIndexed()) {
14235         assert(LD->getAddressingMode() == ISD::PRE_INC &&
14236                "Non-pre-inc AM on PPC?");
14237         BasePtr =
14238           DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
14239                       LD->getOffset());
14240       }
14241 
14242       auto MMOFlags =
14243           LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
14244       SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
14245                                       LD->getPointerInfo(), LD->getAlignment(),
14246                                       MMOFlags, LD->getAAInfo());
14247       SDValue AddPtr =
14248         DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
14249                     BasePtr, DAG.getIntPtrConstant(4, dl));
14250       SDValue FloatLoad2 = DAG.getLoad(
14251           MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
14252           LD->getPointerInfo().getWithOffset(4),
14253           MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo());
14254 
14255       if (LD->isIndexed()) {
14256         // Note that DAGCombine should re-form any pre-increment load(s) from
14257         // what is produced here if that makes sense.
14258         DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr);
14259       }
14260 
14261       DCI.CombineTo(Bitcast2, FloatLoad);
14262       DCI.CombineTo(Bitcast, FloatLoad2);
14263 
14264       DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 2 : 1),
14265                                     SDValue(FloatLoad2.getNode(), 1));
14266       return true;
14267     };
14268 
14269     if (ReplaceTwoFloatLoad())
14270       return SDValue(N, 0);
14271 
14272     EVT MemVT = LD->getMemoryVT();
14273     Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
14274     Align ABIAlignment = DAG.getDataLayout().getABITypeAlign(Ty);
14275     if (LD->isUnindexed() && VT.isVector() &&
14276         ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
14277           // P8 and later hardware should just use LOAD.
14278           !Subtarget.hasP8Vector() &&
14279           (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
14280            VT == MVT::v4f32))) &&
14281         LD->getAlign() < ABIAlignment) {
14282       // This is a type-legal unaligned Altivec load.
14283       SDValue Chain = LD->getChain();
14284       SDValue Ptr = LD->getBasePtr();
14285       bool isLittleEndian = Subtarget.isLittleEndian();
14286 
14287       // This implements the loading of unaligned vectors as described in
14288       // the venerable Apple Velocity Engine overview. Specifically:
14289       // https://developer.apple.com/hardwaredrivers/ve/alignment.html
14290       // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
14291       //
14292       // The general idea is to expand a sequence of one or more unaligned
14293       // loads into an alignment-based permutation-control instruction (lvsl
14294       // or lvsr), a series of regular vector loads (which always truncate
14295       // their input address to an aligned address), and a series of
14296       // permutations.  The results of these permutations are the requested
14297       // loaded values.  The trick is that the last "extra" load is not taken
14298       // from the address you might suspect (sizeof(vector) bytes after the
14299       // last requested load), but rather sizeof(vector) - 1 bytes after the
14300       // last requested vector. The point of this is to avoid a page fault if
14301       // the base address happened to be aligned. This works because if the
14302       // base address is aligned, then adding less than a full vector length
14303       // will cause the last vector in the sequence to be (re)loaded.
14304       // Otherwise, the next vector will be fetched as you might suspect was
14305       // necessary.
14306 
14307       // We might be able to reuse the permutation generation from
14308       // a different base address offset from this one by an aligned amount.
14309       // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
14310       // optimization later.
14311       Intrinsic::ID Intr, IntrLD, IntrPerm;
14312       MVT PermCntlTy, PermTy, LDTy;
14313       Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr
14314                             : Intrinsic::ppc_altivec_lvsl;
14315       IntrLD = Intrinsic::ppc_altivec_lvx;
14316       IntrPerm = Intrinsic::ppc_altivec_vperm;
14317       PermCntlTy = MVT::v16i8;
14318       PermTy = MVT::v4i32;
14319       LDTy = MVT::v4i32;
14320 
14321       SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);
14322 
14323       // Create the new MMO for the new base load. It is like the original MMO,
14324       // but represents an area in memory almost twice the vector size centered
14325       // on the original address. If the address is unaligned, we might start
14326       // reading up to (sizeof(vector)-1) bytes below the address of the
14327       // original unaligned load.
14328       MachineFunction &MF = DAG.getMachineFunction();
14329       MachineMemOperand *BaseMMO =
14330         MF.getMachineMemOperand(LD->getMemOperand(),
14331                                 -(long)MemVT.getStoreSize()+1,
14332                                 2*MemVT.getStoreSize()-1);
14333 
14334       // Create the new base load.
14335       SDValue LDXIntID =
14336           DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout()));
14337       SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
14338       SDValue BaseLoad =
14339         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
14340                                 DAG.getVTList(PermTy, MVT::Other),
14341                                 BaseLoadOps, LDTy, BaseMMO);
14342 
14343       // Note that the value of IncOffset (which is provided to the next
14344       // load's pointer info offset value, and thus used to calculate the
14345       // alignment), and the value of IncValue (which is actually used to
14346       // increment the pointer value) are different! This is because we
14347       // require the next load to appear to be aligned, even though it
14348       // is actually offset from the base pointer by a lesser amount.
14349       int IncOffset = VT.getSizeInBits() / 8;
14350       int IncValue = IncOffset;
14351 
14352       // Walk (both up and down) the chain looking for another load at the real
14353       // (aligned) offset (the alignment of the other load does not matter in
14354       // this case). If found, then do not use the offset reduction trick, as
14355       // that will prevent the loads from being later combined (as they would
14356       // otherwise be duplicates).
14357       if (!findConsecutiveLoad(LD, DAG))
14358         --IncValue;
14359 
14360       SDValue Increment =
14361           DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout()));
14362       Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
14363 
14364       MachineMemOperand *ExtraMMO =
14365         MF.getMachineMemOperand(LD->getMemOperand(),
14366                                 1, 2*MemVT.getStoreSize()-1);
14367       SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
14368       SDValue ExtraLoad =
14369         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
14370                                 DAG.getVTList(PermTy, MVT::Other),
14371                                 ExtraLoadOps, LDTy, ExtraMMO);
14372 
14373       SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
14374         BaseLoad.getValue(1), ExtraLoad.getValue(1));
14375 
14376       // Because vperm has a big-endian bias, we must reverse the order
14377       // of the input vectors and complement the permute control vector
14378       // when generating little endian code.  We have already handled the
14379       // latter by using lvsr instead of lvsl, so just reverse BaseLoad
14380       // and ExtraLoad here.
14381       SDValue Perm;
14382       if (isLittleEndian)
14383         Perm = BuildIntrinsicOp(IntrPerm,
14384                                 ExtraLoad, BaseLoad, PermCntl, DAG, dl);
14385       else
14386         Perm = BuildIntrinsicOp(IntrPerm,
14387                                 BaseLoad, ExtraLoad, PermCntl, DAG, dl);
14388 
14389       if (VT != PermTy)
14390         Perm = Subtarget.hasAltivec()
14391                    ? DAG.getNode(ISD::BITCAST, dl, VT, Perm)
14392                    : DAG.getNode(ISD::FP_ROUND, dl, VT, Perm,
14393                                  DAG.getTargetConstant(1, dl, MVT::i64));
14394                                // second argument is 1 because this rounding
14395                                // is always exact.
14396 
14397       // The output of the permutation is our loaded result, the TokenFactor is
14398       // our new chain.
14399       DCI.CombineTo(N, Perm, TF);
14400       return SDValue(N, 0);
14401     }
14402     }
14403     break;
14404     case ISD::INTRINSIC_WO_CHAIN: {
14405       bool isLittleEndian = Subtarget.isLittleEndian();
14406       unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
14407       Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
14408                                            : Intrinsic::ppc_altivec_lvsl);
14409       if (IID == Intr && N->getOperand(1)->getOpcode() == ISD::ADD) {
14410         SDValue Add = N->getOperand(1);
14411 
14412         int Bits = 4 /* 16 byte alignment */;
14413 
14414         if (DAG.MaskedValueIsZero(Add->getOperand(1),
14415                                   APInt::getAllOnesValue(Bits /* alignment */)
14416                                       .zext(Add.getScalarValueSizeInBits()))) {
14417           SDNode *BasePtr = Add->getOperand(0).getNode();
14418           for (SDNode::use_iterator UI = BasePtr->use_begin(),
14419                                     UE = BasePtr->use_end();
14420                UI != UE; ++UI) {
14421             if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
14422                 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() ==
14423                     IID) {
14424               // We've found another LVSL/LVSR, and this address is an aligned
14425               // multiple of that one. The results will be the same, so use the
14426               // one we've just found instead.
14427 
14428               return SDValue(*UI, 0);
14429             }
14430           }
14431         }
14432 
14433         if (isa<ConstantSDNode>(Add->getOperand(1))) {
14434           SDNode *BasePtr = Add->getOperand(0).getNode();
14435           for (SDNode::use_iterator UI = BasePtr->use_begin(),
14436                UE = BasePtr->use_end(); UI != UE; ++UI) {
14437             if (UI->getOpcode() == ISD::ADD &&
14438                 isa<ConstantSDNode>(UI->getOperand(1)) &&
14439                 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
14440                  cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
14441                 (1ULL << Bits) == 0) {
14442               SDNode *OtherAdd = *UI;
14443               for (SDNode::use_iterator VI = OtherAdd->use_begin(),
14444                    VE = OtherAdd->use_end(); VI != VE; ++VI) {
14445                 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
14446                     cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) {
14447                   return SDValue(*VI, 0);
14448                 }
14449               }
14450             }
14451           }
14452         }
14453       }
14454 
14455       // Combine vmaxsw/h/b(a, a's negation) to abs(a)
14456       // Expose the vabsduw/h/b opportunity for down stream
14457       if (!DCI.isAfterLegalizeDAG() && Subtarget.hasP9Altivec() &&
14458           (IID == Intrinsic::ppc_altivec_vmaxsw ||
14459            IID == Intrinsic::ppc_altivec_vmaxsh ||
14460            IID == Intrinsic::ppc_altivec_vmaxsb)) {
14461         SDValue V1 = N->getOperand(1);
14462         SDValue V2 = N->getOperand(2);
14463         if ((V1.getSimpleValueType() == MVT::v4i32 ||
14464              V1.getSimpleValueType() == MVT::v8i16 ||
14465              V1.getSimpleValueType() == MVT::v16i8) &&
14466             V1.getSimpleValueType() == V2.getSimpleValueType()) {
14467           // (0-a, a)
14468           if (V1.getOpcode() == ISD::SUB &&
14469               ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
14470               V1.getOperand(1) == V2) {
14471             return DAG.getNode(ISD::ABS, dl, V2.getValueType(), V2);
14472           }
14473           // (a, 0-a)
14474           if (V2.getOpcode() == ISD::SUB &&
14475               ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
14476               V2.getOperand(1) == V1) {
14477             return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
14478           }
14479           // (x-y, y-x)
14480           if (V1.getOpcode() == ISD::SUB && V2.getOpcode() == ISD::SUB &&
14481               V1.getOperand(0) == V2.getOperand(1) &&
14482               V1.getOperand(1) == V2.getOperand(0)) {
14483             return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
14484           }
14485         }
14486       }
14487     }
14488 
14489     break;
14490   case ISD::INTRINSIC_W_CHAIN:
14491     // For little endian, VSX loads require generating lxvd2x/xxswapd.
14492     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
14493     if (Subtarget.needsSwapsForVSXMemOps()) {
14494       switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
14495       default:
14496         break;
14497       case Intrinsic::ppc_vsx_lxvw4x:
14498       case Intrinsic::ppc_vsx_lxvd2x:
14499         return expandVSXLoadForLE(N, DCI);
14500       }
14501     }
14502     break;
14503   case ISD::INTRINSIC_VOID:
14504     // For little endian, VSX stores require generating xxswapd/stxvd2x.
14505     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
14506     if (Subtarget.needsSwapsForVSXMemOps()) {
14507       switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
14508       default:
14509         break;
14510       case Intrinsic::ppc_vsx_stxvw4x:
14511       case Intrinsic::ppc_vsx_stxvd2x:
14512         return expandVSXStoreForLE(N, DCI);
14513       }
14514     }
14515     break;
14516   case ISD::BSWAP:
14517     // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
14518     if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
14519         N->getOperand(0).hasOneUse() &&
14520         (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
14521          (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
14522           N->getValueType(0) == MVT::i64))) {
14523       SDValue Load = N->getOperand(0);
14524       LoadSDNode *LD = cast<LoadSDNode>(Load);
14525       // Create the byte-swapping load.
14526       SDValue Ops[] = {
14527         LD->getChain(),    // Chain
14528         LD->getBasePtr(),  // Ptr
14529         DAG.getValueType(N->getValueType(0)) // VT
14530       };
14531       SDValue BSLoad =
14532         DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
14533                                 DAG.getVTList(N->getValueType(0) == MVT::i64 ?
14534                                               MVT::i64 : MVT::i32, MVT::Other),
14535                                 Ops, LD->getMemoryVT(), LD->getMemOperand());
14536 
14537       // If this is an i16 load, insert the truncate.
14538       SDValue ResVal = BSLoad;
14539       if (N->getValueType(0) == MVT::i16)
14540         ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
14541 
14542       // First, combine the bswap away.  This makes the value produced by the
14543       // load dead.
14544       DCI.CombineTo(N, ResVal);
14545 
14546       // Next, combine the load away, we give it a bogus result value but a real
14547       // chain result.  The result value is dead because the bswap is dead.
14548       DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
14549 
14550       // Return N so it doesn't get rechecked!
14551       return SDValue(N, 0);
14552     }
14553     break;
14554   case PPCISD::VCMP:
14555     // If a VCMP_rec node already exists with exactly the same operands as this
14556     // node, use its result instead of this node (VCMP_rec computes both a CR6
14557     // and a normal output).
14558     //
14559     if (!N->getOperand(0).hasOneUse() &&
14560         !N->getOperand(1).hasOneUse() &&
14561         !N->getOperand(2).hasOneUse()) {
14562 
14563       // Scan all of the users of the LHS, looking for VCMP_rec's that match.
14564       SDNode *VCMPrecNode = nullptr;
14565 
14566       SDNode *LHSN = N->getOperand(0).getNode();
14567       for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
14568            UI != E; ++UI)
14569         if (UI->getOpcode() == PPCISD::VCMP_rec &&
14570             UI->getOperand(1) == N->getOperand(1) &&
14571             UI->getOperand(2) == N->getOperand(2) &&
14572             UI->getOperand(0) == N->getOperand(0)) {
14573           VCMPrecNode = *UI;
14574           break;
14575         }
14576 
14577       // If there is no VCMP_rec node, or if the flag value has a single use,
14578       // don't transform this.
14579       if (!VCMPrecNode || VCMPrecNode->hasNUsesOfValue(0, 1))
14580         break;
14581 
14582       // Look at the (necessarily single) use of the flag value.  If it has a
14583       // chain, this transformation is more complex.  Note that multiple things
14584       // could use the value result, which we should ignore.
14585       SDNode *FlagUser = nullptr;
14586       for (SDNode::use_iterator UI = VCMPrecNode->use_begin();
14587            FlagUser == nullptr; ++UI) {
14588         assert(UI != VCMPrecNode->use_end() && "Didn't find user!");
14589         SDNode *User = *UI;
14590         for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
14591           if (User->getOperand(i) == SDValue(VCMPrecNode, 1)) {
14592             FlagUser = User;
14593             break;
14594           }
14595         }
14596       }
14597 
14598       // If the user is a MFOCRF instruction, we know this is safe.
14599       // Otherwise we give up for right now.
14600       if (FlagUser->getOpcode() == PPCISD::MFOCRF)
14601         return SDValue(VCMPrecNode, 0);
14602     }
14603     break;
14604   case ISD::BRCOND: {
14605     SDValue Cond = N->getOperand(1);
14606     SDValue Target = N->getOperand(2);
14607 
14608     if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
14609         cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
14610           Intrinsic::loop_decrement) {
14611 
14612       // We now need to make the intrinsic dead (it cannot be instruction
14613       // selected).
14614       DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0));
14615       assert(Cond.getNode()->hasOneUse() &&
14616              "Counter decrement has more than one use");
14617 
14618       return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other,
14619                          N->getOperand(0), Target);
14620     }
14621   }
14622   break;
14623   case ISD::BR_CC: {
14624     // If this is a branch on an altivec predicate comparison, lower this so
14625     // that we don't have to do a MFOCRF: instead, branch directly on CR6.  This
14626     // lowering is done pre-legalize, because the legalizer lowers the predicate
14627     // compare down to code that is difficult to reassemble.
14628     ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
14629     SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);
14630 
14631     // Sometimes the promoted value of the intrinsic is ANDed by some non-zero
14632     // value. If so, pass-through the AND to get to the intrinsic.
14633     if (LHS.getOpcode() == ISD::AND &&
14634         LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
14635         cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() ==
14636           Intrinsic::loop_decrement &&
14637         isa<ConstantSDNode>(LHS.getOperand(1)) &&
14638         !isNullConstant(LHS.getOperand(1)))
14639       LHS = LHS.getOperand(0);
14640 
14641     if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
14642         cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
14643           Intrinsic::loop_decrement &&
14644         isa<ConstantSDNode>(RHS)) {
14645       assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
14646              "Counter decrement comparison is not EQ or NE");
14647 
14648       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
14649       bool isBDNZ = (CC == ISD::SETEQ && Val) ||
14650                     (CC == ISD::SETNE && !Val);
14651 
14652       // We now need to make the intrinsic dead (it cannot be instruction
14653       // selected).
14654       DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
14655       assert(LHS.getNode()->hasOneUse() &&
14656              "Counter decrement has more than one use");
14657 
14658       return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
14659                          N->getOperand(0), N->getOperand(4));
14660     }
14661 
14662     int CompareOpc;
14663     bool isDot;
14664 
14665     if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
14666         isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
14667         getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
14668       assert(isDot && "Can't compare against a vector result!");
14669 
14670       // If this is a comparison against something other than 0/1, then we know
14671       // that the condition is never/always true.
14672       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
14673       if (Val != 0 && Val != 1) {
14674         if (CC == ISD::SETEQ)      // Cond never true, remove branch.
14675           return N->getOperand(0);
14676         // Always !=, turn it into an unconditional branch.
14677         return DAG.getNode(ISD::BR, dl, MVT::Other,
14678                            N->getOperand(0), N->getOperand(4));
14679       }
14680 
14681       bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
14682 
14683       // Create the PPCISD altivec 'dot' comparison node.
14684       SDValue Ops[] = {
14685         LHS.getOperand(2),  // LHS of compare
14686         LHS.getOperand(3),  // RHS of compare
14687         DAG.getConstant(CompareOpc, dl, MVT::i32)
14688       };
14689       EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
14690       SDValue CompNode = DAG.getNode(PPCISD::VCMP_rec, dl, VTs, Ops);
14691 
14692       // Unpack the result based on how the target uses it.
14693       PPC::Predicate CompOpc;
14694       switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
14695       default:  // Can't happen, don't crash on invalid number though.
14696       case 0:   // Branch on the value of the EQ bit of CR6.
14697         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
14698         break;
14699       case 1:   // Branch on the inverted value of the EQ bit of CR6.
14700         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
14701         break;
14702       case 2:   // Branch on the value of the LT bit of CR6.
14703         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
14704         break;
14705       case 3:   // Branch on the inverted value of the LT bit of CR6.
14706         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
14707         break;
14708       }
14709 
14710       return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
14711                          DAG.getConstant(CompOpc, dl, MVT::i32),
14712                          DAG.getRegister(PPC::CR6, MVT::i32),
14713                          N->getOperand(4), CompNode.getValue(1));
14714     }
14715     break;
14716   }
14717   case ISD::BUILD_VECTOR:
14718     return DAGCombineBuildVector(N, DCI);
14719   case ISD::ABS:
14720     return combineABS(N, DCI);
14721   case ISD::VSELECT:
14722     return combineVSelect(N, DCI);
14723   }
14724 
14725   return SDValue();
14726 }
14727 
14728 SDValue
14729 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
14730                                  SelectionDAG &DAG,
14731                                  SmallVectorImpl<SDNode *> &Created) const {
14732   // fold (sdiv X, pow2)
14733   EVT VT = N->getValueType(0);
14734   if (VT == MVT::i64 && !Subtarget.isPPC64())
14735     return SDValue();
14736   if ((VT != MVT::i32 && VT != MVT::i64) ||
14737       !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
14738     return SDValue();
14739 
14740   SDLoc DL(N);
14741   SDValue N0 = N->getOperand(0);
14742 
14743   bool IsNegPow2 = (-Divisor).isPowerOf2();
14744   unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
14745   SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);
14746 
14747   SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
14748   Created.push_back(Op.getNode());
14749 
14750   if (IsNegPow2) {
14751     Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
14752     Created.push_back(Op.getNode());
14753   }
14754 
14755   return Op;
14756 }
14757 
14758 //===----------------------------------------------------------------------===//
14759 // Inline Assembly Support
14760 //===----------------------------------------------------------------------===//
14761 
14762 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
14763                                                       KnownBits &Known,
14764                                                       const APInt &DemandedElts,
14765                                                       const SelectionDAG &DAG,
14766                                                       unsigned Depth) const {
14767   Known.resetAll();
14768   switch (Op.getOpcode()) {
14769   default: break;
14770   case PPCISD::LBRX: {
14771     // lhbrx is known to have the top bits cleared out.
14772     if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
14773       Known.Zero = 0xFFFF0000;
14774     break;
14775   }
14776   case ISD::INTRINSIC_WO_CHAIN: {
14777     switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
14778     default: break;
14779     case Intrinsic::ppc_altivec_vcmpbfp_p:
14780     case Intrinsic::ppc_altivec_vcmpeqfp_p:
14781     case Intrinsic::ppc_altivec_vcmpequb_p:
14782     case Intrinsic::ppc_altivec_vcmpequh_p:
14783     case Intrinsic::ppc_altivec_vcmpequw_p:
14784     case Intrinsic::ppc_altivec_vcmpequd_p:
14785     case Intrinsic::ppc_altivec_vcmpequq_p:
14786     case Intrinsic::ppc_altivec_vcmpgefp_p:
14787     case Intrinsic::ppc_altivec_vcmpgtfp_p:
14788     case Intrinsic::ppc_altivec_vcmpgtsb_p:
14789     case Intrinsic::ppc_altivec_vcmpgtsh_p:
14790     case Intrinsic::ppc_altivec_vcmpgtsw_p:
14791     case Intrinsic::ppc_altivec_vcmpgtsd_p:
14792     case Intrinsic::ppc_altivec_vcmpgtsq_p:
14793     case Intrinsic::ppc_altivec_vcmpgtub_p:
14794     case Intrinsic::ppc_altivec_vcmpgtuh_p:
14795     case Intrinsic::ppc_altivec_vcmpgtuw_p:
14796     case Intrinsic::ppc_altivec_vcmpgtud_p:
14797     case Intrinsic::ppc_altivec_vcmpgtuq_p:
14798       Known.Zero = ~1U;  // All bits but the low one are known to be zero.
14799       break;
14800     }
14801   }
14802   }
14803 }
14804 
14805 Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
14806   switch (Subtarget.getCPUDirective()) {
14807   default: break;
14808   case PPC::DIR_970:
14809   case PPC::DIR_PWR4:
14810   case PPC::DIR_PWR5:
14811   case PPC::DIR_PWR5X:
14812   case PPC::DIR_PWR6:
14813   case PPC::DIR_PWR6X:
14814   case PPC::DIR_PWR7:
14815   case PPC::DIR_PWR8:
14816   case PPC::DIR_PWR9:
14817   case PPC::DIR_PWR10:
14818   case PPC::DIR_PWR_FUTURE: {
14819     if (!ML)
14820       break;
14821 
14822     if (!DisableInnermostLoopAlign32) {
14823       // If the nested loop is an innermost loop, prefer to a 32-byte alignment,
14824       // so that we can decrease cache misses and branch-prediction misses.
14825       // Actual alignment of the loop will depend on the hotness check and other
14826       // logic in alignBlocks.
14827       if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
14828         return Align(32);
14829     }
14830 
14831     const PPCInstrInfo *TII = Subtarget.getInstrInfo();
14832 
14833     // For small loops (between 5 and 8 instructions), align to a 32-byte
14834     // boundary so that the entire loop fits in one instruction-cache line.
14835     uint64_t LoopSize = 0;
14836     for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
14837       for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
14838         LoopSize += TII->getInstSizeInBytes(*J);
14839         if (LoopSize > 32)
14840           break;
14841       }
14842 
14843     if (LoopSize > 16 && LoopSize <= 32)
14844       return Align(32);
14845 
14846     break;
14847   }
14848   }
14849 
14850   return TargetLowering::getPrefLoopAlignment(ML);
14851 }
14852 
14853 /// getConstraintType - Given a constraint, return the type of
14854 /// constraint it is for this target.
14855 PPCTargetLowering::ConstraintType
14856 PPCTargetLowering::getConstraintType(StringRef Constraint) const {
14857   if (Constraint.size() == 1) {
14858     switch (Constraint[0]) {
14859     default: break;
14860     case 'b':
14861     case 'r':
14862     case 'f':
14863     case 'd':
14864     case 'v':
14865     case 'y':
14866       return C_RegisterClass;
14867     case 'Z':
14868       // FIXME: While Z does indicate a memory constraint, it specifically
14869       // indicates an r+r address (used in conjunction with the 'y' modifier
14870       // in the replacement string). Currently, we're forcing the base
14871       // register to be r0 in the asm printer (which is interpreted as zero)
14872       // and forming the complete address in the second register. This is
14873       // suboptimal.
14874       return C_Memory;
14875     }
14876   } else if (Constraint == "wc") { // individual CR bits.
14877     return C_RegisterClass;
14878   } else if (Constraint == "wa" || Constraint == "wd" ||
14879              Constraint == "wf" || Constraint == "ws" ||
14880              Constraint == "wi" || Constraint == "ww") {
14881     return C_RegisterClass; // VSX registers.
14882   }
14883   return TargetLowering::getConstraintType(Constraint);
14884 }
14885 
14886 /// Examine constraint type and operand type and determine a weight value.
14887 /// This object must already have been set up with the operand type
14888 /// and the current alternative constraint selected.
14889 TargetLowering::ConstraintWeight
14890 PPCTargetLowering::getSingleConstraintMatchWeight(
14891     AsmOperandInfo &info, const char *constraint) const {
14892   ConstraintWeight weight = CW_Invalid;
14893   Value *CallOperandVal = info.CallOperandVal;
14894     // If we don't have a value, we can't do a match,
14895     // but allow it at the lowest weight.
14896   if (!CallOperandVal)
14897     return CW_Default;
14898   Type *type = CallOperandVal->getType();
14899 
14900   // Look at the constraint type.
14901   if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
14902     return CW_Register; // an individual CR bit.
14903   else if ((StringRef(constraint) == "wa" ||
14904             StringRef(constraint) == "wd" ||
14905             StringRef(constraint) == "wf") &&
14906            type->isVectorTy())
14907     return CW_Register;
14908   else if (StringRef(constraint) == "wi" && type->isIntegerTy(64))
14909     return CW_Register; // just hold 64-bit integers data.
14910   else if (StringRef(constraint) == "ws" && type->isDoubleTy())
14911     return CW_Register;
14912   else if (StringRef(constraint) == "ww" && type->isFloatTy())
14913     return CW_Register;
14914 
14915   switch (*constraint) {
14916   default:
14917     weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
14918     break;
14919   case 'b':
14920     if (type->isIntegerTy())
14921       weight = CW_Register;
14922     break;
14923   case 'f':
14924     if (type->isFloatTy())
14925       weight = CW_Register;
14926     break;
14927   case 'd':
14928     if (type->isDoubleTy())
14929       weight = CW_Register;
14930     break;
14931   case 'v':
14932     if (type->isVectorTy())
14933       weight = CW_Register;
14934     break;
14935   case 'y':
14936     weight = CW_Register;
14937     break;
14938   case 'Z':
14939     weight = CW_Memory;
14940     break;
14941   }
14942   return weight;
14943 }
14944 
14945 std::pair<unsigned, const TargetRegisterClass *>
14946 PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
14947                                                 StringRef Constraint,
14948                                                 MVT VT) const {
14949   if (Constraint.size() == 1) {
14950     // GCC RS6000 Constraint Letters
14951     switch (Constraint[0]) {
14952     case 'b':   // R1-R31
14953       if (VT == MVT::i64 && Subtarget.isPPC64())
14954         return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
14955       return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
14956     case 'r':   // R0-R31
14957       if (VT == MVT::i64 && Subtarget.isPPC64())
14958         return std::make_pair(0U, &PPC::G8RCRegClass);
14959       return std::make_pair(0U, &PPC::GPRCRegClass);
14960     // 'd' and 'f' constraints are both defined to be "the floating point
14961     // registers", where one is for 32-bit and the other for 64-bit. We don't
14962     // really care overly much here so just give them all the same reg classes.
14963     case 'd':
14964     case 'f':
14965       if (Subtarget.hasSPE()) {
14966         if (VT == MVT::f32 || VT == MVT::i32)
14967           return std::make_pair(0U, &PPC::GPRCRegClass);
14968         if (VT == MVT::f64 || VT == MVT::i64)
14969           return std::make_pair(0U, &PPC::SPERCRegClass);
14970       } else {
14971         if (VT == MVT::f32 || VT == MVT::i32)
14972           return std::make_pair(0U, &PPC::F4RCRegClass);
14973         if (VT == MVT::f64 || VT == MVT::i64)
14974           return std::make_pair(0U, &PPC::F8RCRegClass);
14975       }
14976       break;
14977     case 'v':
14978       if (Subtarget.hasAltivec())
14979         return std::make_pair(0U, &PPC::VRRCRegClass);
14980       break;
14981     case 'y':   // crrc
14982       return std::make_pair(0U, &PPC::CRRCRegClass);
14983     }
14984   } else if (Constraint == "wc" && Subtarget.useCRBits()) {
14985     // An individual CR bit.
14986     return std::make_pair(0U, &PPC::CRBITRCRegClass);
14987   } else if ((Constraint == "wa" || Constraint == "wd" ||
14988              Constraint == "wf" || Constraint == "wi") &&
14989              Subtarget.hasVSX()) {
14990     return std::make_pair(0U, &PPC::VSRCRegClass);
14991   } else if ((Constraint == "ws" || Constraint == "ww") && Subtarget.hasVSX()) {
14992     if (VT == MVT::f32 && Subtarget.hasP8Vector())
14993       return std::make_pair(0U, &PPC::VSSRCRegClass);
14994     else
14995       return std::make_pair(0U, &PPC::VSFRCRegClass);
14996   }
14997 
14998   // If we name a VSX register, we can't defer to the base class because it
14999   // will not recognize the correct register (their names will be VSL{0-31}
15000   // and V{0-31} so they won't match). So we match them here.
15001   if (Constraint.size() > 3 && Constraint[1] == 'v' && Constraint[2] == 's') {
15002     int VSNum = atoi(Constraint.data() + 3);
15003     assert(VSNum >= 0 && VSNum <= 63 &&
15004            "Attempted to access a vsr out of range");
15005     if (VSNum < 32)
15006       return std::make_pair(PPC::VSL0 + VSNum, &PPC::VSRCRegClass);
15007     return std::make_pair(PPC::V0 + VSNum - 32, &PPC::VSRCRegClass);
15008   }
15009   std::pair<unsigned, const TargetRegisterClass *> R =
15010       TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
15011 
15012   // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
15013   // (which we call X[0-9]+). If a 64-bit value has been requested, and a
15014   // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent
15015   // register.
15016   // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
15017   // the AsmName field from *RegisterInfo.td, then this would not be necessary.
15018   if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
15019       PPC::GPRCRegClass.contains(R.first))
15020     return std::make_pair(TRI->getMatchingSuperReg(R.first,
15021                             PPC::sub_32, &PPC::G8RCRegClass),
15022                           &PPC::G8RCRegClass);
15023 
15024   // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
15025   if (!R.second && StringRef("{cc}").equals_lower(Constraint)) {
15026     R.first = PPC::CR0;
15027     R.second = &PPC::CRRCRegClass;
15028   }
15029 
15030   return R;
15031 }
15032 
15033 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
15034 /// vector.  If it is invalid, don't add anything to Ops.
15035 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
15036                                                      std::string &Constraint,
15037                                                      std::vector<SDValue>&Ops,
15038                                                      SelectionDAG &DAG) const {
15039   SDValue Result;
15040 
15041   // Only support length 1 constraints.
15042   if (Constraint.length() > 1) return;
15043 
15044   char Letter = Constraint[0];
15045   switch (Letter) {
15046   default: break;
15047   case 'I':
15048   case 'J':
15049   case 'K':
15050   case 'L':
15051   case 'M':
15052   case 'N':
15053   case 'O':
15054   case 'P': {
15055     ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
15056     if (!CST) return; // Must be an immediate to match.
15057     SDLoc dl(Op);
15058     int64_t Value = CST->getSExtValue();
15059     EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
15060                          // numbers are printed as such.
15061     switch (Letter) {
15062     default: llvm_unreachable("Unknown constraint letter!");
15063     case 'I':  // "I" is a signed 16-bit constant.
15064       if (isInt<16>(Value))
15065         Result = DAG.getTargetConstant(Value, dl, TCVT);
15066       break;
15067     case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
15068       if (isShiftedUInt<16, 16>(Value))
15069         Result = DAG.getTargetConstant(Value, dl, TCVT);
15070       break;
15071     case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
15072       if (isShiftedInt<16, 16>(Value))
15073         Result = DAG.getTargetConstant(Value, dl, TCVT);
15074       break;
15075     case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
15076       if (isUInt<16>(Value))
15077         Result = DAG.getTargetConstant(Value, dl, TCVT);
15078       break;
15079     case 'M':  // "M" is a constant that is greater than 31.
15080       if (Value > 31)
15081         Result = DAG.getTargetConstant(Value, dl, TCVT);
15082       break;
15083     case 'N':  // "N" is a positive constant that is an exact power of two.
15084       if (Value > 0 && isPowerOf2_64(Value))
15085         Result = DAG.getTargetConstant(Value, dl, TCVT);
15086       break;
15087     case 'O':  // "O" is the constant zero.
15088       if (Value == 0)
15089         Result = DAG.getTargetConstant(Value, dl, TCVT);
15090       break;
15091     case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
15092       if (isInt<16>(-Value))
15093         Result = DAG.getTargetConstant(Value, dl, TCVT);
15094       break;
15095     }
15096     break;
15097   }
15098   }
15099 
15100   if (Result.getNode()) {
15101     Ops.push_back(Result);
15102     return;
15103   }
15104 
15105   // Handle standard constraint letters.
15106   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
15107 }
15108 
15109 // isLegalAddressingMode - Return true if the addressing mode represented
15110 // by AM is legal for this target, for a load/store of the specified type.
15111 bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
15112                                               const AddrMode &AM, Type *Ty,
15113                                               unsigned AS,
15114                                               Instruction *I) const {
15115   // Vector type r+i form is supported since power9 as DQ form. We don't check
15116   // the offset matching DQ form requirement(off % 16 == 0), because on PowerPC,
15117   // imm form is preferred and the offset can be adjusted to use imm form later
15118   // in pass PPCLoopInstrFormPrep. Also in LSR, for one LSRUse, it uses min and
15119   // max offset to check legal addressing mode, we should be a little aggressive
15120   // to contain other offsets for that LSRUse.
15121   if (Ty->isVectorTy() && AM.BaseOffs != 0 && !Subtarget.hasP9Vector())
15122     return false;
15123 
15124   // PPC allows a sign-extended 16-bit immediate field.
15125   if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
15126     return false;
15127 
15128   // No global is ever allowed as a base.
15129   if (AM.BaseGV)
15130     return false;
15131 
15132   // PPC only support r+r,
15133   switch (AM.Scale) {
15134   case 0:  // "r+i" or just "i", depending on HasBaseReg.
15135     break;
15136   case 1:
15137     if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
15138       return false;
15139     // Otherwise we have r+r or r+i.
15140     break;
15141   case 2:
15142     if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r  or  2*r+i is not allowed.
15143       return false;
15144     // Allow 2*r as r+r.
15145     break;
15146   default:
15147     // No other scales are supported.
15148     return false;
15149   }
15150 
15151   return true;
15152 }
15153 
15154 SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
15155                                            SelectionDAG &DAG) const {
15156   MachineFunction &MF = DAG.getMachineFunction();
15157   MachineFrameInfo &MFI = MF.getFrameInfo();
15158   MFI.setReturnAddressIsTaken(true);
15159 
15160   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
15161     return SDValue();
15162 
15163   SDLoc dl(Op);
15164   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
15165 
15166   // Make sure the function does not optimize away the store of the RA to
15167   // the stack.
15168   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
15169   FuncInfo->setLRStoreRequired();
15170   bool isPPC64 = Subtarget.isPPC64();
15171   auto PtrVT = getPointerTy(MF.getDataLayout());
15172 
15173   if (Depth > 0) {
15174     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
15175     SDValue Offset =
15176         DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
15177                         isPPC64 ? MVT::i64 : MVT::i32);
15178     return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
15179                        DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
15180                        MachinePointerInfo());
15181   }
15182 
15183   // Just load the return address off the stack.
15184   SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
15185   return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
15186                      MachinePointerInfo());
15187 }
15188 
15189 SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
15190                                           SelectionDAG &DAG) const {
15191   SDLoc dl(Op);
15192   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
15193 
15194   MachineFunction &MF = DAG.getMachineFunction();
15195   MachineFrameInfo &MFI = MF.getFrameInfo();
15196   MFI.setFrameAddressIsTaken(true);
15197 
15198   EVT PtrVT = getPointerTy(MF.getDataLayout());
15199   bool isPPC64 = PtrVT == MVT::i64;
15200 
15201   // Naked functions never have a frame pointer, and so we use r1. For all
15202   // other functions, this decision must be delayed until during PEI.
15203   unsigned FrameReg;
15204   if (MF.getFunction().hasFnAttribute(Attribute::Naked))
15205     FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
15206   else
15207     FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;
15208 
15209   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
15210                                          PtrVT);
15211   while (Depth--)
15212     FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
15213                             FrameAddr, MachinePointerInfo());
15214   return FrameAddr;
15215 }
15216 
15217 // FIXME? Maybe this could be a TableGen attribute on some registers and
15218 // this table could be generated automatically from RegInfo.
15219 Register PPCTargetLowering::getRegisterByName(const char* RegName, LLT VT,
15220                                               const MachineFunction &MF) const {
15221   bool isPPC64 = Subtarget.isPPC64();
15222 
15223   bool is64Bit = isPPC64 && VT == LLT::scalar(64);
15224   if (!is64Bit && VT != LLT::scalar(32))
15225     report_fatal_error("Invalid register global variable type");
15226 
15227   Register Reg = StringSwitch<Register>(RegName)
15228                      .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
15229                      .Case("r2", isPPC64 ? Register() : PPC::R2)
15230                      .Case("r13", (is64Bit ? PPC::X13 : PPC::R13))
15231                      .Default(Register());
15232 
15233   if (Reg)
15234     return Reg;
15235   report_fatal_error("Invalid register name global variable");
15236 }
15237 
15238 bool PPCTargetLowering::isAccessedAsGotIndirect(SDValue GA) const {
15239   // 32-bit SVR4 ABI access everything as got-indirect.
15240   if (Subtarget.is32BitELFABI())
15241     return true;
15242 
15243   // AIX accesses everything indirectly through the TOC, which is similar to
15244   // the GOT.
15245   if (Subtarget.isAIXABI())
15246     return true;
15247 
15248   CodeModel::Model CModel = getTargetMachine().getCodeModel();
15249   // If it is small or large code model, module locals are accessed
15250   // indirectly by loading their address from .toc/.got.
15251   if (CModel == CodeModel::Small || CModel == CodeModel::Large)
15252     return true;
15253 
15254   // JumpTable and BlockAddress are accessed as got-indirect.
15255   if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA))
15256     return true;
15257 
15258   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA))
15259     return Subtarget.isGVIndirectSymbol(G->getGlobal());
15260 
15261   return false;
15262 }
15263 
15264 bool
15265 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
15266   // The PowerPC target isn't yet aware of offsets.
15267   return false;
15268 }
15269 
15270 bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
15271                                            const CallInst &I,
15272                                            MachineFunction &MF,
15273                                            unsigned Intrinsic) const {
15274   switch (Intrinsic) {
15275   case Intrinsic::ppc_altivec_lvx:
15276   case Intrinsic::ppc_altivec_lvxl:
15277   case Intrinsic::ppc_altivec_lvebx:
15278   case Intrinsic::ppc_altivec_lvehx:
15279   case Intrinsic::ppc_altivec_lvewx:
15280   case Intrinsic::ppc_vsx_lxvd2x:
15281   case Intrinsic::ppc_vsx_lxvw4x:
15282   case Intrinsic::ppc_vsx_lxvd2x_be:
15283   case Intrinsic::ppc_vsx_lxvw4x_be:
15284   case Intrinsic::ppc_vsx_lxvl:
15285   case Intrinsic::ppc_vsx_lxvll: {
15286     EVT VT;
15287     switch (Intrinsic) {
15288     case Intrinsic::ppc_altivec_lvebx:
15289       VT = MVT::i8;
15290       break;
15291     case Intrinsic::ppc_altivec_lvehx:
15292       VT = MVT::i16;
15293       break;
15294     case Intrinsic::ppc_altivec_lvewx:
15295       VT = MVT::i32;
15296       break;
15297     case Intrinsic::ppc_vsx_lxvd2x:
15298     case Intrinsic::ppc_vsx_lxvd2x_be:
15299       VT = MVT::v2f64;
15300       break;
15301     default:
15302       VT = MVT::v4i32;
15303       break;
15304     }
15305 
15306     Info.opc = ISD::INTRINSIC_W_CHAIN;
15307     Info.memVT = VT;
15308     Info.ptrVal = I.getArgOperand(0);
15309     Info.offset = -VT.getStoreSize()+1;
15310     Info.size = 2*VT.getStoreSize()-1;
15311     Info.align = Align(1);
15312     Info.flags = MachineMemOperand::MOLoad;
15313     return true;
15314   }
15315   case Intrinsic::ppc_altivec_stvx:
15316   case Intrinsic::ppc_altivec_stvxl:
15317   case Intrinsic::ppc_altivec_stvebx:
15318   case Intrinsic::ppc_altivec_stvehx:
15319   case Intrinsic::ppc_altivec_stvewx:
15320   case Intrinsic::ppc_vsx_stxvd2x:
15321   case Intrinsic::ppc_vsx_stxvw4x:
15322   case Intrinsic::ppc_vsx_stxvd2x_be:
15323   case Intrinsic::ppc_vsx_stxvw4x_be:
15324   case Intrinsic::ppc_vsx_stxvl:
15325   case Intrinsic::ppc_vsx_stxvll: {
15326     EVT VT;
15327     switch (Intrinsic) {
15328     case Intrinsic::ppc_altivec_stvebx:
15329       VT = MVT::i8;
15330       break;
15331     case Intrinsic::ppc_altivec_stvehx:
15332       VT = MVT::i16;
15333       break;
15334     case Intrinsic::ppc_altivec_stvewx:
15335       VT = MVT::i32;
15336       break;
15337     case Intrinsic::ppc_vsx_stxvd2x:
15338     case Intrinsic::ppc_vsx_stxvd2x_be:
15339       VT = MVT::v2f64;
15340       break;
15341     default:
15342       VT = MVT::v4i32;
15343       break;
15344     }
15345 
15346     Info.opc = ISD::INTRINSIC_VOID;
15347     Info.memVT = VT;
15348     Info.ptrVal = I.getArgOperand(1);
15349     Info.offset = -VT.getStoreSize()+1;
15350     Info.size = 2*VT.getStoreSize()-1;
15351     Info.align = Align(1);
15352     Info.flags = MachineMemOperand::MOStore;
15353     return true;
15354   }
15355   default:
15356     break;
15357   }
15358 
15359   return false;
15360 }
15361 
15362 /// It returns EVT::Other if the type should be determined using generic
15363 /// target-independent logic.
15364 EVT PPCTargetLowering::getOptimalMemOpType(
15365     const MemOp &Op, const AttributeList &FuncAttributes) const {
15366   if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
15367     // We should use Altivec/VSX loads and stores when available. For unaligned
15368     // addresses, unaligned VSX loads are only fast starting with the P8.
15369     if (Subtarget.hasAltivec() && Op.size() >= 16 &&
15370         (Op.isAligned(Align(16)) ||
15371          ((Op.isMemset() && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
15372       return MVT::v4i32;
15373   }
15374 
15375   if (Subtarget.isPPC64()) {
15376     return MVT::i64;
15377   }
15378 
15379   return MVT::i32;
15380 }
15381 
15382 /// Returns true if it is beneficial to convert a load of a constant
15383 /// to just the constant itself.
15384 bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
15385                                                           Type *Ty) const {
15386   assert(Ty->isIntegerTy());
15387 
15388   unsigned BitSize = Ty->getPrimitiveSizeInBits();
15389   return !(BitSize == 0 || BitSize > 64);
15390 }
15391 
15392 bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
15393   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
15394     return false;
15395   unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
15396   unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
15397   return NumBits1 == 64 && NumBits2 == 32;
15398 }
15399 
15400 bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
15401   if (!VT1.isInteger() || !VT2.isInteger())
15402     return false;
15403   unsigned NumBits1 = VT1.getSizeInBits();
15404   unsigned NumBits2 = VT2.getSizeInBits();
15405   return NumBits1 == 64 && NumBits2 == 32;
15406 }
15407 
15408 bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
15409   // Generally speaking, zexts are not free, but they are free when they can be
15410   // folded with other operations.
15411   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
15412     EVT MemVT = LD->getMemoryVT();
15413     if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
15414          (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
15415         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
15416          LD->getExtensionType() == ISD::ZEXTLOAD))
15417       return true;
15418   }
15419 
15420   // FIXME: Add other cases...
15421   //  - 32-bit shifts with a zext to i64
15422   //  - zext after ctlz, bswap, etc.
15423   //  - zext after and by a constant mask
15424 
15425   return TargetLowering::isZExtFree(Val, VT2);
15426 }
15427 
15428 bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
15429   assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
15430          "invalid fpext types");
15431   // Extending to float128 is not free.
15432   if (DestVT == MVT::f128)
15433     return false;
15434   return true;
15435 }
15436 
15437 bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
15438   return isInt<16>(Imm) || isUInt<16>(Imm);
15439 }
15440 
15441 bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
15442   return isInt<16>(Imm) || isUInt<16>(Imm);
15443 }
15444 
15445 bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
15446                                                        unsigned,
15447                                                        unsigned,
15448                                                        MachineMemOperand::Flags,
15449                                                        bool *Fast) const {
15450   if (DisablePPCUnaligned)
15451     return false;
15452 
15453   // PowerPC supports unaligned memory access for simple non-vector types.
15454   // Although accessing unaligned addresses is not as efficient as accessing
15455   // aligned addresses, it is generally more efficient than manual expansion,
15456   // and generally only traps for software emulation when crossing page
15457   // boundaries.
15458 
15459   if (!VT.isSimple())
15460     return false;
15461 
15462   if (VT.isFloatingPoint() && !VT.isVector() &&
15463       !Subtarget.allowsUnalignedFPAccess())
15464     return false;
15465 
15466   if (VT.getSimpleVT().isVector()) {
15467     if (Subtarget.hasVSX()) {
15468       if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
15469           VT != MVT::v4f32 && VT != MVT::v4i32)
15470         return false;
15471     } else {
15472       return false;
15473     }
15474   }
15475 
15476   if (VT == MVT::ppcf128)
15477     return false;
15478 
15479   if (Fast)
15480     *Fast = true;
15481 
15482   return true;
15483 }
15484 
15485 bool PPCTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
15486                                                SDValue C) const {
15487   // Check integral scalar types.
15488   if (!VT.isScalarInteger())
15489     return false;
15490   if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
15491     if (!ConstNode->getAPIntValue().isSignedIntN(64))
15492       return false;
15493     // This transformation will generate >= 2 operations. But the following
15494     // cases will generate <= 2 instructions during ISEL. So exclude them.
15495     // 1. If the constant multiplier fits 16 bits, it can be handled by one
15496     // HW instruction, ie. MULLI
15497     // 2. If the multiplier after shifted fits 16 bits, an extra shift
15498     // instruction is needed than case 1, ie. MULLI and RLDICR
15499     int64_t Imm = ConstNode->getSExtValue();
15500     unsigned Shift = countTrailingZeros<uint64_t>(Imm);
15501     Imm >>= Shift;
15502     if (isInt<16>(Imm))
15503       return false;
15504     uint64_t UImm = static_cast<uint64_t>(Imm);
15505     if (isPowerOf2_64(UImm + 1) || isPowerOf2_64(UImm - 1) ||
15506         isPowerOf2_64(1 - UImm) || isPowerOf2_64(-1 - UImm))
15507       return true;
15508   }
15509   return false;
15510 }
15511 
15512 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
15513                                                    EVT VT) const {
15514   return isFMAFasterThanFMulAndFAdd(
15515       MF.getFunction(), VT.getTypeForEVT(MF.getFunction().getContext()));
15516 }
15517 
15518 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const Function &F,
15519                                                    Type *Ty) const {
15520   switch (Ty->getScalarType()->getTypeID()) {
15521   case Type::FloatTyID:
15522   case Type::DoubleTyID:
15523     return true;
15524   case Type::FP128TyID:
15525     return Subtarget.hasP9Vector();
15526   default:
15527     return false;
15528   }
15529 }
15530 
15531 // FIXME: add more patterns which are not profitable to hoist.
15532 bool PPCTargetLowering::isProfitableToHoist(Instruction *I) const {
15533   if (!I->hasOneUse())
15534     return true;
15535 
15536   Instruction *User = I->user_back();
15537   assert(User && "A single use instruction with no uses.");
15538 
15539   switch (I->getOpcode()) {
15540   case Instruction::FMul: {
15541     // Don't break FMA, PowerPC prefers FMA.
15542     if (User->getOpcode() != Instruction::FSub &&
15543         User->getOpcode() != Instruction::FAdd)
15544       return true;
15545 
15546     const TargetOptions &Options = getTargetMachine().Options;
15547     const Function *F = I->getFunction();
15548     const DataLayout &DL = F->getParent()->getDataLayout();
15549     Type *Ty = User->getOperand(0)->getType();
15550 
15551     return !(
15552         isFMAFasterThanFMulAndFAdd(*F, Ty) &&
15553         isOperationLegalOrCustom(ISD::FMA, getValueType(DL, Ty)) &&
15554         (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath));
15555   }
15556   case Instruction::Load: {
15557     // Don't break "store (load float*)" pattern, this pattern will be combined
15558     // to "store (load int32)" in later InstCombine pass. See function
15559     // combineLoadToOperationType. On PowerPC, loading a float point takes more
15560     // cycles than loading a 32 bit integer.
15561     LoadInst *LI = cast<LoadInst>(I);
15562     // For the loads that combineLoadToOperationType does nothing, like
15563     // ordered load, it should be profitable to hoist them.
15564     // For swifterror load, it can only be used for pointer to pointer type, so
15565     // later type check should get rid of this case.
15566     if (!LI->isUnordered())
15567       return true;
15568 
15569     if (User->getOpcode() != Instruction::Store)
15570       return true;
15571 
15572     if (I->getType()->getTypeID() != Type::FloatTyID)
15573       return true;
15574 
15575     return false;
15576   }
15577   default:
15578     return true;
15579   }
15580   return true;
15581 }
15582 
15583 const MCPhysReg *
15584 PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
15585   // LR is a callee-save register, but we must treat it as clobbered by any call
15586   // site. Hence we include LR in the scratch registers, which are in turn added
15587   // as implicit-defs for stackmaps and patchpoints. The same reasoning applies
15588   // to CTR, which is used by any indirect call.
15589   static const MCPhysReg ScratchRegs[] = {
15590     PPC::X12, PPC::LR8, PPC::CTR8, 0
15591   };
15592 
15593   return ScratchRegs;
15594 }
15595 
15596 Register PPCTargetLowering::getExceptionPointerRegister(
15597     const Constant *PersonalityFn) const {
15598   return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
15599 }
15600 
15601 Register PPCTargetLowering::getExceptionSelectorRegister(
15602     const Constant *PersonalityFn) const {
15603   return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
15604 }
15605 
15606 bool
15607 PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
15608                      EVT VT , unsigned DefinedValues) const {
15609   if (VT == MVT::v2i64)
15610     return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves
15611 
15612   if (Subtarget.hasVSX())
15613     return true;
15614 
15615   return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
15616 }
15617 
15618 Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
15619   if (DisableILPPref || Subtarget.enableMachineScheduler())
15620     return TargetLowering::getSchedulingPreference(N);
15621 
15622   return Sched::ILP;
15623 }
15624 
15625 // Create a fast isel object.
15626 FastISel *
15627 PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
15628                                   const TargetLibraryInfo *LibInfo) const {
15629   return PPC::createFastISel(FuncInfo, LibInfo);
15630 }
15631 
15632 // 'Inverted' means the FMA opcode after negating one multiplicand.
15633 // For example, (fma -a b c) = (fnmsub a b c)
15634 static unsigned invertFMAOpcode(unsigned Opc) {
15635   switch (Opc) {
15636   default:
15637     llvm_unreachable("Invalid FMA opcode for PowerPC!");
15638   case ISD::FMA:
15639     return PPCISD::FNMSUB;
15640   case PPCISD::FNMSUB:
15641     return ISD::FMA;
15642   }
15643 }
15644 
15645 SDValue PPCTargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
15646                                                 bool LegalOps, bool OptForSize,
15647                                                 NegatibleCost &Cost,
15648                                                 unsigned Depth) const {
15649   if (Depth > SelectionDAG::MaxRecursionDepth)
15650     return SDValue();
15651 
15652   unsigned Opc = Op.getOpcode();
15653   EVT VT = Op.getValueType();
15654   SDNodeFlags Flags = Op.getNode()->getFlags();
15655 
15656   switch (Opc) {
15657   case PPCISD::FNMSUB:
15658     if (!Op.hasOneUse() || !isTypeLegal(VT))
15659       break;
15660 
15661     const TargetOptions &Options = getTargetMachine().Options;
15662     SDValue N0 = Op.getOperand(0);
15663     SDValue N1 = Op.getOperand(1);
15664     SDValue N2 = Op.getOperand(2);
15665     SDLoc Loc(Op);
15666 
15667     NegatibleCost N2Cost = NegatibleCost::Expensive;
15668     SDValue NegN2 =
15669         getNegatedExpression(N2, DAG, LegalOps, OptForSize, N2Cost, Depth + 1);
15670 
15671     if (!NegN2)
15672       return SDValue();
15673 
15674     // (fneg (fnmsub a b c)) => (fnmsub (fneg a) b (fneg c))
15675     // (fneg (fnmsub a b c)) => (fnmsub a (fneg b) (fneg c))
15676     // These transformations may change sign of zeroes. For example,
15677     // -(-ab-(-c))=-0 while -(-(ab-c))=+0 when a=b=c=1.
15678     if (Flags.hasNoSignedZeros() || Options.NoSignedZerosFPMath) {
15679       // Try and choose the cheaper one to negate.
15680       NegatibleCost N0Cost = NegatibleCost::Expensive;
15681       SDValue NegN0 = getNegatedExpression(N0, DAG, LegalOps, OptForSize,
15682                                            N0Cost, Depth + 1);
15683 
15684       NegatibleCost N1Cost = NegatibleCost::Expensive;
15685       SDValue NegN1 = getNegatedExpression(N1, DAG, LegalOps, OptForSize,
15686                                            N1Cost, Depth + 1);
15687 
15688       if (NegN0 && N0Cost <= N1Cost) {
15689         Cost = std::min(N0Cost, N2Cost);
15690         return DAG.getNode(Opc, Loc, VT, NegN0, N1, NegN2, Flags);
15691       } else if (NegN1) {
15692         Cost = std::min(N1Cost, N2Cost);
15693         return DAG.getNode(Opc, Loc, VT, N0, NegN1, NegN2, Flags);
15694       }
15695     }
15696 
15697     // (fneg (fnmsub a b c)) => (fma a b (fneg c))
15698     if (isOperationLegal(ISD::FMA, VT)) {
15699       Cost = N2Cost;
15700       return DAG.getNode(ISD::FMA, Loc, VT, N0, N1, NegN2, Flags);
15701     }
15702 
15703     break;
15704   }
15705 
15706   return TargetLowering::getNegatedExpression(Op, DAG, LegalOps, OptForSize,
15707                                               Cost, Depth);
15708 }
15709 
15710 // Override to enable LOAD_STACK_GUARD lowering on Linux.
15711 bool PPCTargetLowering::useLoadStackGuardNode() const {
15712   if (!Subtarget.isTargetLinux())
15713     return TargetLowering::useLoadStackGuardNode();
15714   return true;
15715 }
15716 
15717 // Override to disable global variable loading on Linux.
15718 void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
15719   if (!Subtarget.isTargetLinux())
15720     return TargetLowering::insertSSPDeclarations(M);
15721 }
15722 
15723 bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
15724                                      bool ForCodeSize) const {
15725   if (!VT.isSimple() || !Subtarget.hasVSX())
15726     return false;
15727 
15728   switch(VT.getSimpleVT().SimpleTy) {
15729   default:
15730     // For FP types that are currently not supported by PPC backend, return
15731     // false. Examples: f16, f80.
15732     return false;
15733   case MVT::f32:
15734   case MVT::f64:
15735     if (Subtarget.hasPrefixInstrs()) {
15736       // With prefixed instructions, we can materialize anything that can be
15737       // represented with a 32-bit immediate, not just positive zero.
15738       APFloat APFloatOfImm = Imm;
15739       return convertToNonDenormSingle(APFloatOfImm);
15740     }
15741     LLVM_FALLTHROUGH;
15742   case MVT::ppcf128:
15743     return Imm.isPosZero();
15744   }
15745 }
15746 
15747 // For vector shift operation op, fold
15748 // (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
15749 static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
15750                                   SelectionDAG &DAG) {
15751   SDValue N0 = N->getOperand(0);
15752   SDValue N1 = N->getOperand(1);
15753   EVT VT = N0.getValueType();
15754   unsigned OpSizeInBits = VT.getScalarSizeInBits();
15755   unsigned Opcode = N->getOpcode();
15756   unsigned TargetOpcode;
15757 
15758   switch (Opcode) {
15759   default:
15760     llvm_unreachable("Unexpected shift operation");
15761   case ISD::SHL:
15762     TargetOpcode = PPCISD::SHL;
15763     break;
15764   case ISD::SRL:
15765     TargetOpcode = PPCISD::SRL;
15766     break;
15767   case ISD::SRA:
15768     TargetOpcode = PPCISD::SRA;
15769     break;
15770   }
15771 
15772   if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
15773       N1->getOpcode() == ISD::AND)
15774     if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
15775       if (Mask->getZExtValue() == OpSizeInBits - 1)
15776         return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));
15777 
15778   return SDValue();
15779 }
15780 
15781 SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
15782   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
15783     return Value;
15784 
15785   SDValue N0 = N->getOperand(0);
15786   ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
15787   if (!Subtarget.isISA3_0() || !Subtarget.isPPC64() ||
15788       N0.getOpcode() != ISD::SIGN_EXTEND ||
15789       N0.getOperand(0).getValueType() != MVT::i32 || CN1 == nullptr ||
15790       N->getValueType(0) != MVT::i64)
15791     return SDValue();
15792 
15793   // We can't save an operation here if the value is already extended, and
15794   // the existing shift is easier to combine.
15795   SDValue ExtsSrc = N0.getOperand(0);
15796   if (ExtsSrc.getOpcode() == ISD::TRUNCATE &&
15797       ExtsSrc.getOperand(0).getOpcode() == ISD::AssertSext)
15798     return SDValue();
15799 
15800   SDLoc DL(N0);
15801   SDValue ShiftBy = SDValue(CN1, 0);
15802   // We want the shift amount to be i32 on the extswli, but the shift could
15803   // have an i64.
15804   if (ShiftBy.getValueType() == MVT::i64)
15805     ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32);
15806 
15807   return DCI.DAG.getNode(PPCISD::EXTSWSLI, DL, MVT::i64, N0->getOperand(0),
15808                          ShiftBy);
15809 }
15810 
15811 SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
15812   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
15813     return Value;
15814 
15815   return SDValue();
15816 }
15817 
15818 SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
15819   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
15820     return Value;
15821 
15822   return SDValue();
15823 }
15824 
15825 // Transform (add X, (zext(setne Z, C))) -> (addze X, (addic (addi Z, -C), -1))
15826 // Transform (add X, (zext(sete  Z, C))) -> (addze X, (subfic (addi Z, -C), 0))
15827 // When C is zero, the equation (addi Z, -C) can be simplified to Z
15828 // Requirement: -C in [-32768, 32767], X and Z are MVT::i64 types
15829 static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG,
15830                                  const PPCSubtarget &Subtarget) {
15831   if (!Subtarget.isPPC64())
15832     return SDValue();
15833 
15834   SDValue LHS = N->getOperand(0);
15835   SDValue RHS = N->getOperand(1);
15836 
15837   auto isZextOfCompareWithConstant = [](SDValue Op) {
15838     if (Op.getOpcode() != ISD::ZERO_EXTEND || !Op.hasOneUse() ||
15839         Op.getValueType() != MVT::i64)
15840       return false;
15841 
15842     SDValue Cmp = Op.getOperand(0);
15843     if (Cmp.getOpcode() != ISD::SETCC || !Cmp.hasOneUse() ||
15844         Cmp.getOperand(0).getValueType() != MVT::i64)
15845       return false;
15846 
15847     if (auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1))) {
15848       int64_t NegConstant = 0 - Constant->getSExtValue();
15849       // Due to the limitations of the addi instruction,
15850       // -C is required to be [-32768, 32767].
15851       return isInt<16>(NegConstant);
15852     }
15853 
15854     return false;
15855   };
15856 
15857   bool LHSHasPattern = isZextOfCompareWithConstant(LHS);
15858   bool RHSHasPattern = isZextOfCompareWithConstant(RHS);
15859 
15860   // If there is a pattern, canonicalize a zext operand to the RHS.
15861   if (LHSHasPattern && !RHSHasPattern)
15862     std::swap(LHS, RHS);
15863   else if (!LHSHasPattern && !RHSHasPattern)
15864     return SDValue();
15865 
15866   SDLoc DL(N);
15867   SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Glue);
15868   SDValue Cmp = RHS.getOperand(0);
15869   SDValue Z = Cmp.getOperand(0);
15870   auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1));
15871 
15872   assert(Constant && "Constant Should not be a null pointer.");
15873   int64_t NegConstant = 0 - Constant->getSExtValue();
15874 
15875   switch(cast<CondCodeSDNode>(Cmp.getOperand(2))->get()) {
15876   default: break;
15877   case ISD::SETNE: {
15878     //                                 when C == 0
15879     //                             --> addze X, (addic Z, -1).carry
15880     //                            /
15881     // add X, (zext(setne Z, C))--
15882     //                            \    when -32768 <= -C <= 32767 && C != 0
15883     //                             --> addze X, (addic (addi Z, -C), -1).carry
15884     SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
15885                               DAG.getConstant(NegConstant, DL, MVT::i64));
15886     SDValue AddOrZ = NegConstant != 0 ? Add : Z;
15887     SDValue Addc = DAG.getNode(ISD::ADDC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
15888                                AddOrZ, DAG.getConstant(-1ULL, DL, MVT::i64));
15889     return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
15890                        SDValue(Addc.getNode(), 1));
15891     }
15892   case ISD::SETEQ: {
15893     //                                 when C == 0
15894     //                             --> addze X, (subfic Z, 0).carry
15895     //                            /
15896     // add X, (zext(sete  Z, C))--
15897     //                            \    when -32768 <= -C <= 32767 && C != 0
15898     //                             --> addze X, (subfic (addi Z, -C), 0).carry
15899     SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
15900                               DAG.getConstant(NegConstant, DL, MVT::i64));
15901     SDValue AddOrZ = NegConstant != 0 ? Add : Z;
15902     SDValue Subc = DAG.getNode(ISD::SUBC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
15903                                DAG.getConstant(0, DL, MVT::i64), AddOrZ);
15904     return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
15905                        SDValue(Subc.getNode(), 1));
15906     }
15907   }
15908 
15909   return SDValue();
15910 }
15911 
15912 // Transform
15913 // (add C1, (MAT_PCREL_ADDR GlobalAddr+C2)) to
15914 // (MAT_PCREL_ADDR GlobalAddr+(C1+C2))
15915 // In this case both C1 and C2 must be known constants.
15916 // C1+C2 must fit into a 34 bit signed integer.
15917 static SDValue combineADDToMAT_PCREL_ADDR(SDNode *N, SelectionDAG &DAG,
15918                                           const PPCSubtarget &Subtarget) {
15919   if (!Subtarget.isUsingPCRelativeCalls())
15920     return SDValue();
15921 
15922   // Check both Operand 0 and Operand 1 of the ADD node for the PCRel node.
15923   // If we find that node try to cast the Global Address and the Constant.
15924   SDValue LHS = N->getOperand(0);
15925   SDValue RHS = N->getOperand(1);
15926 
15927   if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
15928     std::swap(LHS, RHS);
15929 
15930   if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
15931     return SDValue();
15932 
15933   // Operand zero of PPCISD::MAT_PCREL_ADDR is the GA node.
15934   GlobalAddressSDNode *GSDN = dyn_cast<GlobalAddressSDNode>(LHS.getOperand(0));
15935   ConstantSDNode* ConstNode = dyn_cast<ConstantSDNode>(RHS);
15936 
15937   // Check that both casts succeeded.
15938   if (!GSDN || !ConstNode)
15939     return SDValue();
15940 
15941   int64_t NewOffset = GSDN->getOffset() + ConstNode->getSExtValue();
15942   SDLoc DL(GSDN);
15943 
15944   // The signed int offset needs to fit in 34 bits.
15945   if (!isInt<34>(NewOffset))
15946     return SDValue();
15947 
15948   // The new global address is a copy of the old global address except
15949   // that it has the updated Offset.
15950   SDValue GA =
15951       DAG.getTargetGlobalAddress(GSDN->getGlobal(), DL, GSDN->getValueType(0),
15952                                  NewOffset, GSDN->getTargetFlags());
15953   SDValue MatPCRel =
15954       DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, GSDN->getValueType(0), GA);
15955   return MatPCRel;
15956 }
15957 
15958 SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {
15959   if (auto Value = combineADDToADDZE(N, DCI.DAG, Subtarget))
15960     return Value;
15961 
15962   if (auto Value = combineADDToMAT_PCREL_ADDR(N, DCI.DAG, Subtarget))
15963     return Value;
15964 
15965   return SDValue();
15966 }
15967 
15968 // Detect TRUNCATE operations on bitcasts of float128 values.
15969 // What we are looking for here is the situtation where we extract a subset
15970 // of bits from a 128 bit float.
15971 // This can be of two forms:
15972 // 1) BITCAST of f128 feeding TRUNCATE
15973 // 2) BITCAST of f128 feeding SRL (a shift) feeding TRUNCATE
15974 // The reason this is required is because we do not have a legal i128 type
15975 // and so we want to prevent having to store the f128 and then reload part
15976 // of it.
15977 SDValue PPCTargetLowering::combineTRUNCATE(SDNode *N,
15978                                            DAGCombinerInfo &DCI) const {
15979   // If we are using CRBits then try that first.
15980   if (Subtarget.useCRBits()) {
15981     // Check if CRBits did anything and return that if it did.
15982     if (SDValue CRTruncValue = DAGCombineTruncBoolExt(N, DCI))
15983       return CRTruncValue;
15984   }
15985 
15986   SDLoc dl(N);
15987   SDValue Op0 = N->getOperand(0);
15988 
15989   // fold (truncate (abs (sub (zext a), (zext b)))) -> (vabsd a, b)
15990   if (Subtarget.hasP9Altivec() && Op0.getOpcode() == ISD::ABS) {
15991     EVT VT = N->getValueType(0);
15992     if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
15993       return SDValue();
15994     SDValue Sub = Op0.getOperand(0);
15995     if (Sub.getOpcode() == ISD::SUB) {
15996       SDValue SubOp0 = Sub.getOperand(0);
15997       SDValue SubOp1 = Sub.getOperand(1);
15998       if ((SubOp0.getOpcode() == ISD::ZERO_EXTEND) &&
15999           (SubOp1.getOpcode() == ISD::ZERO_EXTEND)) {
16000         return DCI.DAG.getNode(PPCISD::VABSD, dl, VT, SubOp0.getOperand(0),
16001                                SubOp1.getOperand(0),
16002                                DCI.DAG.getTargetConstant(0, dl, MVT::i32));
16003       }
16004     }
16005   }
16006 
16007   // Looking for a truncate of i128 to i64.
16008   if (Op0.getValueType() != MVT::i128 || N->getValueType(0) != MVT::i64)
16009     return SDValue();
16010 
16011   int EltToExtract = DCI.DAG.getDataLayout().isBigEndian() ? 1 : 0;
16012 
16013   // SRL feeding TRUNCATE.
16014   if (Op0.getOpcode() == ISD::SRL) {
16015     ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
16016     // The right shift has to be by 64 bits.
16017     if (!ConstNode || ConstNode->getZExtValue() != 64)
16018       return SDValue();
16019 
16020     // Switch the element number to extract.
16021     EltToExtract = EltToExtract ? 0 : 1;
16022     // Update Op0 past the SRL.
16023     Op0 = Op0.getOperand(0);
16024   }
16025 
16026   // BITCAST feeding a TRUNCATE possibly via SRL.
16027   if (Op0.getOpcode() == ISD::BITCAST &&
16028       Op0.getValueType() == MVT::i128 &&
16029       Op0.getOperand(0).getValueType() == MVT::f128) {
16030     SDValue Bitcast = DCI.DAG.getBitcast(MVT::v2i64, Op0.getOperand(0));
16031     return DCI.DAG.getNode(
16032         ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Bitcast,
16033         DCI.DAG.getTargetConstant(EltToExtract, dl, MVT::i32));
16034   }
16035   return SDValue();
16036 }
16037 
16038 SDValue PPCTargetLowering::combineMUL(SDNode *N, DAGCombinerInfo &DCI) const {
16039   SelectionDAG &DAG = DCI.DAG;
16040 
16041   ConstantSDNode *ConstOpOrElement = isConstOrConstSplat(N->getOperand(1));
16042   if (!ConstOpOrElement)
16043     return SDValue();
16044 
16045   // An imul is usually smaller than the alternative sequence for legal type.
16046   if (DAG.getMachineFunction().getFunction().hasMinSize() &&
16047       isOperationLegal(ISD::MUL, N->getValueType(0)))
16048     return SDValue();
16049 
16050   auto IsProfitable = [this](bool IsNeg, bool IsAddOne, EVT VT) -> bool {
16051     switch (this->Subtarget.getCPUDirective()) {
16052     default:
16053       // TODO: enhance the condition for subtarget before pwr8
16054       return false;
16055     case PPC::DIR_PWR8:
16056       //  type        mul     add    shl
16057       // scalar        4       1      1
16058       // vector        7       2      2
16059       return true;
16060     case PPC::DIR_PWR9:
16061     case PPC::DIR_PWR10:
16062     case PPC::DIR_PWR_FUTURE:
16063       //  type        mul     add    shl
16064       // scalar        5       2      2
16065       // vector        7       2      2
16066 
16067       // The cycle RATIO of related operations are showed as a table above.
16068       // Because mul is 5(scalar)/7(vector), add/sub/shl are all 2 for both
16069       // scalar and vector type. For 2 instrs patterns, add/sub + shl
16070       // are 4, it is always profitable; but for 3 instrs patterns
16071       // (mul x, -(2^N + 1)) => -(add (shl x, N), x), sub + add + shl are 6.
16072       // So we should only do it for vector type.
16073       return IsAddOne && IsNeg ? VT.isVector() : true;
16074     }
16075   };
16076 
16077   EVT VT = N->getValueType(0);
16078   SDLoc DL(N);
16079 
16080   const APInt &MulAmt = ConstOpOrElement->getAPIntValue();
16081   bool IsNeg = MulAmt.isNegative();
16082   APInt MulAmtAbs = MulAmt.abs();
16083 
16084   if ((MulAmtAbs - 1).isPowerOf2()) {
16085     // (mul x, 2^N + 1) => (add (shl x, N), x)
16086     // (mul x, -(2^N + 1)) => -(add (shl x, N), x)
16087 
16088     if (!IsProfitable(IsNeg, true, VT))
16089       return SDValue();
16090 
16091     SDValue Op0 = N->getOperand(0);
16092     SDValue Op1 =
16093         DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
16094                     DAG.getConstant((MulAmtAbs - 1).logBase2(), DL, VT));
16095     SDValue Res = DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);
16096 
16097     if (!IsNeg)
16098       return Res;
16099 
16100     return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
16101   } else if ((MulAmtAbs + 1).isPowerOf2()) {
16102     // (mul x, 2^N - 1) => (sub (shl x, N), x)
16103     // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
16104 
16105     if (!IsProfitable(IsNeg, false, VT))
16106       return SDValue();
16107 
16108     SDValue Op0 = N->getOperand(0);
16109     SDValue Op1 =
16110         DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
16111                     DAG.getConstant((MulAmtAbs + 1).logBase2(), DL, VT));
16112 
16113     if (!IsNeg)
16114       return DAG.getNode(ISD::SUB, DL, VT, Op1, Op0);
16115     else
16116       return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);
16117 
16118   } else {
16119     return SDValue();
16120   }
16121 }
16122 
16123 // Combine fma-like op (like fnmsub) with fnegs to appropriate op. Do this
16124 // in combiner since we need to check SD flags and other subtarget features.
16125 SDValue PPCTargetLowering::combineFMALike(SDNode *N,
16126                                           DAGCombinerInfo &DCI) const {
16127   SDValue N0 = N->getOperand(0);
16128   SDValue N1 = N->getOperand(1);
16129   SDValue N2 = N->getOperand(2);
16130   SDNodeFlags Flags = N->getFlags();
16131   EVT VT = N->getValueType(0);
16132   SelectionDAG &DAG = DCI.DAG;
16133   const TargetOptions &Options = getTargetMachine().Options;
16134   unsigned Opc = N->getOpcode();
16135   bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
16136   bool LegalOps = !DCI.isBeforeLegalizeOps();
16137   SDLoc Loc(N);
16138 
16139   if (!isOperationLegal(ISD::FMA, VT))
16140     return SDValue();
16141 
16142   // Allowing transformation to FNMSUB may change sign of zeroes when ab-c=0
16143   // since (fnmsub a b c)=-0 while c-ab=+0.
16144   if (!Flags.hasNoSignedZeros() && !Options.NoSignedZerosFPMath)
16145     return SDValue();
16146 
16147   // (fma (fneg a) b c) => (fnmsub a b c)
16148   // (fnmsub (fneg a) b c) => (fma a b c)
16149   if (SDValue NegN0 = getCheaperNegatedExpression(N0, DAG, LegalOps, CodeSize))
16150     return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, NegN0, N1, N2, Flags);
16151 
16152   // (fma a (fneg b) c) => (fnmsub a b c)
16153   // (fnmsub a (fneg b) c) => (fma a b c)
16154   if (SDValue NegN1 = getCheaperNegatedExpression(N1, DAG, LegalOps, CodeSize))
16155     return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, N0, NegN1, N2, Flags);
16156 
16157   return SDValue();
16158 }
16159 
16160 bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
16161   // Only duplicate to increase tail-calls for the 64bit SysV ABIs.
16162   if (!Subtarget.is64BitELFABI())
16163     return false;
16164 
16165   // If not a tail call then no need to proceed.
16166   if (!CI->isTailCall())
16167     return false;
16168 
16169   // If sibling calls have been disabled and tail-calls aren't guaranteed
16170   // there is no reason to duplicate.
16171   auto &TM = getTargetMachine();
16172   if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
16173     return false;
16174 
16175   // Can't tail call a function called indirectly, or if it has variadic args.
16176   const Function *Callee = CI->getCalledFunction();
16177   if (!Callee || Callee->isVarArg())
16178     return false;
16179 
16180   // Make sure the callee and caller calling conventions are eligible for tco.
16181   const Function *Caller = CI->getParent()->getParent();
16182   if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
16183                                            CI->getCallingConv()))
16184       return false;
16185 
16186   // If the function is local then we have a good chance at tail-calling it
16187   return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
16188 }
16189 
16190 bool PPCTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
16191   if (!Subtarget.hasVSX())
16192     return false;
16193   if (Subtarget.hasP9Vector() && VT == MVT::f128)
16194     return true;
16195   return VT == MVT::f32 || VT == MVT::f64 ||
16196     VT == MVT::v4f32 || VT == MVT::v2f64;
16197 }
16198 
16199 bool PPCTargetLowering::
16200 isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
16201   const Value *Mask = AndI.getOperand(1);
16202   // If the mask is suitable for andi. or andis. we should sink the and.
16203   if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
16204     // Can't handle constants wider than 64-bits.
16205     if (CI->getBitWidth() > 64)
16206       return false;
16207     int64_t ConstVal = CI->getZExtValue();
16208     return isUInt<16>(ConstVal) ||
16209       (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
16210   }
16211 
16212   // For non-constant masks, we can always use the record-form and.
16213   return true;
16214 }
16215 
16216 // Transform (abs (sub (zext a), (zext b))) to (vabsd a b 0)
16217 // Transform (abs (sub (zext a), (zext_invec b))) to (vabsd a b 0)
16218 // Transform (abs (sub (zext_invec a), (zext_invec b))) to (vabsd a b 0)
16219 // Transform (abs (sub (zext_invec a), (zext b))) to (vabsd a b 0)
16220 // Transform (abs (sub a, b) to (vabsd a b 1)) if a & b of type v4i32
16221 SDValue PPCTargetLowering::combineABS(SDNode *N, DAGCombinerInfo &DCI) const {
16222   assert((N->getOpcode() == ISD::ABS) && "Need ABS node here");
16223   assert(Subtarget.hasP9Altivec() &&
16224          "Only combine this when P9 altivec supported!");
16225   EVT VT = N->getValueType(0);
16226   if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
16227     return SDValue();
16228 
16229   SelectionDAG &DAG = DCI.DAG;
16230   SDLoc dl(N);
16231   if (N->getOperand(0).getOpcode() == ISD::SUB) {
16232     // Even for signed integers, if it's known to be positive (as signed
16233     // integer) due to zero-extended inputs.
16234     unsigned SubOpcd0 = N->getOperand(0)->getOperand(0).getOpcode();
16235     unsigned SubOpcd1 = N->getOperand(0)->getOperand(1).getOpcode();
16236     if ((SubOpcd0 == ISD::ZERO_EXTEND ||
16237          SubOpcd0 == ISD::ZERO_EXTEND_VECTOR_INREG) &&
16238         (SubOpcd1 == ISD::ZERO_EXTEND ||
16239          SubOpcd1 == ISD::ZERO_EXTEND_VECTOR_INREG)) {
16240       return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
16241                          N->getOperand(0)->getOperand(0),
16242                          N->getOperand(0)->getOperand(1),
16243                          DAG.getTargetConstant(0, dl, MVT::i32));
16244     }
16245 
16246     // For type v4i32, it can be optimized with xvnegsp + vabsduw
16247     if (N->getOperand(0).getValueType() == MVT::v4i32 &&
16248         N->getOperand(0).hasOneUse()) {
16249       return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
16250                          N->getOperand(0)->getOperand(0),
16251                          N->getOperand(0)->getOperand(1),
16252                          DAG.getTargetConstant(1, dl, MVT::i32));
16253     }
16254   }
16255 
16256   return SDValue();
16257 }
16258 
16259 // For type v4i32/v8ii16/v16i8, transform
16260 // from (vselect (setcc a, b, setugt), (sub a, b), (sub b, a)) to (vabsd a, b)
16261 // from (vselect (setcc a, b, setuge), (sub a, b), (sub b, a)) to (vabsd a, b)
16262 // from (vselect (setcc a, b, setult), (sub b, a), (sub a, b)) to (vabsd a, b)
16263 // from (vselect (setcc a, b, setule), (sub b, a), (sub a, b)) to (vabsd a, b)
16264 SDValue PPCTargetLowering::combineVSelect(SDNode *N,
16265                                           DAGCombinerInfo &DCI) const {
16266   assert((N->getOpcode() == ISD::VSELECT) && "Need VSELECT node here");
16267   assert(Subtarget.hasP9Altivec() &&
16268          "Only combine this when P9 altivec supported!");
16269 
16270   SelectionDAG &DAG = DCI.DAG;
16271   SDLoc dl(N);
16272   SDValue Cond = N->getOperand(0);
16273   SDValue TrueOpnd = N->getOperand(1);
16274   SDValue FalseOpnd = N->getOperand(2);
16275   EVT VT = N->getOperand(1).getValueType();
16276 
16277   if (Cond.getOpcode() != ISD::SETCC || TrueOpnd.getOpcode() != ISD::SUB ||
16278       FalseOpnd.getOpcode() != ISD::SUB)
16279     return SDValue();
16280 
16281   // ABSD only available for type v4i32/v8i16/v16i8
16282   if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
16283     return SDValue();
16284 
16285   // At least to save one more dependent computation
16286   if (!(Cond.hasOneUse() || TrueOpnd.hasOneUse() || FalseOpnd.hasOneUse()))
16287     return SDValue();
16288 
16289   ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
16290 
16291   // Can only handle unsigned comparison here
16292   switch (CC) {
16293   default:
16294     return SDValue();
16295   case ISD::SETUGT:
16296   case ISD::SETUGE:
16297     break;
16298   case ISD::SETULT:
16299   case ISD::SETULE:
16300     std::swap(TrueOpnd, FalseOpnd);
16301     break;
16302   }
16303 
16304   SDValue CmpOpnd1 = Cond.getOperand(0);
16305   SDValue CmpOpnd2 = Cond.getOperand(1);
16306 
16307   // SETCC CmpOpnd1 CmpOpnd2 cond
16308   // TrueOpnd = CmpOpnd1 - CmpOpnd2
16309   // FalseOpnd = CmpOpnd2 - CmpOpnd1
16310   if (TrueOpnd.getOperand(0) == CmpOpnd1 &&
16311       TrueOpnd.getOperand(1) == CmpOpnd2 &&
16312       FalseOpnd.getOperand(0) == CmpOpnd2 &&
16313       FalseOpnd.getOperand(1) == CmpOpnd1) {
16314     return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(1).getValueType(),
16315                        CmpOpnd1, CmpOpnd2,
16316                        DAG.getTargetConstant(0, dl, MVT::i32));
16317   }
16318 
16319   return SDValue();
16320 }
16321