//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCCCState.h"
#include "PPCCallingConv.h"
#include "PPCFrameLowering.h"
#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCRegisterInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionXCOFF.h"
#include "llvm/MC/MCSymbolXCOFF.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "ppc-lowering"

static cl::opt<bool> DisablePPCPreinc(
    "disable-ppc-preinc",
    cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref(
    "disable-ppc-ilp-pref",
    cl::desc("disable setting the node scheduling preference to ILP on PPC"),
    cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned(
    "disable-ppc-unaligned",
    cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableSCO(
    "disable-ppc-sco",
    cl::desc("disable sibling call optimization on ppc"), cl::Hidden);

static cl::opt<bool> DisableInnermostLoopAlign32(
    "disable-ppc-innermost-loop-align32",
    cl::desc("don't always align innermost loop to 32 bytes on ppc"),
    cl::Hidden);

static cl::opt<bool> UseAbsoluteJumpTables(
    "ppc-use-absolute-jumptables",
    cl::desc("use absolute jump tables on ppc"), cl::Hidden);

static cl::opt<bool> EnablePPCPCRelTLS(
    "enable-ppc-pcrel-tls",
    cl::desc("enable the use of PC relative memops in TLS instructions on PPC"),
    cl::Hidden);

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumSiblingCalls, "Number of sibling calls");
STATISTIC(ShufflesHandledWithVPERM, "Number of shuffles lowered to a VPERM");
STATISTIC(NumDynamicAllocaProbed, "Number of dynamic stack allocations probed");

static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int);

static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl);

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
                                     const PPCSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4));

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  if (!useSoftFloat()) {
    if (hasSPE()) {
      addRegisterClass(MVT::f32, &PPC::GPRCRegClass);
      addRegisterClass(MVT::f64, &PPC::SPERCRegClass);
    } else {
      addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
      addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
    }
  }

  // Match BITREVERSE to a customized fast code sequence in the td file.
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);

  // Sub-word ATOMIC_CMP_SWAP needs to ensure that the input is zero-extended.
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
  }
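  // (PPC has lha for a sign-extending i16 load but no byte-sized equivalent,
  // so, roughly speaking, the expanded i8 SEXTLOAD becomes a zero-extending
  // lbz followed by an extsb.)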
  if (Subtarget.isISA3_0()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Legal);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Legal);
    setTruncStoreAction(MVT::f64, MVT::f16, Legal);
    setTruncStoreAction(MVT::f32, MVT::f16, Legal);
  } else {
    // No extending loads from f16 or HW conversions back and forth.
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
    setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  }

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  if (!Subtarget.hasSPE()) {
    setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
  }
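  // For reference, the pre-increment forms are the update-form memory ops;
  // an illustrative sketch in assembly:
  //   lwzu r3, 4(r4)    # load from r4+4 and write the new address back to r4
  //   stwu r3, -16(r1)  # store to r1-16 and update r1 likewise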

  // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }
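  // As an illustration, a 64-bit add on 32-bit PPC legalizes to an ADDC of
  // the low halves followed by an ADDE of the high halves (addc/adde), with
  // the carry threaded between the two nodes.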

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::STRICT_SINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::STRICT_UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);

      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::SINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
  // PPC (the libcall is not available).
  setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL,  MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT,  MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions unless we are on P9, where we may
  // use a hardware instruction to compute the remainder. When the result of
  // both the remainder and the division is required, it is more efficient to
  // compute the remainder from the result of the division rather than use the
  // remainder instruction. The instructions are legalized directly because
  // the DivRemPairsPass performs the transformation at the IR level.
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::SREM, MVT::i32, Legal);
    setOperationAction(ISD::UREM, MVT::i32, Legal);
    setOperationAction(ISD::SREM, MVT::i64, Legal);
    setOperationAction(ISD::UREM, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::SREM, MVT::i32, Expand);
    setOperationAction(ISD::UREM, MVT::i32, Expand);
    setOperationAction(ISD::SREM, MVT::i64, Expand);
    setOperationAction(ISD::UREM, MVT::i64, Expand);
  }
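  // On ISA 3.0 the hardware remainder instructions referred to above are the
  // mod* family: modsw/moduw for i32 and modsd/modud for i64.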

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // Handle constrained floating-point operations for scalars.
  // TODO: Handle SPE-specific operations.
  setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FMA, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);

  setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FMA, MVT::f64, Legal);
  if (Subtarget.hasVSX()) {
    setOperationAction(ISD::STRICT_FRINT, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FRINT, MVT::f64, Legal);
  }

  if (Subtarget.hasFSQRT()) {
    setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::STRICT_FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FCEIL,  MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::f32, Legal);

    setOperationAction(ISD::STRICT_FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FCEIL,  MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::f64, Legal);
  }

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FPOW , MVT::f64, Expand);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);
  setOperationAction(ISD::FPOW , MVT::f32, Expand);
  if (Subtarget.hasSPE()) {
    setOperationAction(ISD::FMA  , MVT::f64, Expand);
    setOperationAction(ISD::FMA  , MVT::f32, Expand);
  } else {
    setOperationAction(ISD::FMA  , MVT::f64, Legal);
    setOperationAction(ISD::FMA  , MVT::f32, Legal);
  }

  if (Subtarget.hasSPE())
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP, but we can use the vector BSWAP instruction
  // xxbrd to speed up scalar BSWAP64.
  // CTPOP and CTTZ were introduced in P8 and P9, respectively.
  setOperationAction(ISD::BSWAP, MVT::i32  , Expand);
  if (Subtarget.hasP9Vector())
    setOperationAction(ISD::BSWAP, MVT::i64  , Custom);
  else
    setOperationAction(ISD::BSWAP, MVT::i64  , Expand);
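  // A plausible shape for that custom BSWAP64 path on P9 is a direct move of
  // the GPR value into a vector register, a byte-reverse via xxbrd, and a
  // move back; without P9 vectors the generic shift/mask expansion is used.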
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::CTTZ , MVT::i32  , Legal);
    setOperationAction(ISD::CTTZ , MVT::i64  , Legal);
  } else {
    setOperationAction(ISD::CTTZ , MVT::i32  , Expand);
    setOperationAction(ISD::CTTZ , MVT::i64  , Expand);
  }

  if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
    setOperationAction(ISD::CTPOP, MVT::i32  , Legal);
    setOperationAction(ISD::CTPOP, MVT::i64  , Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32  , Expand);
    setOperationAction(ISD::CTPOP, MVT::i64  , Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32   , Expand);
  setOperationAction(ISD::ROTR, MVT::i64   , Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have Select.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  if (Subtarget.hasFPU()) {
    setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Legal);

    setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Legal);
  }

  // PowerPC does not have BRCOND, which requires a SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT,  MVT::Other, Expand);

  if (Subtarget.hasSPE()) {
    // SPE has built-in conversions.
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
  } else {
    // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

    // PowerPC does not have [U|S]INT_TO_FP.
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
  }

  if (Subtarget.hasDirectMove() && isPPC64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i64, Legal);
    setOperationAction(ISD::BITCAST, MVT::f64, Legal);
    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::LRINT, MVT::f64, Legal);
      setOperationAction(ISD::LRINT, MVT::f32, Legal);
      setOperationAction(ISD::LLRINT, MVT::f64, Legal);
      setOperationAction(ISD::LLRINT, MVT::f32, Legal);
      setOperationAction(ISD::LROUND, MVT::f64, Legal);
      setOperationAction(ISD::LROUND, MVT::f32, Legal);
      setOperationAction(ISD::LLROUND, MVT::f64, Legal);
      setOperationAction(ISD::LLROUND, MVT::f32, Legal);
    }
  } else {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
  }

  // We cannot sextinreg(i1).  Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
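  // For example, an i1 sign-extension within an i32 expands to
  //   (sra (shl x, 31), 31)
  // i.e. shift the bit up to the MSB and arithmetic-shift it back down.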

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP is NOT intended to support SjLj exception
  // handling; it is a light-weight setjmp/longjmp replacement used to support
  // continuations, user-level threading, etc. As a result, no other SjLj
  // exception interfaces are implemented; please don't build your own
  // exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress,  MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i32, Custom);
  setOperationAction(ISD::JumpTable,     MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress,  MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i64, Custom);
  setOperationAction(ISD::JumpTable,     MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART           , MVT::Other, Custom);

  if (Subtarget.is64BitELFABI()) {
    // VAARG always uses double-word chunks, so promote anything smaller.
    setOperationAction(ISD::VAARG, MVT::i1, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i8, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i16, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i32, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
  } else if (Subtarget.is32BitELFABI()) {
    // VAARG is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VAARG, MVT::i64, Custom);
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  // VACOPY is custom lowered with the 32-bit SVR4 ABI.
  if (Subtarget.is32BitELFABI())
    setOperationAction(ISD::VACOPY            , MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY            , MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE      , MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64  , Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  // Comparisons that require checking two conditions.
  if (Subtarget.hasSPE()) {
    setCondCodeAction(ISD::SETO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::f64, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
  }
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);
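  // Each of the above expands to a pair of ordinary compares: e.g. SETUEQ is
  // "unordered or equal" (SETUO || SETOEQ) and SETONE is "less than or
  // greater than" (SETOLT || SETOGT), hence "checking two conditions".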

  if (Subtarget.has64BitSupport()) {
    // Such targets also have instructions for converting between i64 and fp.
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
    }
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    if (Subtarget.hasSPE()) {
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
    } else {
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Expand);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    }
  }

  // With the instructions enabled under FPCVT, we can do everything.
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }
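  // The *_PARTS nodes split a double-width shift into operations on the two
  // native-width halves, so e.g. an i128 shift on a 64-bit target is lowered
  // through a pair of i64 values.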

  // PowerPC has better expansions for funnel shifts than the generic
  // TargetLowering::expandFunnelShift.
  if (Subtarget.has64BitSupport()) {
    setOperationAction(ISD::FSHL, MVT::i64, Custom);
    setOperationAction(ISD::FSHR, MVT::i64, Custom);
  }
  setOperationAction(ISD::FSHL, MVT::i32, Custom);
  setOperationAction(ISD::FSHR, MVT::i32, Custom);
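  // Note that a funnel shift of a value with itself is a rotate, e.g.
  // fshl(x, x, n) == rotl(x, n), which maps naturally to rotlw/rotld.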

  if (Subtarget.hasVSX()) {
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
  }

  if (Subtarget.hasAltivec()) {
    for (MVT VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
      setOperationAction(ISD::SADDSAT, VT, Legal);
      setOperationAction(ISD::SSUBSAT, VT, Legal);
      setOperationAction(ISD::UADDSAT, VT, Legal);
      setOperationAction(ISD::USUBSAT, VT, Legal);
    }
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // For v2i64, these are only valid with P8Vector. This is corrected after
      // the loop.
      if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) {
        setOperationAction(ISD::SMAX, VT, Legal);
        setOperationAction(ISD::SMIN, VT, Legal);
        setOperationAction(ISD::UMAX, VT, Legal);
        setOperationAction(ISD::UMIN, VT, Legal);
      }
      } else {
        setOperationAction(ISD::SMAX, VT, Expand);
        setOperationAction(ISD::SMIN, VT, Expand);
        setOperationAction(ISD::UMAX, VT, Expand);
        setOperationAction(ISD::UMIN, VT, Expand);
      }

      if (Subtarget.hasVSX()) {
        setOperationAction(ISD::FMAXNUM, VT, Legal);
        setOperationAction(ISD::FMINNUM, VT, Legal);
      }

      // Vector instructions introduced in P8.
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      } else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // Vector instructions introduced in P9.
      if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
        setOperationAction(ISD::CTTZ, VT, Legal);
      else
        setOperationAction(ISD::CTTZ, VT, Expand);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType(ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND   , VT, Promote);
      AddPromotedToType(ISD::AND   , VT, MVT::v4i32);
      setOperationAction(ISD::OR    , VT, Promote);
      AddPromotedToType(ISD::OR    , VT, MVT::v4i32);
      setOperationAction(ISD::XOR   , VT, Promote);
      AddPromotedToType(ISD::XOR   , VT, MVT::v4i32);
      setOperationAction(ISD::LOAD  , VT, Promote);
      AddPromotedToType(ISD::LOAD  , VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType(ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType(ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL , VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL,  VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT,  VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }
    setOperationAction(ISD::SELECT_CC, MVT::v4i32, Expand);
    if (!Subtarget.hasP8Vector()) {
      setOperationAction(ISD::SMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::SMIN, MVT::v2i64, Expand);
      setOperationAction(ISD::UMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::UMIN, MVT::v2i64, Expand);
    }

    for (auto VT : {MVT::v2i64, MVT::v4i32, MVT::v8i16, MVT::v16i8})
      setOperationAction(ISD::ABS, VT, Custom);

    // We can custom expand all VECTOR_SHUFFLEs to VPERM; others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    // Vector truncates to sub-word integers that fit in an Altivec/VSX
    // register are cheap, so handle them before they get expanded to scalar.
    setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);

    setOperationAction(ISD::AND   , MVT::v4i32, Legal);
    setOperationAction(ISD::OR    , MVT::v4i32, Legal);
    setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE , MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    // Without hasP8Altivec set, v2i64 SMAX isn't available, but the custom
    // lowering of ABS requires SMAX support.
    if (!Subtarget.hasP8Altivec())
      setOperationAction(ISD::ABS, MVT::v2i64, Expand);

    // Custom lower ROTL v1i128 to VECTOR_SHUFFLE v16i8.
    setOperationAction(ISD::ROTL, MVT::v1i128, Custom);
    // With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w).
    if (Subtarget.hasAltivec())
      for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})
        setOperationAction(ISD::ROTL, VT, Legal);
    // With hasP8Altivec set, we can lower ISD::ROTL to vrld.
    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::ROTL, MVT::v2i64, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    if (Subtarget.isISA3_1()) {
      setOperationAction(ISD::MUL, MVT::v2i64, Legal);
      setOperationAction(ISD::MULHS, MVT::v2i64, Legal);
      setOperationAction(ISD::MULHU, MVT::v2i64, Legal);
      setOperationAction(ISD::MULHS, MVT::v4i32, Legal);
      setOperationAction(ISD::MULHU, MVT::v4i32, Legal);
      setOperationAction(ISD::UDIV, MVT::v2i64, Legal);
      setOperationAction(ISD::SDIV, MVT::v2i64, Legal);
      setOperationAction(ISD::UDIV, MVT::v4i32, Legal);
      setOperationAction(ISD::SDIV, MVT::v4i32, Legal);
      setOperationAction(ISD::UREM, MVT::v2i64, Legal);
      setOperationAction(ISD::SREM, MVT::v2i64, Legal);
      setOperationAction(ISD::UREM, MVT::v4i32, Legal);
      setOperationAction(ISD::SREM, MVT::v4i32, Legal);
    }

    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO,   MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
      }
      if (Subtarget.hasDirectMove() && isPPC64) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
      }
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      // The nearbyint variants are not allowed to raise the inexact
      // exception, so we can only code-gen them with unsafe math.
      if (TM.Options.UnsafeFPMath) {
        setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
        setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
      }

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::f64, Legal);
      setOperationAction(ISD::FRINT, MVT::f64, Legal);

      setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
      setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::f32, Legal);
      setOperationAction(ISD::FRINT, MVT::f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO,   MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        // 128-bit shifts can be accomplished via 3 instructions for SHL and
        // SRL, but not for SRA because of the instructions available:
        // VS{RL} and VS{RL}O. However, due to direct move costs, it's not
        // worth doing.
        setOperationAction(ISD::SHL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRA, MVT::v1i128, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      } else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      setOperationAction(ISD::SETCC, MVT::v1i128, Expand);

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i64, Legal);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Custom handling for partial vectors of integers converted to
      // floating point. We already have optimal handling for v2i32 through
      // the DAG combine, so those aren't necessary.
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);

      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v2f64, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Legal);

      if (Subtarget.hasDirectMove())
        setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

      // Handle constrained floating-point operations for vectors. The
      // predicate is `hasVSX` because Altivec instructions do not raise
      // floating-point exceptions, while VSX vector instructions do.
      setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMA, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMAXNUM, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMINNUM, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FRINT, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FCEIL,  MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMA, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMAXNUM, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMINNUM, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FRINT, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FCEIL,  MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FROUND, MVT::v2f64, Legal);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }

    if (Subtarget.hasP9Vector()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

      // 128-bit shifts can be accomplished via 3 instructions for SHL and
      // SRL, but not for SRA because of the instructions available:
      // VS{RL} and VS{RL}O.
      setOperationAction(ISD::SHL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRA, MVT::v1i128, Expand);

      addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
      setOperationAction(ISD::FADD, MVT::f128, Legal);
      setOperationAction(ISD::FSUB, MVT::f128, Legal);
      setOperationAction(ISD::FDIV, MVT::f128, Legal);
      setOperationAction(ISD::FMUL, MVT::f128, Legal);
      setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
      // No extending loads to f128 on PPC.
      for (MVT FPT : MVT::fp_valuetypes())
        setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
      setOperationAction(ISD::FMA, MVT::f128, Legal);
      setCondCodeAction(ISD::SETULT, MVT::f128, Expand);
      setCondCodeAction(ISD::SETUGT, MVT::f128, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand);
      setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
      setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
      setCondCodeAction(ISD::SETONE, MVT::f128, Expand);

      setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
      setOperationAction(ISD::FRINT, MVT::f128, Legal);
      setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
      setOperationAction(ISD::FCEIL, MVT::f128, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
      setOperationAction(ISD::FROUND, MVT::f128, Legal);

      setOperationAction(ISD::SELECT, MVT::f128, Expand);
      setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
      setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
      setTruncStoreAction(MVT::f128, MVT::f64, Expand);
      setTruncStoreAction(MVT::f128, MVT::f32, Expand);
      setOperationAction(ISD::BITCAST, MVT::i128, Custom);
      // No implementation for these ops on PowerPC.
      setOperationAction(ISD::FSIN, MVT::f128, Expand);
      setOperationAction(ISD::FCOS, MVT::f128, Expand);
      setOperationAction(ISD::FPOW, MVT::f128, Expand);
      setOperationAction(ISD::FPOWI, MVT::f128, Expand);
      setOperationAction(ISD::FREM, MVT::f128, Expand);

      // Handle constrained floating-point operations for fp128.
      setOperationAction(ISD::STRICT_FADD, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FSUB, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FMUL, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FDIV, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FMA, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FSQRT, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Legal);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
      setOperationAction(ISD::STRICT_FRINT, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FCEIL, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FROUND, MVT::f128, Legal);
      setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
      setOperationAction(ISD::BSWAP, MVT::v8i16, Legal);
      setOperationAction(ISD::BSWAP, MVT::v4i32, Legal);
      setOperationAction(ISD::BSWAP, MVT::v2i64, Legal);
      setOperationAction(ISD::BSWAP, MVT::v1i128, Legal);
    }
1162 
1163     if (Subtarget.hasP9Altivec()) {
1164       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
1165       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
1166 
1167       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8,  Legal);
1168       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
1169       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
1170       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8,  Legal);
1171       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal);
1172       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
1173       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
1174     }
1175   }
1176 
1177   if (Subtarget.has64BitSupport())
1178     setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
1179 
1180   if (Subtarget.isISA3_1())
1181     setOperationAction(ISD::SRA, MVT::v1i128, Legal);
1182 
1183   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);
1184 
1185   if (!isPPC64) {
1186     setOperationAction(ISD::ATOMIC_LOAD,  MVT::i64, Expand);
1187     setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
1188   }
1189 
1190   setBooleanContents(ZeroOrOneBooleanContent);
1191 
1192   if (Subtarget.hasAltivec()) {
1193     // Altivec instructions set fields to all zeros or all ones.
1194     setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
1195   }
1196 
1197   if (!isPPC64) {
    // These libcalls are not available in 32-bit mode.
1199     setLibcallName(RTLIB::SHL_I128, nullptr);
1200     setLibcallName(RTLIB::SRL_I128, nullptr);
1201     setLibcallName(RTLIB::SRA_I128, nullptr);
1202   }
1203 
1204   if (!isPPC64)
1205     setMaxAtomicSizeInBitsSupported(32);
1206 
1207   setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);
1208 
1209   // We have target-specific dag combine patterns for the following nodes:
1210   setTargetDAGCombine(ISD::ADD);
1211   setTargetDAGCombine(ISD::SHL);
1212   setTargetDAGCombine(ISD::SRA);
1213   setTargetDAGCombine(ISD::SRL);
1214   setTargetDAGCombine(ISD::MUL);
1215   setTargetDAGCombine(ISD::FMA);
1216   setTargetDAGCombine(ISD::SINT_TO_FP);
1217   setTargetDAGCombine(ISD::BUILD_VECTOR);
1218   if (Subtarget.hasFPCVT())
1219     setTargetDAGCombine(ISD::UINT_TO_FP);
1220   setTargetDAGCombine(ISD::LOAD);
1221   setTargetDAGCombine(ISD::STORE);
1222   setTargetDAGCombine(ISD::BR_CC);
1223   if (Subtarget.useCRBits())
1224     setTargetDAGCombine(ISD::BRCOND);
1225   setTargetDAGCombine(ISD::BSWAP);
1226   setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1227   setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
1228   setTargetDAGCombine(ISD::INTRINSIC_VOID);
1229 
1230   setTargetDAGCombine(ISD::SIGN_EXTEND);
1231   setTargetDAGCombine(ISD::ZERO_EXTEND);
1232   setTargetDAGCombine(ISD::ANY_EXTEND);
1233 
1234   setTargetDAGCombine(ISD::TRUNCATE);
1235   setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1236 
1238   if (Subtarget.useCRBits()) {
1239     setTargetDAGCombine(ISD::TRUNCATE);
1240     setTargetDAGCombine(ISD::SETCC);
1241     setTargetDAGCombine(ISD::SELECT_CC);
1242   }
1243 
1244   if (Subtarget.hasP9Altivec()) {
1245     setTargetDAGCombine(ISD::ABS);
1246     setTargetDAGCombine(ISD::VSELECT);
1247   }
1248 
1249   setLibcallName(RTLIB::LOG_F128, "logf128");
1250   setLibcallName(RTLIB::LOG2_F128, "log2f128");
1251   setLibcallName(RTLIB::LOG10_F128, "log10f128");
1252   setLibcallName(RTLIB::EXP_F128, "expf128");
1253   setLibcallName(RTLIB::EXP2_F128, "exp2f128");
1254   setLibcallName(RTLIB::SIN_F128, "sinf128");
1255   setLibcallName(RTLIB::COS_F128, "cosf128");
1256   setLibcallName(RTLIB::POW_F128, "powf128");
1257   setLibcallName(RTLIB::FMIN_F128, "fminf128");
1258   setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
1259   setLibcallName(RTLIB::POWI_F128, "__powikf2");
1260   setLibcallName(RTLIB::REM_F128, "fmodf128");
1261 
1262   // With 32 condition bits, we don't need to sink (and duplicate) compares
1263   // aggressively in CodeGenPrep.
1264   if (Subtarget.useCRBits()) {
1265     setHasMultipleConditionRegisters();
1266     setJumpIsExpensive();
1267   }
1268 
1269   setMinFunctionAlignment(Align(4));
1270 
1271   switch (Subtarget.getCPUDirective()) {
1272   default: break;
1273   case PPC::DIR_970:
1274   case PPC::DIR_A2:
1275   case PPC::DIR_E500:
1276   case PPC::DIR_E500mc:
1277   case PPC::DIR_E5500:
1278   case PPC::DIR_PWR4:
1279   case PPC::DIR_PWR5:
1280   case PPC::DIR_PWR5X:
1281   case PPC::DIR_PWR6:
1282   case PPC::DIR_PWR6X:
1283   case PPC::DIR_PWR7:
1284   case PPC::DIR_PWR8:
1285   case PPC::DIR_PWR9:
1286   case PPC::DIR_PWR10:
1287   case PPC::DIR_PWR_FUTURE:
1288     setPrefLoopAlignment(Align(16));
1289     setPrefFunctionAlignment(Align(16));
1290     break;
1291   }
1292 
1293   if (Subtarget.enableMachineScheduler())
1294     setSchedulingPreference(Sched::Source);
1295   else
1296     setSchedulingPreference(Sched::Hybrid);
1297 
1298   computeRegisterProperties(STI.getRegisterInfo());
1299 
1300   // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
1302   if (Subtarget.getCPUDirective() == PPC::DIR_E500mc ||
1303       Subtarget.getCPUDirective() == PPC::DIR_E5500) {
1304     MaxStoresPerMemset = 32;
1305     MaxStoresPerMemsetOptSize = 16;
1306     MaxStoresPerMemcpy = 32;
1307     MaxStoresPerMemcpyOptSize = 8;
1308     MaxStoresPerMemmove = 32;
1309     MaxStoresPerMemmoveOptSize = 8;
1310   } else if (Subtarget.getCPUDirective() == PPC::DIR_A2) {
1311     // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of the function call, even when warm, can be
1313     // over one hundred cycles.
1314     MaxStoresPerMemset = 128;
1315     MaxStoresPerMemcpy = 128;
1316     MaxStoresPerMemmove = 128;
1317     MaxLoadsPerMemcmp = 128;
1318   } else {
1319     MaxLoadsPerMemcmp = 8;
1320     MaxLoadsPerMemcmpOptSize = 4;
1321   }
1322 
1323   IsStrictFPEnabled = true;
1324 
1325   // Let the subtarget (CPU) decide if a predictable select is more expensive
1326   // than the corresponding branch. This information is used in CGP to decide
1327   // when to convert selects into branches.
1328   PredictableSelectIsExpensive = Subtarget.isPredictableSelectIsExpensive();
1329 }
1330 
1331 /// getMaxByValAlign - Helper for getByValTypeAlignment to determine
1332 /// the desired ByVal argument alignment.
1333 static void getMaxByValAlign(Type *Ty, Align &MaxAlign, Align MaxMaxAlign) {
1334   if (MaxAlign == MaxMaxAlign)
1335     return;
1336   if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1337     if (MaxMaxAlign >= 32 &&
1338         VTy->getPrimitiveSizeInBits().getFixedSize() >= 256)
1339       MaxAlign = Align(32);
1340     else if (VTy->getPrimitiveSizeInBits().getFixedSize() >= 128 &&
1341              MaxAlign < 16)
1342       MaxAlign = Align(16);
1343   } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1344     Align EltAlign;
1345     getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
1346     if (EltAlign > MaxAlign)
1347       MaxAlign = EltAlign;
1348   } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1349     for (auto *EltTy : STy->elements()) {
1350       Align EltAlign;
1351       getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
1352       if (EltAlign > MaxAlign)
1353         MaxAlign = EltAlign;
1354       if (MaxAlign == MaxMaxAlign)
1355         break;
1356     }
1357   }
1358 }
1359 
1360 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1361 /// function arguments in the caller parameter area.
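/// For example, when Altivec is available, an aggregate containing a v4i32
/// member is aligned to 16 bytes, while aggregates without vector members
/// keep the default 8-byte (PPC64) or 4-byte (PPC32) alignment.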
1362 unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
1363                                                   const DataLayout &DL) const {
  // 16-byte and wider vectors are passed on a 16-byte boundary.
  // Everything else is passed on an 8-byte boundary on PPC64 and a 4-byte
  // boundary on PPC32.
1366   Align Alignment = Subtarget.isPPC64() ? Align(8) : Align(4);
1367   if (Subtarget.hasAltivec())
1368     getMaxByValAlign(Ty, Alignment, Align(16));
1369   return Alignment.value();
1370 }
1371 
1372 bool PPCTargetLowering::useSoftFloat() const {
1373   return Subtarget.useSoftFloat();
1374 }
1375 
1376 bool PPCTargetLowering::hasSPE() const {
1377   return Subtarget.hasSPE();
1378 }
1379 
1380 bool PPCTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
1381   return VT.isScalarInteger();
1382 }
1383 
1384 /// isMulhCheaperThanMulShift - Return true if a mulh[s|u] node for a specific
1385 /// type is cheaper than a multiply followed by a shift.
1386 /// This is true for words and doublewords on 64-bit PowerPC.
1387 bool PPCTargetLowering::isMulhCheaperThanMulShift(EVT Type) const {
1388   if (Subtarget.isPPC64() && (isOperationLegal(ISD::MULHS, Type) ||
1389                               isOperationLegal(ISD::MULHU, Type)))
1390     return true;
1391   return TargetLowering::isMulhCheaperThanMulShift(Type);
1392 }
1393 
1394 const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
1395   switch ((PPCISD::NodeType)Opcode) {
1396   case PPCISD::FIRST_NUMBER:    break;
1397   case PPCISD::FSEL:            return "PPCISD::FSEL";
1398   case PPCISD::XSMAXCDP:        return "PPCISD::XSMAXCDP";
1399   case PPCISD::XSMINCDP:        return "PPCISD::XSMINCDP";
1400   case PPCISD::FCFID:           return "PPCISD::FCFID";
1401   case PPCISD::FCFIDU:          return "PPCISD::FCFIDU";
1402   case PPCISD::FCFIDS:          return "PPCISD::FCFIDS";
1403   case PPCISD::FCFIDUS:         return "PPCISD::FCFIDUS";
1404   case PPCISD::FCTIDZ:          return "PPCISD::FCTIDZ";
1405   case PPCISD::FCTIWZ:          return "PPCISD::FCTIWZ";
1406   case PPCISD::FCTIDUZ:         return "PPCISD::FCTIDUZ";
1407   case PPCISD::FCTIWUZ:         return "PPCISD::FCTIWUZ";
1408   case PPCISD::FP_TO_UINT_IN_VSR:
                                return "PPCISD::FP_TO_UINT_IN_VSR";
1410   case PPCISD::FP_TO_SINT_IN_VSR:
1411                                 return "PPCISD::FP_TO_SINT_IN_VSR";
1412   case PPCISD::FRE:             return "PPCISD::FRE";
1413   case PPCISD::FRSQRTE:         return "PPCISD::FRSQRTE";
1414   case PPCISD::STFIWX:          return "PPCISD::STFIWX";
1415   case PPCISD::VPERM:           return "PPCISD::VPERM";
1416   case PPCISD::XXSPLT:          return "PPCISD::XXSPLT";
1417   case PPCISD::XXSPLTI_SP_TO_DP:
1418     return "PPCISD::XXSPLTI_SP_TO_DP";
1419   case PPCISD::XXSPLTI32DX:
1420     return "PPCISD::XXSPLTI32DX";
1421   case PPCISD::VECINSERT:       return "PPCISD::VECINSERT";
1422   case PPCISD::XXPERMDI:        return "PPCISD::XXPERMDI";
1423   case PPCISD::VECSHL:          return "PPCISD::VECSHL";
1424   case PPCISD::CMPB:            return "PPCISD::CMPB";
1425   case PPCISD::Hi:              return "PPCISD::Hi";
1426   case PPCISD::Lo:              return "PPCISD::Lo";
1427   case PPCISD::TOC_ENTRY:       return "PPCISD::TOC_ENTRY";
1428   case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
1429   case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
1430   case PPCISD::DYNALLOC:        return "PPCISD::DYNALLOC";
1431   case PPCISD::DYNAREAOFFSET:   return "PPCISD::DYNAREAOFFSET";
1432   case PPCISD::PROBED_ALLOCA:   return "PPCISD::PROBED_ALLOCA";
1433   case PPCISD::GlobalBaseReg:   return "PPCISD::GlobalBaseReg";
1434   case PPCISD::SRL:             return "PPCISD::SRL";
1435   case PPCISD::SRA:             return "PPCISD::SRA";
1436   case PPCISD::SHL:             return "PPCISD::SHL";
1437   case PPCISD::SRA_ADDZE:       return "PPCISD::SRA_ADDZE";
1438   case PPCISD::CALL:            return "PPCISD::CALL";
1439   case PPCISD::CALL_NOP:        return "PPCISD::CALL_NOP";
1440   case PPCISD::CALL_NOTOC:      return "PPCISD::CALL_NOTOC";
1441   case PPCISD::MTCTR:           return "PPCISD::MTCTR";
1442   case PPCISD::BCTRL:           return "PPCISD::BCTRL";
1443   case PPCISD::BCTRL_LOAD_TOC:  return "PPCISD::BCTRL_LOAD_TOC";
1444   case PPCISD::RET_FLAG:        return "PPCISD::RET_FLAG";
1445   case PPCISD::READ_TIME_BASE:  return "PPCISD::READ_TIME_BASE";
1446   case PPCISD::EH_SJLJ_SETJMP:  return "PPCISD::EH_SJLJ_SETJMP";
1447   case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
1448   case PPCISD::MFOCRF:          return "PPCISD::MFOCRF";
1449   case PPCISD::MFVSR:           return "PPCISD::MFVSR";
1450   case PPCISD::MTVSRA:          return "PPCISD::MTVSRA";
1451   case PPCISD::MTVSRZ:          return "PPCISD::MTVSRZ";
1452   case PPCISD::SINT_VEC_TO_FP:  return "PPCISD::SINT_VEC_TO_FP";
1453   case PPCISD::UINT_VEC_TO_FP:  return "PPCISD::UINT_VEC_TO_FP";
1454   case PPCISD::SCALAR_TO_VECTOR_PERMUTED:
1455     return "PPCISD::SCALAR_TO_VECTOR_PERMUTED";
1456   case PPCISD::ANDI_rec_1_EQ_BIT:
1457     return "PPCISD::ANDI_rec_1_EQ_BIT";
1458   case PPCISD::ANDI_rec_1_GT_BIT:
1459     return "PPCISD::ANDI_rec_1_GT_BIT";
1460   case PPCISD::VCMP:            return "PPCISD::VCMP";
1461   case PPCISD::VCMPo:           return "PPCISD::VCMPo";
1462   case PPCISD::LBRX:            return "PPCISD::LBRX";
1463   case PPCISD::STBRX:           return "PPCISD::STBRX";
1464   case PPCISD::LFIWAX:          return "PPCISD::LFIWAX";
1465   case PPCISD::LFIWZX:          return "PPCISD::LFIWZX";
1466   case PPCISD::LXSIZX:          return "PPCISD::LXSIZX";
1467   case PPCISD::STXSIX:          return "PPCISD::STXSIX";
1468   case PPCISD::VEXTS:           return "PPCISD::VEXTS";
1469   case PPCISD::LXVD2X:          return "PPCISD::LXVD2X";
1470   case PPCISD::STXVD2X:         return "PPCISD::STXVD2X";
1471   case PPCISD::LOAD_VEC_BE:     return "PPCISD::LOAD_VEC_BE";
1472   case PPCISD::STORE_VEC_BE:    return "PPCISD::STORE_VEC_BE";
1473   case PPCISD::ST_VSR_SCAL_INT:
1474                                 return "PPCISD::ST_VSR_SCAL_INT";
1475   case PPCISD::COND_BRANCH:     return "PPCISD::COND_BRANCH";
1476   case PPCISD::BDNZ:            return "PPCISD::BDNZ";
1477   case PPCISD::BDZ:             return "PPCISD::BDZ";
1478   case PPCISD::MFFS:            return "PPCISD::MFFS";
1479   case PPCISD::FADDRTZ:         return "PPCISD::FADDRTZ";
1480   case PPCISD::TC_RETURN:       return "PPCISD::TC_RETURN";
1481   case PPCISD::CR6SET:          return "PPCISD::CR6SET";
1482   case PPCISD::CR6UNSET:        return "PPCISD::CR6UNSET";
1483   case PPCISD::PPC32_GOT:       return "PPCISD::PPC32_GOT";
1484   case PPCISD::PPC32_PICGOT:    return "PPCISD::PPC32_PICGOT";
1485   case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
1486   case PPCISD::LD_GOT_TPREL_L:  return "PPCISD::LD_GOT_TPREL_L";
1487   case PPCISD::ADD_TLS:         return "PPCISD::ADD_TLS";
1488   case PPCISD::ADDIS_TLSGD_HA:  return "PPCISD::ADDIS_TLSGD_HA";
1489   case PPCISD::ADDI_TLSGD_L:    return "PPCISD::ADDI_TLSGD_L";
1490   case PPCISD::GET_TLS_ADDR:    return "PPCISD::GET_TLS_ADDR";
1491   case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
1492   case PPCISD::ADDIS_TLSLD_HA:  return "PPCISD::ADDIS_TLSLD_HA";
1493   case PPCISD::ADDI_TLSLD_L:    return "PPCISD::ADDI_TLSLD_L";
1494   case PPCISD::GET_TLSLD_ADDR:  return "PPCISD::GET_TLSLD_ADDR";
1495   case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
1496   case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
1497   case PPCISD::ADDI_DTPREL_L:   return "PPCISD::ADDI_DTPREL_L";
1498   case PPCISD::VADD_SPLAT:      return "PPCISD::VADD_SPLAT";
1499   case PPCISD::SC:              return "PPCISD::SC";
1500   case PPCISD::CLRBHRB:         return "PPCISD::CLRBHRB";
1501   case PPCISD::MFBHRBE:         return "PPCISD::MFBHRBE";
1502   case PPCISD::RFEBB:           return "PPCISD::RFEBB";
1503   case PPCISD::XXSWAPD:         return "PPCISD::XXSWAPD";
1504   case PPCISD::SWAP_NO_CHAIN:   return "PPCISD::SWAP_NO_CHAIN";
1505   case PPCISD::VABSD:           return "PPCISD::VABSD";
1506   case PPCISD::BUILD_FP128:     return "PPCISD::BUILD_FP128";
1507   case PPCISD::BUILD_SPE64:     return "PPCISD::BUILD_SPE64";
1508   case PPCISD::EXTRACT_SPE:     return "PPCISD::EXTRACT_SPE";
1509   case PPCISD::EXTSWSLI:        return "PPCISD::EXTSWSLI";
1510   case PPCISD::LD_VSX_LH:       return "PPCISD::LD_VSX_LH";
1511   case PPCISD::FP_EXTEND_HALF:  return "PPCISD::FP_EXTEND_HALF";
1512   case PPCISD::MAT_PCREL_ADDR:  return "PPCISD::MAT_PCREL_ADDR";
1513   case PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR:
1514     return "PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR";
1515   case PPCISD::TLS_LOCAL_EXEC_MAT_ADDR:
1516     return "PPCISD::TLS_LOCAL_EXEC_MAT_ADDR";
1517   case PPCISD::LD_SPLAT:        return "PPCISD::LD_SPLAT";
1518   case PPCISD::FNMSUB:          return "PPCISD::FNMSUB";
1519   case PPCISD::STRICT_FADDRTZ:
1520     return "PPCISD::STRICT_FADDRTZ";
1521   case PPCISD::STRICT_FCTIDZ:
1522     return "PPCISD::STRICT_FCTIDZ";
1523   case PPCISD::STRICT_FCTIWZ:
1524     return "PPCISD::STRICT_FCTIWZ";
1525   case PPCISD::STRICT_FCTIDUZ:
1526     return "PPCISD::STRICT_FCTIDUZ";
1527   case PPCISD::STRICT_FCTIWUZ:
1528     return "PPCISD::STRICT_FCTIWUZ";
1529   case PPCISD::STRICT_FCFID:
1530     return "PPCISD::STRICT_FCFID";
1531   case PPCISD::STRICT_FCFIDU:
1532     return "PPCISD::STRICT_FCFIDU";
1533   case PPCISD::STRICT_FCFIDS:
1534     return "PPCISD::STRICT_FCFIDS";
1535   case PPCISD::STRICT_FCFIDUS:
1536     return "PPCISD::STRICT_FCFIDUS";
1537   case PPCISD::LXVRZX:          return "PPCISD::LXVRZX";
1538   }
1539   return nullptr;
1540 }
1541 
1542 EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
1543                                           EVT VT) const {
1544   if (!VT.isVector())
1545     return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;
1546 
1547   return VT.changeVectorElementTypeToInteger();
1548 }
1549 
1550 bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
1551   assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
1552   return true;
1553 }
1554 
1555 //===----------------------------------------------------------------------===//
1556 // Node matching predicates, for use by the tblgen matching code.
1557 //===----------------------------------------------------------------------===//
1558 
1559 /// isFloatingPointZero - Return true if this is 0.0 or -0.0.
1560 static bool isFloatingPointZero(SDValue Op) {
1561   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
1562     return CFP->getValueAPF().isZero();
1563   else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
1564     // Maybe this has already been legalized into the constant pool?
1565     if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
1566       if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
1567         return CFP->getValueAPF().isZero();
1568   }
1569   return false;
1570 }
1571 
/// isConstantOrUndef - Return true if the mask element Op is undef (negative)
/// or if it matches the specified value.
1574 static bool isConstantOrUndef(int Op, int Val) {
1575   return Op < 0 || Op == Val;
1576 }
1577 
1578 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
1579 /// VPKUHUM instruction.
1580 /// The ShuffleKind distinguishes between big-endian operations with
1581 /// two different inputs (0), either-endian operations with two identical
1582 /// inputs (1), and little-endian operations with two different inputs (2).
1583 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
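/// For example, with two different big-endian inputs (ShuffleKind 0), the
/// vpkuhum mask selects the odd-numbered bytes of the concatenated inputs,
/// i.e. the low byte of each big-endian halfword:
///   <1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31>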
1584 bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1585                                SelectionDAG &DAG) {
1586   bool IsLE = DAG.getDataLayout().isLittleEndian();
1587   if (ShuffleKind == 0) {
1588     if (IsLE)
1589       return false;
1590     for (unsigned i = 0; i != 16; ++i)
1591       if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
1592         return false;
1593   } else if (ShuffleKind == 2) {
1594     if (!IsLE)
1595       return false;
1596     for (unsigned i = 0; i != 16; ++i)
1597       if (!isConstantOrUndef(N->getMaskElt(i), i*2))
1598         return false;
1599   } else if (ShuffleKind == 1) {
1600     unsigned j = IsLE ? 0 : 1;
1601     for (unsigned i = 0; i != 8; ++i)
1602       if (!isConstantOrUndef(N->getMaskElt(i),    i*2+j) ||
1603           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j))
1604         return false;
1605   }
1606   return true;
1607 }
1608 
1609 /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
1610 /// VPKUWUM instruction.
1611 /// The ShuffleKind distinguishes between big-endian operations with
1612 /// two different inputs (0), either-endian operations with two identical
1613 /// inputs (1), and little-endian operations with two different inputs (2).
1614 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1615 bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1616                                SelectionDAG &DAG) {
1617   bool IsLE = DAG.getDataLayout().isLittleEndian();
1618   if (ShuffleKind == 0) {
1619     if (IsLE)
1620       return false;
1621     for (unsigned i = 0; i != 16; i += 2)
1622       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+2) ||
1623           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+3))
1624         return false;
1625   } else if (ShuffleKind == 2) {
1626     if (!IsLE)
1627       return false;
1628     for (unsigned i = 0; i != 16; i += 2)
1629       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
1630           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1))
1631         return false;
1632   } else if (ShuffleKind == 1) {
1633     unsigned j = IsLE ? 0 : 2;
1634     for (unsigned i = 0; i != 8; i += 2)
1635       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
1636           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
1637           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
1638           !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1))
1639         return false;
1640   }
1641   return true;
1642 }
1643 
1644 /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
1645 /// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
1646 /// current subtarget.
1647 ///
1648 /// The ShuffleKind distinguishes between big-endian operations with
1649 /// two different inputs (0), either-endian operations with two identical
1650 /// inputs (1), and little-endian operations with two different inputs (2).
1651 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
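/// For example, with two different big-endian inputs (ShuffleKind 0), the
/// vpkudum mask selects the low word of each big-endian doubleword:
///   <4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31>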
1652 bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1653                                SelectionDAG &DAG) {
1654   const PPCSubtarget& Subtarget =
1655       static_cast<const PPCSubtarget&>(DAG.getSubtarget());
1656   if (!Subtarget.hasP8Vector())
1657     return false;
1658 
1659   bool IsLE = DAG.getDataLayout().isLittleEndian();
1660   if (ShuffleKind == 0) {
1661     if (IsLE)
1662       return false;
1663     for (unsigned i = 0; i != 16; i += 4)
1664       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+4) ||
1665           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+5) ||
1666           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+6) ||
1667           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+7))
1668         return false;
1669   } else if (ShuffleKind == 2) {
1670     if (!IsLE)
1671       return false;
1672     for (unsigned i = 0; i != 16; i += 4)
1673       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
1674           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1) ||
1675           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+2) ||
1676           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+3))
1677         return false;
1678   } else if (ShuffleKind == 1) {
1679     unsigned j = IsLE ? 0 : 4;
1680     for (unsigned i = 0; i != 8; i += 4)
1681       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
1682           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
1683           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+j+2) ||
1684           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+j+3) ||
1685           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
1686           !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1) ||
1687           !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
1688           !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
1689         return false;
1690   }
1691   return true;
1692 }
1693 
1694 /// isVMerge - Common function, used to match vmrg* shuffles.
1695 ///
1696 static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
1697                      unsigned LHSStart, unsigned RHSStart) {
1698   if (N->getValueType(0) != MVT::v16i8)
1699     return false;
1700   assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
1701          "Unsupported merge size!");
1702 
1703   for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
1704     for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
1705       if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
1706                              LHSStart+j+i*UnitSize) ||
1707           !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
1708                              RHSStart+j+i*UnitSize))
1709         return false;
1710     }
1711   return true;
1712 }
1713 
1714 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
1715 /// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
1716 /// The ShuffleKind distinguishes between big-endian merges with two
1717 /// different inputs (0), either-endian merges with two identical inputs (1),
1718 /// and little-endian merges with two different inputs (2).  For the latter,
1719 /// the input operands are swapped (see PPCInstrAltivec.td).
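/// For example, a big-endian vmrglb of two different inputs (ShuffleKind 0,
/// UnitSize 1) interleaves the low halves of the two vectors:
///   <8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31>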
1720 bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1721                              unsigned ShuffleKind, SelectionDAG &DAG) {
1722   if (DAG.getDataLayout().isLittleEndian()) {
1723     if (ShuffleKind == 1) // unary
1724       return isVMerge(N, UnitSize, 0, 0);
1725     else if (ShuffleKind == 2) // swapped
1726       return isVMerge(N, UnitSize, 0, 16);
1727     else
1728       return false;
1729   } else {
1730     if (ShuffleKind == 1) // unary
1731       return isVMerge(N, UnitSize, 8, 8);
1732     else if (ShuffleKind == 0) // normal
1733       return isVMerge(N, UnitSize, 8, 24);
1734     else
1735       return false;
1736   }
1737 }
1738 
1739 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
1740 /// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
1741 /// The ShuffleKind distinguishes between big-endian merges with two
1742 /// different inputs (0), either-endian merges with two identical inputs (1),
1743 /// and little-endian merges with two different inputs (2).  For the latter,
1744 /// the input operands are swapped (see PPCInstrAltivec.td).
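/// For example, a big-endian vmrghw of two different inputs (ShuffleKind 0,
/// UnitSize 4) interleaves the two high words of each vector:
///   <0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23>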
1745 bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1746                              unsigned ShuffleKind, SelectionDAG &DAG) {
1747   if (DAG.getDataLayout().isLittleEndian()) {
1748     if (ShuffleKind == 1) // unary
1749       return isVMerge(N, UnitSize, 8, 8);
1750     else if (ShuffleKind == 2) // swapped
1751       return isVMerge(N, UnitSize, 8, 24);
1752     else
1753       return false;
1754   } else {
1755     if (ShuffleKind == 1) // unary
1756       return isVMerge(N, UnitSize, 0, 0);
1757     else if (ShuffleKind == 0) // normal
1758       return isVMerge(N, UnitSize, 0, 16);
1759     else
1760       return false;
1761   }
1762 }
1763 
1764 /**
1765  * Common function used to match vmrgew and vmrgow shuffles
1766  *
1767  * The indexOffset determines whether to look for even or odd words in
 * the shuffle mask. This is based on the endianness of the target
 * machine.
1770  *   - Little Endian:
1771  *     - Use offset of 0 to check for odd elements
1772  *     - Use offset of 4 to check for even elements
1773  *   - Big Endian:
1774  *     - Use offset of 0 to check for even elements
1775  *     - Use offset of 4 to check for odd elements
 * A detailed description of the vector element ordering for little endian
 * and big endian can be found in the IBM developerWorks article "Targeting
 * your applications - what little endian and big endian IBM XL C/C++
 * compiler differences mean to you":
 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
1781  *
1782  * The mask to the shuffle vector instruction specifies the indices of the
1783  * elements from the two input vectors to place in the result. The elements are
 * numbered in array-access order, starting with the first vector. These
 * vectors are always of type v16i8, so each vector contains 16 byte-sized
 * elements. More information on the shufflevector instruction can be found
 * in the Language Reference:
 * http://llvm.org/docs/LangRef.html#shufflevector-instruction
1789  *
1790  * The RHSStartValue indicates whether the same input vectors are used (unary)
1791  * or two different input vectors are used, based on the following:
1792  *   - If the instruction uses the same vector for both inputs, the range of the
1793  *     indices will be 0 to 15. In this case, the RHSStart value passed should
1794  *     be 0.
1795  *   - If the instruction has two different vectors then the range of the
1796  *     indices will be 0 to 31. In this case, the RHSStart value passed should
1797  *     be 16 (indices 0-15 specify elements in the first vector while indices 16
1798  *     to 31 specify elements in the second vector).
1799  *
1800  * \param[in] N The shuffle vector SD Node to analyze
1801  * \param[in] IndexOffset Specifies whether to look for even or odd elements
1802  * \param[in] RHSStartValue Specifies the starting index for the righthand input
1803  * vector to the shuffle_vector instruction
1804  * \return true iff this shuffle vector represents an even or odd word merge
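 *
 * For example, a big-endian vmrgew of two different inputs (IndexOffset 0,
 * RHSStartValue 16) matches the mask
 *   <0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27>
 * which places words 0 and 2 (the even words) of each input into the result.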
1805  */
1806 static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
1807                      unsigned RHSStartValue) {
1808   if (N->getValueType(0) != MVT::v16i8)
1809     return false;
1810 
1811   for (unsigned i = 0; i < 2; ++i)
1812     for (unsigned j = 0; j < 4; ++j)
1813       if (!isConstantOrUndef(N->getMaskElt(i*4+j),
1814                              i*RHSStartValue+j+IndexOffset) ||
1815           !isConstantOrUndef(N->getMaskElt(i*4+j+8),
1816                              i*RHSStartValue+j+IndexOffset+8))
1817         return false;
1818   return true;
1819 }
1820 
1821 /**
1822  * Determine if the specified shuffle mask is suitable for the vmrgew or
1823  * vmrgow instructions.
1824  *
1825  * \param[in] N The shuffle vector SD Node to analyze
1826  * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
1827  * \param[in] ShuffleKind Identify the type of merge:
1828  *   - 0 = big-endian merge with two different inputs;
1829  *   - 1 = either-endian merge with two identical inputs;
1830  *   - 2 = little-endian merge with two different inputs (inputs are swapped for
1831  *     little-endian merges).
1832  * \param[in] DAG The current SelectionDAG
 * \return true iff this shuffle mask is suitable for a vmrgew or vmrgow
 * instruction
1834  */
1835 bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
1836                               unsigned ShuffleKind, SelectionDAG &DAG) {
1837   if (DAG.getDataLayout().isLittleEndian()) {
1838     unsigned indexOffset = CheckEven ? 4 : 0;
1839     if (ShuffleKind == 1) // Unary
1840       return isVMerge(N, indexOffset, 0);
1841     else if (ShuffleKind == 2) // swapped
1842       return isVMerge(N, indexOffset, 16);
1843     else
1844       return false;
  } else {
1847     unsigned indexOffset = CheckEven ? 0 : 4;
1848     if (ShuffleKind == 1) // Unary
1849       return isVMerge(N, indexOffset, 0);
1850     else if (ShuffleKind == 0) // Normal
1851       return isVMerge(N, indexOffset, 16);
1852     else
1853       return false;
1854   }
1855   return false;
1856 }
1857 
1858 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
1859 /// amount, otherwise return -1.
1860 /// The ShuffleKind distinguishes between big-endian operations with two
1861 /// different inputs (0), either-endian operations with two identical inputs
1862 /// (1), and little-endian operations with two different inputs (2).  For the
1863 /// latter, the input operands are swapped (see PPCInstrAltivec.td).
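/// For example, with two different big-endian inputs (ShuffleKind 0), the
/// mask <3, 4, 5, ..., 18> is a vsldoi with a shift amount of 3 bytes.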
1864 int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
1865                              SelectionDAG &DAG) {
1866   if (N->getValueType(0) != MVT::v16i8)
1867     return -1;
1868 
1869   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1870 
1871   // Find the first non-undef value in the shuffle mask.
1872   unsigned i;
1873   for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
1874     /*search*/;
1875 
1876   if (i == 16) return -1;  // all undef.
1877 
1878   // Otherwise, check to see if the rest of the elements are consecutively
1879   // numbered from this value.
1880   unsigned ShiftAmt = SVOp->getMaskElt(i);
1881   if (ShiftAmt < i) return -1;
1882 
1883   ShiftAmt -= i;
1884   bool isLE = DAG.getDataLayout().isLittleEndian();
1885 
1886   if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
1887     // Check the rest of the elements to see if they are consecutive.
1888     for (++i; i != 16; ++i)
1889       if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
1890         return -1;
1891   } else if (ShuffleKind == 1) {
1892     // Check the rest of the elements to see if they are consecutive.
1893     for (++i; i != 16; ++i)
1894       if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
1895         return -1;
1896   } else
1897     return -1;
1898 
1899   if (isLE)
1900     ShiftAmt = 16 - ShiftAmt;
1901 
1902   return ShiftAmt;
1903 }
1904 
1905 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
1906 /// specifies a splat of a single element that is suitable for input to
1907 /// one of the splat operations (VSPLTB/VSPLTH/VSPLTW/XXSPLTW/LXVDSX/etc.).
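/// For example, with EltSize 4, the mask
/// <4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7> is a splat of word
/// element 1.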
1908 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
1909   assert(N->getValueType(0) == MVT::v16i8 && isPowerOf2_32(EltSize) &&
1910          EltSize <= 8 && "Can only handle 1,2,4,8 byte element sizes");
1911 
1912   // The consecutive indices need to specify an element, not part of two
1913   // different elements.  So abandon ship early if this isn't the case.
1914   if (N->getMaskElt(0) % EltSize != 0)
1915     return false;
1916 
1917   // This is a splat operation if each element of the permute is the same, and
1918   // if the value doesn't reference the second vector.
1919   unsigned ElementBase = N->getMaskElt(0);
1920 
1921   // FIXME: Handle UNDEF elements too!
1922   if (ElementBase >= 16)
1923     return false;
1924 
1925   // Check that the indices are consecutive, in the case of a multi-byte element
1926   // splatted with a v16i8 mask.
1927   for (unsigned i = 1; i != EltSize; ++i)
1928     if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
1929       return false;
1930 
1931   for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
1932     if (N->getMaskElt(i) < 0) continue;
1933     for (unsigned j = 0; j != EltSize; ++j)
1934       if (N->getMaskElt(i+j) != N->getMaskElt(j))
1935         return false;
1936   }
1937   return true;
1938 }
1939 
1940 /// Check that the mask is shuffling N byte elements. Within each N byte
1941 /// element of the mask, the indices could be either in increasing or
1942 /// decreasing order as long as they are consecutive.
1943 /// \param[in] N the shuffle vector SD Node to analyze
1944 /// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/
1945 /// Word/DoubleWord/QuadWord).
1946 /// \param[in] StepLen the delta indices number among the N byte element, if
1947 /// the mask is in increasing/decreasing order then it is 1/-1.
1948 /// \return true iff the mask is shuffling N byte elements.
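/// For example, the mask
/// <3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12> shuffles 4-byte
/// elements with StepLen -1; it is also the byte-reverse-within-word pattern
/// matched by isXXBRWShuffleMask below.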
1949 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
1950                                    int StepLen) {
1951   assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
1952          "Unexpected element width.");
  assert((StepLen == 1 || StepLen == -1) && "Unexpected step length.");
1954 
1955   unsigned NumOfElem = 16 / Width;
1956   unsigned MaskVal[16]; //  Width is never greater than 16
1957   for (unsigned i = 0; i < NumOfElem; ++i) {
1958     MaskVal[0] = N->getMaskElt(i * Width);
1959     if ((StepLen == 1) && (MaskVal[0] % Width)) {
1960       return false;
1961     } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
1962       return false;
1963     }
1964 
1965     for (unsigned int j = 1; j < Width; ++j) {
1966       MaskVal[j] = N->getMaskElt(i * Width + j);
1967       if (MaskVal[j] != MaskVal[j-1] + StepLen) {
1968         return false;
1969       }
1970     }
1971   }
1972 
1973   return true;
1974 }
1975 
1976 bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
1977                           unsigned &InsertAtByte, bool &Swap, bool IsLE) {
1978   if (!isNByteElemShuffleMask(N, 4, 1))
1979     return false;
1980 
1981   // Now we look at mask elements 0,4,8,12
1982   unsigned M0 = N->getMaskElt(0) / 4;
1983   unsigned M1 = N->getMaskElt(4) / 4;
1984   unsigned M2 = N->getMaskElt(8) / 4;
1985   unsigned M3 = N->getMaskElt(12) / 4;
1986   unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
1987   unsigned BigEndianShifts[] = { 3, 0, 1, 2 };
1988 
1989   // Below, let H and L be arbitrary elements of the shuffle mask
1990   // where H is in the range [4,7] and L is in the range [0,3].
1991   // H, 1, 2, 3 or L, 5, 6, 7
1992   if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
1993       (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
1994     ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
1995     InsertAtByte = IsLE ? 12 : 0;
1996     Swap = M0 < 4;
1997     return true;
1998   }
1999   // 0, H, 2, 3 or 4, L, 6, 7
2000   if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
2001       (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
2002     ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
2003     InsertAtByte = IsLE ? 8 : 4;
2004     Swap = M1 < 4;
2005     return true;
2006   }
2007   // 0, 1, H, 3 or 4, 5, L, 7
2008   if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
2009       (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
2010     ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
2011     InsertAtByte = IsLE ? 4 : 8;
2012     Swap = M2 < 4;
2013     return true;
2014   }
2015   // 0, 1, 2, H or 4, 5, 6, L
2016   if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
2017       (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
2018     ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
2019     InsertAtByte = IsLE ? 0 : 12;
2020     Swap = M3 < 4;
2021     return true;
2022   }
2023 
2024   // If both vector operands for the shuffle are the same vector, the mask will
2025   // contain only elements from the first one and the second one will be undef.
2026   if (N->getOperand(1).isUndef()) {
2027     ShiftElts = 0;
2028     Swap = true;
2029     unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
2030     if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
2031       InsertAtByte = IsLE ? 12 : 0;
2032       return true;
2033     }
2034     if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
2035       InsertAtByte = IsLE ? 8 : 4;
2036       return true;
2037     }
2038     if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
2039       InsertAtByte = IsLE ? 4 : 8;
2040       return true;
2041     }
2042     if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
2043       InsertAtByte = IsLE ? 0 : 12;
2044       return true;
2045     }
2046   }
2047 
2048   return false;
2049 }
2050 
2051 bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
2052                                bool &Swap, bool IsLE) {
2053   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2054   // Ensure each byte index of the word is consecutive.
2055   if (!isNByteElemShuffleMask(N, 4, 1))
2056     return false;
2057 
2058   // Now we look at mask elements 0,4,8,12, which are the beginning of words.
2059   unsigned M0 = N->getMaskElt(0) / 4;
2060   unsigned M1 = N->getMaskElt(4) / 4;
2061   unsigned M2 = N->getMaskElt(8) / 4;
2062   unsigned M3 = N->getMaskElt(12) / 4;
2063 
2064   // If both vector operands for the shuffle are the same vector, the mask will
2065   // contain only elements from the first one and the second one will be undef.
2066   if (N->getOperand(1).isUndef()) {
2067     assert(M0 < 4 && "Indexing into an undef vector?");
2068     if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
2069       return false;
2070 
2071     ShiftElts = IsLE ? (4 - M0) % 4 : M0;
2072     Swap = false;
2073     return true;
2074   }
2075 
2076   // Ensure each word index of the ShuffleVector Mask is consecutive.
2077   if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)
2078     return false;
2079 
2080   if (IsLE) {
2081     if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
2082       // Input vectors don't need to be swapped if the leading element
2083       // of the result is one of the 3 left elements of the second vector
2084       // (or if there is no shift to be done at all).
2085       Swap = false;
2086       ShiftElts = (8 - M0) % 8;
2087     } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
2088       // Input vectors need to be swapped if the leading element
2089       // of the result is one of the 3 left elements of the first vector
2090       // (or if we're shifting by 4 - thereby simply swapping the vectors).
2091       Swap = true;
2092       ShiftElts = (4 - M0) % 4;
2093     }
2094 
2095     return true;
2096   } else {                                          // BE
2097     if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
2098       // Input vectors don't need to be swapped if the leading element
2099       // of the result is one of the 4 elements of the first vector.
2100       Swap = false;
2101       ShiftElts = M0;
2102     } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
2103       // Input vectors need to be swapped if the leading element
2104       // of the result is one of the 4 elements of the right vector.
2105       Swap = true;
2106       ShiftElts = M0 - 4;
2107     }
2108 
2109     return true;
2110   }
2111 }
2112 
static bool isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) {
2114   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2115 
2116   if (!isNByteElemShuffleMask(N, Width, -1))
2117     return false;
2118 
2119   for (int i = 0; i < 16; i += Width)
2120     if (N->getMaskElt(i) != i + Width - 1)
2121       return false;
2122 
2123   return true;
2124 }
2125 
2126 bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) {
2127   return isXXBRShuffleMaskHelper(N, 2);
2128 }
2129 
2130 bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) {
2131   return isXXBRShuffleMaskHelper(N, 4);
2132 }
2133 
2134 bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) {
2135   return isXXBRShuffleMaskHelper(N, 8);
2136 }
2137 
2138 bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) {
2139   return isXXBRShuffleMaskHelper(N, 16);
2140 }
2141 
2142 /// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap
2143 /// if the inputs to the instruction should be swapped and set \p DM to the
2144 /// value for the immediate.
2145 /// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI
2146 /// AND element 0 of the result comes from the first input (LE) or second input
2147 /// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered.
2148 /// \return true iff the given mask of shuffle node \p N is a XXPERMDI shuffle
2149 /// mask.
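/// For example, with two different big-endian inputs, the mask
/// <0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31> selects
/// doubleword 0 of the first input and doubleword 1 of the second, giving
/// \p Swap = false and \p DM = 1.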
2150 bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
2151                                bool &Swap, bool IsLE) {
2152   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2153 
2154   // Ensure each byte index of the double word is consecutive.
2155   if (!isNByteElemShuffleMask(N, 8, 1))
2156     return false;
2157 
2158   unsigned M0 = N->getMaskElt(0) / 8;
2159   unsigned M1 = N->getMaskElt(8) / 8;
2160   assert(((M0 | M1) < 4) && "A mask element out of bounds?");
2161 
2162   // If both vector operands for the shuffle are the same vector, the mask will
2163   // contain only elements from the first one and the second one will be undef.
2164   if (N->getOperand(1).isUndef()) {
2165     if ((M0 | M1) < 2) {
2166       DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
2167       Swap = false;
2168       return true;
2169     } else
2170       return false;
2171   }
2172 
2173   if (IsLE) {
2174     if (M0 > 1 && M1 < 2) {
2175       Swap = false;
2176     } else if (M0 < 2 && M1 > 1) {
2177       M0 = (M0 + 2) % 4;
2178       M1 = (M1 + 2) % 4;
2179       Swap = true;
2180     } else
2181       return false;
2182 
2183     // Note: if control flow comes here that means Swap is already set above
2184     DM = (((~M1) & 1) << 1) + ((~M0) & 1);
2185     return true;
2186   } else { // BE
2187     if (M0 < 2 && M1 > 1) {
2188       Swap = false;
2189     } else if (M0 > 1 && M1 < 2) {
2190       M0 = (M0 + 2) % 4;
2191       M1 = (M1 + 2) % 4;
2192       Swap = true;
2193     } else
2194       return false;
2195 
2196     // Note: if control flow comes here that means Swap is already set above
2197     DM = (M0 << 1) + (M1 & 1);
2198     return true;
2199   }
2200 }
2201 
2202 
2203 /// getSplatIdxForPPCMnemonics - Return the splat index as a value that is
2204 /// appropriate for PPC mnemonics (which have a big endian bias - namely
2205 /// elements are counted from the left of the vector register).
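/// For example, with EltSize 4 on a little-endian target, a splat of LE word
/// element 0 yields splat index 3, since that word is element 3 when counted
/// from the left of the register.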
2206 unsigned PPC::getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
2207                                          SelectionDAG &DAG) {
2208   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2209   assert(isSplatShuffleMask(SVOp, EltSize));
2210   if (DAG.getDataLayout().isLittleEndian())
2211     return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
2212   else
2213     return SVOp->getMaskElt(0) / EltSize;
2214 }
2215 
2216 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
2217 /// by using a vspltis[bhw] instruction of the specified element size, return
2218 /// the constant being splatted.  The ByteSize field indicates the number of
2219 /// bytes of each element [124] -> [bhw].
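/// For example, a v8i16 build_vector whose elements are all 0x0101 returns
/// the constant 1 when ByteSize is 1, since it can be materialized with
/// "vspltisb 1".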
2220 SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
2221   SDValue OpVal(nullptr, 0);
2222 
2223   // If ByteSize of the splat is bigger than the element size of the
2224   // build_vector, then we have a case where we are checking for a splat where
2225   // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
2227   unsigned EltSize = 16/N->getNumOperands();
2228   if (EltSize < ByteSize) {
2229     unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
2230     SDValue UniquedVals[4];
2231     assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
2232 
2233     // See if all of the elements in the buildvector agree across.
2234     for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2235       if (N->getOperand(i).isUndef()) continue;
2236       // If the element isn't a constant, bail fully out.
2237       if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();
2238 
2239       if (!UniquedVals[i&(Multiple-1)].getNode())
2240         UniquedVals[i&(Multiple-1)] = N->getOperand(i);
2241       else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
2242         return SDValue();  // no match.
2243     }
2244 
2245     // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
2246     // either constant or undef values that are identical for each chunk.  See
2247     // if these chunks can form into a larger vspltis*.
2248 
2249     // Check to see if all of the leading entries are either 0 or -1.  If
2250     // neither, then this won't fit into the immediate field.
2251     bool LeadingZero = true;
2252     bool LeadingOnes = true;
2253     for (unsigned i = 0; i != Multiple-1; ++i) {
2254       if (!UniquedVals[i].getNode()) continue;  // Must have been undefs.
2255 
2256       LeadingZero &= isNullConstant(UniquedVals[i]);
2257       LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
2258     }
2259     // Finally, check the least significant entry.
2260     if (LeadingZero) {
2261       if (!UniquedVals[Multiple-1].getNode())
2262         return DAG.getTargetConstant(0, SDLoc(N), MVT::i32);  // 0,0,0,undef
2263       int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
2264       if (Val < 16)                                   // 0,0,0,4 -> vspltisw(4)
2265         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2266     }
2267     if (LeadingOnes) {
2268       if (!UniquedVals[Multiple-1].getNode())
2269         return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
2271       if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
2272         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2273     }
2274 
2275     return SDValue();
2276   }
2277 
2278   // Check to see if this buildvec has a single non-undef value in its elements.
2279   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2280     if (N->getOperand(i).isUndef()) continue;
2281     if (!OpVal.getNode())
2282       OpVal = N->getOperand(i);
2283     else if (OpVal != N->getOperand(i))
2284       return SDValue();
2285   }
2286 
2287   if (!OpVal.getNode()) return SDValue();  // All UNDEF: use implicit def.
2288 
2289   unsigned ValSizeInBytes = EltSize;
2290   uint64_t Value = 0;
2291   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
2292     Value = CN->getZExtValue();
2293   } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
2294     assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
2295     Value = FloatToBits(CN->getValueAPF().convertToFloat());
2296   }
2297 
2298   // If the splat value is larger than the element value, then we can never do
2299   // this splat.  The only case that we could fit the replicated bits into our
2300   // immediate field for would be zero, and we prefer to use vxor for it.
2301   if (ValSizeInBytes < ByteSize) return SDValue();
2302 
2303   // If the element value is larger than the splat value, check if it consists
2304   // of a repeated bit pattern of size ByteSize.
2305   if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
2306     return SDValue();
2307 
2308   // Properly sign extend the value.
2309   int MaskVal = SignExtend32(Value, ByteSize * 8);
2310 
2311   // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
  // If this is zero, don't match; zero matches ISD::isBuildVectorAllZeros.
2313 
2314   // Finally, if this value fits in a 5 bit sext field, return it
2315   if (SignExtend32<5>(MaskVal) == MaskVal)
2316     return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
2317   return SDValue();
2318 }
2319 
2320 /// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift
2321 /// amount, otherwise return -1.
2322 int PPC::isQVALIGNIShuffleMask(SDNode *N) {
2323   EVT VT = N->getValueType(0);
2324   if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1)
2325     return -1;
2326 
2327   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2328 
2329   // Find the first non-undef value in the shuffle mask.
2330   unsigned i;
2331   for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i)
2332     /*search*/;
2333 
2334   if (i == 4) return -1;  // all undef.
2335 
2336   // Otherwise, check to see if the rest of the elements are consecutively
2337   // numbered from this value.
2338   unsigned ShiftAmt = SVOp->getMaskElt(i);
2339   if (ShiftAmt < i) return -1;
2340   ShiftAmt -= i;
2341 
2342   // Check the rest of the elements to see if they are consecutive.
2343   for (++i; i != 4; ++i)
2344     if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
2345       return -1;
2346 
2347   return ShiftAmt;
2348 }
2349 
2350 //===----------------------------------------------------------------------===//
2351 //  Addressing Mode Selection
2352 //===----------------------------------------------------------------------===//
2353 
2354 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit
2355 /// or 64-bit immediate, and if the value can be accurately represented as a
2356 /// sign extension from a 16-bit value.  If so, this returns true and the
2357 /// immediate.
2358 bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
2359   if (!isa<ConstantSDNode>(N))
2360     return false;
2361 
2362   Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
2363   if (N->getValueType(0) == MVT::i32)
2364     return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
2365   else
2366     return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
2367 }
2368 bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
2369   return isIntS16Immediate(Op.getNode(), Imm);
2370 }
2371 
2372 
2373 /// SelectAddressEVXRegReg - Given the specified address, check to see if it can
2374 /// be represented as an indexed [r+r] operation.
2375 bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base,
2376                                                SDValue &Index,
2377                                                SelectionDAG &DAG) const {
2378   for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
2379       UI != E; ++UI) {
2380     if (MemSDNode *Memop = dyn_cast<MemSDNode>(*UI)) {
2381       if (Memop->getMemoryVT() == MVT::f64) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
2385       }
2386     }
2387   }
2388   return false;
2389 }
2390 
/// SelectAddressRegReg - Given the specified address, check to see if it
2392 /// can be represented as an indexed [r+r] operation.  Returns false if it
2393 /// can be more efficiently represented as [r+imm]. If \p EncodingAlignment is
2394 /// non-zero and N can be represented by a base register plus a signed 16-bit
2395 /// displacement, make a more precise judgement by checking (displacement % \p
2396 /// EncodingAlignment).
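/// For example, DS-form instructions such as ld and std require the
/// displacement to be a multiple of 4, which is expressed by passing an
/// EncodingAlignment of 4.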
2397 bool PPCTargetLowering::SelectAddressRegReg(
2398     SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG,
2399     MaybeAlign EncodingAlignment) const {
2400   // If we have a PC Relative target flag don't select as [reg+reg]. It will be
2401   // a [pc+imm].
2402   if (SelectAddressPCRel(N, Base))
2403     return false;
2404 
2405   int16_t Imm = 0;
2406   if (N.getOpcode() == ISD::ADD) {
    // Is there any SPE load/store (f64) that can't handle a 16-bit offset?
    // SPE load/store can only handle 8-bit offsets.
    if (hasSPE() && SelectAddressEVXRegReg(N, Base, Index, DAG))
      return true;
2411     if (isIntS16Immediate(N.getOperand(1), Imm) &&
2412         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
2413       return false; // r+i
2414     if (N.getOperand(1).getOpcode() == PPCISD::Lo)
2415       return false;    // r+i
2416 
2417     Base = N.getOperand(0);
2418     Index = N.getOperand(1);
2419     return true;
2420   } else if (N.getOpcode() == ISD::OR) {
2421     if (isIntS16Immediate(N.getOperand(1), Imm) &&
2422         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
      return false; // Fold as r+i if we can.
2424 
2425     // If this is an or of disjoint bitfields, we can codegen this as an add
2426     // (for better address arithmetic) if the LHS and RHS of the OR are provably
2427     // disjoint.
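    // For example, in (or (shl %x, 4), (and %y, 15)) the low four bits of the
    // LHS and all higher bits of the RHS are known zero, so the OR behaves
    // exactly like an ADD and the operands can be used as [r+r].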
    KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));

    if (LHSKnown.Zero.getBoolValue()) {
      KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // carry.
      if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}

// If we happen to be doing an i64 load or store into a stack slot that has
// less than a 4-byte alignment, then the frame-index elimination may need to
// use an indexed load or store instruction (because the offset may not be a
// multiple of 4). The extra register needed to hold the offset comes from the
// register scavenger, and it is possible that the scavenger will need to use
// an emergency spill slot. As a result, we need to make sure that a spill slot
// is allocated when doing an i64 load/store into a less-than-4-byte-aligned
// stack slot.
static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
  // FIXME: This does not handle the LWA case.
  if (VT != MVT::i64)
    return;

  // NOTE: We'll exclude negative FIs here, which come from argument
  // lowering, because there are no known test cases triggering this problem
  // using packed structures (or similar). We can remove this exclusion if
  // we find such a test case. The reason this is so test-case driven is that
  // this entire 'fixup' exists only to prevent crashes (from the register
  // scavenger) on not-really-valid inputs. For example, if we have:
  //   %a = alloca i1
  //   %b = bitcast i1* %a to i64*
  //   store i64 0, i64* %b
  // then the store should really be marked as 'align 1', but is not. If it
  // were marked as 'align 1' then the indexed form would have been
  // instruction-selected initially, and the problem this 'fixup' is preventing
  // won't happen regardless.
  if (FrameIdx < 0)
    return;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  if (MFI.getObjectAlign(FrameIdx) >= Align(4))
    return;

  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setHasNonRISpills();
}

/// Returns true if the address N can be represented by a base register plus
/// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg.  If \p EncodingAlignment is non-zero, only accept
/// displacements that are multiples of that value.
bool PPCTargetLowering::SelectAddressRegImm(
    SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG,
    MaybeAlign EncodingAlignment) const {
  // FIXME dl should come from parent load or store, not from address
  SDLoc dl(N);

  // If we have a PC Relative target flag don't select as [reg+imm]. It will be
  // a [pc+imm].
  if (SelectAddressPCRel(N, Base))
    return false;

  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG, EncodingAlignment))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    int16_t imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
      Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
        fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true; // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true;  // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    int16_t imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));

      if ((LHSKnown.Zero.getZExtValue() | ~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // carry.
        if (FrameIndexSDNode *FI =
              dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
          Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
          fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
        } else {
          Base = N.getOperand(0);
        }
        Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.

    // If this address fits entirely in a 16-bit sext immediate field, codegen
    // this as "d, 0"
    int16_t Imm;
    if (isIntS16Immediate(CN, Imm) &&
        (!EncodingAlignment || isAligned(*EncodingAlignment, Imm))) {
      Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
      Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                             CN->getValueType(0));
      return true;
    }

    // Handle 32-bit sext immediates with LIS + addr mode.
    if ((CN->getValueType(0) == MVT::i32 ||
         (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
        (!EncodingAlignment ||
         isAligned(*EncodingAlignment, CN->getZExtValue()))) {
      int Addr = (int)CN->getZExtValue();

      // Otherwise, break this down into an LIS + disp.
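      // For example (a sketch), Addr = 0x12348000 gives
      //   Disp = (short)0x8000 = -32768
      //   Base = (0x12348000 - (-32768)) >> 16 = 0x1235
      // so LIS materializes 0x12350000, and the -32768 displacement brings
      // the effective address back to 0x12348000.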
      Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);

      Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
                                   MVT::i32);
      unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
      Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
    fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
  } else
    Base = N;
  return true;      // [r+0]
}

/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
                                                SDValue &Index,
                                                SelectionDAG &DAG) const {
  // Check to see if we can easily represent this as an [r+r] address.  This
  // will fail if it thinks that the address is more profitably represented as
  // reg+imm, e.g. where imm = 0.
  if (SelectAddressRegReg(N, Base, Index, DAG))
    return true;

  // If the address is the result of an add, we will utilize the fact that the
  // address calculation includes an implicit add.  However, we can reduce
  // register pressure if we do not materialize a constant just for use as the
  // index register.  We therefore only split the add when it is not an add of
  // a value and a 16-bit signed constant where both operands have a single
  // use.
  int16_t imm = 0;
  if (N.getOpcode() == ISD::ADD &&
      (!isIntS16Immediate(N.getOperand(1), imm) ||
       !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

  // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                         N.getValueType());
  Index = N;
  return true;
}

template <typename Ty> static bool isValidPCRelNode(SDValue N) {
  Ty *PCRelCand = dyn_cast<Ty>(N);
  return PCRelCand && (PCRelCand->getTargetFlags() & PPCII::MO_PCREL_FLAG);
}

/// Returns true if this address is a PC Relative address.
/// PC Relative addresses are marked with the flag PPCII::MO_PCREL_FLAG
/// or if the node opcode is PPCISD::MAT_PCREL_ADDR.
bool PPCTargetLowering::SelectAddressPCRel(SDValue N, SDValue &Base) const {
  Base = N;
  // This is a materialize PC-relative node; always select it as PC-relative.
  if (N.getOpcode() == PPCISD::MAT_PCREL_ADDR)
    return true;
  if (isValidPCRelNode<ConstantPoolSDNode>(N) ||
      isValidPCRelNode<GlobalAddressSDNode>(N) ||
      isValidPCRelNode<JumpTableSDNode>(N) ||
      isValidPCRelNode<BlockAddressSDNode>(N))
    return true;
  return false;
}

/// Returns true if we should use a direct load into vector instruction
/// (such as lxsd or lfd), instead of a load into gpr + direct move sequence.
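/// For example, an i64 scalar that only feeds a scalar_to_vector is better
/// loaded straight into a vector register (a single lxsd) than loaded into a
/// GPR and transferred with a direct move (a sketch: ld followed by mtvsrd).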
static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget &ST) {
  // If there are any uses other than scalar-to-vector, then we should keep it
  // as a scalar load -> direct move pattern to prevent multiple loads.
  LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
  if (!LD)
    return false;

  EVT MemVT = LD->getMemoryVT();
  if (!MemVT.isSimple())
    return false;
  switch (MemVT.getSimpleVT().SimpleTy) {
  case MVT::i64:
    break;
  case MVT::i32:
    if (!ST.hasP8Vector())
      return false;
    break;
  case MVT::i16:
  case MVT::i8:
    if (!ST.hasP9Vector())
      return false;
    break;
  default:
    return false;
  }

  SDValue LoadedVal(N, 0);
  if (!LoadedVal.hasOneUse())
    return false;

  for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end();
       UI != UE; ++UI)
    if (UI.getUse().get().getResNo() == 0 &&
        UI->getOpcode() != ISD::SCALAR_TO_VECTOR &&
        UI->getOpcode() != PPCISD::SCALAR_TO_VECTOR_PERMUTED)
      return false;

  return true;
}

/// getPreIndexedAddressParts - Returns true if the node's address can be
/// legally represented as a pre-indexed load/store address, and if so,
/// returns the base pointer, offset, and addressing mode by reference.
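/// For example, a pre-incremented load such as "lwzu r5, 4(r3)" loads from
/// r3+4 and writes the updated address back into r3 (a sketch; the exact
/// update-form mnemonic depends on the access type).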
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  if (DisablePPCPreinc) return false;

  bool isLoad = true;
  SDValue Ptr;
  EVT VT;
  unsigned Alignment;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    Alignment = LD->getAlignment();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT  = ST->getMemoryVT();
    Alignment = ST->getAlignment();
    isLoad = false;
  } else
    return false;

  // Do not generate pre-inc forms for specific loads that feed
  // scalar_to_vector instructions, because we can fold these into a more
  // efficient instruction instead (such as LXSD).
  if (isLoad && usePartialVectorLoads(N, Subtarget)) {
    return false;
  }

  // PowerPC doesn't have preinc load/store instructions for vectors.
  if (VT.isVector())
    return false;

  if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
    // Common code will reject creating a pre-inc form if the base pointer
    // is a frame index, or if N is a store and the base pointer is either
    // the same as or a predecessor of the value being stored.  Check for
    // those situations here, and try with swapped Base/Offset instead.
    bool Swap = false;

    if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
      Swap = true;
    else if (!isLoad) {
      SDValue Val = cast<StoreSDNode>(N)->getValue();
      if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
        Swap = true;
    }

    if (Swap)
      std::swap(Base, Offset);

    AM = ISD::PRE_INC;
    return true;
  }

  // LDU/STU can only handle immediates that are a multiple of 4.
  if (VT != MVT::i64) {
    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, None))
      return false;
  } else {
    // LDU/STU need an address with at least 4-byte alignment.
    if (Alignment < 4)
      return false;

    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, Align(4)))
      return false;
  }

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
    // sext i32 to i64 when addr mode is r+i.
    if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
        LD->getExtensionType() == ISD::SEXTLOAD &&
        isa<ConstantSDNode>(Offset))
      return false;
  }

  AM = ISD::PRE_INC;
  return true;
}

//===----------------------------------------------------------------------===//
//  LowerOperation implementation
//===----------------------------------------------------------------------===//

/// getLabelAccessInfo - Set HiOpFlags and LoOpFlags to the target MO flags
/// used to reference labels, adding the PIC flag when the relocation model
/// is position-independent.
static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
                               unsigned &HiOpFlags, unsigned &LoOpFlags,
                               const GlobalValue *GV = nullptr) {
  HiOpFlags = PPCII::MO_HA;
  LoOpFlags = PPCII::MO_LO;

  // Use the PIC base register only in the PIC relocation model.
  if (IsPIC) {
    HiOpFlags |= PPCII::MO_PIC_FLAG;
    LoOpFlags |= PPCII::MO_PIC_FLAG;
  }
}

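// LowerLabelRef - Combine the PPCISD::Hi and PPCISD::Lo halves of a label
// reference into a full address. For non-PIC code this typically lowers to a
// pair such as (a sketch):
//   lis  rT, sym@ha
//   addi rD, rT, sym@l
// while PIC code first adds the PIC base register to the high part.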
static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
                             SelectionDAG &DAG) {
  SDLoc DL(HiPart);
  EVT PtrVT = HiPart.getValueType();
  SDValue Zero = DAG.getConstant(0, DL, PtrVT);

  SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
  SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);

  // With PIC, the first instruction is actually "GR+hi(&G)".
  if (isPIC)
    Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);

  // Generate non-pic code that has direct accesses to the constant pool.
  // The address of the global is just (hi(&g)+lo(&g)).
  return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
}

static void setUsesTOCBasePtr(MachineFunction &MF) {
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setUsesTOCBasePtr();
}

static void setUsesTOCBasePtr(SelectionDAG &DAG) {
  setUsesTOCBasePtr(DAG.getMachineFunction());
}

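// getTOCEntry - Build a TOC_ENTRY node that loads the address in \p GA from
// the TOC. On 64-bit targets this is a load relative to X2, e.g. on 64-bit
// ELF roughly "ld rD, sym@toc(r2)" (a sketch); AIX uses R2/X2 directly and
// 32-bit ELF PIC goes through the GOT base register, matching the register
// selection below.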
SDValue PPCTargetLowering::getTOCEntry(SelectionDAG &DAG, const SDLoc &dl,
                                       SDValue GA) const {
  const bool Is64Bit = Subtarget.isPPC64();
  EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
  SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT)
                        : Subtarget.isAIXABI()
                              ? DAG.getRegister(PPC::R2, VT)
                              : DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);
  SDValue Ops[] = { GA, Reg };
  return DAG.getMemIntrinsicNode(
      PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
      MachinePointerInfo::getGOT(DAG.getMachineFunction()), None,
      MachineMemOperand::MOLoad);
}

SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  const Constant *C = CP->getConstVal();

  // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
    if (Subtarget.isUsingPCRelativeCalls()) {
      SDLoc DL(CP);
      EVT Ty = getPointerTy(DAG.getDataLayout());
      SDValue ConstPool = DAG.getTargetConstantPool(
          C, Ty, CP->getAlign(), CP->getOffset(), PPCII::MO_PCREL_FLAG);
      return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, ConstPool);
    }
    setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0);
    return getTOCEntry(DAG, SDLoc(CP), GA);
  }

  unsigned MOHiFlag, MOLoFlag;
  bool IsPIC = isPositionIndependent();
  getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);

  if (IsPIC && Subtarget.isSVR4ABI()) {
    SDValue GA =
        DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), PPCII::MO_PIC_FLAG);
    return getTOCEntry(DAG, SDLoc(CP), GA);
  }

  SDValue CPIHi =
      DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOHiFlag);
  SDValue CPILo =
      DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOLoFlag);
  return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG);
}

// For 64-bit PowerPC, prefer the more compact relative encodings.
// This trades 32 bits per jump table entry for one or two instructions
// at each jump site.
unsigned PPCTargetLowering::getJumpTableEncoding() const {
  if (isJumpTableRelative())
    return MachineJumpTableInfo::EK_LabelDifference32;

  return TargetLowering::getJumpTableEncoding();
}

bool PPCTargetLowering::isJumpTableRelative() const {
  if (UseAbsoluteJumpTables)
    return false;
  if (Subtarget.isPPC64() || Subtarget.isAIXABI())
    return true;
  return TargetLowering::isJumpTableRelative();
}

SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                    SelectionDAG &DAG) const {
  if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
    return TargetLowering::getPICJumpTableRelocBase(Table, DAG);

  switch (getTargetMachine().getCodeModel()) {
  case CodeModel::Small:
  case CodeModel::Medium:
    return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
  default:
    return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(),
                       getPointerTy(DAG.getDataLayout()));
  }
}

const MCExpr *
PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                                unsigned JTI,
                                                MCContext &Ctx) const {
  if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
    return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);

  switch (getTargetMachine().getCodeModel()) {
  case CodeModel::Small:
  case CodeModel::Medium:
    return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
  default:
    return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
  }
}

SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);

  // isUsingPCRelativeCalls() returns true when PC-relative addressing is
  // enabled.
  if (Subtarget.isUsingPCRelativeCalls()) {
    SDLoc DL(JT);
    EVT Ty = getPointerTy(DAG.getDataLayout());
    SDValue GA =
        DAG.getTargetJumpTable(JT->getIndex(), Ty, PPCII::MO_PCREL_FLAG);
    SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
    return MatAddr;
  }

  // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
    setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
    return getTOCEntry(DAG, SDLoc(JT), GA);
  }

  unsigned MOHiFlag, MOLoFlag;
  bool IsPIC = isPositionIndependent();
  getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);

  if (IsPIC && Subtarget.isSVR4ABI()) {
    SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
                                        PPCII::MO_PIC_FLAG);
    return getTOCEntry(DAG, SDLoc(GA), GA);
  }

  SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
  SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
  return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG);
}

SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
  const BlockAddress *BA = BASDN->getBlockAddress();

  // isUsingPCRelativeCalls() returns true when PC-relative addressing is
  // enabled.
  if (Subtarget.isUsingPCRelativeCalls()) {
    SDLoc DL(BASDN);
    EVT Ty = getPointerTy(DAG.getDataLayout());
    SDValue GA = DAG.getTargetBlockAddress(BA, Ty, BASDN->getOffset(),
                                           PPCII::MO_PCREL_FLAG);
    SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
    return MatAddr;
  }

  // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
  // The actual BlockAddress is stored in the TOC.
  if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
    setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
    return getTOCEntry(DAG, SDLoc(BASDN), GA);
  }

  // 32-bit position-independent ELF stores the BlockAddress in the .got.
  if (Subtarget.is32BitELFABI() && isPositionIndependent())
    return getTOCEntry(
        DAG, SDLoc(BASDN),
        DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()));

  unsigned MOHiFlag, MOLoFlag;
  bool IsPIC = isPositionIndependent();
  getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
  SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
  SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
  return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG);
}

SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  // FIXME: TLS addresses currently use medium model code sequences,
  // which is the most useful form.  Eventually support for small and
  // large models could be added if users need it, at the cost of
  // additional complexity.
  if (Subtarget.isUsingPCRelativeCalls() && !EnablePPCPCRelTLS)
    report_fatal_error("Thread local storage is not supported with pc-relative"
                       " addressing - please compile with -mno-pcrel");
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  if (DAG.getTarget().useEmulatedTLS())
    return LowerToTLSEmulatedModel(GA, DAG);

  SDLoc dl(GA);
  const GlobalValue *GV = GA->getGlobal();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  bool is64bit = Subtarget.isPPC64();
  const Module *M = DAG.getMachineFunction().getFunction().getParent();
  PICLevel::Level picLevel = M->getPICLevel();

  const TargetMachine &TM = getTargetMachine();
  TLSModel::Model Model = TM.getTLSModel(GV);

  if (Model == TLSModel::LocalExec) {
    if (Subtarget.isUsingPCRelativeCalls()) {
      SDValue TLSReg = DAG.getRegister(PPC::X13, MVT::i64);
      SDValue TGA = DAG.getTargetGlobalAddress(
          GV, dl, PtrVT, 0, (PPCII::MO_PCREL_FLAG | PPCII::MO_TPREL_FLAG));
      SDValue MatAddr =
          DAG.getNode(PPCISD::TLS_LOCAL_EXEC_MAT_ADDR, dl, PtrVT, TGA);
      return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TLSReg, MatAddr);
    }

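    // Without PC-relative addressing, local-exec lowers to the classic
    // thread-pointer-relative pair, e.g. on 64-bit ELF (a sketch):
    //   addis rT, r13, gv@tprel@ha
    //   addi  rD, rT, gv@tprel@l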
    SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                               PPCII::MO_TPREL_HA);
    SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                               PPCII::MO_TPREL_LO);
    SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64)
                             : DAG.getRegister(PPC::R2, MVT::i32);

    SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
    return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
  }

  if (Model == TLSModel::InitialExec) {
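    // Initial-exec: the variable's offset from the thread pointer is loaded
    // from the GOT (LD_GOT_TPREL_L below, or a PC-relative load) and then
    // added to the thread pointer via ADD_TLS.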
    bool IsPCRel = Subtarget.isUsingPCRelativeCalls();
    SDValue TGA = DAG.getTargetGlobalAddress(
        GV, dl, PtrVT, 0, IsPCRel ? PPCII::MO_GOT_TPREL_PCREL_FLAG : 0);
    SDValue TGATLS = DAG.getTargetGlobalAddress(
        GV, dl, PtrVT, 0,
        IsPCRel ? (PPCII::MO_TLS | PPCII::MO_PCREL_FLAG) : PPCII::MO_TLS);
    SDValue TPOffset;
    if (IsPCRel) {
      SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, dl, PtrVT, TGA);
      TPOffset = DAG.getLoad(MVT::i64, dl, DAG.getEntryNode(), MatPCRel,
                             MachinePointerInfo());
    } else {
      SDValue GOTPtr;
      if (is64bit) {
        setUsesTOCBasePtr(DAG);
        SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
        GOTPtr =
            DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, PtrVT, GOTReg, TGA);
      } else {
        if (!TM.isPositionIndependent())
          GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT);
        else if (picLevel == PICLevel::SmallPIC)
          GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
        else
          GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
      }
      TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, PtrVT, TGA, GOTPtr);
    }
    return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
  }

  if (Model == TLSModel::GeneralDynamic) {
    if (Subtarget.isUsingPCRelativeCalls()) {
      SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                               PPCII::MO_GOT_TLSGD_PCREL_FLAG);
      return DAG.getNode(PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR, dl, PtrVT, TGA);
    }

    SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
    SDValue GOTPtr;
    if (is64bit) {
      setUsesTOCBasePtr(DAG);
      SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
      GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
                           GOTReg, TGA);
    } else {
      if (picLevel == PICLevel::SmallPIC)
        GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
      else
        GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
    }
    return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT,
                       GOTPtr, TGA, TGA);
  }

  if (Model == TLSModel::LocalDynamic) {
    SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
    SDValue GOTPtr;
    if (is64bit) {
      setUsesTOCBasePtr(DAG);
      SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
      GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
                           GOTReg, TGA);
    } else {
      if (picLevel == PICLevel::SmallPIC)
        GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
      else
        GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
    }
    SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
                                  PtrVT, GOTPtr, TGA, TGA);
    SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
                                      PtrVT, TLSAddr, TGA);
    return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
  }

  llvm_unreachable("Unknown TLS model!");
}

SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
  SDLoc DL(GSDN);
  const GlobalValue *GV = GSDN->getGlobal();

  // 64-bit SVR4 ABI & AIX ABI code is always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
    if (Subtarget.isUsingPCRelativeCalls()) {
      EVT Ty = getPointerTy(DAG.getDataLayout());
      if (isAccessedAsGotIndirect(Op)) {
        SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
                                                PPCII::MO_PCREL_FLAG |
                                                    PPCII::MO_GOT_FLAG);
        SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
        SDValue Load = DAG.getLoad(MVT::i64, DL, DAG.getEntryNode(), MatPCRel,
                                   MachinePointerInfo());
        return Load;
      } else {
        SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
                                                PPCII::MO_PCREL_FLAG);
        return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
      }
    }
    setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
    return getTOCEntry(DAG, DL, GA);
  }

  unsigned MOHiFlag, MOLoFlag;
  bool IsPIC = isPositionIndependent();
  getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV);

  if (IsPIC && Subtarget.isSVR4ABI()) {
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
                                            GSDN->getOffset(),
                                            PPCII::MO_PIC_FLAG);
    return getTOCEntry(DAG, DL, GA);
  }

  SDValue GAHi =
    DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
  SDValue GALo =
    DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);

  return LowerLabelRef(GAHi, GALo, IsPIC, DAG);
}

SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  SDLoc dl(Op);

  if (Op.getValueType() == MVT::v2i64) {
    // When the operands themselves are v2i64 values, we need to do something
    // special because VSX has no underlying comparison operations for these.
    if (Op.getOperand(0).getValueType() == MVT::v2i64) {
      // Equality can be handled by casting to the legal type for Altivec
      // comparisons; everything else needs to be expanded.
      if (CC == ISD::SETEQ || CC == ISD::SETNE) {
        return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
                 DAG.getSetCC(dl, MVT::v4i32,
                   DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)),
                   DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)),
                   CC));
      }

      return SDValue();
    }

    // We handle most of these in the usual way.
    return Op;
  }

  // If we're comparing for equality to zero, expose the fact that this is
  // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
  // fold the new nodes.
  if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG))
    return V;

  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
    // Leave comparisons against 0 and -1 alone for now, since they're usually
    // optimized.  FIXME: revisit this when we can custom lower all setcc
    // optimizations.
    if (C->isAllOnesValue() || C->isNullValue())
      return SDValue();
  }

  // If we have an integer seteq/setne, turn it into a compare against zero
  // by xor'ing the rhs with the lhs, which is faster than setting a
  // condition register, reading it back out, and masking the correct bit.
  // The usual approach would use sub instead; using xor exposes the result
  // to other bit-twiddling opportunities.
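  // For example, (seteq %a, %b) becomes (seteq (xor %a, %b), 0), and the
  // equality-to-zero form is what lowerCmpEqZeroToCtlzSrl above can turn
  // into a ctlz/srl pair.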
  EVT LHSVT = Op.getOperand(0).getValueType();
  if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    EVT VT = Op.getValueType();
    SDValue Xor = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
                              Op.getOperand(1));
    return DAG.getSetCC(dl, VT, Xor, DAG.getConstant(0, dl, LHSVT), CC);
  }
  return SDValue();
}

SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc dl(Node);

  assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");

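  // The 32-bit SVR4 va_list layout assumed below (see LowerVASTART):
  //   byte 0: gpr index; byte 1: fpr index;
  //   bytes 4-7: overflow_arg_area; bytes 8-11: reg_save_area.
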
  // gpr_index
  SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
                                    VAListPtr, MachinePointerInfo(SV), MVT::i8);
  InChain = GprIndex.getValue(1);

  if (VT == MVT::i64) {
    // Check if GprIndex is even
    SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
                                 DAG.getConstant(1, dl, MVT::i32));
    SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
                                DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
    SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
                                          DAG.getConstant(1, dl, MVT::i32));
    // Align GprIndex to be even if it isn't
    GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
                           GprIndex);
  }

  // fpr index is 1 byte after gpr
  SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
                               DAG.getConstant(1, dl, MVT::i32));

  // fpr
  SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
                                    FprPtr, MachinePointerInfo(SV), MVT::i8);
  InChain = FprIndex.getValue(1);

  SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
                                       DAG.getConstant(8, dl, MVT::i32));

  SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
                                        DAG.getConstant(4, dl, MVT::i32));

  // areas
  SDValue OverflowArea =
      DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
  InChain = OverflowArea.getValue(1);

  SDValue RegSaveArea =
      DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
  InChain = RegSaveArea.getValue(1);

  // select overflow_area if index >= 8
  SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
                            DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);

  // adjustment constant gpr_index * 4/8
  SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
                                    VT.isInteger() ? GprIndex : FprIndex,
                                    DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
                                                    MVT::i32));

  // OurReg = RegSaveArea + RegConstant
  SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
                               RegConstant);

  // Floating types are 32 bytes into RegSaveArea
  if (VT.isFloatingPoint())
    OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
                         DAG.getConstant(32, dl, MVT::i32));

  // increase {f,g}pr_index by 1 (or 2 if VT is i64)
  SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
                                   VT.isInteger() ? GprIndex : FprIndex,
                                   DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
                                                   MVT::i32));

  InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
                              VT.isInteger() ? VAListPtr : FprPtr,
                              MachinePointerInfo(SV), MVT::i8);

  // determine if we should load from reg_save_area or overflow_area
  SDValue Result =
      DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);

  // increase overflow_area by 4/8 if gpr/fpr >= 8
  SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
                                          DAG.getConstant(VT.isInteger() ? 4 : 8,
                                          dl, MVT::i32));

  OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
                             OverflowAreaPlusN);

  InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
                              MachinePointerInfo(), MVT::i32);

  return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
  assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");

  // We have to copy the entire va_list struct:
  // 2*sizeof(char) + 2 bytes of padding + 2*sizeof(char*) = 12 bytes.
  return DAG.getMemcpy(Op.getOperand(0), Op, Op.getOperand(1), Op.getOperand(2),
                       DAG.getConstant(12, SDLoc(Op), MVT::i32), Align(8),
                       false, true, false, MachinePointerInfo(),
                       MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
                                                  SelectionDAG &DAG) const {
  if (Subtarget.isAIXABI())
    report_fatal_error("ADJUST_TRAMPOLINE operation is not supported on AIX.");

  return Op.getOperand(0);
}

SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
                                                SelectionDAG &DAG) const {
  if (Subtarget.isAIXABI())
    report_fatal_error("INIT_TRAMPOLINE operation is not supported on AIX.");

  SDValue Chain = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value
  SDLoc dl(Op);

  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  bool isPPC64 = (PtrVT == MVT::i64);
  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = Trmp; Args.push_back(Entry);

  // TrampSize == (isPPC64 ? 48 : 40);
  Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
                               isPPC64 ? MVT::i64 : MVT::i32);
  Args.push_back(Entry);

  Entry.Node = FPtr; Args.push_back(Entry);
  Entry.Node = Nest; Args.push_back(Entry);

  // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
      CallingConv::C, Type::getVoidTy(*DAG.getContext()),
      DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args));

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  return CallResult.second;
}

SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  SDLoc dl(Op);

  if (Subtarget.isPPC64() || Subtarget.isAIXABI()) {
    // vastart just stores the address of the VarArgsFrameIndex slot into the
    // memory location argument.
    SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
    const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
    return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
                        MachinePointerInfo(SV));
  }

  // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
  // We suppose the given va_list is already allocated.
  //
  // typedef struct {
  //  char gpr;     /* index into the array of 8 GPRs
  //                 * stored in the register save area
  //                 * gpr=0 corresponds to r3,
  //                 * gpr=1 to r4, etc.
  //                 */
  //  char fpr;     /* index into the array of 8 FPRs
  //                 * stored in the register save area
  //                 * fpr=0 corresponds to f1,
  //                 * fpr=1 to f2, etc.
  //                 */
  //  char *overflow_arg_area;
  //                /* location on stack that holds
  //                 * the next overflow argument
  //                 */
  //  char *reg_save_area;
  //               /* where r3:r10 and f1:f8 (if saved)
  //                * are stored
  //                */
  // } va_list[1];

  SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
  SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);
  SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
                                            PtrVT);
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                 PtrVT);

  uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
  SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT);

  uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
  SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT);

  uint64_t FPROffset = 1;
  SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT);

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  // Store first byte : number of int regs
  SDValue firstStore =
      DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1),
                        MachinePointerInfo(SV), MVT::i8);
  uint64_t nextOffset = FPROffset;
  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
                                ConstFPROffset);

  // Store second byte : number of float regs
  SDValue secondStore =
      DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
                        MachinePointerInfo(SV, nextOffset), MVT::i8);
  nextOffset += StackOffset;
  nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);

  // Store second word : arguments given on stack
  SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
                                    MachinePointerInfo(SV, nextOffset));
  nextOffset += FrameOffset;
  nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);

  // Store third word : arguments given in registers
  return DAG.getStore(thirdStore, dl, FR, nextPtr,
                      MachinePointerInfo(SV, nextOffset));
}

/// FPR - The set of FP registers that should be allocated for arguments
/// on Darwin and AIX.
static const MCPhysReg FPR[] = {PPC::F1,  PPC::F2,  PPC::F3, PPC::F4, PPC::F5,
                                PPC::F6,  PPC::F7,  PPC::F8, PPC::F9, PPC::F10,
                                PPC::F11, PPC::F12, PPC::F13};

/// CalculateStackSlotSize - Calculates the size reserved for this argument on
/// the stack.
static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
                                       unsigned PtrByteSize) {
  unsigned ArgSize = ArgVT.getStoreSize();
  if (Flags.isByVal())
    ArgSize = Flags.getByValSize();

  // Round up to multiples of the pointer size, except for array members,
  // which are always packed.
  if (!Flags.isInConsecutiveRegs())
    ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;

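  // For example, with 8-byte pointers a 12-byte aggregate reserves 16 bytes,
  // while an array member (isInConsecutiveRegs) keeps its exact 12 bytes.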
3484   return ArgSize;
3485 }
3486 
3487 /// CalculateStackSlotAlignment - Calculates the alignment of this argument
3488 /// on the stack.
3489 static Align CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
3490                                          ISD::ArgFlagsTy Flags,
3491                                          unsigned PtrByteSize) {
3492   Align Alignment(PtrByteSize);
3493 
3494   // Altivec parameters are padded to a 16 byte boundary.
3495   if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3496       ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3497       ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3498       ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3499     Alignment = Align(16);
3500 
3501   // ByVal parameters are aligned as requested.
3502   if (Flags.isByVal()) {
3503     auto BVAlign = Flags.getNonZeroByValAlign();
3504     if (BVAlign > PtrByteSize) {
3505       if (BVAlign.value() % PtrByteSize != 0)
3506         llvm_unreachable(
3507             "ByVal alignment is not a multiple of the pointer size");
3508 
3509       Alignment = BVAlign;
3510     }
3511   }
3512 
3513   // Array members are always packed to their original alignment.
3514   if (Flags.isInConsecutiveRegs()) {
3515     // If the array member was split into multiple registers, the first
3516     // needs to be aligned to the size of the full type.  (Except for
3517     // ppcf128, which is only aligned as its f64 components.)
3518     if (Flags.isSplit() && OrigVT != MVT::ppcf128)
3519       Alignment = Align(OrigVT.getStoreSize());
3520     else
3521       Alignment = Align(ArgVT.getStoreSize());
3522   }
3523 
3524   return Alignment;
3525 }
3526 
3527 /// CalculateStackSlotUsed - Return whether this argument will use its
3528 /// stack slot (instead of being passed in registers).  ArgOffset,
3529 /// AvailableFPRs, and AvailableVRs must hold the current argument
3530 /// position, and will be updated to account for this argument.
3531 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, ISD::ArgFlagsTy Flags,
3532                                    unsigned PtrByteSize, unsigned LinkageSize,
3533                                    unsigned ParamAreaSize, unsigned &ArgOffset,
3534                                    unsigned &AvailableFPRs,
3535                                    unsigned &AvailableVRs) {
3536   bool UseMemory = false;
3537 
3538   // Respect alignment of argument on the stack.
3539   Align Alignment =
3540       CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
3541   ArgOffset = alignTo(ArgOffset, Alignment);
3542   // If there's no space left in the argument save area, we must
3543   // use memory (this check also catches zero-sized arguments).
3544   if (ArgOffset >= LinkageSize + ParamAreaSize)
3545     UseMemory = true;
3546 
3547   // Allocate argument on the stack.
3548   ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
3549   if (Flags.isInConsecutiveRegsLast())
3550     ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3551   // If we overran the argument save area, we must use memory
3552   // (this check catches arguments passed partially in memory)
3553   if (ArgOffset > LinkageSize + ParamAreaSize)
3554     UseMemory = true;
3555 
3556   // However, if the argument is actually passed in an FPR or a VR,
3557   // we don't use memory after all.
3558   if (!Flags.isByVal()) {
3559     if (ArgVT == MVT::f32 || ArgVT == MVT::f64)
3560       if (AvailableFPRs > 0) {
3561         --AvailableFPRs;
3562         return false;
3563       }
3564     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3565         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3566         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3567         ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3568       if (AvailableVRs > 0) {
3569         --AvailableVRs;
3570         return false;
3571       }
3572   }
3573 
3574   return UseMemory;
3575 }
3576 
3577 /// EnsureStackAlignment - Round stack frame size up from NumBytes to
3578 /// ensure minimum alignment required for target.
3579 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
3580                                      unsigned NumBytes) {
3581   return alignTo(NumBytes, Lowering->getStackAlign());
3582 }
3583 
3584 SDValue PPCTargetLowering::LowerFormalArguments(
3585     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3586     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3587     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3588   if (Subtarget.isAIXABI())
3589     return LowerFormalArguments_AIX(Chain, CallConv, isVarArg, Ins, dl, DAG,
3590                                     InVals);
3591   if (Subtarget.is64BitELFABI())
3592     return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3593                                        InVals);
3594   if (Subtarget.is32BitELFABI())
3595     return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3596                                        InVals);
3597 
3598   return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, dl, DAG,
3599                                      InVals);
3600 }
3601 
3602 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
3603     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3604     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3605     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3606 
3607   // 32-bit SVR4 ABI Stack Frame Layout:
3608   //              +-----------------------------------+
3609   //        +-->  |            Back chain             |
3610   //        |     +-----------------------------------+
3611   //        |     | Floating-point register save area |
3612   //        |     +-----------------------------------+
3613   //        |     |    General register save area     |
3614   //        |     +-----------------------------------+
3615   //        |     |          CR save word             |
3616   //        |     +-----------------------------------+
3617   //        |     |         VRSAVE save word          |
3618   //        |     +-----------------------------------+
3619   //        |     |         Alignment padding         |
3620   //        |     +-----------------------------------+
3621   //        |     |     Vector register save area     |
3622   //        |     +-----------------------------------+
3623   //        |     |       Local variable space        |
3624   //        |     +-----------------------------------+
3625   //        |     |        Parameter list area        |
3626   //        |     +-----------------------------------+
3627   //        |     |           LR save word            |
3628   //        |     +-----------------------------------+
3629   // SP-->  +---  |            Back chain             |
3630   //              +-----------------------------------+
3631   //
3632   // Specifications:
3633   //   System V Application Binary Interface PowerPC Processor Supplement
3634   //   AltiVec Technology Programming Interface Manual
3635 
3636   MachineFunction &MF = DAG.getMachineFunction();
3637   MachineFrameInfo &MFI = MF.getFrameInfo();
3638   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3639 
3640   EVT PtrVT = getPointerTy(MF.getDataLayout());
3641   // Potential tail calls could cause overwriting of argument stack slots.
3642   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3643                        (CallConv == CallingConv::Fast));
3644   const Align PtrAlign(4);
3645 
3646   // Assign locations to all of the incoming arguments.
3647   SmallVector<CCValAssign, 16> ArgLocs;
3648   PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
3649                  *DAG.getContext());
3650 
3651   // Reserve space for the linkage area on the stack.
3652   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3653   CCInfo.AllocateStack(LinkageSize, PtrAlign);
3654   if (useSoftFloat())
3655     CCInfo.PreAnalyzeFormalArguments(Ins);
3656 
3657   CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
3658   CCInfo.clearWasPPCF128();
3659 
3660   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3661     CCValAssign &VA = ArgLocs[i];
3662 
3663     // Arguments stored in registers.
3664     if (VA.isRegLoc()) {
3665       const TargetRegisterClass *RC;
3666       EVT ValVT = VA.getValVT();
3667 
3668       switch (ValVT.getSimpleVT().SimpleTy) {
3669         default:
3670           llvm_unreachable("ValVT not supported by formal arguments Lowering");
3671         case MVT::i1:
3672         case MVT::i32:
3673           RC = &PPC::GPRCRegClass;
3674           break;
3675         case MVT::f32:
3676           if (Subtarget.hasP8Vector())
3677             RC = &PPC::VSSRCRegClass;
3678           else if (Subtarget.hasSPE())
3679             RC = &PPC::GPRCRegClass;
3680           else
3681             RC = &PPC::F4RCRegClass;
3682           break;
3683         case MVT::f64:
3684           if (Subtarget.hasVSX())
3685             RC = &PPC::VSFRCRegClass;
3686           else if (Subtarget.hasSPE())
3687             // SPE passes doubles in GPR pairs.
3688             RC = &PPC::GPRCRegClass;
3689           else
3690             RC = &PPC::F8RCRegClass;
3691           break;
        case MVT::v16i8:
        case MVT::v8i16:
        case MVT::v4i32:
        case MVT::v4f32:
        case MVT::v2f64:
        case MVT::v2i64:
          RC = &PPC::VRRCRegClass;
          break;
3704       }
3705 
3706       SDValue ArgValue;
3707       // Transform the arguments stored in physical registers into
3708       // virtual ones.
3709       if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) {
3710         assert(i + 1 < e && "No second half of double precision argument");
3711         unsigned RegLo = MF.addLiveIn(VA.getLocReg(), RC);
3712         unsigned RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC);
3713         SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32);
3714         SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32);
3715         if (!Subtarget.isLittleEndian())
          std::swap(ArgValueLo, ArgValueHi);
3717         ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo,
3718                                ArgValueHi);
3719       } else {
3720         unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3721         ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
3722                                       ValVT == MVT::i1 ? MVT::i32 : ValVT);
3723         if (ValVT == MVT::i1)
3724           ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);
3725       }
3726 
3727       InVals.push_back(ArgValue);
3728     } else {
3729       // Argument stored in memory.
3730       assert(VA.isMemLoc());
3731 
      // Get the extended size of the argument type on the stack.
      unsigned ArgSize = VA.getLocVT().getStoreSize();
      // Get the actual size of the argument type.
      unsigned ObjSize = VA.getValVT().getStoreSize();
3736       unsigned ArgOffset = VA.getLocMemOffset();
3737       // Stack objects in PPC32 are right justified.
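      // For example, an i16 in a 4-byte slot is stored in the slot's upper
      // two bytes, so its load offset is advanced by ArgSize - ObjSize = 2.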
3738       ArgOffset += ArgSize - ObjSize;
3739       int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, isImmutable);
3740 
3741       // Create load nodes to retrieve arguments from the stack.
3742       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3743       InVals.push_back(
3744           DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo()));
3745     }
3746   }
3747 
3748   // Assign locations to all of the incoming aggregate by value arguments.
3749   // Aggregates passed by value are stored in the local variable space of the
3750   // caller's stack frame, right above the parameter list area.
3751   SmallVector<CCValAssign, 16> ByValArgLocs;
3752   CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
3753                       ByValArgLocs, *DAG.getContext());
3754 
3755   // Reserve stack space for the allocations in CCInfo.
3756   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
3757 
3758   CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);
3759 
3760   // Area that is at least reserved in the caller of this function.
3761   unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
3762   MinReservedArea = std::max(MinReservedArea, LinkageSize);
3763 
3764   // Set the size that is at least reserved in caller of this function.  Tail
3765   // call optimized function's reserved stack space needs to be aligned so that
3766   // taking the difference between two stack areas will result in an aligned
3767   // stack.
3768   MinReservedArea =
3769       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
3770   FuncInfo->setMinReservedArea(MinReservedArea);
3771 
3772   SmallVector<SDValue, 8> MemOps;
3773 
3774   // If the function takes variable number of arguments, make a frame index for
3775   // the start of the first vararg value... for expansion of llvm.va_start.
3776   if (isVarArg) {
3777     static const MCPhysReg GPArgRegs[] = {
3778       PPC::R3, PPC::R4, PPC::R5, PPC::R6,
3779       PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3780     };
3781     const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
3782 
3783     static const MCPhysReg FPArgRegs[] = {
3784       PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
3785       PPC::F8
3786     };
3787     unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
3788 
    if (useSoftFloat() || hasSPE())
      NumFPArgRegs = 0;
3791 
3792     FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
3793     FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));
3794 
3795     // Make room for NumGPArgRegs and NumFPArgRegs.
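    // (With all 8 GPRs and 8 FPRs unallocated, that is 8*4 + 8*8 = 96 bytes
    // on 32-bit SVR4 with hardware floating point.)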
3796     int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
3797                 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;
3798 
3799     FuncInfo->setVarArgsStackOffset(
3800       MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
3801                             CCInfo.getNextStackOffset(), true));
3802 
3803     FuncInfo->setVarArgsFrameIndex(
3804         MFI.CreateStackObject(Depth, Align(8), false));
3805     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3806 
3807     // The fixed integer arguments of a variadic function are stored to the
3808     // VarArgsFrameIndex on the stack so that they may be loaded by
3809     // dereferencing the result of va_next.
3810     for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
3811       // Get an existing live-in vreg, or add a new one.
3812       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
3813       if (!VReg)
3814         VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
3815 
3816       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3817       SDValue Store =
3818           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3819       MemOps.push_back(Store);
3820       // Increment the address by four for the next argument to store
3821       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
3822       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3823     }
3824 
3825     // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
3826     // is set.
3827     // The double arguments are stored to the VarArgsFrameIndex
3828     // on the stack.
3829     for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
3830       // Get an existing live-in vreg, or add a new one.
3831       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
3832       if (!VReg)
3833         VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
3834 
3835       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
3836       SDValue Store =
3837           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3838       MemOps.push_back(Store);
3839       // Increment the address by eight for the next argument to store
3840       SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
3841                                          PtrVT);
3842       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3843     }
3844   }
3845 
3846   if (!MemOps.empty())
3847     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3848 
3849   return Chain;
3850 }
3851 
3852 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
3853 // value to MVT::i64 and then truncate to the correct register size.
3854 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
3855                                              EVT ObjectVT, SelectionDAG &DAG,
3856                                              SDValue ArgVal,
3857                                              const SDLoc &dl) const {
3858   if (Flags.isSExt())
3859     ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
3860                          DAG.getValueType(ObjectVT));
3861   else if (Flags.isZExt())
3862     ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
3863                          DAG.getValueType(ObjectVT));
3864 
3865   return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
3866 }
3867 
3868 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
3869     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3870     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3871     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3872   // TODO: add description of PPC stack frame format, or at least some docs.
3873   //
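  // Briefly: the 64-bit ELF ABIs lay out the caller's frame as a linkage
  // area (back chain, CR save, LR save, and TOC save; ELFv1 additionally
  // reserves doublewords for the compiler and linker), optionally followed
  // by the parameter save area and the caller's local storage. See the
  // OpenPOWER 64-Bit ELF V2 ABI Specification for the authoritative layout.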
3874   bool isELFv2ABI = Subtarget.isELFv2ABI();
3875   bool isLittleEndian = Subtarget.isLittleEndian();
3876   MachineFunction &MF = DAG.getMachineFunction();
3877   MachineFrameInfo &MFI = MF.getFrameInfo();
3878   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3879 
3880   assert(!(CallConv == CallingConv::Fast && isVarArg) &&
3881          "fastcc not supported on varargs functions");
3882 
3883   EVT PtrVT = getPointerTy(MF.getDataLayout());
3884   // Potential tail calls could cause overwriting of argument stack slots.
3885   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3886                        (CallConv == CallingConv::Fast));
3887   unsigned PtrByteSize = 8;
3888   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3889 
3890   static const MCPhysReg GPR[] = {
3891     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
3892     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
3893   };
3894   static const MCPhysReg VR[] = {
3895     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
3896     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
3897   };
3898 
3899   const unsigned Num_GPR_Regs = array_lengthof(GPR);
3900   const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
3901   const unsigned Num_VR_Regs  = array_lengthof(VR);
3902 
3903   // Do a first pass over the arguments to determine whether the ABI
3904   // guarantees that our caller has allocated the parameter save area
3905   // on its stack frame.  In the ELFv1 ABI, this is always the case;
3906   // in the ELFv2 ABI, it is true if this is a vararg function or if
3907   // any parameter is located in a stack slot.
3908 
3909   bool HasParameterArea = !isELFv2ABI || isVarArg;
3910   unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
3911   unsigned NumBytes = LinkageSize;
3912   unsigned AvailableFPRs = Num_FPR_Regs;
3913   unsigned AvailableVRs = Num_VR_Regs;
3914   for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3915     if (Ins[i].Flags.isNest())
3916       continue;
3917 
3918     if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
3919                                PtrByteSize, LinkageSize, ParamAreaSize,
3920                                NumBytes, AvailableFPRs, AvailableVRs))
3921       HasParameterArea = true;
3922   }
3923 
3924   // Add DAG nodes to load the arguments or copy them out of registers.  On
3925   // entry to a function on PPC, the arguments start after the linkage area,
3926   // although the first ones are often in registers.
3927 
3928   unsigned ArgOffset = LinkageSize;
3929   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
3930   SmallVector<SDValue, 8> MemOps;
3931   Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
3932   unsigned CurArgIdx = 0;
3933   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
3934     SDValue ArgVal;
3935     bool needsLoad = false;
3936     EVT ObjectVT = Ins[ArgNo].VT;
3937     EVT OrigVT = Ins[ArgNo].ArgVT;
3938     unsigned ObjSize = ObjectVT.getStoreSize();
3939     unsigned ArgSize = ObjSize;
3940     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
3941     if (Ins[ArgNo].isOrigArg()) {
3942       std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
3943       CurArgIdx = Ins[ArgNo].getOrigArgIndex();
3944     }
    // We re-align the argument offset for each argument, except under the
    // fast calling convention, where we only do so when the argument will
    // actually use a stack slot.
3948     unsigned CurArgOffset;
3949     Align Alignment;
3950     auto ComputeArgOffset = [&]() {
      // Respect the alignment of the argument on the stack.
3952       Alignment =
3953           CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
3954       ArgOffset = alignTo(ArgOffset, Alignment);
3955       CurArgOffset = ArgOffset;
3956     };
3957 
3958     if (CallConv != CallingConv::Fast) {
3959       ComputeArgOffset();
3960 
      // Compute the GPR index associated with the argument offset.
3962       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
3963       GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
3964     }
3965 
3966     // FIXME the codegen can be much improved in some cases.
3967     // We do not have to keep everything in memory.
3968     if (Flags.isByVal()) {
3969       assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
3970 
3971       if (CallConv == CallingConv::Fast)
3972         ComputeArgOffset();
3973 
      // ObjSize is the true size; ArgSize is ObjSize rounded up to a
      // multiple of the register size.
3975       ObjSize = Flags.getByValSize();
3976       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
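      // For example, a 13-byte aggregate has ObjSize = 13 and ArgSize = 16
      // (two full doublewords).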
3977       // Empty aggregate parameters do not take up registers.  Examples:
3978       //   struct { } a;
3979       //   union  { } b;
3980       //   int c[0];
3981       // etc.  However, we have to provide a place-holder in InVals, so
3982       // pretend we have an 8-byte item at the current address for that
3983       // purpose.
3984       if (!ObjSize) {
3985         int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
3986         SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3987         InVals.push_back(FIN);
3988         continue;
3989       }
3990 
3991       // Create a stack object covering all stack doublewords occupied
3992       // by the argument.  If the argument is (fully or partially) on
3993       // the stack, or if the argument is fully in registers but the
      // caller has allocated the parameter save area anyway, we can refer
3995       // directly to the caller's stack frame.  Otherwise, create a
3996       // local copy in our own frame.
3997       int FI;
3998       if (HasParameterArea ||
3999           ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
4000         FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true);
4001       else
4002         FI = MFI.CreateStackObject(ArgSize, Alignment, false);
4003       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4004 
4005       // Handle aggregates smaller than 8 bytes.
4006       if (ObjSize < PtrByteSize) {
4007         // The value of the object is its address, which differs from the
4008         // address of the enclosing doubleword on big-endian systems.
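        // (E.g. for a 3-byte aggregate, the value starts PtrByteSize -
        // ObjSize = 5 bytes into its doubleword.)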
4009         SDValue Arg = FIN;
4010         if (!isLittleEndian) {
4011           SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
4012           Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
4013         }
4014         InVals.push_back(Arg);
4015 
4016         if (GPR_idx != Num_GPR_Regs) {
4017           unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4018           FuncInfo->addLiveInAttr(VReg, Flags);
4019           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4020           SDValue Store;
4021 
          if (ObjSize == 1 || ObjSize == 2 || ObjSize == 4) {
            EVT ObjType = (ObjSize == 1 ? MVT::i8 :
                           (ObjSize == 2 ? MVT::i16 : MVT::i32));
4025             Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
4026                                       MachinePointerInfo(&*FuncArg), ObjType);
4027           } else {
4028             // For sizes that don't fit a truncating store (3, 5, 6, 7),
4029             // store the whole register as-is to the parameter save area
4030             // slot.
4031             Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
4032                                  MachinePointerInfo(&*FuncArg));
4033           }
4034 
4035           MemOps.push_back(Store);
4036         }
4037         // Whether we copied from a register or not, advance the offset
4038         // into the parameter save area by a full doubleword.
4039         ArgOffset += PtrByteSize;
4040         continue;
4041       }
4042 
4043       // The value of the object is its address, which is the address of
4044       // its first stack doubleword.
4045       InVals.push_back(FIN);
4046 
4047       // Store whatever pieces of the object are in registers to memory.
4048       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
4049         if (GPR_idx == Num_GPR_Regs)
4050           break;
4051 
4052         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4053         FuncInfo->addLiveInAttr(VReg, Flags);
4054         SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4055         SDValue Addr = FIN;
4056         if (j) {
4057           SDValue Off = DAG.getConstant(j, dl, PtrVT);
4058           Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
4059         }
4060         SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
4061                                      MachinePointerInfo(&*FuncArg, j));
4062         MemOps.push_back(Store);
4063         ++GPR_idx;
4064       }
4065       ArgOffset += ArgSize;
4066       continue;
4067     }
4068 
4069     switch (ObjectVT.getSimpleVT().SimpleTy) {
4070     default: llvm_unreachable("Unhandled argument type!");
4071     case MVT::i1:
4072     case MVT::i32:
4073     case MVT::i64:
4074       if (Flags.isNest()) {
4075         // The 'nest' parameter, if any, is passed in R11.
4076         unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
4077         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4078 
4079         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4080           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4081 
4082         break;
4083       }
4084 
4085       // These can be scalar arguments or elements of an integer array type
4086       // passed directly.  Clang may use those instead of "byval" aggregate
4087       // types to avoid forcing arguments to memory unnecessarily.
4088       if (GPR_idx != Num_GPR_Regs) {
4089         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4090         FuncInfo->addLiveInAttr(VReg, Flags);
4091         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4092 
4093         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4094           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4095           // value to MVT::i64 and then truncate to the correct register size.
4096           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4097       } else {
4098         if (CallConv == CallingConv::Fast)
4099           ComputeArgOffset();
4100 
4101         needsLoad = true;
4102         ArgSize = PtrByteSize;
4103       }
4104       if (CallConv != CallingConv::Fast || needsLoad)
4105         ArgOffset += 8;
4106       break;
4107 
4108     case MVT::f32:
4109     case MVT::f64:
4110       // These can be scalar arguments or elements of a float array type
4111       // passed directly.  The latter are used to implement ELFv2 homogenous
4112       // float aggregates.
4113       if (FPR_idx != Num_FPR_Regs) {
4114         unsigned VReg;
4115 
4116         if (ObjectVT == MVT::f32)
4117           VReg = MF.addLiveIn(FPR[FPR_idx],
4118                               Subtarget.hasP8Vector()
4119                                   ? &PPC::VSSRCRegClass
4120                                   : &PPC::F4RCRegClass);
4121         else
4122           VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
4123                                                 ? &PPC::VSFRCRegClass
4124                                                 : &PPC::F8RCRegClass);
4125 
4126         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4127         ++FPR_idx;
4128       } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
4129         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
4130         // once we support fp <-> gpr moves.
4131 
4132         // This can only ever happen in the presence of f32 array types,
4133         // since otherwise we never run out of FPRs before running out
4134         // of GPRs.
4135         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4136         FuncInfo->addLiveInAttr(VReg, Flags);
4137         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4138 
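        // The f32 occupies the high-order word of the GPR when it is the
        // first word of its doubleword on big-endian (ArgOffset % 8 == 0),
        // or the second word on little-endian (ArgOffset % 8 == 4); shift
        // it down before truncating in those cases.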
4139         if (ObjectVT == MVT::f32) {
4140           if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
4141             ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
4142                                  DAG.getConstant(32, dl, MVT::i32));
4143           ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
4144         }
4145 
4146         ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
4147       } else {
4148         if (CallConv == CallingConv::Fast)
4149           ComputeArgOffset();
4150 
4151         needsLoad = true;
4152       }
4153 
4154       // When passing an array of floats, the array occupies consecutive
4155       // space in the argument area; only round up to the next doubleword
4156       // at the end of the array.  Otherwise, each float takes 8 bytes.
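      // (E.g. three consecutive f32 elements advance ArgOffset by 4 bytes
      // each; the final round-up takes the total from 12 to 16.)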
4157       if (CallConv != CallingConv::Fast || needsLoad) {
4158         ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
4159         ArgOffset += ArgSize;
4160         if (Flags.isInConsecutiveRegsLast())
4161           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4162       }
4163       break;
4164     case MVT::v4f32:
4165     case MVT::v4i32:
4166     case MVT::v8i16:
4167     case MVT::v16i8:
4168     case MVT::v2f64:
4169     case MVT::v2i64:
4170     case MVT::v1i128:
4171     case MVT::f128:
4172       // These can be scalar arguments or elements of a vector array type
4173       // passed directly.  The latter are used to implement ELFv2 homogenous
4174       // vector aggregates.
4175       if (VR_idx != Num_VR_Regs) {
4176         unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
4177         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4178         ++VR_idx;
4179       } else {
4180         if (CallConv == CallingConv::Fast)
4181           ComputeArgOffset();
4182         needsLoad = true;
4183       }
4184       if (CallConv != CallingConv::Fast || needsLoad)
4185         ArgOffset += 16;
4186       break;
4187     }
4188 
4189     // We need to load the argument to a virtual register if we determined
4190     // above that we ran out of physical registers of the appropriate type.
4191     if (needsLoad) {
4192       if (ObjSize < ArgSize && !isLittleEndian)
4193         CurArgOffset += ArgSize - ObjSize;
4194       int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
4195       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4196       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4197     }
4198 
4199     InVals.push_back(ArgVal);
4200   }
4201 
4202   // Area that is at least reserved in the caller of this function.
4203   unsigned MinReservedArea;
4204   if (HasParameterArea)
4205     MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
4206   else
4207     MinReservedArea = LinkageSize;
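  // (When the parameter save area exists, the ABI requires it to be at
  // least 8 doublewords, hence the 8 * PtrByteSize term above.)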
4208 
4209   // Set the size that is at least reserved in caller of this function.  Tail
4210   // call optimized functions' reserved stack space needs to be aligned so that
4211   // taking the difference between two stack areas will result in an aligned
4212   // stack.
4213   MinReservedArea =
4214       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4215   FuncInfo->setMinReservedArea(MinReservedArea);
4216 
4217   // If the function takes variable number of arguments, make a frame index for
4218   // the start of the first vararg value... for expansion of llvm.va_start.
  // The ELFv2 ABI spec states:
  // C programs that are intended to be *portable* across different compilers
  // and architectures must use the header file <stdarg.h> to deal with
  // variable argument lists.
4223   if (isVarArg && MFI.hasVAStart()) {
4224     int Depth = ArgOffset;
4225 
4226     FuncInfo->setVarArgsFrameIndex(
4227       MFI.CreateFixedObject(PtrByteSize, Depth, true));
4228     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4229 
4230     // If this function is vararg, store any remaining integer argument regs
4231     // to their spots on the stack so that they may be loaded by dereferencing
4232     // the result of va_next.
4233     for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4234          GPR_idx < Num_GPR_Regs; ++GPR_idx) {
4235       unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4236       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4237       SDValue Store =
4238           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4239       MemOps.push_back(Store);
      // Increment the address by eight for the next argument to store
4241       SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
4242       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4243     }
4244   }
4245 
4246   if (!MemOps.empty())
4247     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4248 
4249   return Chain;
4250 }
4251 
4252 SDValue PPCTargetLowering::LowerFormalArguments_Darwin(
4253     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
4254     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4255     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4256   // TODO: add description of PPC stack frame format, or at least some docs.
4257   //
4258   MachineFunction &MF = DAG.getMachineFunction();
4259   MachineFrameInfo &MFI = MF.getFrameInfo();
4260   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
4261 
4262   EVT PtrVT = getPointerTy(MF.getDataLayout());
4263   bool isPPC64 = PtrVT == MVT::i64;
4264   // Potential tail calls could cause overwriting of argument stack slots.
4265   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
4266                        (CallConv == CallingConv::Fast));
4267   unsigned PtrByteSize = isPPC64 ? 8 : 4;
4268   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4269   unsigned ArgOffset = LinkageSize;
4270   // Area that is at least reserved in caller of this function.
4271   unsigned MinReservedArea = ArgOffset;
4272 
4273   static const MCPhysReg GPR_32[] = {           // 32-bit registers.
4274     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
4275     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
4276   };
4277   static const MCPhysReg GPR_64[] = {           // 64-bit registers.
4278     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4279     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4280   };
4281   static const MCPhysReg VR[] = {
4282     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4283     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4284   };
4285 
4286   const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
4287   const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
  const unsigned Num_VR_Regs  = array_lengthof(VR);
4289 
4290   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
4291 
4292   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
4293 
  // In 32-bit non-varargs functions, the stack space for vectors is after
  // the stack space for non-vectors.  We do not use this space unless we
  // have too many vectors to fit in registers, something that only occurs
  // in constructed examples, but we have to walk the arglist to figure that
  // out... for the pathological case, compute VecArgOffset as the start of
  // the vector parameter area.  Computing VecArgOffset is the entire point
  // of the following loop.
4301   unsigned VecArgOffset = ArgOffset;
4302   if (!isVarArg && !isPPC64) {
    for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
4305       EVT ObjectVT = Ins[ArgNo].VT;
4306       ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4307 
4308       if (Flags.isByVal()) {
4309         // ObjSize is the true size, ArgSize rounded up to multiple of regs.
4310         unsigned ObjSize = Flags.getByValSize();
4311         unsigned ArgSize =
4312                 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4313         VecArgOffset += ArgSize;
4314         continue;
4315       }
4316 
4317       switch(ObjectVT.getSimpleVT().SimpleTy) {
4318       default: llvm_unreachable("Unhandled argument type!");
4319       case MVT::i1:
4320       case MVT::i32:
4321       case MVT::f32:
4322         VecArgOffset += 4;
4323         break;
4324       case MVT::i64:  // PPC64
4325       case MVT::f64:
4326         // FIXME: We are guaranteed to be !isPPC64 at this point.
4327         // Does MVT::i64 apply?
4328         VecArgOffset += 8;
4329         break;
4330       case MVT::v4f32:
4331       case MVT::v4i32:
4332       case MVT::v8i16:
4333       case MVT::v16i8:
        // Nothing to do, we're only looking at non-vector args here.
4335         break;
4336       }
4337     }
4338   }
4339   // We've found where the vector parameter area in memory is.  Skip the
4340   // first 12 parameters; these don't use that memory.
4341   VecArgOffset = ((VecArgOffset+15)/16)*16;
4342   VecArgOffset += 12*16;
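  // (That is, 12 * 16 == 192 bytes are reserved for the first 12 vector
  // parameters, which are passed in registers V2-V13.)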
4343 
4344   // Add DAG nodes to load the arguments or copy them out of registers.  On
4345   // entry to a function on PPC, the arguments start after the linkage area,
4346   // although the first ones are often in registers.
4347 
4348   SmallVector<SDValue, 8> MemOps;
4349   unsigned nAltivecParamsAtEnd = 0;
4350   Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
4351   unsigned CurArgIdx = 0;
4352   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
4353     SDValue ArgVal;
4354     bool needsLoad = false;
4355     EVT ObjectVT = Ins[ArgNo].VT;
4356     unsigned ObjSize = ObjectVT.getSizeInBits()/8;
4357     unsigned ArgSize = ObjSize;
4358     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4359     if (Ins[ArgNo].isOrigArg()) {
4360       std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
4361       CurArgIdx = Ins[ArgNo].getOrigArgIndex();
4362     }
4363     unsigned CurArgOffset = ArgOffset;
4364 
    // Varargs or 64-bit Altivec parameters are padded to a 16-byte boundary.
    if (ObjectVT == MVT::v4f32 || ObjectVT == MVT::v4i32 ||
        ObjectVT == MVT::v8i16 || ObjectVT == MVT::v16i8) {
      if (isVarArg || isPPC64) {
        MinReservedArea = ((MinReservedArea + 15) / 16) * 16;
        MinReservedArea += CalculateStackSlotSize(ObjectVT,
                                                  Flags,
                                                  PtrByteSize);
      } else
        nAltivecParamsAtEnd++;
    } else
      // Calculate min reserved area.
      MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
                                                Flags,
                                                PtrByteSize);
4379 
4380     // FIXME the codegen can be much improved in some cases.
4381     // We do not have to keep everything in memory.
4382     if (Flags.isByVal()) {
4383       assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
4384 
      // ObjSize is the true size; ArgSize is ObjSize rounded up to a
      // multiple of the register size.
      ObjSize = Flags.getByValSize();
      ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4388       // Objects of size 1 and 2 are right justified, everything else is
4389       // left justified.  This means the memory address is adjusted forwards.
      if (ObjSize == 1 || ObjSize == 2)
        CurArgOffset = CurArgOffset + (4 - ObjSize);
4393       // The value of the object is its address.
4394       int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, false, true);
4395       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4396       InVals.push_back(FIN);
      if (ObjSize == 1 || ObjSize == 2) {
4398         if (GPR_idx != Num_GPR_Regs) {
4399           unsigned VReg;
4400           if (isPPC64)
4401             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4402           else
4403             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4404           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4405           EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16;
4406           SDValue Store =
4407               DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
4408                                 MachinePointerInfo(&*FuncArg), ObjType);
4409           MemOps.push_back(Store);
4410           ++GPR_idx;
4411         }
4412 
4413         ArgOffset += PtrByteSize;
4414 
4415         continue;
4416       }
4417       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
4418         // Store whatever pieces of the object are in registers
4419         // to memory.  ArgOffset will be the address of the beginning
4420         // of the object.
4421         if (GPR_idx != Num_GPR_Regs) {
4422           unsigned VReg;
4423           if (isPPC64)
4424             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4425           else
4426             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4427           int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
4428           SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4429           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4430           SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
4431                                        MachinePointerInfo(&*FuncArg, j));
4432           MemOps.push_back(Store);
4433           ++GPR_idx;
4434           ArgOffset += PtrByteSize;
4435         } else {
4436           ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
4437           break;
4438         }
4439       }
4440       continue;
4441     }
4442 
4443     switch (ObjectVT.getSimpleVT().SimpleTy) {
4444     default: llvm_unreachable("Unhandled argument type!");
4445     case MVT::i1:
4446     case MVT::i32:
4447       if (!isPPC64) {
4448         if (GPR_idx != Num_GPR_Regs) {
4449           unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4450           ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
4451 
4452           if (ObjectVT == MVT::i1)
4453             ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal);
4454 
4455           ++GPR_idx;
4456         } else {
4457           needsLoad = true;
4458           ArgSize = PtrByteSize;
4459         }
4460         // All int arguments reserve stack space in the Darwin ABI.
4461         ArgOffset += PtrByteSize;
4462         break;
4463       }
4464       LLVM_FALLTHROUGH;
4465     case MVT::i64:  // PPC64
4466       if (GPR_idx != Num_GPR_Regs) {
4467         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4468         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4469 
4470         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4471           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4472           // value to MVT::i64 and then truncate to the correct register size.
4473           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4474 
4475         ++GPR_idx;
4476       } else {
4477         needsLoad = true;
4478         ArgSize = PtrByteSize;
4479       }
4480       // All int arguments reserve stack space in the Darwin ABI.
4481       ArgOffset += 8;
4482       break;
4483 
4484     case MVT::f32:
4485     case MVT::f64:
4486       // Every 4 bytes of argument space consumes one of the GPRs available for
4487       // argument passing.
4488       if (GPR_idx != Num_GPR_Regs) {
4489         ++GPR_idx;
4490         if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
4491           ++GPR_idx;
4492       }
4493       if (FPR_idx != Num_FPR_Regs) {
4494         unsigned VReg;
4495 
4496         if (ObjectVT == MVT::f32)
4497           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
4498         else
4499           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);
4500 
4501         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4502         ++FPR_idx;
4503       } else {
4504         needsLoad = true;
4505       }
4506 
4507       // All FP arguments reserve stack space in the Darwin ABI.
4508       ArgOffset += isPPC64 ? 8 : ObjSize;
4509       break;
4510     case MVT::v4f32:
4511     case MVT::v4i32:
4512     case MVT::v8i16:
4513     case MVT::v16i8:
4514       // Note that vector arguments in registers don't reserve stack space,
4515       // except in varargs functions.
4516       if (VR_idx != Num_VR_Regs) {
4517         unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
4518         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4519         if (isVarArg) {
4520           while ((ArgOffset % 16) != 0) {
4521             ArgOffset += PtrByteSize;
4522             if (GPR_idx != Num_GPR_Regs)
4523               GPR_idx++;
4524           }
4525           ArgOffset += 16;
4526           GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
4527         }
4528         ++VR_idx;
4529       } else {
4530         if (!isVarArg && !isPPC64) {
4531           // Vectors go after all the nonvectors.
4532           CurArgOffset = VecArgOffset;
4533           VecArgOffset += 16;
4534         } else {
4535           // Vectors are aligned.
4536           ArgOffset = ((ArgOffset+15)/16)*16;
4537           CurArgOffset = ArgOffset;
4538           ArgOffset += 16;
4539         }
4540         needsLoad = true;
4541       }
4542       break;
4543     }
4544 
4545     // We need to load the argument to a virtual register if we determined above
4546     // that we ran out of physical registers of the appropriate type.
4547     if (needsLoad) {
4548       int FI = MFI.CreateFixedObject(ObjSize,
4549                                      CurArgOffset + (ArgSize - ObjSize),
4550                                      isImmutable);
4551       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4552       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4553     }
4554 
4555     InVals.push_back(ArgVal);
4556   }
4557 
4558   // Allow for Altivec parameters at the end, if needed.
4559   if (nAltivecParamsAtEnd) {
4560     MinReservedArea = ((MinReservedArea+15)/16)*16;
4561     MinReservedArea += 16*nAltivecParamsAtEnd;
4562   }
4563 
4564   // Area that is at least reserved in the caller of this function.
4565   MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);
4566 
4567   // Set the size that is at least reserved in caller of this function.  Tail
4568   // call optimized functions' reserved stack space needs to be aligned so that
4569   // taking the difference between two stack areas will result in an aligned
4570   // stack.
4571   MinReservedArea =
4572       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4573   FuncInfo->setMinReservedArea(MinReservedArea);
4574 
4575   // If the function takes variable number of arguments, make a frame index for
4576   // the start of the first vararg value... for expansion of llvm.va_start.
4577   if (isVarArg) {
4578     int Depth = ArgOffset;
4579 
4580     FuncInfo->setVarArgsFrameIndex(
4581       MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
4582                             Depth, true));
4583     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4584 
4585     // If this function is vararg, store any remaining integer argument regs
4586     // to their spots on the stack so that they may be loaded by dereferencing
4587     // the result of va_next.
4588     for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
4589       unsigned VReg;
4590 
4591       if (isPPC64)
4592         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4593       else
4594         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4595 
4596       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4597       SDValue Store =
4598           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4599       MemOps.push_back(Store);
4600       // Increment the address by four for the next argument to store
4601       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
4602       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4603     }
4604   }
4605 
4606   if (!MemOps.empty())
4607     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4608 
4609   return Chain;
4610 }
4611 
4612 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
4613 /// adjusted to accommodate the arguments for the tailcall.
static int CalculateTailCallSPDiff(SelectionDAG &DAG, bool isTailCall,
                                   unsigned ParamSize) {
  if (!isTailCall) return 0;
4618 
4619   PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
4620   unsigned CallerMinReservedArea = FI->getMinReservedArea();
4621   int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
4622   // Remember only if the new adjustment is bigger.
4623   if (SPDiff < FI->getTailCallSPDelta())
4624     FI->setTailCallSPDelta(SPDiff);
4625 
4626   return SPDiff;
4627 }
4628 
4629 static bool isFunctionGlobalAddress(SDValue Callee);
4630 
4631 static bool callsShareTOCBase(const Function *Caller, SDValue Callee,
4632                               const TargetMachine &TM) {
4633   // It does not make sense to call callsShareTOCBase() with a caller that
4634   // is PC Relative since PC Relative callers do not have a TOC.
4635 #ifndef NDEBUG
4636   const PPCSubtarget *STICaller = &TM.getSubtarget<PPCSubtarget>(*Caller);
4637   assert(!STICaller->isUsingPCRelativeCalls() &&
4638          "PC Relative callers do not have a TOC and cannot share a TOC Base");
4639 #endif
4640 
4641   // Callee is either a GlobalAddress or an ExternalSymbol. ExternalSymbols
4642   // don't have enough information to determine if the caller and callee share
  // the same TOC base, so we have to pessimistically assume they don't for
4644   // correctness.
4645   GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4646   if (!G)
4647     return false;
4648 
4649   const GlobalValue *GV = G->getGlobal();
4650 
4651   // If the callee is preemptable, then the static linker will use a plt-stub
4652   // which saves the toc to the stack, and needs a nop after the call
4653   // instruction to convert to a toc-restore.
4654   if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
4655     return false;
4656 
4657   // Functions with PC Relative enabled may clobber the TOC in the same DSO.
4658   // We may need a TOC restore in the situation where the caller requires a
4659   // valid TOC but the callee is PC Relative and does not.
4660   const Function *F = dyn_cast<Function>(GV);
4661   const GlobalAlias *Alias = dyn_cast<GlobalAlias>(GV);
4662 
4663   // If we have an Alias we can try to get the function from there.
4664   if (Alias) {
4665     const GlobalObject *GlobalObj = Alias->getBaseObject();
4666     F = dyn_cast<Function>(GlobalObj);
4667   }
4668 
4669   // If we still have no valid function pointer we do not have enough
4670   // information to determine if the callee uses PC Relative calls so we must
4671   // assume that it does.
4672   if (!F)
4673     return false;
4674 
4675   // If the callee uses PC Relative we cannot guarantee that the callee won't
4676   // clobber the TOC of the caller and so we must assume that the two
4677   // functions do not share a TOC base.
4678   const PPCSubtarget *STICallee = &TM.getSubtarget<PPCSubtarget>(*F);
4679   if (STICallee->isUsingPCRelativeCalls())
4680     return false;
4681 
4682   // The medium and large code models are expected to provide a sufficiently
4683   // large TOC to provide all data addressing needs of a module with a
4684   // single TOC.
4685   if (CodeModel::Medium == TM.getCodeModel() ||
4686       CodeModel::Large == TM.getCodeModel())
4687     return true;
4688 
4689   // Otherwise we need to ensure callee and caller are in the same section,
4690   // since the linker may allocate multiple TOCs, and we don't know which
4691   // sections will belong to the same TOC base.
4692   if (!GV->isStrongDefinitionForLinker())
4693     return false;
4694 
4695   // Any explicitly-specified sections and section prefixes must also match.
4696   // Also, if we're using -ffunction-sections, then each function is always in
4697   // a different section (the same is true for COMDAT functions).
4698   if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
4699       GV->getSection() != Caller->getSection())
4700     return false;
4701   if (const auto *F = dyn_cast<Function>(GV)) {
4702     if (F->getSectionPrefix() != Caller->getSectionPrefix())
4703       return false;
4704   }
4705 
4706   return true;
4707 }
4708 
4709 static bool
4710 needStackSlotPassParameters(const PPCSubtarget &Subtarget,
4711                             const SmallVectorImpl<ISD::OutputArg> &Outs) {
4712   assert(Subtarget.is64BitELFABI());
4713 
4714   const unsigned PtrByteSize = 8;
4715   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4716 
4717   static const MCPhysReg GPR[] = {
4718     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4719     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4720   };
4721   static const MCPhysReg VR[] = {
4722     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4723     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4724   };
4725 
4726   const unsigned NumGPRs = array_lengthof(GPR);
4727   const unsigned NumFPRs = 13;
4728   const unsigned NumVRs = array_lengthof(VR);
4729   const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
4730 
4731   unsigned NumBytes = LinkageSize;
4732   unsigned AvailableFPRs = NumFPRs;
4733   unsigned AvailableVRs = NumVRs;
4734 
4735   for (const ISD::OutputArg& Param : Outs) {
4736     if (Param.Flags.isNest()) continue;
4737 
4738     if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags, PtrByteSize,
4739                                LinkageSize, ParamAreaSize, NumBytes,
4740                                AvailableFPRs, AvailableVRs))
4741       return true;
4742   }
4743   return false;
4744 }
4745 
4746 static bool hasSameArgumentList(const Function *CallerFn, const CallBase &CB) {
4747   if (CB.arg_size() != CallerFn->arg_size())
4748     return false;
4749 
4750   auto CalleeArgIter = CB.arg_begin();
4751   auto CalleeArgEnd = CB.arg_end();
4752   Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();
4753 
4754   for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
4755     const Value* CalleeArg = *CalleeArgIter;
4756     const Value* CallerArg = &(*CallerArgIter);
4757     if (CalleeArg == CallerArg)
4758       continue;
4759 
4760     // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
4761     //        tail call @callee([4 x i64] undef, [4 x i64] %b)
4762     //      }
4763     // 1st argument of callee is undef and has the same type as caller.
4764     if (CalleeArg->getType() == CallerArg->getType() &&
4765         isa<UndefValue>(CalleeArg))
4766       continue;
4767 
4768     return false;
4769   }
4770 
4771   return true;
4772 }
4773 
// Returns true if TCO is possible between the caller's and callee's
// calling conventions.
4776 static bool
4777 areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
4778                                     CallingConv::ID CalleeCC) {
4779   // Tail calls are possible with fastcc and ccc.
  auto isTailCallableCC = [](CallingConv::ID CC) {
    return CC == CallingConv::C || CC == CallingConv::Fast;
  };
4783   if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
4784     return false;
4785 
  // We can safely tail call both fastcc and ccc callees from a C calling
4787   // convention caller. If the caller is fastcc, we may have less stack space
4788   // than a non-fastcc caller with the same signature so disable tail-calls in
4789   // that case.
4790   return CallerCC == CallingConv::C || CallerCC == CalleeCC;
4791 }
4792 
4793 bool PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
4794     SDValue Callee, CallingConv::ID CalleeCC, const CallBase *CB, bool isVarArg,
4795     const SmallVectorImpl<ISD::OutputArg> &Outs,
4796     const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
4797   bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
4798 
4799   if (DisableSCO && !TailCallOpt) return false;
4800 
4801   // Variadic argument functions are not supported.
4802   if (isVarArg) return false;
4803 
4804   auto &Caller = DAG.getMachineFunction().getFunction();
4805   // Check that the calling conventions are compatible for tco.
4806   if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
4807     return false;
4808 
  // A caller that contains any byval parameter is not supported.
4810   if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
4811     return false;
4812 
  // Likewise, a callee that contains any byval parameter is not supported.
4814   // Note: This is a quick work around, because in some cases, e.g.
4815   // caller's stack size > callee's stack size, we are still able to apply
4816   // sibling call optimization. For example, gcc is able to do SCO for caller1
4817   // in the following example, but not for caller2.
4818   //   struct test {
4819   //     long int a;
4820   //     char ary[56];
4821   //   } gTest;
4822   //   __attribute__((noinline)) int callee(struct test v, struct test *b) {
4823   //     b->a = v.a;
4824   //     return 0;
4825   //   }
4826   //   void caller1(struct test a, struct test c, struct test *b) {
4827   //     callee(gTest, b); }
4828   //   void caller2(struct test *b) { callee(gTest, b); }
4829   if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
4830     return false;
4831 
4832   // If callee and caller use different calling conventions, we cannot pass
4833   // parameters on stack since offsets for the parameter area may be different.
4834   if (Caller.getCallingConv() != CalleeCC &&
4835       needStackSlotPassParameters(Subtarget, Outs))
4836     return false;
4837 
4838   // All variants of 64-bit ELF ABIs without PC-Relative addressing require that
4839   // the caller and callee share the same TOC for TCO/SCO. If the caller and
4840   // callee potentially have different TOC bases then we cannot tail call since
4841   // we need to restore the TOC pointer after the call.
4842   // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
4843   // We cannot guarantee this for indirect calls or calls to external functions.
4844   // When PC-Relative addressing is used, the concept of the TOC is no longer
4845   // applicable so this check is not required.
4846   // Check first for indirect calls.
4847   if (!Subtarget.isUsingPCRelativeCalls() &&
4848       !isFunctionGlobalAddress(Callee) && !isa<ExternalSymbolSDNode>(Callee))
4849     return false;
4850 
4851   // Check if we share the TOC base.
4852   if (!Subtarget.isUsingPCRelativeCalls() &&
4853       !callsShareTOCBase(&Caller, Callee, getTargetMachine()))
4854     return false;
4855 
4856   // TCO allows altering callee ABI, so we don't have to check further.
4857   if (CalleeCC == CallingConv::Fast && TailCallOpt)
4858     return true;
4859 
4860   if (DisableSCO) return false;
4861 
  // If the callee uses the same argument list as the caller, we can apply
  // SCO in this case. If not, we need to check whether the callee needs
  // stack space for passing arguments.
4865   // PC Relative tail calls may not have a CallBase.
4866   // If there is no CallBase we cannot verify if we have the same argument
4867   // list so assume that we don't have the same argument list.
4868   if (CB && !hasSameArgumentList(&Caller, *CB) &&
4869       needStackSlotPassParameters(Subtarget, Outs))
4870     return false;
4871   else if (!CB && needStackSlotPassParameters(Subtarget, Outs))
4872     return false;
4873 
4874   return true;
4875 }
4876 
4877 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
4878 /// for tail call optimization. Targets which want to do tail call
4879 /// optimization should implement this function.
4880 bool
4881 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
4882                                                      CallingConv::ID CalleeCC,
4883                                                      bool isVarArg,
4884                                       const SmallVectorImpl<ISD::InputArg> &Ins,
4885                                                      SelectionDAG& DAG) const {
4886   if (!getTargetMachine().Options.GuaranteedTailCallOpt)
4887     return false;
4888 
4889   // Variable argument functions are not supported.
4890   if (isVarArg)
4891     return false;
4892 
4893   MachineFunction &MF = DAG.getMachineFunction();
4894   CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
4895   if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
    // Functions containing byval parameters are not supported.
4897     for (unsigned i = 0; i != Ins.size(); i++) {
4898        ISD::ArgFlagsTy Flags = Ins[i].Flags;
4899        if (Flags.isByVal()) return false;
4900     }
4901 
4902     // Non-PIC/GOT tail calls are supported.
4903     if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
4904       return true;
4905 
4906     // At the moment we can only do local tail calls (in same module, hidden
4907     // or protected) if we are generating PIC.
4908     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
      return G->getGlobal()->hasHiddenVisibility() ||
             G->getGlobal()->hasProtectedVisibility();
4911   }
4912 
4913   return false;
4914 }
4915 
/// isBLACompatibleAddress - Return the immediate to use if the specified
4917 /// 32-bit value is representable in the immediate field of a BxA instruction.
4918 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
4919   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
4920   if (!C) return nullptr;
4921 
4922   int Addr = C->getZExtValue();
4923   if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
4924       SignExtend32<26>(Addr) != Addr)
4925     return nullptr;  // Top 6 bits have to be sext of immediate.
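  // For example, address 0x1000 yields the immediate 0x400; an address with
  // nonzero low bits, or one outside the signed 26-bit range, is rejected.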
4926 
4927   return DAG
4928       .getConstant(
4929           (int)C->getZExtValue() >> 2, SDLoc(Op),
4930           DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
4931       .getNode();
4932 }
4933 
4934 namespace {
4935 
4936 struct TailCallArgumentInfo {
4937   SDValue Arg;
4938   SDValue FrameIdxOp;
4939   int FrameIdx = 0;
4940 
4941   TailCallArgumentInfo() = default;
4942 };
4943 
4944 } // end anonymous namespace
4945 
4946 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
4947 static void StoreTailCallArgumentsToStackSlot(
4948     SelectionDAG &DAG, SDValue Chain,
4949     const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
4950     SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
4951   for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
4952     SDValue Arg = TailCallArgs[i].Arg;
4953     SDValue FIN = TailCallArgs[i].FrameIdxOp;
4954     int FI = TailCallArgs[i].FrameIdx;
4955     // Store relative to framepointer.
4956     MemOpChains.push_back(DAG.getStore(
4957         Chain, dl, Arg, FIN,
4958         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
4959   }
4960 }

/// EmitTailCallStoreFPAndRetAddr - Store the return address to the
/// appropriate stack slot for the tail call optimized function call. Despite
/// the name, only the return address is stored; OldFP is currently unused.
static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
                                             SDValue OldRetAddr, SDValue OldFP,
                                             int SPDiff, const SDLoc &dl) {
  if (SPDiff) {
    // Calculate the new stack slot for the return address.
    MachineFunction &MF = DAG.getMachineFunction();
    const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
    const PPCFrameLowering *FL = Subtarget.getFrameLowering();
    bool isPPC64 = Subtarget.isPPC64();
    int SlotSize = isPPC64 ? 8 : 4;
    int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
    int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize,
                                                         NewRetAddrLoc, true);
    EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
    SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
    Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
                         MachinePointerInfo::getFixedStack(MF, NewRetAddr));
  }
  return Chain;
}

/// CalculateTailCallArgDest - Remember the argument for later processing.
/// Calculate the position of the argument.
static void
CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
                         SDValue Arg, int SPDiff, unsigned ArgOffset,
                     SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
  int Offset = ArgOffset + SPDiff;
  uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
  int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
  EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
  SDValue FIN = DAG.getFrameIndex(FI, VT);
  TailCallArgumentInfo Info;
  Info.Arg = Arg;
  Info.FrameIdxOp = FIN;
  Info.FrameIdx = FI;
  TailCallArguments.push_back(Info);
}

/// EmitTailCallLoadFPAndRetAddr - Emit a load of the return address from its
/// stack slot. Returns the new chain, with the loaded return address in
/// LROpOut. (FPOpOut is accepted for symmetry, but no frame pointer load is
/// currently emitted.) Used when tail calling.
SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
    SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
    SDValue &FPOpOut, const SDLoc &dl) const {
  if (SPDiff) {
    // Load the LR stack slot for later adjusting.
    EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
    LROpOut = getReturnAddrFrameIndex(DAG);
    LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
    Chain = SDValue(LROpOut.getNode(), 1);
  }
  return Chain;
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at the address
/// specified by "Src" to address "Dst" of size "Size".  Alignment information
/// is specified by the specific parameter attribute. The copy will be passed
/// as a byval function parameter.
/// Sometimes what we are copying is the end of a larger object, the part that
/// does not fit in registers.
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
                                         SDValue Chain, ISD::ArgFlagsTy Flags,
                                         SelectionDAG &DAG, const SDLoc &dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode,
                       Flags.getNonZeroByValAlign(), /*isVol=*/false,
                       /*AlwaysInline=*/false, /*isTailCall=*/false,
                       MachinePointerInfo(), MachinePointerInfo());
}

/// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
/// tail calls.
static void LowerMemOpCallTo(
    SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
    SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
    bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
    SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  if (!isTailCall) {
    if (isVector) {
      SDValue StackPtr;
      if (isPPC64)
        StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
      else
        StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
      PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
                           DAG.getConstant(ArgOffset, dl, PtrVT));
    }
    MemOpChains.push_back(
        DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
  } else {
    // Calculate and remember argument location.
    CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
                             TailCallArguments);
  }
}

static void
PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
                const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp,
                SDValue FPOp,
                SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
  // Emit a sequence of copyto/copyfrom virtual registers for arguments that
  // might overwrite each other in case of tail call optimization.
  SmallVector<SDValue, 8> MemOpChains2;
  // Do not flag preceding copytoreg stuff together with the following stuff.
  InFlag = SDValue();
  StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
                                    MemOpChains2, dl);
  if (!MemOpChains2.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);

  // Store the return address to the appropriate stack slot.
  Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl);

  // Emit callseq_end just before tailcall node.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                             DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
  InFlag = Chain.getValue(1);
}

// Is this global address that of a function that can be called by name (as
// opposed to something that must hold a descriptor for an indirect call)?
static bool isFunctionGlobalAddress(SDValue Callee) {
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
        Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
      return false;

    return G->getGlobal()->getValueType()->isFunctionTy();
  }

  return false;
}

SDValue PPCTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                    *DAG.getContext());

  CCRetInfo.AnalyzeCallResult(
      Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
               ? RetCC_PPC_Cold
               : RetCC_PPC);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Val;

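    // With SPE, an f64 return value arrives split across two consecutive i32
    // location registers; the halves are reassembled below with
    // PPCISD::BUILD_SPE64 (after a swap on big-endian targets).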
    if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      if (!Subtarget.isLittleEndian())
        std::swap(Lo, Hi);
      Val = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, Lo, Hi);
    } else {
      Val = DAG.getCopyFromReg(Chain, dl,
                               VA.getLocReg(), VA.getLocVT(), InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::AExt:
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    case CCValAssign::ZExt:
      Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
                        DAG.getValueType(VA.getValVT()));
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    case CCValAssign::SExt:
      Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
                        DAG.getValueType(VA.getValVT()));
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}

static bool isIndirectCall(const SDValue &Callee, SelectionDAG &DAG,
                           const PPCSubtarget &Subtarget, bool isPatchPoint) {
  // PatchPoint calls are not indirect.
  if (isPatchPoint)
    return false;

  if (isFunctionGlobalAddress(Callee) || isa<ExternalSymbolSDNode>(Callee))
    return false;

  // Darwin and 32-bit ELF can use a BLA. The descriptor-based ABIs cannot
  // because the immediate function pointer points to a descriptor instead of
  // a function entry point. The ELFv2 ABI cannot use a BLA because the
  // function pointer immediate points to the global entry point, while the
  // BLA would need to jump to the local entry point (see rL211174).
  if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI() &&
      isBLACompatibleAddress(Callee, DAG))
    return false;

  return true;
}

// AIX and 64-bit ELF ABIs w/o PCRel require a TOC save/restore around calls.
static inline bool isTOCSaveRestoreRequired(const PPCSubtarget &Subtarget) {
  return Subtarget.isAIXABI() ||
         (Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls());
}

static unsigned getCallOpcode(PPCTargetLowering::CallFlags CFlags,
                              const Function &Caller,
                              const SDValue &Callee,
                              const PPCSubtarget &Subtarget,
                              const TargetMachine &TM) {
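  // In summary, this selects:
  //   TC_RETURN       for tail calls,
  //   BCTRL_LOAD_TOC  for indirect calls that must restore the TOC,
  //   BCTRL           for other indirect calls,
  //   CALL_NOTOC      for direct PC-relative calls,
  //   CALL_NOP        for direct calls that may need a TOC-restore nop, and
  //   CALL            otherwise.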
  if (CFlags.IsTailCall)
    return PPCISD::TC_RETURN;

  // This is a call through a function pointer.
  if (CFlags.IsIndirect) {
    // AIX and the 64-bit ELF ABIs need to maintain the TOC pointer across
    // indirect calls. The save of the caller's TOC pointer to the stack will
    // be inserted into the DAG as part of call lowering. The restore of the
    // TOC pointer is modeled by using a pseudo instruction for the call
    // opcode that represents the two-instruction sequence of an indirect
    // branch and link, immediately followed by a load of the TOC pointer
    // from the stack save slot into gpr2. For the 64-bit ELFv2 ABI with
    // PCRel, do not restore the TOC as it is not saved or used.
    return isTOCSaveRestoreRequired(Subtarget) ? PPCISD::BCTRL_LOAD_TOC
                                               : PPCISD::BCTRL;
  }

  if (Subtarget.isUsingPCRelativeCalls()) {
    assert(Subtarget.is64BitELFABI() && "PC Relative is only on ELF ABI.");
    return PPCISD::CALL_NOTOC;
  }

  // The ABIs that maintain a TOC pointer across calls need to have a nop
  // immediately following the call instruction if the caller and callee may
  // have different TOC bases. At link time, if the linker determines the
  // calls may not share a TOC base, the call is redirected to a trampoline
  // inserted by the linker. The trampoline will (among other things) save the
  // caller's TOC pointer at an ABI-designated offset in the linkage area and
  // the linker will rewrite the nop to be a load of the TOC pointer from the
  // linkage area into gpr2.
  if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI())
    return callsShareTOCBase(&Caller, Callee, TM) ? PPCISD::CALL
                                                  : PPCISD::CALL_NOP;

  return PPCISD::CALL;
}

static SDValue transformCallee(const SDValue &Callee, SelectionDAG &DAG,
                               const SDLoc &dl, const PPCSubtarget &Subtarget) {
  if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI())
    if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
      return SDValue(Dest, 0);

  // Returns true if the callee is local, and false otherwise.
  auto isLocalCallee = [&]() {
    const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
    const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
    const GlobalValue *GV = G ? G->getGlobal() : nullptr;

    return DAG.getTarget().shouldAssumeDSOLocal(*Mod, GV) &&
           !isa_and_nonnull<GlobalIFunc>(GV);
  };

  // The PLT is only used in 32-bit ELF PIC mode.  Attempting to use the PLT in
  // a static relocation model causes some versions of GNU LD (2.17.50, at
  // least) to force BSS-PLT, instead of secure-PLT, even if all objects are
  // built with secure-PLT.
  bool UsePlt =
      Subtarget.is32BitELFABI() && !isLocalCallee() &&
      Subtarget.getTargetMachine().getRelocationModel() == Reloc::PIC_;

  const auto getAIXFuncEntryPointSymbolSDNode = [&](const GlobalValue *GV) {
    const TargetMachine &TM = Subtarget.getTargetMachine();
    const TargetLoweringObjectFile *TLOF = TM.getObjFileLowering();
    MCSymbolXCOFF *S =
        cast<MCSymbolXCOFF>(TLOF->getFunctionEntryPointSymbol(GV, TM));

    MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
    return DAG.getMCSymbol(S, PtrVT);
  };

  if (isFunctionGlobalAddress(Callee)) {
    const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();

    if (Subtarget.isAIXABI()) {
      assert(!isa<GlobalIFunc>(GV) && "IFunc is not supported on AIX.");
      return getAIXFuncEntryPointSymbolSDNode(GV);
    }
    return DAG.getTargetGlobalAddress(GV, dl, Callee.getValueType(), 0,
                                      UsePlt ? PPCII::MO_PLT : 0);
  }

  if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const char *SymName = S->getSymbol();
    if (Subtarget.isAIXABI()) {
      // If there exists a user-declared function whose name is the same as the
      // ExternalSymbol's, then we pick up the user-declared version.
      const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
      if (const Function *F =
              dyn_cast_or_null<Function>(Mod->getNamedValue(SymName)))
        return getAIXFuncEntryPointSymbolSDNode(F);

      // On AIX, direct function calls reference the symbol for the function's
      // entry point, which is named by prepending a "." before the function's
      // C-linkage name. A QualName is returned here because an external
      // function entry point is a csect with XTY_ER property.
      const auto getExternalFunctionEntryPointSymbol = [&](StringRef SymName) {
        auto &Context = DAG.getMachineFunction().getMMI().getContext();
        MCSectionXCOFF *Sec = Context.getXCOFFSection(
            (Twine(".") + Twine(SymName)).str(), XCOFF::XMC_PR, XCOFF::XTY_ER,
            SectionKind::getMetadata());
        return Sec->getQualNameSymbol();
      };

      SymName = getExternalFunctionEntryPointSymbol(SymName)->getName().data();
    }
    return DAG.getTargetExternalSymbol(SymName, Callee.getValueType(),
                                       UsePlt ? PPCII::MO_PLT : 0);
  }

  // No transformation needed.
  assert(Callee.getNode() && "What, no callee?");
  return Callee;
}

static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart) {
  assert(CallSeqStart.getOpcode() == ISD::CALLSEQ_START &&
         "Expected a CALLSEQ_START node.");

  // The last value produced is the chain, except when the node has glue: in
  // that case the last value is the glue and the chain is the second-to-last
  // value.
  SDValue LastValue = CallSeqStart.getValue(CallSeqStart->getNumValues() - 1);
  if (LastValue.getValueType() != MVT::Glue)
    return LastValue;

  return CallSeqStart.getValue(CallSeqStart->getNumValues() - 2);
}

// Creates the node that moves a function's address into the count register
// to prepare for an indirect call instruction.
static void prepareIndirectCall(SelectionDAG &DAG, SDValue &Callee,
                                SDValue &Glue, SDValue &Chain,
                                const SDLoc &dl) {
  SDValue MTCTROps[] = {Chain, Callee, Glue};
  EVT ReturnTypes[] = {MVT::Other, MVT::Glue};
  Chain = DAG.getNode(PPCISD::MTCTR, dl, makeArrayRef(ReturnTypes, 2),
                      makeArrayRef(MTCTROps, Glue.getNode() ? 3 : 2));
  // The glue is the second value produced.
  Glue = Chain.getValue(1);
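  // Together with the BCTRL/BCTRL_LOAD_TOC opcode chosen in getCallOpcode(),
  // this produces the familiar two-instruction indirect-call sequence:
  //   mtctr <callee address>
  //   bctrl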
}

static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee,
                                          SDValue &Glue, SDValue &Chain,
                                          SDValue CallSeqStart,
                                          const CallBase *CB, const SDLoc &dl,
                                          bool hasNest,
                                          const PPCSubtarget &Subtarget) {
  // Function pointers in the 64-bit SVR4 ABI do not point to the function
  // entry point, but to the function descriptor (the function entry point
  // address is part of the function descriptor though).
  // The function descriptor is a three doubleword structure with the
  // following fields: function entry point, TOC base address and
  // environment pointer.
  // Thus for a call through a function pointer, the following actions need
  // to be performed:
  //   1. Save the TOC of the caller in the TOC save area of its stack
  //      frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
  //   2. Load the address of the function entry point from the function
  //      descriptor.
  //   3. Load the TOC of the callee from the function descriptor into r2.
  //   4. Load the environment pointer from the function descriptor into
  //      r11.
  //   5. Branch to the function entry point address.
  //   6. On return of the callee, the TOC of the caller needs to be
  //      restored (this is done in FinishCall()).
  //
  // The loads are scheduled at the beginning of the call sequence, and the
  // register copies are flagged together to ensure that no other
  // operations can be scheduled in between. E.g. without flagging the
  // copies together, a TOC access in the caller could be scheduled between
  // the assignment of the callee TOC and the branch to the callee, which leads
  // to incorrect code.
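  //
  // As a sketch (not a type used by this code), the 64-bit descriptor is
  // laid out as three consecutive doublewords:
  //   struct FunctionDescriptor {
  //     void *EntryPoint;  // offset 0, moved into the CTR below
  //     void *TOCBase;     // offset descriptorTOCAnchorOffset(), into r2
  //     void *EnvPointer;  // offset descriptorEnvironmentPointerOffset(),
  //   };                   //   into r11 (unless a 'nest' argument is used)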

  // Start by loading the function address from the descriptor.
  SDValue LDChain = getOutputChainFromCallSeq(CallSeqStart);
  auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
                      ? (MachineMemOperand::MODereferenceable |
                         MachineMemOperand::MOInvariant)
                      : MachineMemOperand::MONone;

  MachinePointerInfo MPI(CB ? CB->getCalledOperand() : nullptr);

  // Registers used in building the DAG.
  const MCRegister EnvPtrReg = Subtarget.getEnvironmentPointerRegister();
  const MCRegister TOCReg = Subtarget.getTOCPointerRegister();

  // Offsets of descriptor members.
  const unsigned TOCAnchorOffset = Subtarget.descriptorTOCAnchorOffset();
  const unsigned EnvPtrOffset = Subtarget.descriptorEnvironmentPointerOffset();

  const MVT RegVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
  const unsigned Alignment = Subtarget.isPPC64() ? 8 : 4;

  // One load for the function's entry point address.
  SDValue LoadFuncPtr = DAG.getLoad(RegVT, dl, LDChain, Callee, MPI,
                                    Alignment, MMOFlags);

  // One for loading the TOC anchor for the module that contains the called
  // function.
  SDValue TOCOff = DAG.getIntPtrConstant(TOCAnchorOffset, dl);
  SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, Callee, TOCOff);
  SDValue TOCPtr =
      DAG.getLoad(RegVT, dl, LDChain, AddTOC,
                  MPI.getWithOffset(TOCAnchorOffset), Alignment, MMOFlags);

  // One for loading the environment pointer.
  SDValue PtrOff = DAG.getIntPtrConstant(EnvPtrOffset, dl);
  SDValue AddPtr = DAG.getNode(ISD::ADD, dl, RegVT, Callee, PtrOff);
  SDValue LoadEnvPtr =
      DAG.getLoad(RegVT, dl, LDChain, AddPtr,
                  MPI.getWithOffset(EnvPtrOffset), Alignment, MMOFlags);

  // Then copy the newly loaded TOC anchor to the TOC pointer.
  SDValue TOCVal = DAG.getCopyToReg(Chain, dl, TOCReg, TOCPtr, Glue);
  Chain = TOCVal.getValue(0);
  Glue = TOCVal.getValue(1);

  // If the function call has an explicit 'nest' parameter, it takes the
  // place of the environment pointer.
  assert((!hasNest || !Subtarget.isAIXABI()) &&
         "Nest parameter is not supported on AIX.");
  if (!hasNest) {
    SDValue EnvVal = DAG.getCopyToReg(Chain, dl, EnvPtrReg, LoadEnvPtr, Glue);
    Chain = EnvVal.getValue(0);
    Glue = EnvVal.getValue(1);
  }

  // The rest of the indirect call sequence is the same as the non-descriptor
  // DAG.
  prepareIndirectCall(DAG, LoadFuncPtr, Glue, Chain, dl);
}

static void
buildCallOperands(SmallVectorImpl<SDValue> &Ops,
                  PPCTargetLowering::CallFlags CFlags, const SDLoc &dl,
                  SelectionDAG &DAG,
                  SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
                  SDValue Glue, SDValue Chain, SDValue &Callee, int SPDiff,
                  const PPCSubtarget &Subtarget) {
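  // The operand list built below is, in order:
  //   the chain;
  //   the callee (direct calls) or, for indirect calls, the TOC-restore
  //     address (when required), the environment-pointer register
  //     (descriptor ABIs), and the CTR register (tail calls);
  //   the SPDiff constant (tail calls only);
  //   the argument registers;
  //   the TOC register (TOC-based ABIs, except patchpoints and PC-relative);
  //   CR1EQ (32-bit SVR4 varargs);
  //   the call-preserved register mask;
  //   and the input glue, if present.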
  const bool IsPPC64 = Subtarget.isPPC64();
  // MVT for a general purpose register.
  const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;

  // First operand is always the chain.
  Ops.push_back(Chain);

  // If it's a direct call, pass the callee as the second operand.
  if (!CFlags.IsIndirect)
    Ops.push_back(Callee);
  else {
    assert(!CFlags.IsPatchPoint && "Patch point calls are not indirect.");

    // For the TOC-based ABIs, we have saved the TOC pointer to the linkage
    // area on the stack (this would have been done in `LowerCall_64SVR4` or
    // `LowerCall_AIX`). The call instruction is a pseudo instruction that
    // represents both the indirect branch and a load that restores the TOC
    // pointer from the linkage area. The operand for the TOC restore is an add
    // of the TOC save offset to the stack pointer. This must be the second
    // operand: after the chain input but before any other variadic arguments.
    // For 64-bit ELFv2 ABI with PCRel, do not restore the TOC as it is not
    // saved or used.
    if (isTOCSaveRestoreRequired(Subtarget)) {
      const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();

      SDValue StackPtr = DAG.getRegister(StackPtrReg, RegVT);
      unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
      SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
      SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, StackPtr, TOCOff);
      Ops.push_back(AddTOC);
    }

    // Add the register used for the environment pointer.
    if (Subtarget.usesFunctionDescriptors() && !CFlags.HasNest)
      Ops.push_back(DAG.getRegister(Subtarget.getEnvironmentPointerRegister(),
                                    RegVT));

    // Add CTR register as callee so a bctr can be emitted later.
    if (CFlags.IsTailCall)
      Ops.push_back(DAG.getRegister(IsPPC64 ? PPC::CTR8 : PPC::CTR, RegVT));
  }

  // If this is a tail call, add the stack pointer delta.
  if (CFlags.IsTailCall)
    Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32));

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // We cannot add R2/X2 as an operand here for PATCHPOINT, because there is
  // no way to mark dependencies as implicit here.
  // We will add the R2/X2 dependency in EmitInstrWithCustomInserter.
  if ((Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) &&
      !CFlags.IsPatchPoint && !Subtarget.isUsingPCRelativeCalls())
    Ops.push_back(DAG.getRegister(Subtarget.getTOCPointerRegister(), RegVT));

  // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls.
  if (CFlags.IsVarArg && Subtarget.is32BitELFABI())
    Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask =
      TRI->getCallPreservedMask(DAG.getMachineFunction(), CFlags.CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // If the glue is valid, it is the last operand.
  if (Glue.getNode())
    Ops.push_back(Glue);
}

SDValue PPCTargetLowering::FinishCall(
    CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG,
    SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue Glue,
    SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
    unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
    SmallVectorImpl<SDValue> &InVals, const CallBase *CB) const {

  if ((Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls()) ||
      Subtarget.isAIXABI())
    setUsesTOCBasePtr(DAG);

  unsigned CallOpc =
      getCallOpcode(CFlags, DAG.getMachineFunction().getFunction(), Callee,
                    Subtarget, DAG.getTarget());

  if (!CFlags.IsIndirect)
    Callee = transformCallee(Callee, DAG, dl, Subtarget);
  else if (Subtarget.usesFunctionDescriptors())
    prepareDescriptorIndirectCall(DAG, Callee, Glue, Chain, CallSeqStart, CB,
                                  dl, CFlags.HasNest, Subtarget);
  else
    prepareIndirectCall(DAG, Callee, Glue, Chain, dl);

  // Build the operand list for the call instruction.
  SmallVector<SDValue, 8> Ops;
  buildCallOperands(Ops, CFlags, dl, DAG, RegsToPass, Glue, Chain, Callee,
                    SPDiff, Subtarget);

  // Emit tail call.
  if (CFlags.IsTailCall) {
    // Indirect tail calls when using PC-relative calls do not have the same
    // constraints.
    assert(((Callee.getOpcode() == ISD::Register &&
             cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
            Callee.getOpcode() == ISD::TargetExternalSymbol ||
            Callee.getOpcode() == ISD::TargetGlobalAddress ||
            isa<ConstantSDNode>(Callee) ||
            (CFlags.IsIndirect && Subtarget.isUsingPCRelativeCalls())) &&
           "Expecting a global address, external symbol, absolute value, "
           "register or an indirect tail call when PC Relative calls are "
           "used.");
    // PC Relative calls also use TC_RETURN as the way to mark tail calls.
    assert(CallOpc == PPCISD::TC_RETURN &&
           "Unexpected call opcode for a tail call.");
    DAG.getMachineFunction().getFrameInfo().setHasTailCall();
    return DAG.getNode(CallOpc, dl, MVT::Other, Ops);
  }

  std::array<EVT, 2> ReturnTypes = {{MVT::Other, MVT::Glue}};
  Chain = DAG.getNode(CallOpc, dl, ReturnTypes, Ops);
  DAG.addNoMergeSiteInfo(Chain.getNode(), CFlags.NoMerge);
  Glue = Chain.getValue(1);

  // When performing tail call optimization the callee pops its arguments off
  // the stack. Account for this here so these bytes can be pushed back on in
  // PPCFrameLowering::eliminateCallFramePseudoInstr.
  int BytesCalleePops = (CFlags.CallConv == CallingConv::Fast &&
                         getTargetMachine().Options.GuaranteedTailCallOpt)
                            ? NumBytes
                            : 0;

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                             DAG.getIntPtrConstant(BytesCalleePops, dl, true),
                             Glue, dl);
  Glue = Chain.getValue(1);

  return LowerCallResult(Chain, Glue, CFlags.CallConv, CFlags.IsVarArg, Ins, dl,
                         DAG, InVals);
}

SDValue
PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG                     = CLI.DAG;
  SDLoc &dl                             = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
  SDValue Chain                         = CLI.Chain;
  SDValue Callee                        = CLI.Callee;
  bool &isTailCall                      = CLI.IsTailCall;
  CallingConv::ID CallConv              = CLI.CallConv;
  bool isVarArg                         = CLI.IsVarArg;
  bool isPatchPoint                     = CLI.IsPatchPoint;
  const CallBase *CB                    = CLI.CB;

  if (isTailCall) {
    if (Subtarget.useLongCalls() && !(CB && CB->isMustTailCall()))
      isTailCall = false;
    else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
      isTailCall = IsEligibleForTailCallOptimization_64SVR4(
          Callee, CallConv, CB, isVarArg, Outs, Ins, DAG);
    else
      isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
                                                     Ins, DAG);
    if (isTailCall) {
      ++NumTailCalls;
      if (!getTargetMachine().Options.GuaranteedTailCallOpt)
        ++NumSiblingCalls;

      // PC Relative calls no longer guarantee that the callee is a Global
      // Address Node. The callee could be an indirect tail call in which
      // case the SDValue for the callee could be a load (to load the address
      // of a function pointer) or it may be a register copy (to move the
      // address of the callee from a function parameter into a virtual
      // register). It may also be an ExternalSymbolSDNode (e.g. memcpy).
      assert((Subtarget.isUsingPCRelativeCalls() ||
              isa<GlobalAddressSDNode>(Callee)) &&
             "Callee should be an llvm::Function object.");

      LLVM_DEBUG(dbgs() << "TCO caller: " << DAG.getMachineFunction().getName()
                        << "\nTCO callee: ");
      LLVM_DEBUG(Callee.dump());
    }
  }

  if (!isTailCall && CB && CB->isMustTailCall())
    report_fatal_error("failed to perform tail call elimination on a call "
                       "site marked musttail");

  // When long calls (i.e. indirect calls) are always used, calls are always
  // made via a function pointer. If we have a function name, first translate
  // it into a pointer.
  if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
      !isTailCall)
    Callee = LowerGlobalAddress(Callee, DAG);

  CallFlags CFlags(
      CallConv, isTailCall, isVarArg, isPatchPoint,
      isIndirectCall(Callee, DAG, Subtarget, isPatchPoint),
      // hasNest
      Subtarget.is64BitELFABI() &&
          any_of(Outs, [](ISD::OutputArg Arg) { return Arg.Flags.isNest(); }),
      CLI.NoMerge);

  if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
    return LowerCall_64SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
                            InVals, CB);

  if (Subtarget.isSVR4ABI())
    return LowerCall_32SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
                            InVals, CB);

  if (Subtarget.isAIXABI())
    return LowerCall_AIX(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
                         InVals, CB);

  return LowerCall_Darwin(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
                          InVals, CB);
}

SDValue PPCTargetLowering::LowerCall_32SVR4(
    SDValue Chain, SDValue Callee, CallFlags CFlags,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    const CallBase *CB) const {
  // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
  // of the 32-bit SVR4 ABI stack frame layout.

  const CallingConv::ID CallConv = CFlags.CallConv;
  const bool IsVarArg = CFlags.IsVarArg;
  const bool IsTailCall = CFlags.IsTailCall;

  assert((CallConv == CallingConv::C ||
          CallConv == CallingConv::Cold ||
          CallConv == CallingConv::Fast) && "Unknown calling convention!");

  const Align PtrAlign(4);

  MachineFunction &MF = DAG.getMachineFunction();

  // Mark this function as potentially containing a function that contains a
  // tail call. As a consequence, the frame pointer will be used for dynamic
  // stack allocation and for restoring the caller's stack pointer in this
  // function's epilogue. This is done because a tail-called function might
  // overwrite the value in this function's (MF) stack pointer stack slot
  // 0(SP).
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area, the parameter list area, and the part of the local variable space
  // that contains copies of aggregates passed by value.

  // Assign locations to all of the outgoing arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  PPCCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  // Reserve space for the linkage area on the stack.
  CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
                       PtrAlign);
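  // (On 32-bit SVR4 the linkage area is 8 bytes: a back chain word and an
  // LR save word.)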
  if (useSoftFloat())
    CCInfo.PreAnalyzeCallOperands(Outs);

  if (IsVarArg) {
    // Handle fixed and variable vector arguments differently.
    // Fixed vector arguments go into registers as long as registers are
    // available. Variable vector arguments always go into memory.
    unsigned NumArgs = Outs.size();

    for (unsigned i = 0; i != NumArgs; ++i) {
      MVT ArgVT = Outs[i].VT;
      ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
      bool Result;

      if (Outs[i].IsFixed) {
        Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
                               CCInfo);
      } else {
        Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
                                      ArgFlags, CCInfo);
      }

      if (Result) {
#ifndef NDEBUG
        errs() << "Call operand #" << i << " has unhandled type "
               << EVT(ArgVT).getEVTString() << "\n";
#endif
        llvm_unreachable(nullptr);
      }
    }
  } else {
    // All arguments are treated the same.
    CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
  }
  CCInfo.clearWasPPCF128();

  // Assign locations to all of the outgoing aggregate by value arguments.
  SmallVector<CCValAssign, 16> ByValArgLocs;
  CCState CCByValInfo(CallConv, IsVarArg, MF, ByValArgLocs, *DAG.getContext());

  // Reserve stack space for the allocations in CCInfo.
  CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);

  CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);

  // Size of the linkage area, the parameter list area, and the part of the
  // local variable space where copies of by-value aggregates are stored.
  unsigned NumBytes = CCByValInfo.getNextStackOffset();

  // Calculate by how many bytes the stack has to be adjusted in case of tail
  // call optimization.
  int SPDiff = CalculateTailCallSPDiff(DAG, IsTailCall, NumBytes);

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
  SDValue CallSeqStart = Chain;

  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
  SDValue LROp, FPOp;
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
  SmallVector<SDValue, 8> MemOpChains;

  bool seenFloatArg = false;
  // Walk the register/memloc assignments, inserting copies/loads.
  // i - Tracks the index into the list of registers allocated for the call.
  // RealArgIdx - Tracks the index into the list of actual function arguments.
  // j - Tracks the index into the list of byval arguments.
  for (unsigned i = 0, RealArgIdx = 0, j = 0, e = ArgLocs.size();
       i != e;
       ++i, ++RealArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[RealArgIdx];
    ISD::ArgFlagsTy Flags = Outs[RealArgIdx].Flags;

    if (Flags.isByVal()) {
      // Argument is an aggregate which is passed by value, thus we need to
      // create a copy of it in the local variable space of the current stack
      // frame (which is the stack frame of the caller) and pass the address of
      // this copy to the callee.
      assert((j < ByValArgLocs.size()) && "Index out of bounds!");
      CCValAssign &ByValVA = ByValArgLocs[j++];
      assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");

      // Memory reserved in the local variable space of the caller's stack
      // frame.
      unsigned LocMemOffset = ByValVA.getLocMemOffset();

      SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
      PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
                           StackPtr, PtrOff);

      // Create a copy of the argument in the local area of the current
      // stack frame.
      SDValue MemcpyCall =
        CreateCopyOfByValArgument(Arg, PtrOff,
                                  CallSeqStart.getNode()->getOperand(0),
                                  Flags, DAG, dl);

      // This must go outside the CALLSEQ_START..END.
      SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0,
                                                     SDLoc(MemcpyCall));
      DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
                             NewCallSeqStart.getNode());
      Chain = CallSeqStart = NewCallSeqStart;

      // Pass the address of the aggregate copy on the stack either in a
      // physical register or in the parameter list area of the current stack
      // frame to the callee.
      Arg = PtrOff;
    }

    // When useCRBits() is true, there can be i1 arguments. This is because
    // getRegisterType(MVT::i1) => MVT::i1, while for other integer types
    // getRegisterType() => MVT::i32. Extend i1 and ensure the callee will
    // get an i32.
    if (Arg.getValueType() == MVT::i1)
      Arg = DAG.getNode(Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
                        dl, MVT::i32, Arg);

    if (VA.isRegLoc()) {
      seenFloatArg |= VA.getLocVT().isFloatingPoint();
      // Put argument in a physical register.
      if (Subtarget.hasSPE() && Arg.getValueType() == MVT::f64) {
        bool IsLE = Subtarget.isLittleEndian();
        SDValue SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
                                   DAG.getIntPtrConstant(IsLE ? 0 : 1, dl));
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), SVal.getValue(0)));
        SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
                           DAG.getIntPtrConstant(IsLE ? 1 : 0, dl));
        RegsToPass.push_back(std::make_pair(ArgLocs[++i].getLocReg(),
                                            SVal.getValue(0)));
      } else
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      // Put argument in the parameter list area of the current stack frame.
      assert(VA.isMemLoc());
      unsigned LocMemOffset = VA.getLocMemOffset();

      if (!IsTailCall) {
        SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
        PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
                             StackPtr, PtrOff);

        MemOpChains.push_back(
            DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
      } else {
        // Calculate and remember argument location.
        CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
                                 TailCallArguments);
      }
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // Set CR bit 6 to true if this is a vararg call with floating args passed in
  // registers.
  if (IsVarArg) {
    SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue Ops[] = { Chain, InFlag };

    Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
                        dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));

    InFlag = Chain.getValue(1);
  }

  if (IsTailCall)
    PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
                    TailCallArguments);

  return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
                    Callee, SPDiff, NumBytes, Ins, InVals, CB);
}

// Copy an argument into memory, being careful to do this outside the
// call sequence for the call to which the argument belongs.
SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
    SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
    SelectionDAG &DAG, const SDLoc &dl) const {
  SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
                         CallSeqStart.getNode()->getOperand(0),
                         Flags, DAG, dl);
  // The MEMCPY must go outside the CALLSEQ_START..END.
  int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
  SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
                                                 SDLoc(MemcpyCall));
  DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
                         NewCallSeqStart.getNode());
  return NewCallSeqStart;
}

SDValue PPCTargetLowering::LowerCall_64SVR4(
    SDValue Chain, SDValue Callee, CallFlags CFlags,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    const CallBase *CB) const {
  bool isELFv2ABI = Subtarget.isELFv2ABI();
  bool isLittleEndian = Subtarget.isLittleEndian();
  unsigned NumOps = Outs.size();
  bool IsSibCall = false;
  bool IsFastCall = CFlags.CallConv == CallingConv::Fast;

  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  unsigned PtrByteSize = 8;

  MachineFunction &MF = DAG.getMachineFunction();

  if (CFlags.IsTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
    IsSibCall = true;

  // Mark this function as potentially containing a function that contains a
  // tail call. As a consequence, the frame pointer will be used for dynamic
  // stack allocation and for restoring the caller's stack pointer in this
  // function's epilogue. This is done because a tail-called function might
  // overwrite the value in this function's (MF) stack pointer stack slot
  // 0(SP).
  if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();

  assert(!(IsFastCall && CFlags.IsVarArg) &&
         "fastcc not supported on varargs functions");

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area, and parameter passing area.  On ELFv1, the linkage area is 48 bytes
  // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
  // area is 32 bytes reserved space for [SP][CR][LR][TOC].
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  unsigned NumBytes = LinkageSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;

  static const MCPhysReg GPR[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };
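  // Together with FPR1-FPR13 (counted via NumFPRs below), these arrays list
  // the argument-passing registers of the 64-bit ELF ABI: eight GPRs
  // (X3-X10) and twelve vector registers (V2-V13).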

  const unsigned NumGPRs = array_lengthof(GPR);
  const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
  const unsigned NumVRs  = array_lengthof(VR);

  // On ELFv2, we can avoid allocating the parameter area if all the arguments
  // can be passed to the callee in registers.
  // For the fast calling convention, there is another check below.
  // Note: We should keep this consistent with LowerFormalArguments_64SVR4().
  bool HasParameterArea = !isELFv2ABI || CFlags.IsVarArg || IsFastCall;
  if (!HasParameterArea) {
    unsigned ParamAreaSize = NumGPRs * PtrByteSize;
    unsigned AvailableFPRs = NumFPRs;
    unsigned AvailableVRs = NumVRs;
    unsigned NumBytesTmp = NumBytes;
    for (unsigned i = 0; i != NumOps; ++i) {
      if (Outs[i].Flags.isNest()) continue;
      if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags,
                                 PtrByteSize, LinkageSize, ParamAreaSize,
                                 NumBytesTmp, AvailableFPRs, AvailableVRs))
        HasParameterArea = true;
    }
  }

  // When using the fast calling convention, we don't provide backing for
  // arguments that will be in registers.
  unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;

  // Avoid allocating the parameter area for fastcc functions if all the
  // arguments can be passed in the registers.
  if (IsFastCall)
    HasParameterArea = false;

  // Add up all the space actually used.
  for (unsigned i = 0; i != NumOps; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    EVT ArgVT = Outs[i].VT;
    EVT OrigVT = Outs[i].ArgVT;

    if (Flags.isNest())
      continue;

    if (IsFastCall) {
      if (Flags.isByVal()) {
        NumGPRsUsed += (Flags.getByValSize() + 7) / 8;
        if (NumGPRsUsed > NumGPRs)
          HasParameterArea = true;
      } else {
        switch (ArgVT.getSimpleVT().SimpleTy) {
        default: llvm_unreachable("Unexpected ValueType for argument!");
        case MVT::i1:
        case MVT::i32:
        case MVT::i64:
          if (++NumGPRsUsed <= NumGPRs)
            continue;
          break;
        case MVT::v4i32:
        case MVT::v8i16:
        case MVT::v16i8:
        case MVT::v2f64:
        case MVT::v2i64:
        case MVT::v1i128:
        case MVT::f128:
          if (++NumVRsUsed <= NumVRs)
            continue;
          break;
        case MVT::v4f32:
          if (++NumVRsUsed <= NumVRs)
            continue;
          break;
        case MVT::f32:
        case MVT::f64:
          if (++NumFPRsUsed <= NumFPRs)
            continue;
          break;
        }
        HasParameterArea = true;
      }
    }

    /* Respect alignment of argument on the stack.  */
    auto Alignment =
        CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
    NumBytes = alignTo(NumBytes, Alignment);

    NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
    if (Flags.isInConsecutiveRegsLast())
      NumBytes = ((NumBytes + PtrByteSize - 1) / PtrByteSize) * PtrByteSize;
  }

  unsigned NumBytesActuallyUsed = NumBytes;

  // In the old ELFv1 ABI, the prologue code of the callee may store up to 8
  // GPR argument registers to the stack, allowing va_start to index over them
  // in memory if the function is varargs.  Because we cannot tell if this is
  // needed on the caller side, we have to conservatively assume that it is
  // needed.  As such, make sure we have at least enough stack space for the
  // caller to store the 8 GPRs.
  // In the ELFv2 ABI, we allocate the parameter area iff a callee
  // really requires memory operands, e.g. a vararg function.
  if (HasParameterArea)
    NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
  else
    NumBytes = LinkageSize;
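  // (With a parameter area, the std::max above reserves at least
  // 48 + 8 * 8 = 112 bytes on ELFv1 and 32 + 8 * 8 = 96 bytes on ELFv2.)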

  // Tail call needs the stack to be aligned.
  if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
    NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);

  int SPDiff = 0;

  // Calculate by how many bytes the stack has to be adjusted in case of tail
  // call optimization.
  if (!IsSibCall)
    SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);

  // To protect arguments on the stack from being clobbered in a tail call,
  // force all the loads to happen before doing any other lowering.
  if (CFlags.IsTailCall)
    Chain = DAG.getStackArgumentTokenFactor(Chain);

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
  SDValue CallSeqStart = Chain;

  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
  SDValue LROp, FPOp;
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);

  // Figure out which arguments are going to go in registers, and which in
  // memory.  Also, if this is a vararg function, floating-point arguments
  // must be stored to our stack, and loaded into integer regs as well, if
  // any integer regs are available for argument passing.
  unsigned ArgOffset = LinkageSize;

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<TailCallArgumentInfo, 8> TailCallArguments;

  SmallVector<SDValue, 8> MemOpChains;
  for (unsigned i = 0; i != NumOps; ++i) {
    SDValue Arg = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    EVT ArgVT = Outs[i].VT;
    EVT OrigVT = Outs[i].ArgVT;

    // PtrOff will be used to store the current argument to the stack if a
    // register cannot be found for it.
    SDValue PtrOff;

    // We re-align the argument offset for each argument, except when using the
    // fast calling convention, where we must make sure to do so only when the
    // argument will actually be placed in a stack slot.
    auto ComputePtrOff = [&]() {
      /* Respect alignment of argument on the stack.  */
      auto Alignment =
          CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
      ArgOffset = alignTo(ArgOffset, Alignment);

      PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());

      PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
    };

    if (!IsFastCall) {
      ComputePtrOff();

      /* Compute GPR index associated with argument offset.  */
      GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
      GPR_idx = std::min(GPR_idx, NumGPRs);
    }

    // Promote integers to 64-bit values.
    if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
      // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
      unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
      Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
    }
6148 
6149     // FIXME memcpy is used way more than necessary.  Correctness first.
6150     // Note: "by value" is code for passing a structure by value, not
6151     // basic types.
6152     if (Flags.isByVal()) {
6153       // Note: Size includes alignment padding, so
6154       //   struct x { short a; char b; }
6155       // will have Size = 4.  With #pragma pack(1), it will have Size = 3.
6156       // These are the proper values we need for right-justifying the
6157       // aggregate in a parameter register.
6158       unsigned Size = Flags.getByValSize();
6159 
6160       // An empty aggregate parameter takes up no storage and no
6161       // registers.
6162       if (Size == 0)
6163         continue;
6164 
6165       if (IsFastCall)
6166         ComputePtrOff();
6167 
6168       // All aggregates smaller than 8 bytes must be passed right-justified.
6169       if (Size==1 || Size==2 || Size==4) {
6170         EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
6171         if (GPR_idx != NumGPRs) {
6172           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
6173                                         MachinePointerInfo(), VT);
6174           MemOpChains.push_back(Load.getValue(1));
6175           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6176 
6177           ArgOffset += PtrByteSize;
6178           continue;
6179         }
6180       }
6181 
6182       if (GPR_idx == NumGPRs && Size < 8) {
6183         SDValue AddPtr = PtrOff;
6184         if (!isLittleEndian) {
6185           SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
6186                                           PtrOff.getValueType());
6187           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6188         }
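        // e.g., a 3-byte aggregate with 8-byte stack slots is copied to
        // PtrOff + 5 on big-endian targets, so it occupies the rightmost
        // bytes of the doubleword.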
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
                                                          CallSeqStart,
                                                          Flags, DAG, dl);
        ArgOffset += PtrByteSize;
        continue;
      }
      // Copy entire object into memory.  There are cases where gcc-generated
      // code assumes it is there, even if it could be put entirely into
      // registers.  (This is not what the doc says.)

      // FIXME: The above statement is likely due to a misunderstanding of the
      // documents.  All arguments must be copied into the parameter area BY
      // THE CALLEE in the event that the callee takes the address of any
      // formal argument.  That has not yet been implemented.  However, it is
      // reasonable to use the stack area as a staging area for the register
      // load.

      // Skip this for small aggregates, as we will use the same slot for a
      // right-justified copy, below.
      if (Size >= 8)
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
                                                          CallSeqStart,
                                                          Flags, DAG, dl);

      // When a register is available, pass a small aggregate right-justified.
      if (Size < 8 && GPR_idx != NumGPRs) {
        // The easiest way to get this right-justified in a register
        // is to copy the structure into the rightmost portion of a
        // local variable slot, then load the whole slot into the
        // register.
        // FIXME: The memcpy seems to produce pretty awful code for
        // small aggregates, particularly for packed ones.
        // FIXME: It would be preferable to use the slot in the
        // parameter save area instead of a new local variable.
        SDValue AddPtr = PtrOff;
        if (!isLittleEndian) {
          SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType());
          AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
        }
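        // As above, the copy is shifted right by (8 - Size) bytes on
        // big-endian so the aggregate lands at the low-order end of the slot.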
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
                                                          CallSeqStart,
                                                          Flags, DAG, dl);

        // Load the slot into the register.
        SDValue Load =
            DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo());
        MemOpChains.push_back(Load.getValue(1));
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));

        // Done with this argument.
        ArgOffset += PtrByteSize;
        continue;
      }

      // For aggregates larger than PtrByteSize, copy the pieces of the
      // object that fit into registers from the parameter save area.
      for (unsigned j = 0; j < Size; j += PtrByteSize) {
        SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
        SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
        if (GPR_idx != NumGPRs) {
          SDValue Load =
              DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
          break;
        }
      }
      continue;
    }

    switch (Arg.getSimpleValueType().SimpleTy) {
    default: llvm_unreachable("Unexpected ValueType for argument!");
    case MVT::i1:
    case MVT::i32:
    case MVT::i64:
      if (Flags.isNest()) {
        // The 'nest' parameter, if any, is passed in R11.
        RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
        break;
      }

      // These can be scalar arguments or elements of an integer array type
      // passed directly.  Clang may use those instead of "byval" aggregate
      // types to avoid forcing arguments to memory unnecessarily.
      if (GPR_idx != NumGPRs) {
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
      } else {
        if (IsFastCall)
          ComputePtrOff();

        assert(HasParameterArea &&
               "Parameter area must exist to pass an argument in memory.");
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         true, CFlags.IsTailCall, false, MemOpChains,
                         TailCallArguments, dl);
        if (IsFastCall)
          ArgOffset += PtrByteSize;
      }
      if (!IsFastCall)
        ArgOffset += PtrByteSize;
      break;
    case MVT::f32:
    case MVT::f64: {
      // These can be scalar arguments or elements of a float array type
      // passed directly.  The latter are used to implement ELFv2 homogeneous
      // float aggregates.

      // Named arguments go into FPRs first, and once they overflow, the
      // remaining arguments go into GPRs and then the parameter save area.
      // Unnamed arguments for vararg functions always go to GPRs and then
      // the parameter save area.  For now, arguments to vararg routines are
      // always placed in both locations (FPR *and* GPR or stack slot).
      bool NeedGPROrStack = CFlags.IsVarArg || FPR_idx == NumFPRs;
      bool NeededLoad = false;

      // First load the argument into the next available FPR.
      if (FPR_idx != NumFPRs)
        RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));

      // Next, load the argument into GPR or stack slot if needed.
      if (!NeedGPROrStack)
        ;
      else if (GPR_idx != NumGPRs && !IsFastCall) {
        // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
        // once we support fp <-> gpr moves.

        // In the non-vararg case, this can only ever happen in the
        // presence of f32 array types, since otherwise we never run
        // out of FPRs before running out of GPRs.
        SDValue ArgVal;

        // Double values are always passed in a single GPR.
        if (Arg.getValueType() != MVT::f32) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);

        // Non-array float values are extended and passed in a GPR.
        } else if (!Flags.isInConsecutiveRegs()) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);

        // If we have an array of floats, we collect every odd element
        // together with its predecessor into one GPR.
        } else if (ArgOffset % PtrByteSize != 0) {
          SDValue Lo, Hi;
          Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
          Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          if (!isLittleEndian)
            std::swap(Lo, Hi);
          ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
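          // e.g., elements k-1 and k of an f32 array are bitcast to i32 and
          // share one i64; on big-endian the swap puts the earlier element
          // into the more significant word.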

        // The final element, if even, goes into the first half of a GPR.
        } else if (Flags.isInConsecutiveRegsLast()) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
          if (!isLittleEndian)
            ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
                                 DAG.getConstant(32, dl, MVT::i32));
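          // On big-endian the last element of an even-length array must live
          // in the first (high) word of the GPR, hence the shift by 32.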

        // Non-final even elements are skipped; they will be handled
        // together with the subsequent argument on the next go-around.
        } else
          ArgVal = SDValue();

        if (ArgVal.getNode())
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
      } else {
        if (IsFastCall)
          ComputePtrOff();

        // Single-precision floating-point values are mapped to the
        // second (rightmost) word of the stack doubleword.
        if (Arg.getValueType() == MVT::f32 &&
            !isLittleEndian && !Flags.isInConsecutiveRegs()) {
          SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
          PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
        }

        assert(HasParameterArea &&
               "Parameter area must exist to pass an argument in memory.");
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         true, CFlags.IsTailCall, false, MemOpChains,
                         TailCallArguments, dl);

        NeededLoad = true;
      }
      // When passing an array of floats, the array occupies consecutive
      // space in the argument area; only round up to the next doubleword
      // at the end of the array.  Otherwise, each float takes 8 bytes.
      if (!IsFastCall || NeededLoad) {
        ArgOffset += (Arg.getValueType() == MVT::f32 &&
                      Flags.isInConsecutiveRegs()) ? 4 : 8;
        if (Flags.isInConsecutiveRegsLast())
          ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      }
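      // e.g., five consecutive f32s advance ArgOffset by 4 each (20 bytes),
      // and the final round-up brings it to 24.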
      break;
    }
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
    case MVT::v2f64:
    case MVT::v2i64:
    case MVT::v1i128:
    case MVT::f128:
      // These can be scalar arguments or elements of a vector array type
      // passed directly.  The latter are used to implement ELFv2 homogeneous
      // vector aggregates.

      // For a varargs call, named arguments go into VRs or on the stack as
      // usual; unnamed arguments always go to the stack or the corresponding
      // GPRs when within range.  For now, we always put the value in both
      // locations (or even all three).
      if (CFlags.IsVarArg) {
        assert(HasParameterArea &&
               "Parameter area must exist if we have a varargs call.");
        // We could elide this store in the case where the object fits
        // entirely in R registers.  Maybe later.
        SDValue Store =
            DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
        MemOpChains.push_back(Store);
        if (VR_idx != NumVRs) {
          SDValue Load =
              DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
        }
        ArgOffset += 16;
        for (unsigned i = 0; i < 16; i += PtrByteSize) {
          if (GPR_idx == NumGPRs)
            break;
          SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
                                   DAG.getConstant(i, dl, PtrVT));
          SDValue Load =
              DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
        }
        break;
      }

      // Non-varargs Altivec params go into VRs or on the stack.
      if (VR_idx != NumVRs) {
        RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
      } else {
        if (IsFastCall)
          ComputePtrOff();

        assert(HasParameterArea &&
               "Parameter area must exist to pass an argument in memory.");
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         true, CFlags.IsTailCall, true, MemOpChains,
                         TailCallArguments, dl);
        if (IsFastCall)
          ArgOffset += 16;
      }

      if (!IsFastCall)
        ArgOffset += 16;
      break;
    }
  }

  assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
         "mismatch in size of parameter area");
  (void)NumBytesActuallyUsed;

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Check if this is an indirect call (MTCTR/BCTRL).
  // See prepareDescriptorIndirectCall and buildCallOperands for more
  // information about calls through function pointers in the 64-bit SVR4 ABI.
  if (CFlags.IsIndirect) {
    // For 64-bit ELFv2 ABI with PCRel, do not save the TOC of the
    // caller in the TOC save area.
    if (isTOCSaveRestoreRequired(Subtarget)) {
      assert(!CFlags.IsTailCall && "Indirect tail calls not supported");
      // Load r2 into a virtual register and store it to the TOC save area.
      setUsesTOCBasePtr(DAG);
      SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
      // TOC save area offset.
      unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
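      // e.g., the TOC save slot sits at offset 24 from the stack pointer
      // under ELFv2, and at 40 under ELFv1.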
      SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
      SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
      Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr,
                           MachinePointerInfo::getStack(
                               DAG.getMachineFunction(), TOCSaveOffset));
    }
    // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
    // This does not mean the MTCTR instruction must use R12; it's easier
    // to model this as an extra parameter, so do that.
    if (isELFv2ABI && !CFlags.IsPatchPoint)
      RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
  }

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  if (CFlags.IsTailCall && !IsSibCall)
    PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
                    TailCallArguments);

  return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
                    Callee, SPDiff, NumBytes, Ins, InVals, CB);
}

SDValue PPCTargetLowering::LowerCall_Darwin(
    SDValue Chain, SDValue Callee, CallFlags CFlags,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    const CallBase *CB) const {
  unsigned NumOps = Outs.size();

  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;
  unsigned PtrByteSize = isPPC64 ? 8 : 4;

  MachineFunction &MF = DAG.getMachineFunction();

  // Mark this function as potentially containing a function that contains a
  // tail call.  As a consequence, the frame pointer will be used for dynamic
  // allocas and for restoring the caller's stack pointer in this function's
  // (MF) epilog.  This is done because the tail-called function might
  // overwrite the value in this function's stack pointer slot at 0(SP).
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CFlags.CallConv == CallingConv::Fast)
    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area, and parameter passing area.  We start with 24/48 bytes, which is
  // prereserved space for [SP][CR][LR][3 x unused].
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  unsigned NumBytes = LinkageSize;

  // Add up all the space actually used.
  // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
  // they all go in registers, but we must reserve stack space for them for
  // possible use by the caller.  In varargs or 64-bit calls, parameters are
  // assigned stack space in order, with padding so Altivec parameters are
  // 16-byte aligned.
  unsigned nAltivecParamsAtEnd = 0;
  for (unsigned i = 0; i != NumOps; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    EVT ArgVT = Outs[i].VT;
    // Varargs Altivec parameters are padded to a 16 byte boundary.
    if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
        ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
        ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
      if (!CFlags.IsVarArg && !isPPC64) {
        // Non-varargs Altivec parameters go after all the non-Altivec
        // parameters; handle those later so we know how much padding we need.
        nAltivecParamsAtEnd++;
        continue;
      }
      // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary.
      NumBytes = ((NumBytes + 15) / 16) * 16;
    }
    NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
  }

  // Allow for Altivec parameters at the end, if needed.
  if (nAltivecParamsAtEnd) {
    NumBytes = ((NumBytes + 15) / 16) * 16;
    NumBytes += 16 * nAltivecParamsAtEnd;
  }

  // The prolog code of the callee may store up to 8 GPR argument registers to
  // the stack, allowing va_start to index over them in memory if the callee
  // is varargs.  Because we cannot tell if this is needed on the caller side,
  // we have to conservatively assume that it is needed.  As such, make sure
  // we have at least enough stack space for the caller to store the 8 GPRs.
  NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
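  // e.g., a 64-bit call reserves at least 48 + 8 * 8 = 112 bytes even when
  // few or no arguments are passed on the stack.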

  // Tail call needs the stack to be aligned.
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CFlags.CallConv == CallingConv::Fast)
    NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);

  // Calculate by how many bytes the stack has to be adjusted in case of tail
  // call optimization.
  int SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);

  // To protect arguments on the stack from being clobbered in a tail call,
  // force all the loads to happen before doing any other lowering.
  if (CFlags.IsTailCall)
    Chain = DAG.getStackArgumentTokenFactor(Chain);

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass
  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
  SDValue CallSeqStart = Chain;

  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
  SDValue LROp, FPOp;
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);

  // Set up a copy of the stack pointer for use in loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr;
  if (isPPC64)
    StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
  else
    StackPtr = DAG.getRegister(PPC::R1, MVT::i32);

  // Figure out which arguments are going to go in registers, and which in
  // memory.  Also, if this is a vararg function, floating point arguments
  // must be stored to our stack, and loaded into integer regs as well, if
  // any integer regs are available for argument passing.
  unsigned ArgOffset = LinkageSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;

  static const MCPhysReg GPR_32[] = {           // 32-bit registers.
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  static const MCPhysReg GPR_64[] = {           // 64-bit registers.
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };
  const unsigned NumGPRs = array_lengthof(GPR_32);
  const unsigned NumFPRs = 13;
  const unsigned NumVRs  = array_lengthof(VR);

  const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<TailCallArgumentInfo, 8> TailCallArguments;

  SmallVector<SDValue, 8> MemOpChains;
  for (unsigned i = 0; i != NumOps; ++i) {
    SDValue Arg = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;

    // PtrOff will be used to store the current argument to the stack if a
    // register cannot be found for it.
    SDValue PtrOff;

    PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());

    PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);

    // On PPC64, promote integers to 64-bit values.
    if (isPPC64 && Arg.getValueType() == MVT::i32) {
      // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
      unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
      Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
    }

    // FIXME memcpy is used way more than necessary.  Correctness first.
    // Note: "by value" is code for passing a structure by value, not
    // basic types.
    if (Flags.isByVal()) {
      unsigned Size = Flags.getByValSize();
      // Very small objects are passed right-justified.  Everything else is
      // passed left-justified.
      if (Size == 1 || Size == 2) {
        EVT VT = (Size == 1) ? MVT::i8 : MVT::i16;
        if (GPR_idx != NumGPRs) {
          SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
                                        MachinePointerInfo(), VT);
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));

          ArgOffset += PtrByteSize;
        } else {
          SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
                                          PtrOff.getValueType());
          SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
          Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
                                                            CallSeqStart,
                                                            Flags, DAG, dl);
          ArgOffset += PtrByteSize;
        }
        continue;
      }
      // Copy entire object into memory.  There are cases where gcc-generated
      // code assumes it is there, even if it could be put entirely into
      // registers.  (This is not what the doc says.)
      Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
                                                        CallSeqStart,
                                                        Flags, DAG, dl);

      // For small aggregates (Darwin only) and aggregates >= PtrByteSize,
      // copy the pieces of the object that fit into registers from the
      // parameter save area.
      for (unsigned j = 0; j < Size; j += PtrByteSize) {
        SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
        SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
        if (GPR_idx != NumGPRs) {
          SDValue Load =
              DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
          break;
        }
      }
      continue;
    }

    switch (Arg.getSimpleValueType().SimpleTy) {
    default: llvm_unreachable("Unexpected ValueType for argument!");
    case MVT::i1:
    case MVT::i32:
    case MVT::i64:
      if (GPR_idx != NumGPRs) {
        if (Arg.getValueType() == MVT::i1)
          Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg);

        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
      } else {
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         isPPC64, CFlags.IsTailCall, false, MemOpChains,
                         TailCallArguments, dl);
      }
      ArgOffset += PtrByteSize;
      break;
    case MVT::f32:
    case MVT::f64:
      if (FPR_idx != NumFPRs) {
        RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));

        if (CFlags.IsVarArg) {
          SDValue Store =
              DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
          MemOpChains.push_back(Store);

          // Float varargs are always shadowed in available integer registers
          if (GPR_idx != NumGPRs) {
            SDValue Load =
                DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
            MemOpChains.push_back(Load.getValue(1));
            RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          }
          if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
              !isPPC64) {
            SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
            PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
            SDValue Load =
                DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
            MemOpChains.push_back(Load.getValue(1));
            RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          }
        } else {
          // If we have any FPRs remaining, we may also have GPRs remaining.
          // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
          // GPRs.
          if (GPR_idx != NumGPRs)
            ++GPR_idx;
          if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
              !isPPC64)  // PPC64 has 64-bit GPR's obviously :)
            ++GPR_idx;
        }
      } else
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         isPPC64, CFlags.IsTailCall, false, MemOpChains,
                         TailCallArguments, dl);
      if (isPPC64)
        ArgOffset += 8;
      else
        ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
      break;
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
      if (CFlags.IsVarArg) {
        // These go aligned on the stack, or in the corresponding R registers
        // when within range.  The Darwin PPC ABI doc claims they also go in
        // V registers; in fact gcc does this only for arguments that are
        // prototyped, not for those that match the "...".  We do it for all
        // arguments, which seems to work.
        while (ArgOffset % 16 != 0) {
          ArgOffset += PtrByteSize;
          if (GPR_idx != NumGPRs)
            GPR_idx++;
        }
        // We could elide this store in the case where the object fits
        // entirely in R registers.  Maybe later.
        PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
                             DAG.getConstant(ArgOffset, dl, PtrVT));
        SDValue Store =
            DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
        MemOpChains.push_back(Store);
        if (VR_idx != NumVRs) {
          SDValue Load =
              DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
        }
        ArgOffset += 16;
        for (unsigned i = 0; i < 16; i += PtrByteSize) {
          if (GPR_idx == NumGPRs)
            break;
          SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
                                   DAG.getConstant(i, dl, PtrVT));
          SDValue Load =
              DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
        }
        break;
      }

      // Non-varargs Altivec params generally go in registers, but have
      // stack space allocated at the end.
      if (VR_idx != NumVRs) {
        // Doesn't have GPR space allocated.
        RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
      } else if (nAltivecParamsAtEnd == 0) {
        // We are emitting Altivec params in order.
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         isPPC64, CFlags.IsTailCall, true, MemOpChains,
                         TailCallArguments, dl);
        ArgOffset += 16;
      }
      break;
    }
  }
  // If all Altivec parameters fit in registers, as they usually do,
  // they get stack space following the non-Altivec parameters.  We
  // don't track this here because nobody below needs it.
  // If there are more Altivec parameters than fit in registers, emit
  // the stores here.
  if (!CFlags.IsVarArg && nAltivecParamsAtEnd > NumVRs) {
    unsigned j = 0;
    // Offset is aligned; skip 1st 12 params which go in V registers.
    ArgOffset = ((ArgOffset + 15) / 16) * 16;
    ArgOffset += 12 * 16;
    for (unsigned i = 0; i != NumOps; ++i) {
      SDValue Arg = OutVals[i];
      EVT ArgType = Outs[i].VT;
      if (ArgType == MVT::v4f32 || ArgType == MVT::v4i32 ||
          ArgType == MVT::v8i16 || ArgType == MVT::v16i8) {
        if (++j > NumVRs) {
          SDValue PtrOff;
          // We are emitting Altivec params in order.
          LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                           isPPC64, CFlags.IsTailCall, true, MemOpChains,
                           TailCallArguments, dl);
          ArgOffset += 16;
        }
      }
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // On Darwin, R12 must contain the address of an indirect callee.  This does
  // not mean the MTCTR instruction must use R12; it's easier to model this as
  // an extra parameter, so do that.
  if (CFlags.IsIndirect) {
    assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
    RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 :
                                                   PPC::R12), Callee));
  }

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  if (CFlags.IsTailCall)
    PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
                    TailCallArguments);

  return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
                    Callee, SPDiff, NumBytes, Ins, InVals, CB);
}

static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
                   CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                   CCState &State) {

  const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>(
      State.getMachineFunction().getSubtarget());
  const bool IsPPC64 = Subtarget.isPPC64();
  const Align PtrAlign = IsPPC64 ? Align(8) : Align(4);
  const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;

  assert((!ValVT.isInteger() ||
          (ValVT.getSizeInBits() <= RegVT.getSizeInBits())) &&
         "Integer argument exceeds register size: should have been legalized");

  if (ValVT == MVT::f128)
    report_fatal_error("f128 is unimplemented on AIX.");

  if (ArgFlags.isNest())
    report_fatal_error("Nest arguments are unimplemented.");

  if (ValVT.isVector() || LocVT.isVector())
    report_fatal_error("Vector arguments are unimplemented on AIX.");

  static const MCPhysReg GPR_32[] = {// 32-bit registers.
                                     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
                                     PPC::R7, PPC::R8, PPC::R9, PPC::R10};
  static const MCPhysReg GPR_64[] = {// 64-bit registers.
                                     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
                                     PPC::X7, PPC::X8, PPC::X9, PPC::X10};

  if (ArgFlags.isByVal()) {
    if (ArgFlags.getNonZeroByValAlign() > PtrAlign)
      report_fatal_error("Pass-by-value arguments with alignment greater than "
                         "register width are not supported.");

    const unsigned ByValSize = ArgFlags.getByValSize();

    // An empty aggregate parameter takes up no storage and no registers,
    // but needs a MemLoc for a stack slot for the formal arguments side.
    if (ByValSize == 0) {
      State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
                                       State.getNextStackOffset(), RegVT,
                                       LocInfo));
      return false;
    }

    const unsigned StackSize = alignTo(ByValSize, PtrAlign);
    unsigned Offset = State.AllocateStack(StackSize, PtrAlign);
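    // e.g., a 10-byte byval with 4-byte pointers reserves 12 bytes of stack
    // and can occupy up to three GPRs.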
    for (const unsigned E = Offset + StackSize; Offset < E;
         Offset += PtrAlign.value()) {
      if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
        State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
      else {
        State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
                                         Offset, MVT::INVALID_SIMPLE_VALUE_TYPE,
                                         LocInfo));
        break;
      }
    }
    return false;
  }

  // Arguments always reserve space in the parameter save area.
  switch (ValVT.SimpleTy) {
  default:
    report_fatal_error("Unhandled value type for argument.");
  case MVT::i64:
    // i64 arguments should have been split to i32 for PPC32.
    assert(IsPPC64 && "PPC32 should have split i64 values.");
    LLVM_FALLTHROUGH;
  case MVT::i1:
  case MVT::i32: {
    const unsigned Offset = State.AllocateStack(PtrAlign.value(), PtrAlign);
    // AIX integer arguments are always extended to register width.
    if (ValVT.getSizeInBits() < RegVT.getSizeInBits())
      LocInfo = ArgFlags.isSExt() ? CCValAssign::LocInfo::SExt
                                  : CCValAssign::LocInfo::ZExt;
    if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
    else
      State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, RegVT, LocInfo));

    return false;
  }
  case MVT::f32:
  case MVT::f64: {
    // The parameter save area (PSA) is reserved even if the float is passed
    // in an FPR.
    const unsigned StoreSize = LocVT.getStoreSize();
    // Floats are always 4-byte aligned in the PSA on AIX.
    // This includes f64 in 64-bit mode for ABI compatibility.
    const unsigned Offset =
        State.AllocateStack(IsPPC64 ? 8 : StoreSize, Align(4));
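    // e.g., an f32 in 64-bit mode still reserves a full 8-byte slot; in
    // 32-bit mode an f64 reserves 8 bytes at only 4-byte alignment.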
    unsigned FReg = State.AllocateReg(FPR);
    if (FReg)
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, FReg, LocVT, LocInfo));

    // Reserve and initialize GPRs or initialize the PSA as required.
    for (unsigned I = 0; I < StoreSize; I += PtrAlign.value()) {
      if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) {
        assert(FReg && "An FPR should be available when a GPR is reserved.");
        if (State.isVarArg()) {
          // Successfully reserved GPRs are only initialized for vararg calls.
          // Custom handling is required for:
          //   f64 in PPC32 needs to be split into 2 GPRs.
          //   f32 in PPC64 needs to occupy only lower 32 bits of 64-bit GPR.
          State.addLoc(
              CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo));
        }
      } else {
        // If there are insufficient GPRs, the PSA needs to be initialized.
        // For compatibility with the AIX XL compiler, initialization occurs
        // even if an FPR was initialized.  The full memory for the argument
        // will be initialized even if a prior word is saved in a GPR.
        // A custom MemLoc is used when the argument also passes in an FPR so
        // that the callee handling can skip over it easily.
        State.addLoc(
            FReg ? CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT,
                                             LocInfo)
                 : CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
        break;
      }
    }

    return false;
  }
  }
  return true;
}

static const TargetRegisterClass *getRegClassForSVT(MVT::SimpleValueType SVT,
                                                    bool IsPPC64) {
  assert((IsPPC64 || SVT != MVT::i64) &&
         "i64 should have been split for 32-bit codegen.");

  switch (SVT) {
  default:
    report_fatal_error("Unexpected value type for formal argument");
  case MVT::i1:
  case MVT::i32:
  case MVT::i64:
    return IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
  case MVT::f32:
    return &PPC::F4RCRegClass;
  case MVT::f64:
    return &PPC::F8RCRegClass;
  }
}

static SDValue truncateScalarIntegerArg(ISD::ArgFlagsTy Flags, EVT ValVT,
                                        SelectionDAG &DAG, SDValue ArgValue,
                                        MVT LocVT, const SDLoc &dl) {
  assert(ValVT.isScalarInteger() && LocVT.isScalarInteger());
  assert(ValVT.getSizeInBits() < LocVT.getSizeInBits());

  if (Flags.isSExt())
    ArgValue = DAG.getNode(ISD::AssertSext, dl, LocVT, ArgValue,
                           DAG.getValueType(ValVT));
  else if (Flags.isZExt())
    ArgValue = DAG.getNode(ISD::AssertZext, dl, LocVT, ArgValue,
                           DAG.getValueType(ValVT));

  return DAG.getNode(ISD::TRUNCATE, dl, ValVT, ArgValue);
}

static unsigned mapArgRegToOffsetAIX(unsigned Reg, const PPCFrameLowering *FL) {
  const unsigned LASize = FL->getLinkageSize();
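  // e.g., R5 (the third argument GPR) maps to LASize + 8 on PPC32, and X5 to
  // LASize + 16 on PPC64.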

  if (PPC::GPRCRegClass.contains(Reg)) {
    assert(Reg >= PPC::R3 && Reg <= PPC::R10 &&
           "Reg must be a valid argument register!");
    return LASize + 4 * (Reg - PPC::R3);
  }

  if (PPC::G8RCRegClass.contains(Reg)) {
    assert(Reg >= PPC::X3 && Reg <= PPC::X10 &&
           "Reg must be a valid argument register!");
    return LASize + 8 * (Reg - PPC::X3);
  }

  llvm_unreachable("Only general purpose registers expected.");
}

//   AIX ABI Stack Frame Layout:
//
//   Low Memory +--------------------------------------------+
//   SP   +---> | Back chain                                 | ---+
//        |     +--------------------------------------------+    |
//        |     | Saved Condition Register                   |    |
//        |     +--------------------------------------------+    |
//        |     | Saved Linkage Register                     |    |
//        |     +--------------------------------------------+    | Linkage Area
//        |     | Reserved for compilers                     |    |
//        |     +--------------------------------------------+    |
//        |     | Reserved for binders                       |    |
//        |     +--------------------------------------------+    |
//        |     | Saved TOC pointer                          | ---+
//        |     +--------------------------------------------+
//        |     | Parameter save area                        |
//        |     +--------------------------------------------+
//        |     | Alloca space                               |
//        |     +--------------------------------------------+
//        |     | Local variable space                       |
//        |     +--------------------------------------------+
//        |     | Float/int conversion temporary             |
//        |     +--------------------------------------------+
//        |     | Save area for AltiVec registers            |
//        |     +--------------------------------------------+
//        |     | AltiVec alignment padding                  |
//        |     +--------------------------------------------+
//        |     | Save area for VRSAVE register              |
//        |     +--------------------------------------------+
//        |     | Save area for General Purpose registers    |
//        |     +--------------------------------------------+
//        |     | Save area for Floating Point registers     |
//        |     +--------------------------------------------+
//        +---- | Back chain                                 |
// High Memory  +--------------------------------------------+
//
//  Specifications:
//  AIX 7.2 Assembler Language Reference
//  Subroutine linkage convention

SDValue PPCTargetLowering::LowerFormalArguments_AIX(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  assert((CallConv == CallingConv::C || CallConv == CallingConv::Cold ||
          CallConv == CallingConv::Fast) &&
         "Unexpected calling convention!");

  if (getTargetMachine().Options.GuaranteedTailCallOpt)
    report_fatal_error("Tail call support is unimplemented on AIX.");

  if (useSoftFloat())
    report_fatal_error("Soft float support is unimplemented on AIX.");

  const PPCSubtarget &Subtarget =
      static_cast<const PPCSubtarget &>(DAG.getSubtarget());

  const bool IsPPC64 = Subtarget.isPPC64();
  const unsigned PtrByteSize = IsPPC64 ? 8 : 4;

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());

  const EVT PtrVT = getPointerTy(MF.getDataLayout());
  // Reserve space for the linkage area on the stack.
  const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
  CCInfo.AnalyzeFormalArguments(Ins, CC_AIX);

  SmallVector<SDValue, 8> MemOps;

  for (size_t I = 0, End = ArgLocs.size(); I != End; /* No increment here */) {
    CCValAssign &VA = ArgLocs[I++];
    MVT LocVT = VA.getLocVT();
    ISD::ArgFlagsTy Flags = Ins[VA.getValNo()].Flags;

    // For compatibility with the AIX XL compiler, the float args in the
    // parameter save area are initialized even if the argument is available
    // in a register.  The caller is required to initialize both the register
    // and memory; however, the callee can choose to expect it in either.
    // The MemLoc is dismissed here because the argument is retrieved from
    // the register.
    if (VA.isMemLoc() && VA.needsCustom())
      continue;

    if (Flags.isByVal() && VA.isMemLoc()) {
      const unsigned Size =
          alignTo(Flags.getByValSize() ? Flags.getByValSize() : PtrByteSize,
                  PtrByteSize);
      const int FI = MF.getFrameInfo().CreateFixedObject(
          Size, VA.getLocMemOffset(), /* IsImmutable */ false,
          /* IsAliased */ true);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      InVals.push_back(FIN);

      continue;
    }

    if (Flags.isByVal()) {
      assert(VA.isRegLoc() && "MemLocs should already be handled.");

      const MCPhysReg ArgReg = VA.getLocReg();
      const PPCFrameLowering *FL = Subtarget.getFrameLowering();

      if (Flags.getNonZeroByValAlign() > PtrByteSize)
        report_fatal_error("Over aligned byvals not supported yet.");

      const unsigned StackSize = alignTo(Flags.getByValSize(), PtrByteSize);
      const int FI = MF.getFrameInfo().CreateFixedObject(
          StackSize, mapArgRegToOffsetAIX(ArgReg, FL), /* IsImmutable */ false,
          /* IsAliased */ true);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      InVals.push_back(FIN);

      // Add live ins for all the RegLocs for the same ByVal.
      const TargetRegisterClass *RegClass =
          IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;

      auto HandleRegLoc = [&, RegClass, LocVT](const MCPhysReg PhysReg,
                                               unsigned Offset) {
        const unsigned VReg = MF.addLiveIn(PhysReg, RegClass);
        // Since the caller's side has left-justified the aggregate in the
        // register, we can simply store the entire register into the stack
        // slot.
        SDValue CopyFrom = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
        // The store to the fixed-stack object is needed because accessing a
        // field of the ByVal will use a GEP and load.  Ideally we would
        // optimize to extracting the value from the register directly, and
        // elide the stores when the argument's address is not taken, but that
        // will need to be future work.
        SDValue Store = DAG.getStore(
            CopyFrom.getValue(1), dl, CopyFrom,
            DAG.getObjectPtrOffset(dl, FIN, TypeSize::Fixed(Offset)),
            MachinePointerInfo::getFixedStack(MF, FI, Offset));

        MemOps.push_back(Store);
      };

      unsigned Offset = 0;
      HandleRegLoc(VA.getLocReg(), Offset);
      Offset += PtrByteSize;
      for (; Offset != StackSize && ArgLocs[I].isRegLoc();
           Offset += PtrByteSize) {
        assert(ArgLocs[I].getValNo() == VA.getValNo() &&
               "RegLocs should be for ByVal argument.");

        const CCValAssign RL = ArgLocs[I++];
        HandleRegLoc(RL.getLocReg(), Offset);
      }

      if (Offset != StackSize) {
        assert(ArgLocs[I].getValNo() == VA.getValNo() &&
               "Expected MemLoc for remaining bytes.");
        assert(ArgLocs[I].isMemLoc() && "Expected MemLoc for remaining bytes.");
        // Consume the MemLoc.  The InVal has already been emitted, so nothing
        // more needs to be done.
        ++I;
      }

      continue;
    }

    EVT ValVT = VA.getValVT();
    if (VA.isRegLoc() && !VA.needsCustom()) {
      MVT::SimpleValueType SVT = ValVT.getSimpleVT().SimpleTy;
      unsigned VReg =
          MF.addLiveIn(VA.getLocReg(), getRegClassForSVT(SVT, IsPPC64));
      SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
      if (ValVT.isScalarInteger() &&
          (ValVT.getSizeInBits() < LocVT.getSizeInBits())) {
        ArgValue =
            truncateScalarIntegerArg(Flags, ValVT, DAG, ArgValue, LocVT, dl);
      }
      InVals.push_back(ArgValue);
      continue;
    }
    if (VA.isMemLoc()) {
      const unsigned LocSize = LocVT.getStoreSize();
      const unsigned ValSize = ValVT.getStoreSize();
      assert((ValSize <= LocSize) &&
             "Object size is larger than size of MemLoc");
      int CurArgOffset = VA.getLocMemOffset();
      // Objects are right-justified because AIX is big-endian.
      if (LocSize > ValSize)
        CurArgOffset += LocSize - ValSize;
      // Potential tail calls could cause overwriting of argument stack slots.
      const bool IsImmutable =
          !(getTargetMachine().Options.GuaranteedTailCallOpt &&
            (CallConv == CallingConv::Fast));
      int FI = MFI.CreateFixedObject(ValSize, CurArgOffset, IsImmutable);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      SDValue ArgValue =
          DAG.getLoad(ValVT, dl, Chain, FIN, MachinePointerInfo());
      InVals.push_back(ArgValue);
      continue;
    }
  }

  // On AIX a minimum of 8 words is saved to the parameter save area.
  const unsigned MinParameterSaveArea = 8 * PtrByteSize;
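  // i.e., 32 bytes in 32-bit mode and 64 bytes in 64-bit mode.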
  // Area that is at least reserved in the caller of this function.
  unsigned CallerReservedArea =
      std::max(CCInfo.getNextStackOffset(), LinkageSize + MinParameterSaveArea);

  // Set the size that is at least reserved in caller of this function. Tail
  // call optimized function's reserved stack space needs to be aligned so
  // that taking the difference between two stack areas will result in an
  // aligned stack.
  CallerReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), CallerReservedArea);
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setMinReservedArea(CallerReservedArea);

  if (isVarArg) {
    FuncInfo->setVarArgsFrameIndex(
        MFI.CreateFixedObject(PtrByteSize, CCInfo.getNextStackOffset(), true));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    static const MCPhysReg GPR_32[] = {PPC::R3, PPC::R4, PPC::R5, PPC::R6,
                                       PPC::R7, PPC::R8, PPC::R9, PPC::R10};

    static const MCPhysReg GPR_64[] = {PPC::X3, PPC::X4, PPC::X5, PPC::X6,
                                       PPC::X7, PPC::X8, PPC::X9, PPC::X10};
    const unsigned NumGPArgRegs = array_lengthof(IsPPC64 ? GPR_64 : GPR_32);

    // The fixed integer arguments of a variadic function are stored to the
    // VarArgsFrameIndex on the stack so that they may be loaded by
    // dereferencing the result of va_next.
    for (unsigned GPRIndex =
             (CCInfo.getNextStackOffset() - LinkageSize) / PtrByteSize;
         GPRIndex < NumGPArgRegs; ++GPRIndex) {

      const unsigned VReg =
          IsPPC64 ? MF.addLiveIn(GPR_64[GPRIndex], &PPC::G8RCRegClass)
                  : MF.addLiveIn(GPR_32[GPRIndex], &PPC::GPRCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address for the next argument to store.
      SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}

SDValue PPCTargetLowering::LowerCall_AIX(
    SDValue Chain, SDValue Callee, CallFlags CFlags,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    const CallBase *CB) const {
  // See PPCTargetLowering::LowerFormalArguments_AIX() for a description of the
  // AIX ABI stack frame layout.

  assert((CFlags.CallConv == CallingConv::C ||
          CFlags.CallConv == CallingConv::Cold ||
          CFlags.CallConv == CallingConv::Fast) &&
         "Unexpected calling convention!");

  if (CFlags.IsPatchPoint)
    report_fatal_error("This call type is unimplemented on AIX.");

  const PPCSubtarget &Subtarget =
      static_cast<const PPCSubtarget &>(DAG.getSubtarget());
  if (Subtarget.hasAltivec())
    report_fatal_error("Altivec support is unimplemented on AIX.");

  MachineFunction &MF = DAG.getMachineFunction();
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CFlags.CallConv, CFlags.IsVarArg, MF, ArgLocs,
                 *DAG.getContext());

  // Reserve space for the linkage save area (LSA) on the stack.
  // In both PPC32 and PPC64 there are 6 reserved slots in the LSA:
  //   [SP][CR][LR][2 x reserved][TOC].
  // The LSA is 24 bytes (6 x 4) in PPC32 and 48 bytes (6 x 8) in PPC64.
  const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  const bool IsPPC64 = Subtarget.isPPC64();
  const EVT PtrVT = getPointerTy(DAG.getDataLayout());
  const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
  CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
  CCInfo.AnalyzeCallOperands(Outs, CC_AIX);

  // The prolog code of the callee may store up to 8 GPR argument registers to
  // the stack, allowing va_start to index over them in memory if the callee
  // is variadic.
  // Because we cannot tell if this is needed on the caller side, we have to
  // conservatively assume that it is needed.  As such, make sure we have at
  // least enough stack space for the caller to store the 8 GPRs.
  const unsigned MinParameterSaveAreaSize = 8 * PtrByteSize;
  const unsigned NumBytes = std::max(LinkageSize + MinParameterSaveAreaSize,
                                     CCInfo.getNextStackOffset());
7368 
7369   // Adjust the stack pointer for the new arguments...
7370   // These operations are automatically eliminated by the prolog/epilog pass.
7371   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
7372   SDValue CallSeqStart = Chain;
7373 
7374   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
7375   SmallVector<SDValue, 8> MemOpChains;
7376 
7377   // Set up a copy of the stack pointer for loading and storing any
7378   // arguments that may not fit in the registers available for argument
7379   // passing.
7380   const SDValue StackPtr = IsPPC64 ? DAG.getRegister(PPC::X1, MVT::i64)
7381                                    : DAG.getRegister(PPC::R1, MVT::i32);
7382 
7383   for (unsigned I = 0, E = ArgLocs.size(); I != E;) {
7384     const unsigned ValNo = ArgLocs[I].getValNo();
7385     SDValue Arg = OutVals[ValNo];
7386     ISD::ArgFlagsTy Flags = Outs[ValNo].Flags;
7387 
7388     if (Flags.isByVal()) {
7389       const unsigned ByValSize = Flags.getByValSize();
7390 
7391       // Nothing to do for zero-sized ByVals on the caller side.
7392       if (!ByValSize) {
7393         ++I;
7394         continue;
7395       }
7396 
7397       auto GetLoad = [&](EVT VT, unsigned LoadOffset) {
7398         return DAG.getExtLoad(
7399             ISD::ZEXTLOAD, dl, PtrVT, Chain,
7400             (LoadOffset != 0)
7401                 ? DAG.getObjectPtrOffset(dl, Arg, TypeSize::Fixed(LoadOffset))
7402                 : Arg,
7403             MachinePointerInfo(), VT);
7404       };
7405 
7406       unsigned LoadOffset = 0;
7407 
      // Initialize registers that are fully occupied by the by-val argument.
7409       while (LoadOffset + PtrByteSize <= ByValSize && ArgLocs[I].isRegLoc()) {
7410         SDValue Load = GetLoad(PtrVT, LoadOffset);
7411         MemOpChains.push_back(Load.getValue(1));
7412         LoadOffset += PtrByteSize;
7413         const CCValAssign &ByValVA = ArgLocs[I++];
7414         assert(ByValVA.getValNo() == ValNo &&
7415                "Unexpected location for pass-by-value argument.");
7416         RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), Load));
7417       }
7418 
7419       if (LoadOffset == ByValSize)
7420         continue;
7421 
7422       // There must be one more loc to handle the remainder.
7423       assert(ArgLocs[I].getValNo() == ValNo &&
7424              "Expected additional location for by-value argument.");
7425 
7426       if (ArgLocs[I].isMemLoc()) {
7427         assert(LoadOffset < ByValSize && "Unexpected memloc for by-val arg.");
7428         const CCValAssign &ByValVA = ArgLocs[I++];
7429         ISD::ArgFlagsTy MemcpyFlags = Flags;
        // Only memcpy the bytes that aren't passed in registers.
7431         MemcpyFlags.setByValSize(ByValSize - LoadOffset);
7432         Chain = CallSeqStart = createMemcpyOutsideCallSeq(
7433             (LoadOffset != 0)
7434                 ? DAG.getObjectPtrOffset(dl, Arg, TypeSize::Fixed(LoadOffset))
7435                 : Arg,
7436             DAG.getObjectPtrOffset(dl, StackPtr,
7437                                    TypeSize::Fixed(ByValVA.getLocMemOffset())),
7438             CallSeqStart, MemcpyFlags, DAG, dl);
7439         continue;
7440       }
7441 
7442       // Initialize the final register residue.
7443       // Any residue that occupies the final by-val arg register must be
7444       // left-justified on AIX. Loads must be a power-of-2 size and cannot be
7445       // larger than the ByValSize. For example: a 7 byte by-val arg requires 4,
7446       // 2 and 1 byte loads.
7447       const unsigned ResidueBytes = ByValSize % PtrByteSize;
7448       assert(ResidueBytes != 0 && LoadOffset + PtrByteSize > ByValSize &&
7449              "Unexpected register residue for by-value argument.");
7450       SDValue ResidueVal;
7451       for (unsigned Bytes = 0; Bytes != ResidueBytes;) {
7452         const unsigned N = PowerOf2Floor(ResidueBytes - Bytes);
7453         const MVT VT =
7454             N == 1 ? MVT::i8
7455                    : ((N == 2) ? MVT::i16 : (N == 4 ? MVT::i32 : MVT::i64));
7456         SDValue Load = GetLoad(VT, LoadOffset);
7457         MemOpChains.push_back(Load.getValue(1));
7458         LoadOffset += N;
7459         Bytes += N;
7460 
        // By-val arguments are passed left-justified in registers.
7462         // Every load here needs to be shifted, otherwise a full register load
7463         // should have been used.
7464         assert(PtrVT.getSimpleVT().getSizeInBits() > (Bytes * 8) &&
7465                "Unexpected load emitted during handling of pass-by-value "
7466                "argument.");
7467         unsigned NumSHLBits = PtrVT.getSimpleVT().getSizeInBits() - (Bytes * 8);
7468         EVT ShiftAmountTy =
7469             getShiftAmountTy(Load->getValueType(0), DAG.getDataLayout());
7470         SDValue SHLAmt = DAG.getConstant(NumSHLBits, dl, ShiftAmountTy);
7471         SDValue ShiftedLoad =
7472             DAG.getNode(ISD::SHL, dl, Load.getValueType(), Load, SHLAmt);
7473         ResidueVal = ResidueVal ? DAG.getNode(ISD::OR, dl, PtrVT, ResidueVal,
7474                                               ShiftedLoad)
7475                                 : ShiftedLoad;
7476       }
7477 
7478       const CCValAssign &ByValVA = ArgLocs[I++];
7479       RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), ResidueVal));
7480       continue;
7481     }
7482 
7483     CCValAssign &VA = ArgLocs[I++];
7484     const MVT LocVT = VA.getLocVT();
7485     const MVT ValVT = VA.getValVT();
7486 
7487     switch (VA.getLocInfo()) {
7488     default:
7489       report_fatal_error("Unexpected argument extension type.");
7490     case CCValAssign::Full:
7491       break;
7492     case CCValAssign::ZExt:
7493       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
7494       break;
7495     case CCValAssign::SExt:
7496       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
7497       break;
7498     }
7499 
7500     if (VA.isRegLoc() && !VA.needsCustom()) {
7501       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
7502       continue;
7503     }
7504 
7505     if (VA.isMemLoc()) {
7506       SDValue PtrOff =
7507           DAG.getConstant(VA.getLocMemOffset(), dl, StackPtr.getValueType());
7508       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7509       MemOpChains.push_back(
7510           DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
7511 
7512       continue;
7513     }
7514 
7515     // Custom handling is used for GPR initializations for vararg float
7516     // arguments.
7517     assert(VA.isRegLoc() && VA.needsCustom() && CFlags.IsVarArg &&
7518            ValVT.isFloatingPoint() && LocVT.isInteger() &&
7519            "Unexpected register handling for calling convention.");
7520 
7521     SDValue ArgAsInt =
7522         DAG.getBitcast(MVT::getIntegerVT(ValVT.getSizeInBits()), Arg);
7523 
7524     if (Arg.getValueType().getStoreSize() == LocVT.getStoreSize())
7525       // f32 in 32-bit GPR
7526       // f64 in 64-bit GPR
7527       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgAsInt));
7528     else if (Arg.getValueType().getSizeInBits() < LocVT.getSizeInBits())
7529       // f32 in 64-bit GPR.
7530       RegsToPass.push_back(std::make_pair(
7531           VA.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, LocVT)));
7532     else {
7533       // f64 in two 32-bit GPRs
7534       // The 2 GPRs are marked custom and expected to be adjacent in ArgLocs.
7535       assert(Arg.getValueType() == MVT::f64 && CFlags.IsVarArg && !IsPPC64 &&
7536              "Unexpected custom register for argument!");
7537       CCValAssign &GPR1 = VA;
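      // The most significant word is shifted down into the first GPR; the low
      // word follows in a second custom GPR if one was available.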
7538       SDValue MSWAsI64 = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgAsInt,
7539                                      DAG.getConstant(32, dl, MVT::i8));
7540       RegsToPass.push_back(std::make_pair(
7541           GPR1.getLocReg(), DAG.getZExtOrTrunc(MSWAsI64, dl, MVT::i32)));
7542 
7543       if (I != E) {
7544         // If only 1 GPR was available, there will only be one custom GPR and
7545         // the argument will also pass in memory.
7546         CCValAssign &PeekArg = ArgLocs[I];
        if (PeekArg.isRegLoc() && PeekArg.getValNo() == ValNo) {
7548           assert(PeekArg.needsCustom() && "A second custom GPR is expected.");
7549           CCValAssign &GPR2 = ArgLocs[I++];
7550           RegsToPass.push_back(std::make_pair(
7551               GPR2.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, MVT::i32)));
7552         }
7553       }
7554     }
7555   }
7556 
7557   if (!MemOpChains.empty())
7558     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
7559 
7560   // For indirect calls, we need to save the TOC base to the stack for
7561   // restoration after the call.
7562   if (CFlags.IsIndirect) {
7563     assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
7564     const MCRegister TOCBaseReg = Subtarget.getTOCPointerRegister();
7565     const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
7566     const MVT PtrVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
7567     const unsigned TOCSaveOffset =
7568         Subtarget.getFrameLowering()->getTOCSaveOffset();
7569 
7570     setUsesTOCBasePtr(DAG);
7571     SDValue Val = DAG.getCopyFromReg(Chain, dl, TOCBaseReg, PtrVT);
7572     SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
7573     SDValue StackPtr = DAG.getRegister(StackPtrReg, PtrVT);
7574     SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7575     Chain = DAG.getStore(
7576         Val.getValue(1), dl, Val, AddPtr,
7577         MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
7578   }
7579 
7580   // Build a sequence of copy-to-reg nodes chained together with token chain
7581   // and flag operands which copy the outgoing args into the appropriate regs.
7582   SDValue InFlag;
7583   for (auto Reg : RegsToPass) {
7584     Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, InFlag);
7585     InFlag = Chain.getValue(1);
7586   }
7587 
7588   const int SPDiff = 0;
7589   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
7590                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
7591 }
7592 
7593 bool
7594 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
7595                                   MachineFunction &MF, bool isVarArg,
7596                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
7597                                   LLVMContext &Context) const {
7598   SmallVector<CCValAssign, 16> RVLocs;
7599   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
7600   return CCInfo.CheckReturn(
7601       Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7602                 ? RetCC_PPC_Cold
7603                 : RetCC_PPC);
7604 }
7605 
7606 SDValue
7607 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
7608                                bool isVarArg,
7609                                const SmallVectorImpl<ISD::OutputArg> &Outs,
7610                                const SmallVectorImpl<SDValue> &OutVals,
7611                                const SDLoc &dl, SelectionDAG &DAG) const {
7612   SmallVector<CCValAssign, 16> RVLocs;
7613   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
7614                  *DAG.getContext());
7615   CCInfo.AnalyzeReturn(Outs,
7616                        (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7617                            ? RetCC_PPC_Cold
7618                            : RetCC_PPC);
7619 
7620   SDValue Flag;
7621   SmallVector<SDValue, 4> RetOps(1, Chain);
7622 
7623   // Copy the result values into the output registers.
7624   for (unsigned i = 0, RealResIdx = 0; i != RVLocs.size(); ++i, ++RealResIdx) {
7625     CCValAssign &VA = RVLocs[i];
7626     assert(VA.isRegLoc() && "Can only return in registers!");
7627 
7628     SDValue Arg = OutVals[RealResIdx];
7629 
7630     switch (VA.getLocInfo()) {
7631     default: llvm_unreachable("Unknown loc info!");
7632     case CCValAssign::Full: break;
7633     case CCValAssign::AExt:
7634       Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
7635       break;
7636     case CCValAssign::ZExt:
7637       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
7638       break;
7639     case CCValAssign::SExt:
7640       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
7641       break;
7642     }
7643     if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
7644       bool isLittleEndian = Subtarget.isLittleEndian();
7645       // Legalize ret f64 -> ret 2 x i32.
7646       SDValue SVal =
7647           DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7648                       DAG.getIntPtrConstant(isLittleEndian ? 0 : 1, dl));
7649       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7650       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7651       SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7652                          DAG.getIntPtrConstant(isLittleEndian ? 1 : 0, dl));
7653       Flag = Chain.getValue(1);
7654       VA = RVLocs[++i]; // skip ahead to next loc
7655       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7656     } else
7657       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
7658     Flag = Chain.getValue(1);
7659     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7660   }
7661 
7662   RetOps[0] = Chain;  // Update chain.
7663 
7664   // Add the flag if we have it.
7665   if (Flag.getNode())
7666     RetOps.push_back(Flag);
7667 
7668   return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
7669 }
7670 
7671 SDValue
7672 PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
7673                                                 SelectionDAG &DAG) const {
7674   SDLoc dl(Op);
7675 
7676   // Get the correct type for integers.
7677   EVT IntVT = Op.getValueType();
7678 
7679   // Get the inputs.
7680   SDValue Chain = Op.getOperand(0);
7681   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7682   // Build a DYNAREAOFFSET node.
7683   SDValue Ops[2] = {Chain, FPSIdx};
7684   SDVTList VTs = DAG.getVTList(IntVT);
7685   return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
7686 }
7687 
7688 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
7689                                              SelectionDAG &DAG) const {
7690   // When we pop the dynamic allocation we need to restore the SP link.
7691   SDLoc dl(Op);
7692 
7693   // Get the correct type for pointers.
7694   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7695 
7696   // Construct the stack pointer operand.
7697   bool isPPC64 = Subtarget.isPPC64();
7698   unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
7699   SDValue StackPtr = DAG.getRegister(SP, PtrVT);
7700 
7701   // Get the operands for the STACKRESTORE.
7702   SDValue Chain = Op.getOperand(0);
7703   SDValue SaveSP = Op.getOperand(1);
7704 
7705   // Load the old link SP.
7706   SDValue LoadLinkSP =
7707       DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());
7708 
7709   // Restore the stack pointer.
7710   Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
7711 
7712   // Store the old link SP.
7713   return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
7714 }
7715 
7716 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
7717   MachineFunction &MF = DAG.getMachineFunction();
7718   bool isPPC64 = Subtarget.isPPC64();
7719   EVT PtrVT = getPointerTy(MF.getDataLayout());
7720 
  // Get the current return address save index.
7723   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7724   int RASI = FI->getReturnAddrSaveIndex();
7725 
  // If the return address save index hasn't been defined yet, create it.
  if (!RASI) {
    // Find out the fixed offset of the return address save area.
    int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
    // Allocate the frame index for the return address save area.
    RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
    // Save the result.
7733     FI->setReturnAddrSaveIndex(RASI);
7734   }
7735   return DAG.getFrameIndex(RASI, PtrVT);
7736 }
7737 
7738 SDValue
7739 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
7740   MachineFunction &MF = DAG.getMachineFunction();
7741   bool isPPC64 = Subtarget.isPPC64();
7742   EVT PtrVT = getPointerTy(MF.getDataLayout());
7743 
7744   // Get current frame pointer save index.  The users of this index will be
7745   // primarily DYNALLOC instructions.
7746   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7747   int FPSI = FI->getFramePointerSaveIndex();
7748 
  // If the frame pointer save index hasn't been defined yet, create it.
  if (!FPSI) {
    // Find out the fixed offset of the frame pointer save area.
    int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
    // Allocate the frame index for the frame pointer save area.
7754     FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
7755     // Save the result.
7756     FI->setFramePointerSaveIndex(FPSI);
7757   }
7758   return DAG.getFrameIndex(FPSI, PtrVT);
7759 }
7760 
7761 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
7762                                                    SelectionDAG &DAG) const {
7763   MachineFunction &MF = DAG.getMachineFunction();
7764   // Get the inputs.
7765   SDValue Chain = Op.getOperand(0);
7766   SDValue Size  = Op.getOperand(1);
7767   SDLoc dl(Op);
7768 
7769   // Get the correct type for pointers.
7770   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7771   // Negate the size.
7772   SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
7773                                 DAG.getConstant(0, dl, PtrVT), Size);
7774   // Construct a node for the frame pointer save index.
7775   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7776   SDValue Ops[3] = { Chain, NegSize, FPSIdx };
7777   SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
7778   if (hasInlineStackProbe(MF))
7779     return DAG.getNode(PPCISD::PROBED_ALLOCA, dl, VTs, Ops);
7780   return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
7781 }
7782 
SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
                                             SelectionDAG &DAG) const {
7785   MachineFunction &MF = DAG.getMachineFunction();
7786 
7787   bool isPPC64 = Subtarget.isPPC64();
7788   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7789 
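  // The DWARF CFA is the stack pointer value on entry to the function, i.e.
  // offset 0 from the incoming SP, so model it as a fixed object there.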
7790   int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false);
7791   return DAG.getFrameIndex(FI, PtrVT);
7792 }
7793 
7794 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
7795                                                SelectionDAG &DAG) const {
7796   SDLoc DL(Op);
7797   return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
7798                      DAG.getVTList(MVT::i32, MVT::Other),
7799                      Op.getOperand(0), Op.getOperand(1));
7800 }
7801 
7802 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
7803                                                 SelectionDAG &DAG) const {
7804   SDLoc DL(Op);
7805   return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
7806                      Op.getOperand(0), Op.getOperand(1));
7807 }
7808 
7809 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
7810 
7811   assert(Op.getValueType() == MVT::i1 &&
7812          "Custom lowering only for i1 loads");
7813 
7814   // First, load 8 bits into 32 bits, then truncate to 1 bit.
7815 
7816   SDLoc dl(Op);
7817   LoadSDNode *LD = cast<LoadSDNode>(Op);
7818 
7819   SDValue Chain = LD->getChain();
7820   SDValue BasePtr = LD->getBasePtr();
7821   MachineMemOperand *MMO = LD->getMemOperand();
7822 
7823   SDValue NewLD =
7824       DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain,
7825                      BasePtr, MVT::i8, MMO);
7826   SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);
7827 
7828   SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
7829   return DAG.getMergeValues(Ops, dl);
7830 }
7831 
7832 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
7833   assert(Op.getOperand(1).getValueType() == MVT::i1 &&
7834          "Custom lowering only for i1 stores");
7835 
7836   // First, zero extend to 32 bits, then use a truncating store to 8 bits.
7837 
7838   SDLoc dl(Op);
7839   StoreSDNode *ST = cast<StoreSDNode>(Op);
7840 
7841   SDValue Chain = ST->getChain();
7842   SDValue BasePtr = ST->getBasePtr();
7843   SDValue Value = ST->getValue();
7844   MachineMemOperand *MMO = ST->getMemOperand();
7845 
7846   Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()),
7847                       Value);
7848   return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
7849 }
7850 
7851 // FIXME: Remove this once the ANDI glue bug is fixed:
7852 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
7853   assert(Op.getValueType() == MVT::i1 &&
7854          "Custom lowering only for i1 results");
7855 
7856   SDLoc DL(Op);
7857   return DAG.getNode(PPCISD::ANDI_rec_1_GT_BIT, DL, MVT::i1, Op.getOperand(0));
7858 }
7859 
7860 SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op,
7861                                                SelectionDAG &DAG) const {
7862 
7863   // Implements a vector truncate that fits in a vector register as a shuffle.
7864   // We want to legalize vector truncates down to where the source fits in
7865   // a vector register (and target is therefore smaller than vector register
7866   // size).  At that point legalization will try to custom lower the sub-legal
7867   // result and get here - where we can contain the truncate as a single target
7868   // operation.
7869 
7870   // For example a trunc <2 x i16> to <2 x i8> could be visualized as follows:
7871   //   <MSB1|LSB1, MSB2|LSB2> to <LSB1, LSB2>
7872   //
7873   // We will implement it for big-endian ordering as this (where x denotes
7874   // undefined):
7875   //   < MSB1|LSB1, MSB2|LSB2, uu, uu, uu, uu, uu, uu> to
7876   //   < LSB1, LSB2, u, u, u, u, u, u, u, u, u, u, u, u, u, u>
7877   //
7878   // The same operation in little-endian ordering will be:
7879   //   <uu, uu, uu, uu, uu, uu, LSB2|MSB2, LSB1|MSB1> to
7880   //   <u, u, u, u, u, u, u, u, u, u, u, u, u, u, LSB2, LSB1>
7881 
7882   EVT TrgVT = Op.getValueType();
7883   assert(TrgVT.isVector() && "Vector type expected.");
7884   unsigned TrgNumElts = TrgVT.getVectorNumElements();
7885   EVT EltVT = TrgVT.getVectorElementType();
7886   if (!isOperationCustom(Op.getOpcode(), TrgVT) ||
7887       TrgVT.getSizeInBits() > 128 || !isPowerOf2_32(TrgNumElts) ||
7888       !isPowerOf2_32(EltVT.getSizeInBits()))
7889     return SDValue();
7890 
7891   SDValue N1 = Op.getOperand(0);
7892   EVT SrcVT = N1.getValueType();
7893   unsigned SrcSize = SrcVT.getSizeInBits();
7894   if (SrcSize > 256 ||
7895       !isPowerOf2_32(SrcVT.getVectorNumElements()) ||
7896       !isPowerOf2_32(SrcVT.getVectorElementType().getSizeInBits()))
7897     return SDValue();
7898   if (SrcSize == 256 && SrcVT.getVectorNumElements() < 2)
7899     return SDValue();
7900 
7901   unsigned WideNumElts = 128 / EltVT.getSizeInBits();
7902   EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
7903 
7904   SDLoc DL(Op);
7905   SDValue Op1, Op2;
7906   if (SrcSize == 256) {
7907     EVT VecIdxTy = getVectorIdxTy(DAG.getDataLayout());
7908     EVT SplitVT =
7909         N1.getValueType().getHalfNumVectorElementsVT(*DAG.getContext());
7910     unsigned SplitNumElts = SplitVT.getVectorNumElements();
7911     Op1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, N1,
7912                       DAG.getConstant(0, DL, VecIdxTy));
7913     Op2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, N1,
7914                       DAG.getConstant(SplitNumElts, DL, VecIdxTy));
7915   }
7916   else {
7917     Op1 = SrcSize == 128 ? N1 : widenVec(DAG, N1, DL);
7918     Op2 = DAG.getUNDEF(WideVT);
7919   }
7920 
7921   // First list the elements we want to keep.
7922   unsigned SizeMult = SrcSize / TrgVT.getSizeInBits();
7923   SmallVector<int, 16> ShuffV;
7924   if (Subtarget.isLittleEndian())
7925     for (unsigned i = 0; i < TrgNumElts; ++i)
7926       ShuffV.push_back(i * SizeMult);
7927   else
7928     for (unsigned i = 1; i <= TrgNumElts; ++i)
7929       ShuffV.push_back(i * SizeMult - 1);
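  // For the <2 x i16> -> <2 x i8> example above, SizeMult is 2 and the kept
  // byte indices are {0, 2} on little-endian and {1, 3} on big-endian, i.e.
  // the least significant byte of each halfword.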
7930 
7931   // Populate the remaining elements with undefs.
7932   for (unsigned i = TrgNumElts; i < WideNumElts; ++i)
    ShuffV.push_back(WideNumElts + 1);
7935 
7936   Op1 = DAG.getNode(ISD::BITCAST, DL, WideVT, Op1);
7937   Op2 = DAG.getNode(ISD::BITCAST, DL, WideVT, Op2);
7938   return DAG.getVectorShuffle(WideVT, DL, Op1, Op2, ShuffV);
7939 }
7940 
/// LowerSELECT_CC - Lower floating-point select_cc's into an fsel instruction
/// when possible.
7943 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
7944   // Not FP, or using SPE? Not a fsel.
7945   if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
7946       !Op.getOperand(2).getValueType().isFloatingPoint() || Subtarget.hasSPE())
7947     return Op;
7948 
7949   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
7950 
7951   EVT ResVT = Op.getValueType();
7952   EVT CmpVT = Op.getOperand(0).getValueType();
7953   SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
7954   SDValue TV  = Op.getOperand(2), FV  = Op.getOperand(3);
7955   SDLoc dl(Op);
7956   SDNodeFlags Flags = Op.getNode()->getFlags();
7957 
7958   // We have xsmaxcdp/xsmincdp which are OK to emit even in the
7959   // presence of infinities.
7960   if (Subtarget.hasP9Vector() && LHS == TV && RHS == FV) {
7961     switch (CC) {
7962     default:
7963       break;
7964     case ISD::SETOGT:
7965     case ISD::SETGT:
7966       return DAG.getNode(PPCISD::XSMAXCDP, dl, Op.getValueType(), LHS, RHS);
7967     case ISD::SETOLT:
7968     case ISD::SETLT:
7969       return DAG.getNode(PPCISD::XSMINCDP, dl, Op.getValueType(), LHS, RHS);
7970     }
7971   }
7972 
7973   // We might be able to do better than this under some circumstances, but in
7974   // general, fsel-based lowering of select is a finite-math-only optimization.
7975   // For more information, see section F.3 of the 2.06 ISA specification.
7977   if ((!DAG.getTarget().Options.NoInfsFPMath && !Flags.hasNoInfs()) ||
7978       (!DAG.getTarget().Options.NoNaNsFPMath && !Flags.hasNoNaNs()))
7979     return Op;
7980 
7981   // If the RHS of the comparison is a 0.0, we don't need to do the
7982   // subtraction at all.
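  // Recall that FSEL(Cmp, TV, FV) yields TV when Cmp is greater than or
  // equal to zero (-0.0 included) and FV otherwise, NaNs selecting FV; this
  // is why "fsel is natively setge" below, and the remaining predicates are
  // derived by negating Cmp and/or swapping TV and FV.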
7983   SDValue Sel1;
7984   if (isFloatingPointZero(RHS))
7985     switch (CC) {
7986     default: break;       // SETUO etc aren't handled by fsel.
7987     case ISD::SETNE:
7988       std::swap(TV, FV);
7989       LLVM_FALLTHROUGH;
7990     case ISD::SETEQ:
7991       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
7992         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7993       Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
7994       if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
7995         Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
7996       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7997                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
7998     case ISD::SETULT:
7999     case ISD::SETLT:
8000       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
8001       LLVM_FALLTHROUGH;
8002     case ISD::SETOGE:
8003     case ISD::SETGE:
8004       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
8005         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
8006       return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
8007     case ISD::SETUGT:
8008     case ISD::SETGT:
      std::swap(TV, FV);  // fsel is natively setge, swap operands for setgt
8010       LLVM_FALLTHROUGH;
8011     case ISD::SETOLE:
8012     case ISD::SETLE:
8013       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
8014         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
8015       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
8016                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
8017     }
8018 
8019   SDValue Cmp;
8020   switch (CC) {
8021   default: break;       // SETUO etc aren't handled by fsel.
8022   case ISD::SETNE:
8023     std::swap(TV, FV);
8024     LLVM_FALLTHROUGH;
8025   case ISD::SETEQ:
8026     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
8027     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
8028       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8029     Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
8030     if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
8031       Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
8032     return DAG.getNode(PPCISD::FSEL, dl, ResVT,
8033                        DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
8034   case ISD::SETULT:
8035   case ISD::SETLT:
8036     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
8037     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
8038       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8039     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
8040   case ISD::SETOGE:
8041   case ISD::SETGE:
8042     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
8043     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
8044       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8045     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
8046   case ISD::SETUGT:
8047   case ISD::SETGT:
8048     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
8049     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
8050       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8051     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
8052   case ISD::SETOLE:
8053   case ISD::SETLE:
8054     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
8055     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
8056       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8057     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
8058   }
8059   return Op;
8060 }
8061 
8062 static unsigned getPPCStrictOpcode(unsigned Opc) {
8063   switch (Opc) {
8064   default:
8065     llvm_unreachable("No strict version of this opcode!");
8066   case PPCISD::FCTIDZ:
8067     return PPCISD::STRICT_FCTIDZ;
8068   case PPCISD::FCTIWZ:
8069     return PPCISD::STRICT_FCTIWZ;
8070   case PPCISD::FCTIDUZ:
8071     return PPCISD::STRICT_FCTIDUZ;
8072   case PPCISD::FCTIWUZ:
8073     return PPCISD::STRICT_FCTIWUZ;
8074   case PPCISD::FCFID:
8075     return PPCISD::STRICT_FCFID;
8076   case PPCISD::FCFIDU:
8077     return PPCISD::STRICT_FCFIDU;
8078   case PPCISD::FCFIDS:
8079     return PPCISD::STRICT_FCFIDS;
8080   case PPCISD::FCFIDUS:
8081     return PPCISD::STRICT_FCFIDUS;
8082   }
8083 }
8084 
8085 static SDValue convertFPToInt(SDValue Op, SelectionDAG &DAG,
8086                               const PPCSubtarget &Subtarget) {
8087   SDLoc dl(Op);
8088   bool IsStrict = Op->isStrictFPOpcode();
8089   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
8090                   Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
8091   // For strict nodes, source is the second operand.
8092   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
8093   SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
8094   assert(Src.getValueType().isFloatingPoint());
8095   if (Src.getValueType() == MVT::f32) {
8096     if (IsStrict) {
8097       Src = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {MVT::f64, MVT::Other},
8098                         {Chain, Src});
8099       Chain = Src.getValue(1);
8100     } else
8101       Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
8102   }
8103   SDValue Conv;
8104   unsigned Opc = ISD::DELETED_NODE;
8105   switch (Op.getSimpleValueType().SimpleTy) {
8106   default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
8107   case MVT::i32:
8108     Opc = IsSigned ? PPCISD::FCTIWZ
8109                    : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ);
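    // Without FPCVT there is no unsigned word conversion, so convert to a
    // signed doubleword instead; for inputs that fit in an unsigned word the
    // low 32 bits of that result are the desired value.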
8110     break;
8111   case MVT::i64:
8112     assert((IsSigned || Subtarget.hasFPCVT()) &&
8113            "i64 FP_TO_UINT is supported only with FPCVT");
8114     Opc = IsSigned ? PPCISD::FCTIDZ : PPCISD::FCTIDUZ;
8115   }
8116   if (IsStrict) {
8117     Opc = getPPCStrictOpcode(Opc);
8118     Conv = DAG.getNode(Opc, dl, {MVT::f64, MVT::Other}, {Chain, Src});
8119   } else {
8120     Conv = DAG.getNode(Opc, dl, MVT::f64, Src);
8121   }
8122   return Conv;
8123 }
8124 
8125 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
8126                                                SelectionDAG &DAG,
8127                                                const SDLoc &dl) const {
8128   SDValue Tmp = convertFPToInt(Op, DAG, Subtarget);
8129   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
8130                   Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
8131   bool IsStrict = Op->isStrictFPOpcode();
8132 
8133   // Convert the FP value to an int value through memory.
8134   bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() &&
8135                   (IsSigned || Subtarget.hasFPCVT());
8136   SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64);
8137   int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
8138   MachinePointerInfo MPI =
8139       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
8140 
8141   // Emit a store to the stack slot.
8142   SDValue Chain = IsStrict ? Tmp.getValue(1) : DAG.getEntryNode();
8143   Align Alignment(DAG.getEVTAlign(Tmp.getValueType()));
8144   if (i32Stack) {
8145     MachineFunction &MF = DAG.getMachineFunction();
8146     Alignment = Align(4);
8147     MachineMemOperand *MMO =
8148         MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Alignment);
8149     SDValue Ops[] = { Chain, Tmp, FIPtr };
8150     Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
8151               DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
8152   } else
8153     Chain = DAG.getStore(Chain, dl, Tmp, FIPtr, MPI, Alignment);
8154 
8155   // Result is a load from the stack slot.  If loading 4 bytes, make sure to
8156   // add in a bias on big endian.
8157   if (Op.getValueType() == MVT::i32 && !i32Stack) {
8158     FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
8159                         DAG.getConstant(4, dl, FIPtr.getValueType()));
8160     MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4);
8161   }
8162 
8163   RLI.Chain = Chain;
8164   RLI.Ptr = FIPtr;
8165   RLI.MPI = MPI;
8166   RLI.Alignment = Alignment;
8167 }
8168 
8169 /// Custom lowers floating point to integer conversions to use
8170 /// the direct move instructions available in ISA 2.07 to avoid the
8171 /// need for load/store combinations.
8172 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op,
8173                                                     SelectionDAG &DAG,
8174                                                     const SDLoc &dl) const {
8175   SDValue Conv = convertFPToInt(Op, DAG, Subtarget);
8176   SDValue Mov = DAG.getNode(PPCISD::MFVSR, dl, Op.getValueType(), Conv);
8177   if (Op->isStrictFPOpcode())
8178     return DAG.getMergeValues({Mov, Conv.getValue(1)}, dl);
8179   else
8180     return Mov;
8181 }
8182 
8183 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
8184                                           const SDLoc &dl) const {
8185   bool IsStrict = Op->isStrictFPOpcode();
8186   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
8187                   Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
8188   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
8189   EVT SrcVT = Src.getValueType();
8190   EVT DstVT = Op.getValueType();
8191 
8192   // FP to INT conversions are legal for f128.
8193   if (SrcVT == MVT::f128)
8194     return Op;
8195 
8196   // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
8197   // PPC (the libcall is not available).
8198   if (SrcVT == MVT::ppcf128) {
8199     if (DstVT == MVT::i32) {
8200       // TODO: Conservatively pass only nofpexcept flag here. Need to check and
8201       // set other fast-math flags to FP operations in both strict and
8202       // non-strict cases. (FP_TO_SINT, FSUB)
8203       SDNodeFlags Flags;
8204       Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
8205 
8206       if (IsSigned) {
8207         SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Src,
8208                                  DAG.getIntPtrConstant(0, dl));
8209         SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Src,
8210                                  DAG.getIntPtrConstant(1, dl));
8211 
8212         // Add the two halves of the long double in round-to-zero mode, and use
8213         // a smaller FP_TO_SINT.
8214         if (IsStrict) {
8215           SDValue Res = DAG.getNode(PPCISD::STRICT_FADDRTZ, dl,
8216                                     DAG.getVTList(MVT::f64, MVT::Other),
8217                                     {Op.getOperand(0), Lo, Hi}, Flags);
8218           return DAG.getNode(ISD::STRICT_FP_TO_SINT, dl,
8219                              DAG.getVTList(MVT::i32, MVT::Other),
8220                              {Res.getValue(1), Res}, Flags);
8221         } else {
8222           SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi);
8223           return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res);
8224         }
8225       } else {
8226         const uint64_t TwoE31[] = {0x41e0000000000000LL, 0};
8227         APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31));
8228         SDValue Cst = DAG.getConstantFP(APF, dl, SrcVT);
8229         SDValue SignMask = DAG.getConstant(0x80000000, dl, DstVT);
8230         if (IsStrict) {
8231           // Sel = Src < 0x80000000
8232           // FltOfs = select Sel, 0.0, 0x80000000
8233           // IntOfs = select Sel, 0, 0x80000000
8234           // Result = fp_to_sint(Src - FltOfs) ^ IntOfs
8235           SDValue Chain = Op.getOperand(0);
8236           EVT SetCCVT =
8237               getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), SrcVT);
8238           EVT DstSetCCVT =
8239               getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), DstVT);
8240           SDValue Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT,
8241                                      SDNodeFlags(), Chain, true);
8242           Chain = Sel.getValue(1);
8243 
8244           SDValue FltOfs = DAG.getSelect(
8245               dl, SrcVT, Sel, DAG.getConstantFP(0.0, dl, SrcVT), Cst);
8246           Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT);
8247 
8248           SDValue Val = DAG.getNode(ISD::STRICT_FSUB, dl,
8249                                     DAG.getVTList(SrcVT, MVT::Other),
8250                                     {Chain, Src, FltOfs}, Flags);
8251           Chain = Val.getValue(1);
8252           SDValue SInt = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl,
8253                                      DAG.getVTList(DstVT, MVT::Other),
8254                                      {Chain, Val}, Flags);
8255           Chain = SInt.getValue(1);
8256           SDValue IntOfs = DAG.getSelect(
8257               dl, DstVT, Sel, DAG.getConstant(0, dl, DstVT), SignMask);
8258           SDValue Result = DAG.getNode(ISD::XOR, dl, DstVT, SInt, IntOfs);
8259           return DAG.getMergeValues({Result, Chain}, dl);
8260         } else {
8261           // X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X
8262           // FIXME: generated code sucks.
8263           SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128, Src, Cst);
8264           True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True);
8265           True = DAG.getNode(ISD::ADD, dl, MVT::i32, True, SignMask);
8266           SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
8267           return DAG.getSelectCC(dl, Src, Cst, True, False, ISD::SETGE);
8268         }
8269       }
8270     }
8271 
8272     return SDValue();
8273   }
8274 
8275   if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
8276     return LowerFP_TO_INTDirectMove(Op, DAG, dl);
8277 
8278   ReuseLoadInfo RLI;
8279   LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
8280 
8281   return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI,
8282                      RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
8283 }
8284 
8285 // We're trying to insert a regular store, S, and then a load, L. If the
8286 // incoming value, O, is a load, we might just be able to have our load use the
8287 // address used by O. However, we don't know if anything else will store to
8288 // that address before we can load from it. To prevent this situation, we need
8289 // to insert our load, L, into the chain as a peer of O. To do this, we give L
8290 // the same chain operand as O, we create a token factor from the chain results
8291 // of O and L, and we replace all uses of O's chain result with that token
8292 // factor (see spliceIntoChain below for this last part).
8293 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
8294                                             ReuseLoadInfo &RLI,
8295                                             SelectionDAG &DAG,
8296                                             ISD::LoadExtType ET) const {
8297   // Conservatively skip reusing for constrained FP nodes.
8298   if (Op->isStrictFPOpcode())
8299     return false;
8300 
8301   SDLoc dl(Op);
8302   bool ValidFPToUint = Op.getOpcode() == ISD::FP_TO_UINT &&
8303                        (Subtarget.hasFPCVT() || Op.getValueType() == MVT::i32);
8304   if (ET == ISD::NON_EXTLOAD &&
8305       (ValidFPToUint || Op.getOpcode() == ISD::FP_TO_SINT) &&
8306       isOperationLegalOrCustom(Op.getOpcode(),
8307                                Op.getOperand(0).getValueType())) {
8308 
8309     LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
8310     return true;
8311   }
8312 
8313   LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
8314   if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
8315       LD->isNonTemporal())
8316     return false;
8317   if (LD->getMemoryVT() != MemVT)
8318     return false;
8319 
8320   RLI.Ptr = LD->getBasePtr();
8321   if (LD->isIndexed() && !LD->getOffset().isUndef()) {
8322     assert(LD->getAddressingMode() == ISD::PRE_INC &&
8323            "Non-pre-inc AM on PPC?");
8324     RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
8325                           LD->getOffset());
8326   }
8327 
8328   RLI.Chain = LD->getChain();
8329   RLI.MPI = LD->getPointerInfo();
8330   RLI.IsDereferenceable = LD->isDereferenceable();
8331   RLI.IsInvariant = LD->isInvariant();
8332   RLI.Alignment = LD->getAlign();
8333   RLI.AAInfo = LD->getAAInfo();
8334   RLI.Ranges = LD->getRanges();
8335 
8336   RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
8337   return true;
8338 }
8339 
8340 // Given the head of the old chain, ResChain, insert a token factor containing
8341 // it and NewResChain, and make users of ResChain now be users of that token
8342 // factor.
8343 // TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead.
8344 void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
8345                                         SDValue NewResChain,
8346                                         SelectionDAG &DAG) const {
8347   if (!ResChain)
8348     return;
8349 
8350   SDLoc dl(NewResChain);
8351 
8352   SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
8353                            NewResChain, DAG.getUNDEF(MVT::Other));
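  // The undef placeholder above is deliberate: the token factor must not yet
  // have ResChain as an operand, or the use-replacement below would rewrite
  // the token factor to be a user of itself.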
8354   assert(TF.getNode() != NewResChain.getNode() &&
8355          "A new TF really is required here");
8356 
8357   DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
8358   DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
8359 }
8360 
/// Analyze the profitability of a direct move: prefer a floating-point load
/// over an integer load plus a direct move when the loaded value has no
/// integer uses.
8364 bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
8365   SDNode *Origin = Op.getOperand(0).getNode();
8366   if (Origin->getOpcode() != ISD::LOAD)
8367     return true;
8368 
  // If there is no LXSIBZX/LXSIHZX (e.g. on Power8), prefer a direct move
  // when the memory access is 1 or 2 bytes.
8371   MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand();
8372   if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2)
8373     return true;
8374 
8375   for (SDNode::use_iterator UI = Origin->use_begin(),
8376                             UE = Origin->use_end();
8377        UI != UE; ++UI) {
8378 
8379     // Only look at the users of the loaded value.
8380     if (UI.getUse().get().getResNo() != 0)
8381       continue;
8382 
8383     if (UI->getOpcode() != ISD::SINT_TO_FP &&
8384         UI->getOpcode() != ISD::UINT_TO_FP &&
8385         UI->getOpcode() != ISD::STRICT_SINT_TO_FP &&
8386         UI->getOpcode() != ISD::STRICT_UINT_TO_FP)
8387       return true;
8388   }
8389 
8390   return false;
8391 }
8392 
8393 static SDValue convertIntToFP(SDValue Op, SDValue Src, SelectionDAG &DAG,
8394                               const PPCSubtarget &Subtarget,
8395                               SDValue Chain = SDValue()) {
8396   bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP ||
8397                   Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
8398   SDLoc dl(Op);
8399   // If we have FCFIDS, then use it when converting to single-precision.
8400   // Otherwise, convert to double-precision and then round.
8401   bool IsSingle = Op.getValueType() == MVT::f32 && Subtarget.hasFPCVT();
8402   unsigned ConvOpc = IsSingle ? (IsSigned ? PPCISD::FCFIDS : PPCISD::FCFIDUS)
8403                               : (IsSigned ? PPCISD::FCFID : PPCISD::FCFIDU);
8404   EVT ConvTy = IsSingle ? MVT::f32 : MVT::f64;
8405   if (Op->isStrictFPOpcode()) {
8406     if (!Chain)
8407       Chain = Op.getOperand(0);
8408     return DAG.getNode(getPPCStrictOpcode(ConvOpc), dl, {ConvTy, MVT::Other},
8409                        {Chain, Src});
8410   } else
8411     return DAG.getNode(ConvOpc, dl, ConvTy, Src);
8412 }
8413 
8414 /// Custom lowers integer to floating point conversions to use
8415 /// the direct move instructions available in ISA 2.07 to avoid the
8416 /// need for load/store combinations.
8417 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
8418                                                     SelectionDAG &DAG,
8419                                                     const SDLoc &dl) const {
8420   assert((Op.getValueType() == MVT::f32 ||
8421           Op.getValueType() == MVT::f64) &&
8422          "Invalid floating point type as target of conversion");
8423   assert(Subtarget.hasFPCVT() &&
8424          "Int to FP conversions with direct moves require FPCVT");
8425   SDValue Src = Op.getOperand(Op->isStrictFPOpcode() ? 1 : 0);
8426   bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
8427   bool Signed = Op.getOpcode() == ISD::SINT_TO_FP ||
8428                 Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
8429   unsigned MovOpc = (WordInt && !Signed) ? PPCISD::MTVSRZ : PPCISD::MTVSRA;
8430   SDValue Mov = DAG.getNode(MovOpc, dl, MVT::f64, Src);
8431   return convertIntToFP(Op, Mov, DAG, Subtarget);
8432 }
8433 
8434 static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl) {
8435 
8436   EVT VecVT = Vec.getValueType();
8437   assert(VecVT.isVector() && "Expected a vector type.");
8438   assert(VecVT.getSizeInBits() < 128 && "Vector is already full width.");
8439 
8440   EVT EltVT = VecVT.getVectorElementType();
8441   unsigned WideNumElts = 128 / EltVT.getSizeInBits();
8442   EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
8443 
8444   unsigned NumConcat = WideNumElts / VecVT.getVectorNumElements();
8445   SmallVector<SDValue, 16> Ops(NumConcat);
8446   Ops[0] = Vec;
8447   SDValue UndefVec = DAG.getUNDEF(VecVT);
8448   for (unsigned i = 1; i < NumConcat; ++i)
8449     Ops[i] = UndefVec;
8450 
8451   return DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Ops);
8452 }
8453 
8454 SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
8455                                                 const SDLoc &dl) const {
8456   bool IsStrict = Op->isStrictFPOpcode();
8457   unsigned Opc = Op.getOpcode();
8458   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
8459   assert((Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP ||
8460           Opc == ISD::STRICT_UINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP) &&
8461          "Unexpected conversion type");
8462   assert((Op.getValueType() == MVT::v2f64 || Op.getValueType() == MVT::v4f32) &&
8463          "Supports conversions to v2f64/v4f32 only.");
8464 
8465   bool SignedConv = Opc == ISD::SINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP;
8466   bool FourEltRes = Op.getValueType() == MVT::v4f32;
8467 
8468   SDValue Wide = widenVec(DAG, Src, dl);
8469   EVT WideVT = Wide.getValueType();
8470   unsigned WideNumElts = WideVT.getVectorNumElements();
8471   MVT IntermediateVT = FourEltRes ? MVT::v4i32 : MVT::v2i64;
8472 
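  // Build a shuffle that widens each source element to the intermediate
  // element width: every lane initially selects from the second operand
  // (zeros for an unsigned conversion, undef for a signed one), and the
  // endian-dependent fixup below places each source element in the
  // low-order sub-position of its widened lane.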
8473   SmallVector<int, 16> ShuffV;
8474   for (unsigned i = 0; i < WideNumElts; ++i)
8475     ShuffV.push_back(i + WideNumElts);
8476 
8477   int Stride = FourEltRes ? WideNumElts / 4 : WideNumElts / 2;
8478   int SaveElts = FourEltRes ? 4 : 2;
8479   if (Subtarget.isLittleEndian())
8480     for (int i = 0; i < SaveElts; i++)
8481       ShuffV[i * Stride] = i;
8482   else
8483     for (int i = 1; i <= SaveElts; i++)
8484       ShuffV[i * Stride - 1] = i - 1;
8485 
8486   SDValue ShuffleSrc2 =
8487       SignedConv ? DAG.getUNDEF(WideVT) : DAG.getConstant(0, dl, WideVT);
8488   SDValue Arrange = DAG.getVectorShuffle(WideVT, dl, Wide, ShuffleSrc2, ShuffV);
8489 
8490   SDValue Extend;
8491   if (SignedConv) {
8492     Arrange = DAG.getBitcast(IntermediateVT, Arrange);
8493     EVT ExtVT = Src.getValueType();
8494     if (Subtarget.hasP9Altivec())
8495       ExtVT = EVT::getVectorVT(*DAG.getContext(), WideVT.getVectorElementType(),
8496                                IntermediateVT.getVectorNumElements());
8497 
8498     Extend = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, IntermediateVT, Arrange,
8499                          DAG.getValueType(ExtVT));
8500   } else
8501     Extend = DAG.getNode(ISD::BITCAST, dl, IntermediateVT, Arrange);
8502 
8503   if (IsStrict)
8504     return DAG.getNode(Opc, dl, {Op.getValueType(), MVT::Other},
8505                        {Op.getOperand(0), Extend});
8506 
8507   return DAG.getNode(Opc, dl, Op.getValueType(), Extend);
8508 }
8509 
8510 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
8511                                           SelectionDAG &DAG) const {
8512   SDLoc dl(Op);
8513   bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP ||
8514                   Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
8515   bool IsStrict = Op->isStrictFPOpcode();
8516   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
8517   SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
8518 
8519   EVT InVT = Src.getValueType();
8520   EVT OutVT = Op.getValueType();
8521   if (OutVT.isVector() && OutVT.isFloatingPoint() &&
8522       isOperationCustom(Op.getOpcode(), InVT))
8523     return LowerINT_TO_FPVector(Op, DAG, dl);
8524 
8525   // Conversions to f128 are legal.
8526   if (Op.getValueType() == MVT::f128)
8527     return Op;
8528 
8529   // Don't handle ppc_fp128 here; let it be lowered to a libcall.
8530   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
8531     return SDValue();
8532 
8533   if (Src.getValueType() == MVT::i1)
8534     return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Src,
8535                        DAG.getConstantFP(1.0, dl, Op.getValueType()),
8536                        DAG.getConstantFP(0.0, dl, Op.getValueType()));
8537 
8538   // If we have direct moves, we can do all the conversion, skip the store/load
8539   // however, without FPCVT we can't do most conversions.
8540   if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
8541       Subtarget.isPPC64() && Subtarget.hasFPCVT())
8542     return LowerINT_TO_FPDirectMove(Op, DAG, dl);
8543 
8544   assert((IsSigned || Subtarget.hasFPCVT()) &&
8545          "UINT_TO_FP is supported only with FPCVT");
8546 
8547   if (Src.getValueType() == MVT::i64) {
8548     SDValue SINT = Src;
8549     // When converting to single-precision, we actually need to convert
8550     // to double-precision first and then round to single-precision.
8551     // To avoid double-rounding effects during that operation, we have
8552     // to prepare the input operand.  Bits that might be truncated when
8553     // converting to double-precision are replaced by a bit that won't
8554     // be lost at this stage, but is below the single-precision rounding
8555     // position.
8556     //
8557     // However, if -enable-unsafe-fp-math is in effect, accept double
8558     // rounding to avoid the extra overhead.
8559     if (Op.getValueType() == MVT::f32 &&
8560         !Subtarget.hasFPCVT() &&
8561         !DAG.getTarget().Options.UnsafeFPMath) {
8562 
8563       // Twiddle input to make sure the low 11 bits are zero.  (If this
8564       // is the case, we are guaranteed the value will fit into the 53 bit
8565       // mantissa of an IEEE double-precision value without rounding.)
8566       // If any of those low 11 bits were not zero originally, make sure
8567       // bit 12 (value 2048) is set instead, so that the final rounding
8568       // to single-precision gets the correct result.
8569       SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64,
8570                                   SINT, DAG.getConstant(2047, dl, MVT::i64));
8571       Round = DAG.getNode(ISD::ADD, dl, MVT::i64,
8572                           Round, DAG.getConstant(2047, dl, MVT::i64));
8573       Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT);
8574       Round = DAG.getNode(ISD::AND, dl, MVT::i64,
8575                           Round, DAG.getConstant(-2048, dl, MVT::i64));
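      // For instance, if bit 11 is clear and the low 11 bits are 0x005, the
      // low 12 bits become 0x800: bit 11 acts as a sticky bit recording that
      // some of the discarded bits were set.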
8576 
8577       // However, we cannot use that value unconditionally: if the magnitude
8578       // of the input value is small, the bit-twiddling we did above might
8579       // end up visibly changing the output.  Fortunately, in that case, we
8580       // don't need to twiddle bits since the original input will convert
8581       // exactly to double-precision floating-point already.  Therefore,
8582       // construct a conditional to use the original value if the top 11
8583       // bits are all sign-bit copies, and use the rounded value computed
8584       // above otherwise.
8585       SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64,
8586                                  SINT, DAG.getConstant(53, dl, MVT::i32));
8587       Cond = DAG.getNode(ISD::ADD, dl, MVT::i64,
8588                          Cond, DAG.getConstant(1, dl, MVT::i64));
8589       Cond = DAG.getSetCC(
8590           dl,
8591           getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
8592           Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT);
8593 
8594       SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT);
8595     }
8596 
8597     ReuseLoadInfo RLI;
8598     SDValue Bits;
8599 
8600     MachineFunction &MF = DAG.getMachineFunction();
8601     if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
8602       Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI,
8603                          RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
8604       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8605     } else if (Subtarget.hasLFIWAX() &&
8606                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) {
8607       MachineMemOperand *MMO =
8608         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8609                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8610       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8611       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl,
8612                                      DAG.getVTList(MVT::f64, MVT::Other),
8613                                      Ops, MVT::i32, MMO);
8614       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8615     } else if (Subtarget.hasFPCVT() &&
8616                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) {
8617       MachineMemOperand *MMO =
8618         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8619                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8620       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8621       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl,
8622                                      DAG.getVTList(MVT::f64, MVT::Other),
8623                                      Ops, MVT::i32, MMO);
8624       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8625     } else if (((Subtarget.hasLFIWAX() &&
8626                  SINT.getOpcode() == ISD::SIGN_EXTEND) ||
8627                 (Subtarget.hasFPCVT() &&
8628                  SINT.getOpcode() == ISD::ZERO_EXTEND)) &&
8629                SINT.getOperand(0).getValueType() == MVT::i32) {
8630       MachineFrameInfo &MFI = MF.getFrameInfo();
8631       EVT PtrVT = getPointerTy(DAG.getDataLayout());
8632 
8633       int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
8634       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8635 
8636       SDValue Store = DAG.getStore(Chain, dl, SINT.getOperand(0), FIdx,
8637                                    MachinePointerInfo::getFixedStack(
8638                                        DAG.getMachineFunction(), FrameIdx));
8639       Chain = Store;
8640 
8641       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8642              "Expected an i32 store");
8643 
8644       RLI.Ptr = FIdx;
8645       RLI.Chain = Chain;
8646       RLI.MPI =
8647           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8648       RLI.Alignment = Align(4);
8649 
8650       MachineMemOperand *MMO =
8651         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8652                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8653       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8654       Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ?
8655                                      PPCISD::LFIWZX : PPCISD::LFIWAX,
8656                                      dl, DAG.getVTList(MVT::f64, MVT::Other),
8657                                      Ops, MVT::i32, MMO);
8658       Chain = Bits.getValue(1);
8659     } else
8660       Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);
8661 
8662     SDValue FP = convertIntToFP(Op, Bits, DAG, Subtarget, Chain);
8663     if (IsStrict)
8664       Chain = FP.getValue(1);
8665 
8666     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
8667       if (IsStrict)
8668         FP = DAG.getNode(ISD::STRICT_FP_ROUND, dl, {MVT::f32, MVT::Other},
8669                          {Chain, FP, DAG.getIntPtrConstant(0, dl)});
8670       else
8671         FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
8672                          DAG.getIntPtrConstant(0, dl));
8673     }
8674     return FP;
8675   }
8676 
8677   assert(Src.getValueType() == MVT::i32 &&
8678          "Unhandled INT_TO_FP type in custom expander!");
8679   // Since we only generate this in 64-bit mode, we can take advantage of
8680   // 64-bit registers.  In particular, sign extend the input value into the
  // 64-bit register with extsw, store the WHOLE 64-bit value into the stack
  // slot, then lfd it and fcfid it.
8683   MachineFunction &MF = DAG.getMachineFunction();
8684   MachineFrameInfo &MFI = MF.getFrameInfo();
8685   EVT PtrVT = getPointerTy(MF.getDataLayout());
8686 
8687   SDValue Ld;
8688   if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
8689     ReuseLoadInfo RLI;
8690     bool ReusingLoad;
8691     if (!(ReusingLoad = canReuseLoadAddress(Src, MVT::i32, RLI, DAG))) {
8692       int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
8693       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8694 
8695       SDValue Store = DAG.getStore(Chain, dl, Src, FIdx,
8696                                    MachinePointerInfo::getFixedStack(
8697                                        DAG.getMachineFunction(), FrameIdx));
8698       Chain = Store;
8699 
8700       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8701              "Expected an i32 store");
8702 
8703       RLI.Ptr = FIdx;
8704       RLI.Chain = Chain;
8705       RLI.MPI =
8706           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8707       RLI.Alignment = Align(4);
8708     }
8709 
8710     MachineMemOperand *MMO =
8711       MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8712                               RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8713     SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8714     Ld = DAG.getMemIntrinsicNode(IsSigned ? PPCISD::LFIWAX : PPCISD::LFIWZX, dl,
8715                                  DAG.getVTList(MVT::f64, MVT::Other), Ops,
8716                                  MVT::i32, MMO);
8717     Chain = Ld.getValue(1);
8718     if (ReusingLoad)
8719       spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
8720   } else {
8721     assert(Subtarget.isPPC64() &&
8722            "i32->FP without LFIWAX supported only on PPC64");
8723 
8724     int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
8725     SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8726 
8727     SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, Src);
8728 
8729     // STD the extended value into the stack slot.
8730     SDValue Store = DAG.getStore(
8731         Chain, dl, Ext64, FIdx,
8732         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8733     Chain = Store;
8734 
8735     // Load the value as a double.
8736     Ld = DAG.getLoad(
8737         MVT::f64, dl, Chain, FIdx,
8738         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8739     Chain = Ld.getValue(1);
8740   }
8741 
8742   // FCFID it and return it.
8743   SDValue FP = convertIntToFP(Op, Ld, DAG, Subtarget, Chain);
8744   if (IsStrict)
8745     Chain = FP.getValue(1);
8746   if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
8747     if (IsStrict)
8748       FP = DAG.getNode(ISD::STRICT_FP_ROUND, dl, {MVT::f32, MVT::Other},
8749                        {Chain, FP, DAG.getIntPtrConstant(0, dl)});
8750     else
8751       FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
8752                        DAG.getIntPtrConstant(0, dl));
8753   }
8754   return FP;
8755 }
8756 
8757 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
8758                                             SelectionDAG &DAG) const {
8759   SDLoc dl(Op);
8760   /*
   The rounding mode is in bits 30:31 of FPSCR, and has the following
8762    settings:
8763      00 Round to nearest
8764      01 Round to 0
8765      10 Round to +inf
8766      11 Round to -inf
8767 
8768   FLT_ROUNDS, on the other hand, expects the following:
8769     -1 Undefined
8770      0 Round to 0
8771      1 Round to nearest
8772      2 Round to +inf
8773      3 Round to -inf
8774 
8775   To perform the conversion, we do:
8776     ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
8777   */
8778 
8779   MachineFunction &MF = DAG.getMachineFunction();
8780   EVT VT = Op.getValueType();
8781   EVT PtrVT = getPointerTy(MF.getDataLayout());
8782 
8783   // Save FP Control Word to register
8784   SDValue Chain = Op.getOperand(0);
8785   SDValue MFFS = DAG.getNode(PPCISD::MFFS, dl, {MVT::f64, MVT::Other}, Chain);
8786   Chain = MFFS.getValue(1);
8787 
8788   // Save FP register to stack slot
8789   int SSFI = MF.getFrameInfo().CreateStackObject(8, Align(8), false);
8790   SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
8791   Chain = DAG.getStore(Chain, dl, MFFS, StackSlot, MachinePointerInfo());
8792 
8793   // Load FP Control Word from low 32 bits of stack slot.
8794   SDValue Four = DAG.getConstant(4, dl, PtrVT);
8795   SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
8796   SDValue CWD = DAG.getLoad(MVT::i32, dl, Chain, Addr, MachinePointerInfo());
8797   Chain = CWD.getValue(1);
8798 
8799   // Transform as necessary
8800   SDValue CWD1 =
8801     DAG.getNode(ISD::AND, dl, MVT::i32,
8802                 CWD, DAG.getConstant(3, dl, MVT::i32));
8803   SDValue CWD2 =
8804     DAG.getNode(ISD::SRL, dl, MVT::i32,
8805                 DAG.getNode(ISD::AND, dl, MVT::i32,
8806                             DAG.getNode(ISD::XOR, dl, MVT::i32,
8807                                         CWD, DAG.getConstant(3, dl, MVT::i32)),
8808                             DAG.getConstant(3, dl, MVT::i32)),
8809                 DAG.getConstant(1, dl, MVT::i32));
8810 
8811   SDValue RetVal =
8812     DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
8813 
8814   RetVal =
8815       DAG.getNode((VT.getSizeInBits() < 16 ? ISD::TRUNCATE : ISD::ZERO_EXTEND),
8816                   dl, VT, RetVal);
8817 
8818   return DAG.getMergeValues({RetVal, Chain}, dl);
8819 }
8820 
8821 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
8822   EVT VT = Op.getValueType();
8823   unsigned BitWidth = VT.getSizeInBits();
8824   SDLoc dl(Op);
8825   assert(Op.getNumOperands() == 3 &&
8826          VT == Op.getOperand(1).getValueType() &&
8827          "Unexpected SHL!");
8828 
8829   // Expand into a bunch of logical ops.  Note that these ops
8830   // depend on the PPC behavior for oversized shift amounts.
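  // For example, with BitWidth = 32 and Amt = 40 (an oversized amount, which
  // the PPC 32-bit shifts below treat as producing 0 once it reaches 32):
  //   OutLo = Lo << 40                                           = 0
  //   OutHi = (Hi << 40) | (Lo >> (32 - 40)) | (Lo << (40 - 32))
  //         =      0     |         0         |    (Lo << 8)
  // which is the correct low/high pair for shifting Hi:Lo left by 40.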
8831   SDValue Lo = Op.getOperand(0);
8832   SDValue Hi = Op.getOperand(1);
8833   SDValue Amt = Op.getOperand(2);
8834   EVT AmtVT = Amt.getValueType();
8835 
8836   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8837                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8838   SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
8839   SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
8840   SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3);
8841   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8842                              DAG.getConstant(-BitWidth, dl, AmtVT));
8843   SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
8844   SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
8845   SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
8846   SDValue OutOps[] = { OutLo, OutHi };
8847   return DAG.getMergeValues(OutOps, dl);
8848 }
8849 
8850 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
8851   EVT VT = Op.getValueType();
8852   SDLoc dl(Op);
8853   unsigned BitWidth = VT.getSizeInBits();
8854   assert(Op.getNumOperands() == 3 &&
8855          VT == Op.getOperand(1).getValueType() &&
8856          "Unexpected SRL!");
8857 
8858   // Expand into a bunch of logical ops.  Note that these ops
8859   // depend on the PPC behavior for oversized shift amounts.
8860   SDValue Lo = Op.getOperand(0);
8861   SDValue Hi = Op.getOperand(1);
8862   SDValue Amt = Op.getOperand(2);
8863   EVT AmtVT = Amt.getValueType();
8864 
8865   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8866                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8867   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
8868   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
8869   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
8870   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8871                              DAG.getConstant(-BitWidth, dl, AmtVT));
8872   SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
8873   SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
8874   SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
8875   SDValue OutOps[] = { OutLo, OutHi };
8876   return DAG.getMergeValues(OutOps, dl);
8877 }
8878 
8879 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
8880   SDLoc dl(Op);
8881   EVT VT = Op.getValueType();
8882   unsigned BitWidth = VT.getSizeInBits();
8883   assert(Op.getNumOperands() == 3 &&
8884          VT == Op.getOperand(1).getValueType() &&
8885          "Unexpected SRA!");
8886 
8887   // Expand into a bunch of logical ops, followed by a select_cc.
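  // The final select is needed because, unlike srw, an oversized sraw fills
  // its result with copies of the sign bit rather than zeros, so the two
  // partial results cannot simply be ORed together; the value shifted by
  // (Amt - BitWidth) is used only when Amt exceeds BitWidth.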
8888   SDValue Lo = Op.getOperand(0);
8889   SDValue Hi = Op.getOperand(1);
8890   SDValue Amt = Op.getOperand(2);
8891   EVT AmtVT = Amt.getValueType();
8892 
8893   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8894                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8895   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
8896   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
8897   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
8898   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8899                              DAG.getConstant(-BitWidth, dl, AmtVT));
8900   SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
8901   SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
8902   SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT),
8903                                   Tmp4, Tmp6, ISD::SETLE);
8904   SDValue OutOps[] = { OutLo, OutHi };
8905   return DAG.getMergeValues(OutOps, dl);
8906 }
8907 
8908 SDValue PPCTargetLowering::LowerFunnelShift(SDValue Op,
8909                                             SelectionDAG &DAG) const {
8910   SDLoc dl(Op);
8911   EVT VT = Op.getValueType();
8912   unsigned BitWidth = VT.getSizeInBits();
8913 
8914   bool IsFSHL = Op.getOpcode() == ISD::FSHL;
8915   SDValue X = Op.getOperand(0);
8916   SDValue Y = Op.getOperand(1);
8917   SDValue Z = Op.getOperand(2);
8918   EVT AmtVT = Z.getValueType();
8919 
8920   // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
8921   // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
8922   // This is simpler than TargetLowering::expandFunnelShift because we can rely
8923   // on PowerPC shift by BW being well defined.
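  // For example, fshl(X, Y, 40) on i32 reduces the amount to Z % 32 = 8 and
  // produces (X << 8) | (Y >> 24), while fshr(X, Y, 40) produces
  // (X << 24) | (Y >> 8).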
8924   Z = DAG.getNode(ISD::AND, dl, AmtVT, Z,
8925                   DAG.getConstant(BitWidth - 1, dl, AmtVT));
8926   SDValue SubZ =
8927       DAG.getNode(ISD::SUB, dl, AmtVT, DAG.getConstant(BitWidth, dl, AmtVT), Z);
8928   X = DAG.getNode(PPCISD::SHL, dl, VT, X, IsFSHL ? Z : SubZ);
8929   Y = DAG.getNode(PPCISD::SRL, dl, VT, Y, IsFSHL ? SubZ : Z);
8930   return DAG.getNode(ISD::OR, dl, VT, X, Y);
8931 }
8932 
8933 //===----------------------------------------------------------------------===//
8934 // Vector related lowering.
8935 //
8936 
8937 /// getCanonicalConstSplat - Build a canonical splat immediate of Val with an
8938 /// element size of SplatSize. Cast the result to VT.
8939 static SDValue getCanonicalConstSplat(uint64_t Val, unsigned SplatSize, EVT VT,
8940                                       SelectionDAG &DAG, const SDLoc &dl) {
8941   static const MVT VTys[] = { // canonical VT to use for each size.
8942     MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
8943   };
8944 
8945   EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
8946 
8947   // For a splat with all ones, turn it to vspltisb 0xFF to canonicalize.
  if (Val == ((1LLU << (SplatSize * 8)) - 1)) {
8949     SplatSize = 1;
8950     Val = 0xFF;
8951   }
8952 
8953   EVT CanonicalVT = VTys[SplatSize-1];
8954 
8955   // Build a canonical splat for this value.
8956   return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT));
8957 }
8958 
8959 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the
8960 /// specified intrinsic ID.
8961 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG,
8962                                 const SDLoc &dl, EVT DestVT = MVT::Other) {
8963   if (DestVT == MVT::Other) DestVT = Op.getValueType();
8964   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8965                      DAG.getConstant(IID, dl, MVT::i32), Op);
8966 }
8967 
8968 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the
8969 /// specified intrinsic ID.
8970 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
8971                                 SelectionDAG &DAG, const SDLoc &dl,
8972                                 EVT DestVT = MVT::Other) {
8973   if (DestVT == MVT::Other) DestVT = LHS.getValueType();
8974   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8975                      DAG.getConstant(IID, dl, MVT::i32), LHS, RHS);
8976 }
8977 
8978 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
8979 /// specified intrinsic ID.
8980 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
8981                                 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl,
8982                                 EVT DestVT = MVT::Other) {
8983   if (DestVT == MVT::Other) DestVT = Op0.getValueType();
8984   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8985                      DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
8986 }
8987 
8988 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
8989 /// amount.  The result has the specified value type.
8990 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT,
8991                            SelectionDAG &DAG, const SDLoc &dl) {
8992   // Force LHS/RHS to be the right type.
8993   LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
8994   RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);
8995 
8996   int Ops[16];
8997   for (unsigned i = 0; i != 16; ++i)
8998     Ops[i] = i + Amt;
8999   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
9000   return DAG.getNode(ISD::BITCAST, dl, VT, T);
9001 }
9002 
9003 /// Do we have an efficient pattern in a .td file for this node?
9004 ///
9005 /// \param V - pointer to the BuildVectorSDNode being matched
/// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves?
/// \param HasP8Vector - does this subtarget have Power8 vector instructions?
9007 ///
9008 /// There are some patterns where it is beneficial to keep a BUILD_VECTOR
9009 /// node as a BUILD_VECTOR node rather than expanding it. The patterns where
9010 /// the opposite is true (expansion is beneficial) are:
9011 /// - The node builds a vector out of integers that are not 32 or 64-bits
9012 /// - The node builds a vector out of constants
9013 /// - The node is a "load-and-splat"
9014 /// In all other cases, we will choose to keep the BUILD_VECTOR.
9015 static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V,
9016                                             bool HasDirectMove,
9017                                             bool HasP8Vector) {
9018   EVT VecVT = V->getValueType(0);
9019   bool RightType = VecVT == MVT::v2f64 ||
9020     (HasP8Vector && VecVT == MVT::v4f32) ||
9021     (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32));
9022   if (!RightType)
9023     return false;
9024 
9025   bool IsSplat = true;
9026   bool IsLoad = false;
9027   SDValue Op0 = V->getOperand(0);
9028 
9029   // This function is called in a block that confirms the node is not a constant
9030   // splat. So a constant BUILD_VECTOR here means the vector is built out of
9031   // different constants.
9032   if (V->isConstant())
9033     return false;
9034   for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
9035     if (V->getOperand(i).isUndef())
9036       return false;
9037     // We want to expand nodes that represent load-and-splat even if the
9038     // loaded value is a floating point truncation or conversion to int.
9039     if (V->getOperand(i).getOpcode() == ISD::LOAD ||
9040         (V->getOperand(i).getOpcode() == ISD::FP_ROUND &&
9041          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
9042         (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT &&
9043          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
9044         (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT &&
9045          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD))
9046       IsLoad = true;
9047     // If the operands are different or the input is not a load and has more
9048     // uses than just this BV node, then it isn't a splat.
9049     if (V->getOperand(i) != Op0 ||
9050         (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode())))
9051       IsSplat = false;
9052   }
9053   return !(IsSplat && IsLoad);
9054 }
9055 
9056 // Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128.
9057 SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
9058 
9059   SDLoc dl(Op);
9060   SDValue Op0 = Op->getOperand(0);
9061 
9062   if ((Op.getValueType() != MVT::f128) ||
9063       (Op0.getOpcode() != ISD::BUILD_PAIR) ||
9064       (Op0.getOperand(0).getValueType() != MVT::i64) ||
9065       (Op0.getOperand(1).getValueType() != MVT::i64))
9066     return SDValue();
9067 
9068   return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0),
9069                      Op0.getOperand(1));
9070 }
9071 
9072 static const SDValue *getNormalLoadInput(const SDValue &Op, bool &IsPermuted) {
9073   const SDValue *InputLoad = &Op;
9074   if (InputLoad->getOpcode() == ISD::BITCAST)
9075     InputLoad = &InputLoad->getOperand(0);
9076   if (InputLoad->getOpcode() == ISD::SCALAR_TO_VECTOR ||
9077       InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED) {
9078     IsPermuted = InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED;
9079     InputLoad = &InputLoad->getOperand(0);
9080   }
9081   if (InputLoad->getOpcode() != ISD::LOAD)
9082     return nullptr;
9083   LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9084   return ISD::isNormalLoad(LD) ? InputLoad : nullptr;
9085 }
9086 
9087 // Convert the argument APFloat to a single precision APFloat if there is no
9088 // loss in information during the conversion to single precision APFloat and the
9089 // resulting number is not a denormal number. Return true if successful.
9090 bool llvm::convertToNonDenormSingle(APFloat &ArgAPFloat) {
9091   APFloat APFloatToConvert = ArgAPFloat;
9092   bool LosesInfo = true;
9093   APFloatToConvert.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
9094                            &LosesInfo);
9095   bool Success = (!LosesInfo && !APFloatToConvert.isDenormal());
9096   if (Success)
9097     ArgAPFloat = APFloatToConvert;
9098   return Success;
9099 }
9100 
9101 // Bitcast the argument APInt to a double and convert it to a single precision
9102 // APFloat, bitcast the APFloat to an APInt and assign it to the original
9103 // argument if there is no loss in information during the conversion from
9104 // double to single precision APFloat and the resulting number is not a denormal
9105 // number. Return true if successful.
9106 bool llvm::convertToNonDenormSingle(APInt &ArgAPInt) {
9107   double DpValue = ArgAPInt.bitsToDouble();
9108   APFloat APFloatDp(DpValue);
9109   bool Success = convertToNonDenormSingle(APFloatDp);
9110   if (Success)
9111     ArgAPInt = APFloatDp.bitcastToAPInt();
9112   return Success;
9113 }
9114 
9115 // If this is a case we can't handle, return null and let the default
9116 // expansion code take care of it.  If we CAN select this case, and if it
9117 // selects to a single instruction, return Op.  Otherwise, if we can codegen
9118 // this case more efficiently than a constant pool load, lower it to the
9119 // sequence of ops that should be used.
9120 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
9121                                              SelectionDAG &DAG) const {
9122   SDLoc dl(Op);
9123   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
9124   assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
9125 
9126   // Check if this is a splat of a constant value.
9127   APInt APSplatBits, APSplatUndef;
9128   unsigned SplatBitSize;
9129   bool HasAnyUndefs;
9130   bool BVNIsConstantSplat =
9131       BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
9132                            HasAnyUndefs, 0, !Subtarget.isLittleEndian());
9133 
9134   // If it is a splat of a double, check if we can shrink it to a 32 bit
9135   // non-denormal float which when converted back to double gives us the same
9136   // double. This is to exploit the XXSPLTIDP instruction.
9137   if (BVNIsConstantSplat && Subtarget.hasPrefixInstrs() &&
9138       (SplatBitSize == 64) && (Op->getValueType(0) == MVT::v2f64) &&
9139       convertToNonDenormSingle(APSplatBits)) {
9140     SDValue SplatNode = DAG.getNode(
9141         PPCISD::XXSPLTI_SP_TO_DP, dl, MVT::v2f64,
9142         DAG.getTargetConstant(APSplatBits.getZExtValue(), dl, MVT::i32));
9143     return DAG.getBitcast(Op.getValueType(), SplatNode);
9144   }
9145 
9146   if (!BVNIsConstantSplat || SplatBitSize > 32) {
9147 
9148     bool IsPermutedLoad = false;
9149     const SDValue *InputLoad =
9150         getNormalLoadInput(Op.getOperand(0), IsPermutedLoad);
9151     // Handle load-and-splat patterns as we have instructions that will do this
9152     // in one go.
9153     if (InputLoad && DAG.isSplatValue(Op, true)) {
9154       LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9155 
9156       // We have handling for 4 and 8 byte elements.
9157       unsigned ElementSize = LD->getMemoryVT().getScalarSizeInBits();
9158 
      // To check that the only user of this load is the BUILD_VECTOR, we
      // check for vector width (128 bits) / ElementSize uses, since each
      // operand of the BUILD_VECTOR is a separate use of the value.
9162       if (InputLoad->getNode()->hasNUsesOfValue(128 / ElementSize, 0) &&
9163           ((Subtarget.hasVSX() && ElementSize == 64) ||
9164            (Subtarget.hasP9Vector() && ElementSize == 32))) {
9165         SDValue Ops[] = {
9166           LD->getChain(),    // Chain
9167           LD->getBasePtr(),  // Ptr
9168           DAG.getValueType(Op.getValueType()) // VT
9169         };
9170         return
9171           DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl,
9172                                   DAG.getVTList(Op.getValueType(), MVT::Other),
9173                                   Ops, LD->getMemoryVT(), LD->getMemOperand());
9174       }
9175     }
9176 
9177     // BUILD_VECTOR nodes that are not constant splats of up to 32-bits can be
9178     // lowered to VSX instructions under certain conditions.
9179     // Without VSX, there is no pattern more efficient than expanding the node.
9180     if (Subtarget.hasVSX() &&
9181         haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
9182                                         Subtarget.hasP8Vector()))
9183       return Op;
9184     return SDValue();
9185   }
9186 
9187   uint64_t SplatBits = APSplatBits.getZExtValue();
9188   uint64_t SplatUndef = APSplatUndef.getZExtValue();
9189   unsigned SplatSize = SplatBitSize / 8;
9190 
9191   // First, handle single instruction cases.
9192 
9193   // All zeros?
9194   if (SplatBits == 0) {
9195     // Canonicalize all zero vectors to be v4i32.
9196     if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
9197       SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
9198       Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
9199     }
9200     return Op;
9201   }
9202 
9203   // We have XXSPLTIW for constant splats four bytes wide.
9204   // Given vector length is a multiple of 4, 2-byte splats can be replaced
9205   // with 4-byte splats. We replicate the SplatBits in case of 2-byte splat to
9206   // make a 4-byte splat element. For example: 2-byte splat of 0xABAB can be
9207   // turned into a 4-byte splat of 0xABABABAB.
9208   if (Subtarget.hasPrefixInstrs() && SplatSize == 2)
9209     return getCanonicalConstSplat((SplatBits |= SplatBits << 16), SplatSize * 2,
9210                                   Op.getValueType(), DAG, dl);
9211 
9212   if (Subtarget.hasPrefixInstrs() && SplatSize == 4)
9213     return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
9214                                   dl);
9215 
9216   // We have XXSPLTIB for constant splats one byte wide.
9217   if (Subtarget.hasP9Vector() && SplatSize == 1)
9218     return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
9219                                   dl);
9220 
9221   // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
  int32_t SextVal = (int32_t(SplatBits << (32 - SplatBitSize)) >>
                     (32 - SplatBitSize));
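  // (For example, SplatBits = 0xFFF0 with SplatBitSize = 16 sign-extends to
  // SextVal = -16, which the single-instruction case below handles.)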
9224   if (SextVal >= -16 && SextVal <= 15)
9225     return getCanonicalConstSplat(SextVal, SplatSize, Op.getValueType(), DAG,
9226                                   dl);
9227 
9228   // Two instruction sequences.
9229 
9230   // If this value is in the range [-32,30] and is even, use:
9231   //     VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
9232   // If this value is in the range [17,31] and is odd, use:
9233   //     VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
9234   // If this value is in the range [-31,-17] and is odd, use:
9235   //     VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
9236   // Note the last two are three-instruction sequences.
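  // For example, a splat of 30 becomes vsplti(15) + vsplti(15), and a splat
  // of 27 becomes vsplti(11) - vsplti(-16), since 11 - (-16) == 27.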
9237   if (SextVal >= -32 && SextVal <= 31) {
9238     // To avoid having these optimizations undone by constant folding,
9239     // we convert to a pseudo that will be expanded later into one of
9240     // the above forms.
9241     SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
9242     EVT VT = (SplatSize == 1 ? MVT::v16i8 :
9243               (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
9244     SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
9245     SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
9246     if (VT == Op.getValueType())
9247       return RetVal;
9248     else
9249       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
9250   }
9251 
9252   // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
9253   // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
9254   // for fneg/fabs.
9255   if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
9256     // Make -1 and vspltisw -1:
9257     SDValue OnesV = getCanonicalConstSplat(-1, 4, MVT::v4i32, DAG, dl);
9258 
9259     // Make the VSLW intrinsic, computing 0x8000_0000.
9260     SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
9261                                    OnesV, DAG, dl);
9262 
9263     // xor by OnesV to invert it.
9264     Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
9265     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9266   }
9267 
9268   // Check to see if this is a wide variety of vsplti*, binop self cases.
9269   static const signed char SplatCsts[] = {
9270     -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
9271     -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
9272   };
9273 
9274   for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
    // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
    // cases which are ambiguous (e.g. formation of 0x8000_0000).
9277     int i = SplatCsts[idx];
9278 
9279     // Figure out what shift amount will be used by altivec if shifted by i in
9280     // this splat size.
9281     unsigned TypeShiftAmt = i & (SplatBitSize-1);
9282 
9283     // vsplti + shl self.
9284     if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
9285       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9286       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9287         Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
9288         Intrinsic::ppc_altivec_vslw
9289       };
9290       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9291       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9292     }
9293 
9294     // vsplti + srl self.
9295     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
9296       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9297       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9298         Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
9299         Intrinsic::ppc_altivec_vsrw
9300       };
9301       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9302       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9303     }
9304 
    // vsplti + sra self.  (Note: arithmetic shift of the signed constant,
    // unlike the srl case above.)
    if (SextVal == (int)(i >> TypeShiftAmt)) {
9307       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9308       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9309         Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
9310         Intrinsic::ppc_altivec_vsraw
9311       };
9312       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9313       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9314     }
9315 
9316     // vsplti + rol self.
9317     if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
9318                          ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
9319       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9320       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9321         Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
9322         Intrinsic::ppc_altivec_vrlw
9323       };
9324       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9325       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9326     }
9327 
9328     // t = vsplti c, result = vsldoi t, t, 1
9329     if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
9330       SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9331       unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
9332       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9333     }
9334     // t = vsplti c, result = vsldoi t, t, 2
9335     if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
9336       SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9337       unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
9338       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9339     }
9340     // t = vsplti c, result = vsldoi t, t, 3
9341     if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
9342       SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9343       unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
9344       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9345     }
9346   }
9347 
9348   return SDValue();
9349 }
9350 
9351 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
9352 /// the specified operations to build the shuffle.
9353 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
9354                                       SDValue RHS, SelectionDAG &DAG,
9355                                       const SDLoc &dl) {
9356   unsigned OpNum = (PFEntry >> 26) & 0x0F;
9357   unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
9358   unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
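
  // Each 13-bit ID packs four mask elements as base-9 digits: the LHS
  // identity <0,1,2,3> is ((0*9+1)*9+2)*9+3 == 102, which is exactly the
  // (1*9+2)*9+3 checked for OP_COPY below.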
9359 
9360   enum {
9361     OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
9362     OP_VMRGHW,
9363     OP_VMRGLW,
9364     OP_VSPLTISW0,
9365     OP_VSPLTISW1,
9366     OP_VSPLTISW2,
9367     OP_VSPLTISW3,
9368     OP_VSLDOI4,
9369     OP_VSLDOI8,
9370     OP_VSLDOI12
9371   };
9372 
9373   if (OpNum == OP_COPY) {
9374     if (LHSID == (1*9+2)*9+3) return LHS;
9375     assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
9376     return RHS;
9377   }
9378 
9379   SDValue OpLHS, OpRHS;
9380   OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
9381   OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
9382 
9383   int ShufIdxs[16];
9384   switch (OpNum) {
9385   default: llvm_unreachable("Unknown i32 permute!");
9386   case OP_VMRGHW:
9387     ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
9388     ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
9389     ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
9390     ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
9391     break;
9392   case OP_VMRGLW:
9393     ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
9394     ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
9395     ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
9396     ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
9397     break;
9398   case OP_VSPLTISW0:
9399     for (unsigned i = 0; i != 16; ++i)
9400       ShufIdxs[i] = (i&3)+0;
9401     break;
9402   case OP_VSPLTISW1:
9403     for (unsigned i = 0; i != 16; ++i)
9404       ShufIdxs[i] = (i&3)+4;
9405     break;
9406   case OP_VSPLTISW2:
9407     for (unsigned i = 0; i != 16; ++i)
9408       ShufIdxs[i] = (i&3)+8;
9409     break;
9410   case OP_VSPLTISW3:
9411     for (unsigned i = 0; i != 16; ++i)
9412       ShufIdxs[i] = (i&3)+12;
9413     break;
9414   case OP_VSLDOI4:
9415     return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
9416   case OP_VSLDOI8:
9417     return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
9418   case OP_VSLDOI12:
9419     return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
9420   }
9421   EVT VT = OpLHS.getValueType();
9422   OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
9423   OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
9424   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
9425   return DAG.getNode(ISD::BITCAST, dl, VT, T);
9426 }
9427 
9428 /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
9429 /// by the VINSERTB instruction introduced in ISA 3.0, else just return default
9430 /// SDValue.
9431 SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
9432                                            SelectionDAG &DAG) const {
9433   const unsigned BytesInVector = 16;
9434   bool IsLE = Subtarget.isLittleEndian();
9435   SDLoc dl(N);
9436   SDValue V1 = N->getOperand(0);
9437   SDValue V2 = N->getOperand(1);
9438   unsigned ShiftElts = 0, InsertAtByte = 0;
9439   bool Swap = false;
9440 
9441   // Shifts required to get the byte we want at element 7.
9442   unsigned LittleEndianShifts[] = {8, 7,  6,  5,  4,  3,  2,  1,
9443                                    0, 15, 14, 13, 12, 11, 10, 9};
9444   unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
9445                                 1, 2,  3,  4,  5,  6,  7,  8};
9446 
9447   ArrayRef<int> Mask = N->getMask();
9448   int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
9449 
9450   // For each mask element, find out if we're just inserting something
9451   // from V2 into V1 or vice versa.
9452   // Possible permutations inserting an element from V2 into V1:
9453   //   X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9454   //   0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9455   //   ...
9456   //   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
9457   // Inserting from V1 into V2 will be similar, except mask range will be
9458   // [16,31].
9459 
9460   bool FoundCandidate = false;
9461   // If both vector operands for the shuffle are the same vector, the mask
9462   // will contain only elements from the first one and the second one will be
9463   // undef.
9464   unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
  // Go through the mask of bytes to find an element that's being moved
  // from one vector to the other.
9467   for (unsigned i = 0; i < BytesInVector; ++i) {
9468     unsigned CurrentElement = Mask[i];
    // If the 2nd operand is undefined, we should only look for the source
    // element (7 for BE, 8 for LE) in the Mask.
9471     if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
9472       continue;
9473 
9474     bool OtherElementsInOrder = true;
9475     // Examine the other elements in the Mask to see if they're in original
9476     // order.
9477     for (unsigned j = 0; j < BytesInVector; ++j) {
9478       if (j == i)
9479         continue;
      // If CurrentElement is from V1 [0,15], then we expect the rest of the
      // Mask to be from V2 [16,31] and vice versa, unless the 2nd operand is
      // undefined, in which case we always assume we're picking from the 1st
      // operand.
9483       int MaskOffset =
9484           (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
9485       if (Mask[j] != OriginalOrder[j] + MaskOffset) {
9486         OtherElementsInOrder = false;
9487         break;
9488       }
9489     }
9490     // If other elements are in original order, we record the number of shifts
9491     // we need to get the element we want into element 7. Also record which byte
9492     // in the vector we should insert into.
9493     if (OtherElementsInOrder) {
9494       // If 2nd operand is undefined, we assume no shifts and no swapping.
9495       if (V2.isUndef()) {
9496         ShiftElts = 0;
9497         Swap = false;
9498       } else {
        // Only need the last 4 bits for shifts because operands will be
        // swapped if CurrentElement is >= 2^4.
9500         ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
9501                          : BigEndianShifts[CurrentElement & 0xF];
9502         Swap = CurrentElement < BytesInVector;
9503       }
9504       InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
9505       FoundCandidate = true;
9506       break;
9507     }
9508   }
9509 
9510   if (!FoundCandidate)
9511     return SDValue();
9512 
9513   // Candidate found, construct the proper SDAG sequence with VINSERTB,
9514   // optionally with VECSHL if shift is required.
9515   if (Swap)
9516     std::swap(V1, V2);
9517   if (V2.isUndef())
9518     V2 = V1;
9519   if (ShiftElts) {
9520     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9521                               DAG.getConstant(ShiftElts, dl, MVT::i32));
9522     return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl,
9523                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
9524   }
9525   return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2,
9526                      DAG.getConstant(InsertAtByte, dl, MVT::i32));
9527 }
9528 
9529 /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled
9530 /// by the VINSERTH instruction introduced in ISA 3.0, else just return default
9531 /// SDValue.
9532 SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
9533                                            SelectionDAG &DAG) const {
9534   const unsigned NumHalfWords = 8;
9535   const unsigned BytesInVector = NumHalfWords * 2;
9536   // Check that the shuffle is on half-words.
9537   if (!isNByteElemShuffleMask(N, 2, 1))
9538     return SDValue();
9539 
9540   bool IsLE = Subtarget.isLittleEndian();
9541   SDLoc dl(N);
9542   SDValue V1 = N->getOperand(0);
9543   SDValue V2 = N->getOperand(1);
9544   unsigned ShiftElts = 0, InsertAtByte = 0;
9545   bool Swap = false;
9546 
9547   // Shifts required to get the half-word we want at element 3.
9548   unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
9549   unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};
9550 
9551   uint32_t Mask = 0;
9552   uint32_t OriginalOrderLow = 0x1234567;
9553   uint32_t OriginalOrderHigh = 0x89ABCDEF;
  // Now we look at mask elements 0,2,4,6,8,10,12,14.  Pack the mask into a
  // 32-bit space: only a 4-bit nibble is needed per element.
9556   for (unsigned i = 0; i < NumHalfWords; ++i) {
9557     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9558     Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift);
9559   }
9560 
9561   // For each mask element, find out if we're just inserting something
9562   // from V2 into V1 or vice versa.  Possible permutations inserting an element
9563   // from V2 into V1:
9564   //   X, 1, 2, 3, 4, 5, 6, 7
9565   //   0, X, 2, 3, 4, 5, 6, 7
9566   //   0, 1, X, 3, 4, 5, 6, 7
9567   //   0, 1, 2, X, 4, 5, 6, 7
9568   //   0, 1, 2, 3, X, 5, 6, 7
9569   //   0, 1, 2, 3, 4, X, 6, 7
9570   //   0, 1, 2, 3, 4, 5, X, 7
9571   //   0, 1, 2, 3, 4, 5, 6, X
9572   // Inserting from V1 into V2 will be similar, except mask range will be [8,15].
9573 
9574   bool FoundCandidate = false;
9575   // Go through the mask of half-words to find an element that's being moved
9576   // from one vector to the other.
9577   for (unsigned i = 0; i < NumHalfWords; ++i) {
9578     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9579     uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF;
9580     uint32_t MaskOtherElts = ~(0xF << MaskShift);
9581     uint32_t TargetOrder = 0x0;
9582 
9583     // If both vector operands for the shuffle are the same vector, the mask
9584     // will contain only elements from the first one and the second one will be
9585     // undef.
9586     if (V2.isUndef()) {
9587       ShiftElts = 0;
9588       unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
9589       TargetOrder = OriginalOrderLow;
9590       Swap = false;
      // Skip if this isn't the correct element or if the mask of the other
      // elements doesn't match our expected order.
9593       if (MaskOneElt == VINSERTHSrcElem &&
9594           (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9595         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9596         FoundCandidate = true;
9597         break;
9598       }
9599     } else { // If both operands are defined.
9600       // Target order is [8,15] if the current mask is between [0,7].
9601       TargetOrder =
9602           (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
      // Skip if the mask of the other elements doesn't match our expected
      // order.
9604       if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9605         // We only need the last 3 bits for the number of shifts.
9606         ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
9607                          : BigEndianShifts[MaskOneElt & 0x7];
9608         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9609         Swap = MaskOneElt < NumHalfWords;
9610         FoundCandidate = true;
9611         break;
9612       }
9613     }
9614   }
9615 
9616   if (!FoundCandidate)
9617     return SDValue();
9618 
9619   // Candidate found, construct the proper SDAG sequence with VINSERTH,
9620   // optionally with VECSHL if shift is required.
9621   if (Swap)
9622     std::swap(V1, V2);
9623   if (V2.isUndef())
9624     V2 = V1;
9625   SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
9626   if (ShiftElts) {
9627     // Double ShiftElts because we're left shifting on v16i8 type.
9628     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9629                               DAG.getConstant(2 * ShiftElts, dl, MVT::i32));
9630     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl);
9631     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9632                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
9633     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9634   }
9635   SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
9636   SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9637                             DAG.getConstant(InsertAtByte, dl, MVT::i32));
9638   return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9639 }
9640 
9641 /// lowerToXXSPLTI32DX - Return the SDValue if this VECTOR_SHUFFLE can be
9642 /// handled by the XXSPLTI32DX instruction introduced in ISA 3.1, otherwise
9643 /// return the default SDValue.
9644 SDValue PPCTargetLowering::lowerToXXSPLTI32DX(ShuffleVectorSDNode *SVN,
9645                                               SelectionDAG &DAG) const {
9646   // The LHS and RHS may be bitcasts to v16i8 as we canonicalize shuffles
9647   // to v16i8. Peek through the bitcasts to get the actual operands.
9648   SDValue LHS = peekThroughBitcasts(SVN->getOperand(0));
9649   SDValue RHS = peekThroughBitcasts(SVN->getOperand(1));
9650 
9651   auto ShuffleMask = SVN->getMask();
9652   SDValue VecShuffle(SVN, 0);
9653   SDLoc DL(SVN);
9654 
9655   // Check that we have a four byte shuffle.
9656   if (!isNByteElemShuffleMask(SVN, 4, 1))
9657     return SDValue();
9658 
  // Canonicalize the RHS to be a BUILD_VECTOR when lowering to xxsplti32dx.
9660   if (RHS->getOpcode() != ISD::BUILD_VECTOR) {
9661     std::swap(LHS, RHS);
9662     VecShuffle = DAG.getCommutedVectorShuffle(*SVN);
9663     ShuffleMask = cast<ShuffleVectorSDNode>(VecShuffle)->getMask();
9664   }
9665 
9666   // Ensure that the RHS is a vector of constants.
9667   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
9668   if (!BVN)
9669     return SDValue();
9670 
9671   // Check if RHS is a splat of 4-bytes (or smaller).
9672   APInt APSplatValue, APSplatUndef;
9673   unsigned SplatBitSize;
9674   bool HasAnyUndefs;
9675   if (!BVN->isConstantSplat(APSplatValue, APSplatUndef, SplatBitSize,
9676                             HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
9677       SplatBitSize > 32)
9678     return SDValue();
9679 
9680   // Check that the shuffle mask matches the semantics of XXSPLTI32DX.
9681   // The instruction splats a constant C into two words of the source vector
9682   // producing { C, Unchanged, C, Unchanged } or { Unchanged, C, Unchanged, C }.
  // Thus we check that the shuffle mask is the equivalent of
  // <0, [4-7], 2, [4-7]> or <[4-7], 1, [4-7], 3> respectively.
9685   // Note: the check above of isNByteElemShuffleMask() ensures that the bytes
9686   // within each word are consecutive, so we only need to check the first byte.
9687   SDValue Index;
9688   bool IsLE = Subtarget.isLittleEndian();
9689   if ((ShuffleMask[0] == 0 && ShuffleMask[8] == 8) &&
9690       (ShuffleMask[4] % 4 == 0 && ShuffleMask[12] % 4 == 0 &&
9691        ShuffleMask[4] > 15 && ShuffleMask[12] > 15))
9692     Index = DAG.getTargetConstant(IsLE ? 0 : 1, DL, MVT::i32);
9693   else if ((ShuffleMask[4] == 4 && ShuffleMask[12] == 12) &&
9694            (ShuffleMask[0] % 4 == 0 && ShuffleMask[8] % 4 == 0 &&
9695             ShuffleMask[0] > 15 && ShuffleMask[8] > 15))
9696     Index = DAG.getTargetConstant(IsLE ? 1 : 0, DL, MVT::i32);
9697   else
9698     return SDValue();
9699 
9700   // If the splat is narrower than 32-bits, we need to get the 32-bit value
9701   // for XXSPLTI32DX.
9702   unsigned SplatVal = APSplatValue.getZExtValue();
9703   for (; SplatBitSize < 32; SplatBitSize <<= 1)
9704     SplatVal |= (SplatVal << SplatBitSize);
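  // For example, an 8-bit splat value 0xAB widens to 0xABAB and then to
  // 0xABABABAB.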
9705 
9706   SDValue SplatNode = DAG.getNode(
9707       PPCISD::XXSPLTI32DX, DL, MVT::v2i64, DAG.getBitcast(MVT::v2i64, LHS),
9708       Index, DAG.getTargetConstant(SplatVal, DL, MVT::i32));
9709   return DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, SplatNode);
9710 }
9711 
9712 /// LowerROTL - Custom lowering for ROTL(v1i128) to vector_shuffle(v16i8).
9713 /// We lower ROTL(v1i128) to vector_shuffle(v16i8) only if shift amount is
9714 /// a multiple of 8. Otherwise convert it to a scalar rotation(i128)
9715 /// i.e (or (shl x, C1), (srl x, 128-C1)).
9716 SDValue PPCTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const {
9717   assert(Op.getOpcode() == ISD::ROTL && "Should only be called for ISD::ROTL");
9718   assert(Op.getValueType() == MVT::v1i128 &&
9719          "Only set v1i128 as custom, other type shouldn't reach here!");
9720   SDLoc dl(Op);
9721   SDValue N0 = peekThroughBitcasts(Op.getOperand(0));
9722   SDValue N1 = peekThroughBitcasts(Op.getOperand(1));
9723   unsigned SHLAmt = N1.getConstantOperandVal(0);
9724   if (SHLAmt % 8 == 0) {
9725     SmallVector<int, 16> Mask(16, 0);
9726     std::iota(Mask.begin(), Mask.end(), 0);
9727     std::rotate(Mask.begin(), Mask.begin() + SHLAmt / 8, Mask.end());
9728     if (SDValue Shuffle =
9729             DAG.getVectorShuffle(MVT::v16i8, dl, DAG.getBitcast(MVT::v16i8, N0),
9730                                  DAG.getUNDEF(MVT::v16i8), Mask))
9731       return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, Shuffle);
9732   }
9733   SDValue ArgVal = DAG.getBitcast(MVT::i128, N0);
9734   SDValue SHLOp = DAG.getNode(ISD::SHL, dl, MVT::i128, ArgVal,
9735                               DAG.getConstant(SHLAmt, dl, MVT::i32));
9736   SDValue SRLOp = DAG.getNode(ISD::SRL, dl, MVT::i128, ArgVal,
9737                               DAG.getConstant(128 - SHLAmt, dl, MVT::i32));
9738   SDValue OROp = DAG.getNode(ISD::OR, dl, MVT::i128, SHLOp, SRLOp);
9739   return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, OROp);
9740 }
9741 
9742 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
9743 /// is a shuffle we can handle in a single instruction, return it.  Otherwise,
9744 /// return the code it can be lowered into.  Worst case, it can always be
9745 /// lowered into a vperm.
9746 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
9747                                                SelectionDAG &DAG) const {
9748   SDLoc dl(Op);
9749   SDValue V1 = Op.getOperand(0);
9750   SDValue V2 = Op.getOperand(1);
9751   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9752 
9753   // Any nodes that were combined in the target-independent combiner prior
9754   // to vector legalization will not be sent to the target combine. Try to
9755   // combine it here.
9756   if (SDValue NewShuffle = combineVectorShuffle(SVOp, DAG)) {
9757     if (!isa<ShuffleVectorSDNode>(NewShuffle))
9758       return NewShuffle;
9759     Op = NewShuffle;
9760     SVOp = cast<ShuffleVectorSDNode>(Op);
9761     V1 = Op.getOperand(0);
9762     V2 = Op.getOperand(1);
9763   }
9764   EVT VT = Op.getValueType();
9765   bool isLittleEndian = Subtarget.isLittleEndian();
9766 
9767   unsigned ShiftElts, InsertAtByte;
9768   bool Swap = false;
9769 
9770   // If this is a load-and-splat, we can do that with a single instruction
9771   // in some cases. However if the load has multiple uses, we don't want to
9772   // combine it because that will just produce multiple loads.
9773   bool IsPermutedLoad = false;
9774   const SDValue *InputLoad = getNormalLoadInput(V1, IsPermutedLoad);
9775   if (InputLoad && Subtarget.hasVSX() && V2.isUndef() &&
9776       (PPC::isSplatShuffleMask(SVOp, 4) || PPC::isSplatShuffleMask(SVOp, 8)) &&
9777       InputLoad->hasOneUse()) {
9778     bool IsFourByte = PPC::isSplatShuffleMask(SVOp, 4);
9779     int SplatIdx =
9780       PPC::getSplatIdxForPPCMnemonics(SVOp, IsFourByte ? 4 : 8, DAG);
9781 
9782     // The splat index for permuted loads will be in the left half of the vector
9783     // which is strictly wider than the loaded value by 8 bytes. So we need to
9784     // adjust the splat index to point to the correct address in memory.
9785     if (IsPermutedLoad) {
9786       assert(isLittleEndian && "Unexpected permuted load on big endian target");
9787       SplatIdx += IsFourByte ? 2 : 1;
9788       assert((SplatIdx < (IsFourByte ? 4 : 2)) &&
9789              "Splat of a value outside of the loaded memory");
9790     }
9791 
9792     LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9793     // For 4-byte load-and-splat, we need Power9.
9794     if ((IsFourByte && Subtarget.hasP9Vector()) || !IsFourByte) {
9795       uint64_t Offset = 0;
9796       if (IsFourByte)
9797         Offset = isLittleEndian ? (3 - SplatIdx) * 4 : SplatIdx * 4;
9798       else
9799         Offset = isLittleEndian ? (1 - SplatIdx) * 8 : SplatIdx * 8;
9800 
9801       SDValue BasePtr = LD->getBasePtr();
9802       if (Offset != 0)
9803         BasePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
9804                               BasePtr, DAG.getIntPtrConstant(Offset, dl));
9805       SDValue Ops[] = {
9806         LD->getChain(),    // Chain
9807         BasePtr,           // BasePtr
9808         DAG.getValueType(Op.getValueType()) // VT
9809       };
9810       SDVTList VTL =
9811         DAG.getVTList(IsFourByte ? MVT::v4i32 : MVT::v2i64, MVT::Other);
9812       SDValue LdSplt =
9813         DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl, VTL,
9814                                 Ops, LD->getMemoryVT(), LD->getMemOperand());
9815       if (LdSplt.getValueType() != SVOp->getValueType(0))
9816         LdSplt = DAG.getBitcast(SVOp->getValueType(0), LdSplt);
9817       return LdSplt;
9818     }
9819   }
9820   if (Subtarget.hasP9Vector() &&
9821       PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
9822                            isLittleEndian)) {
9823     if (Swap)
9824       std::swap(V1, V2);
9825     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9826     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2);
9827     if (ShiftElts) {
9828       SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2,
9829                                 DAG.getConstant(ShiftElts, dl, MVT::i32));
9830       SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl,
9831                                 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9832       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9833     }
9834     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2,
9835                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
9836     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9837   }
9838 
9839   if (Subtarget.hasPrefixInstrs()) {
9840     SDValue SplatInsertNode;
9841     if ((SplatInsertNode = lowerToXXSPLTI32DX(SVOp, DAG)))
9842       return SplatInsertNode;
9843   }
9844 
9845   if (Subtarget.hasP9Altivec()) {
9846     SDValue NewISDNode;
9847     if ((NewISDNode = lowerToVINSERTH(SVOp, DAG)))
9848       return NewISDNode;
9849 
9850     if ((NewISDNode = lowerToVINSERTB(SVOp, DAG)))
9851       return NewISDNode;
9852   }
9853 
9854   if (Subtarget.hasVSX() &&
9855       PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
9856     if (Swap)
9857       std::swap(V1, V2);
9858     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9859     SDValue Conv2 =
9860         DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2);
9861 
9862     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2,
9863                               DAG.getConstant(ShiftElts, dl, MVT::i32));
9864     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl);
9865   }
9866 
  if (Subtarget.hasVSX() &&
      PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
9869     if (Swap)
9870       std::swap(V1, V2);
9871     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
9872     SDValue Conv2 =
9873         DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2);
9874 
    SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2,
                                 DAG.getConstant(ShiftElts, dl, MVT::i32));
9877     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI);
9878   }
9879 
9880   if (Subtarget.hasP9Vector()) {
    if (PPC::isXXBRHShuffleMask(SVOp)) {
9882       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
9883       SDValue ReveHWord = DAG.getNode(ISD::BSWAP, dl, MVT::v8i16, Conv);
9884       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord);
9885     } else if (PPC::isXXBRWShuffleMask(SVOp)) {
9886       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9887       SDValue ReveWord = DAG.getNode(ISD::BSWAP, dl, MVT::v4i32, Conv);
9888       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord);
9889     } else if (PPC::isXXBRDShuffleMask(SVOp)) {
9890       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
9891       SDValue ReveDWord = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Conv);
9892       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord);
9893     } else if (PPC::isXXBRQShuffleMask(SVOp)) {
9894       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1);
9895       SDValue ReveQWord = DAG.getNode(ISD::BSWAP, dl, MVT::v1i128, Conv);
9896       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord);
9897     }
9898   }
9899 
9900   if (Subtarget.hasVSX()) {
9901     if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
9902       int SplatIdx = PPC::getSplatIdxForPPCMnemonics(SVOp, 4, DAG);
9903 
9904       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9905       SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
9906                                   DAG.getConstant(SplatIdx, dl, MVT::i32));
9907       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
9908     }
9909 
9910     // Left shifts of 8 bytes are actually swaps. Convert accordingly.
9911     if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
9912       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
9913       SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
9914       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
9915     }
9916   }
9917 
9918   // Cases that are handled by instructions that take permute immediates
9919   // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
9920   // selected by the instruction selector.
9921   if (V2.isUndef()) {
9922     if (PPC::isSplatShuffleMask(SVOp, 1) ||
9923         PPC::isSplatShuffleMask(SVOp, 2) ||
9924         PPC::isSplatShuffleMask(SVOp, 4) ||
9925         PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
9926         PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
9927         PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
9928         PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
9929         PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
9930         PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
9931         PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
9932         PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
9933         PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) ||
9934         (Subtarget.hasP8Altivec() && (
9935          PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) ||
9936          PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) ||
9937          PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) {
9938       return Op;
9939     }
9940   }
9941 
9942   // Altivec has a variety of "shuffle immediates" that take two vector inputs
9943   // and produce a fixed permutation.  If any of these match, do not lower to
9944   // VPERM.
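  // The ShuffleKind argument distinguishes big-endian shuffles with two
  // different inputs (0), either-endian shuffles with two identical inputs
  // (1), and little-endian shuffles with two different inputs (2).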
9945   unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
9946   if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9947       PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9948       PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
9949       PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
9950       PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
9951       PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
9952       PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
9953       PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
9954       PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
9955       (Subtarget.hasP8Altivec() && (
9956        PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9957        PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) ||
9958        PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG))))
9959     return Op;
9960 
9961   // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
9962   // perfect shuffle table to emit an optimal matching sequence.
9963   ArrayRef<int> PermMask = SVOp->getMask();
9964 
9965   unsigned PFIndexes[4];
9966   bool isFourElementShuffle = true;
9967   for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
9968     unsigned EltNo = 8;   // Start out undef.
9969     for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
9970       if (PermMask[i*4+j] < 0)
9971         continue;   // Undef, ignore it.
9972 
9973       unsigned ByteSource = PermMask[i*4+j];
9974       if ((ByteSource & 3) != j) {
9975         isFourElementShuffle = false;
9976         break;
9977       }
9978 
9979       if (EltNo == 8) {
9980         EltNo = ByteSource/4;
9981       } else if (EltNo != ByteSource/4) {
9982         isFourElementShuffle = false;
9983         break;
9984       }
9985     }
9986     PFIndexes[i] = EltNo;
9987   }
9988 
9989   // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
9990   // perfect shuffle vector to determine if it is cost effective to do this as
9991   // discrete instructions, or whether we should use a vperm.
9992   // For now, we skip this for little endian until such time as we have a
9993   // little-endian perfect shuffle table.
9994   if (isFourElementShuffle && !isLittleEndian) {
9995     // Compute the index in the perfect shuffle table.
9996     unsigned PFTableIndex =
9997       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
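    // Each PFIndex is in [0, 8] (8 meaning undef), so the four indices pack
    // into a base-9 number indexing the 9^4-entry table. The top two bits of
    // each table entry hold the cost checked below; the remaining bits encode
    // the instruction sequence that GeneratePerfectShuffle emits.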
9998 
9999     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
10000     unsigned Cost  = (PFEntry >> 30);
10001 
10002     // Determining when to avoid vperm is tricky.  Many things affect the cost
10003     // of vperm, particularly how many times the perm mask needs to be computed.
10004     // For example, if the perm mask can be hoisted out of a loop or is already
10005     // used (perhaps because there are multiple permutes with the same shuffle
10006     // mask?) the vperm has a cost of 1.  OTOH, hoisting the permute mask out of
10007     // the loop requires an extra register.
10008     //
10009     // As a compromise, we only emit discrete instructions if the shuffle can be
10010     // generated in 3 or fewer operations.  When we have loop information
10011     // available, if this block is within a loop, we should avoid using vperm
10012     // for 3-operation perms and use a constant pool load instead.
10013     if (Cost < 3)
10014       return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
10015   }
10016 
10017   // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
10018   // vector that will get spilled to the constant pool.
10019   if (V2.isUndef()) V2 = V1;
10020 
  // The VECTOR_SHUFFLE mask is almost exactly what we want for vperm, except
  // that it is in input element units, not in bytes.  Convert now.
10023 
10024   // For little endian, the order of the input vectors is reversed, and
10025   // the permutation mask is complemented with respect to 31.  This is
10026   // necessary to produce proper semantics with the big-endian-biased vperm
10027   // instruction.
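  // For example, a mask entry selecting byte 0 of the concatenated inputs
  // becomes 31, and the VPERM operands are passed as (V2, V1) below.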
10028   EVT EltVT = V1.getValueType().getVectorElementType();
10029   unsigned BytesPerElement = EltVT.getSizeInBits()/8;
10030 
10031   SmallVector<SDValue, 16> ResultMask;
10032   for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
10033     unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
10034 
10035     for (unsigned j = 0; j != BytesPerElement; ++j)
10036       if (isLittleEndian)
10037         ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
10038                                              dl, MVT::i32));
10039       else
10040         ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
10041                                              MVT::i32));
10042   }
10043 
10044   ShufflesHandledWithVPERM++;
10045   SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
10046   LLVM_DEBUG(dbgs() << "Emitting a VPERM for the following shuffle:\n");
10047   LLVM_DEBUG(SVOp->dump());
10048   LLVM_DEBUG(dbgs() << "With the following permute control vector:\n");
10049   LLVM_DEBUG(VPermMask.dump());
10050 
10051   if (isLittleEndian)
10052     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
10053                        V2, V1, VPermMask);
10054   else
10055     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
10056                        V1, V2, VPermMask);
10057 }
10058 
/// getVectorCompareInfo - Given an intrinsic, return false if it is not a
/// vector comparison.  If it is, return true and fill in CompareOpc/isDot
/// with information about the intrinsic.
10062 static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
10063                                  bool &isDot, const PPCSubtarget &Subtarget) {
10064   unsigned IntrinsicID =
10065       cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
10066   CompareOpc = -1;
10067   isDot = false;
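  // For each comparison intrinsic, CompareOpc holds the extended-opcode field
  // of the corresponding vcmp*/xvcmp* instruction, and the "_p" (predicate)
  // forms additionally set isDot so that CR6 is updated.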
10068   switch (IntrinsicID) {
10069   default:
10070     return false;
10071   // Comparison predicates.
10072   case Intrinsic::ppc_altivec_vcmpbfp_p:
10073     CompareOpc = 966;
10074     isDot = true;
10075     break;
10076   case Intrinsic::ppc_altivec_vcmpeqfp_p:
10077     CompareOpc = 198;
10078     isDot = true;
10079     break;
10080   case Intrinsic::ppc_altivec_vcmpequb_p:
10081     CompareOpc = 6;
10082     isDot = true;
10083     break;
10084   case Intrinsic::ppc_altivec_vcmpequh_p:
10085     CompareOpc = 70;
10086     isDot = true;
10087     break;
10088   case Intrinsic::ppc_altivec_vcmpequw_p:
10089     CompareOpc = 134;
10090     isDot = true;
10091     break;
10092   case Intrinsic::ppc_altivec_vcmpequd_p:
10093     if (Subtarget.hasP8Altivec()) {
10094       CompareOpc = 199;
10095       isDot = true;
10096     } else
10097       return false;
10098     break;
10099   case Intrinsic::ppc_altivec_vcmpneb_p:
10100   case Intrinsic::ppc_altivec_vcmpneh_p:
10101   case Intrinsic::ppc_altivec_vcmpnew_p:
10102   case Intrinsic::ppc_altivec_vcmpnezb_p:
10103   case Intrinsic::ppc_altivec_vcmpnezh_p:
10104   case Intrinsic::ppc_altivec_vcmpnezw_p:
10105     if (Subtarget.hasP9Altivec()) {
10106       switch (IntrinsicID) {
10107       default:
10108         llvm_unreachable("Unknown comparison intrinsic.");
10109       case Intrinsic::ppc_altivec_vcmpneb_p:
10110         CompareOpc = 7;
10111         break;
10112       case Intrinsic::ppc_altivec_vcmpneh_p:
10113         CompareOpc = 71;
10114         break;
10115       case Intrinsic::ppc_altivec_vcmpnew_p:
10116         CompareOpc = 135;
10117         break;
10118       case Intrinsic::ppc_altivec_vcmpnezb_p:
10119         CompareOpc = 263;
10120         break;
10121       case Intrinsic::ppc_altivec_vcmpnezh_p:
10122         CompareOpc = 327;
10123         break;
10124       case Intrinsic::ppc_altivec_vcmpnezw_p:
10125         CompareOpc = 391;
10126         break;
10127       }
10128       isDot = true;
10129     } else
10130       return false;
10131     break;
10132   case Intrinsic::ppc_altivec_vcmpgefp_p:
10133     CompareOpc = 454;
10134     isDot = true;
10135     break;
10136   case Intrinsic::ppc_altivec_vcmpgtfp_p:
10137     CompareOpc = 710;
10138     isDot = true;
10139     break;
10140   case Intrinsic::ppc_altivec_vcmpgtsb_p:
10141     CompareOpc = 774;
10142     isDot = true;
10143     break;
10144   case Intrinsic::ppc_altivec_vcmpgtsh_p:
10145     CompareOpc = 838;
10146     isDot = true;
10147     break;
10148   case Intrinsic::ppc_altivec_vcmpgtsw_p:
10149     CompareOpc = 902;
10150     isDot = true;
10151     break;
10152   case Intrinsic::ppc_altivec_vcmpgtsd_p:
10153     if (Subtarget.hasP8Altivec()) {
10154       CompareOpc = 967;
10155       isDot = true;
10156     } else
10157       return false;
10158     break;
10159   case Intrinsic::ppc_altivec_vcmpgtub_p:
10160     CompareOpc = 518;
10161     isDot = true;
10162     break;
10163   case Intrinsic::ppc_altivec_vcmpgtuh_p:
10164     CompareOpc = 582;
10165     isDot = true;
10166     break;
10167   case Intrinsic::ppc_altivec_vcmpgtuw_p:
10168     CompareOpc = 646;
10169     isDot = true;
10170     break;
10171   case Intrinsic::ppc_altivec_vcmpgtud_p:
10172     if (Subtarget.hasP8Altivec()) {
10173       CompareOpc = 711;
10174       isDot = true;
10175     } else
10176       return false;
10177     break;
10178 
  // VSX predicate comparisons use the same infrastructure.
10180   case Intrinsic::ppc_vsx_xvcmpeqdp_p:
10181   case Intrinsic::ppc_vsx_xvcmpgedp_p:
10182   case Intrinsic::ppc_vsx_xvcmpgtdp_p:
10183   case Intrinsic::ppc_vsx_xvcmpeqsp_p:
10184   case Intrinsic::ppc_vsx_xvcmpgesp_p:
10185   case Intrinsic::ppc_vsx_xvcmpgtsp_p:
10186     if (Subtarget.hasVSX()) {
10187       switch (IntrinsicID) {
10188       case Intrinsic::ppc_vsx_xvcmpeqdp_p:
10189         CompareOpc = 99;
10190         break;
10191       case Intrinsic::ppc_vsx_xvcmpgedp_p:
10192         CompareOpc = 115;
10193         break;
10194       case Intrinsic::ppc_vsx_xvcmpgtdp_p:
10195         CompareOpc = 107;
10196         break;
10197       case Intrinsic::ppc_vsx_xvcmpeqsp_p:
10198         CompareOpc = 67;
10199         break;
10200       case Intrinsic::ppc_vsx_xvcmpgesp_p:
10201         CompareOpc = 83;
10202         break;
10203       case Intrinsic::ppc_vsx_xvcmpgtsp_p:
10204         CompareOpc = 75;
10205         break;
10206       }
10207       isDot = true;
10208     } else
10209       return false;
10210     break;
10211 
10212   // Normal Comparisons.
10213   case Intrinsic::ppc_altivec_vcmpbfp:
10214     CompareOpc = 966;
10215     break;
10216   case Intrinsic::ppc_altivec_vcmpeqfp:
10217     CompareOpc = 198;
10218     break;
10219   case Intrinsic::ppc_altivec_vcmpequb:
10220     CompareOpc = 6;
10221     break;
10222   case Intrinsic::ppc_altivec_vcmpequh:
10223     CompareOpc = 70;
10224     break;
10225   case Intrinsic::ppc_altivec_vcmpequw:
10226     CompareOpc = 134;
10227     break;
10228   case Intrinsic::ppc_altivec_vcmpequd:
10229     if (Subtarget.hasP8Altivec())
10230       CompareOpc = 199;
10231     else
10232       return false;
10233     break;
10234   case Intrinsic::ppc_altivec_vcmpneb:
10235   case Intrinsic::ppc_altivec_vcmpneh:
10236   case Intrinsic::ppc_altivec_vcmpnew:
10237   case Intrinsic::ppc_altivec_vcmpnezb:
10238   case Intrinsic::ppc_altivec_vcmpnezh:
10239   case Intrinsic::ppc_altivec_vcmpnezw:
10240     if (Subtarget.hasP9Altivec())
10241       switch (IntrinsicID) {
10242       default:
10243         llvm_unreachable("Unknown comparison intrinsic.");
10244       case Intrinsic::ppc_altivec_vcmpneb:
10245         CompareOpc = 7;
10246         break;
10247       case Intrinsic::ppc_altivec_vcmpneh:
10248         CompareOpc = 71;
10249         break;
10250       case Intrinsic::ppc_altivec_vcmpnew:
10251         CompareOpc = 135;
10252         break;
10253       case Intrinsic::ppc_altivec_vcmpnezb:
10254         CompareOpc = 263;
10255         break;
10256       case Intrinsic::ppc_altivec_vcmpnezh:
10257         CompareOpc = 327;
10258         break;
10259       case Intrinsic::ppc_altivec_vcmpnezw:
10260         CompareOpc = 391;
10261         break;
10262       }
10263     else
10264       return false;
10265     break;
10266   case Intrinsic::ppc_altivec_vcmpgefp:
10267     CompareOpc = 454;
10268     break;
10269   case Intrinsic::ppc_altivec_vcmpgtfp:
10270     CompareOpc = 710;
10271     break;
10272   case Intrinsic::ppc_altivec_vcmpgtsb:
10273     CompareOpc = 774;
10274     break;
10275   case Intrinsic::ppc_altivec_vcmpgtsh:
10276     CompareOpc = 838;
10277     break;
10278   case Intrinsic::ppc_altivec_vcmpgtsw:
10279     CompareOpc = 902;
10280     break;
10281   case Intrinsic::ppc_altivec_vcmpgtsd:
10282     if (Subtarget.hasP8Altivec())
10283       CompareOpc = 967;
10284     else
10285       return false;
10286     break;
10287   case Intrinsic::ppc_altivec_vcmpgtub:
10288     CompareOpc = 518;
10289     break;
10290   case Intrinsic::ppc_altivec_vcmpgtuh:
10291     CompareOpc = 582;
10292     break;
10293   case Intrinsic::ppc_altivec_vcmpgtuw:
10294     CompareOpc = 646;
10295     break;
10296   case Intrinsic::ppc_altivec_vcmpgtud:
10297     if (Subtarget.hasP8Altivec())
10298       CompareOpc = 711;
10299     else
10300       return false;
10301     break;
10302   }
10303   return true;
10304 }
10305 
10306 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
10307 /// lower, do it, otherwise return null.
10308 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
10309                                                    SelectionDAG &DAG) const {
10310   unsigned IntrinsicID =
10311     cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
10312 
10313   SDLoc dl(Op);
10314 
10315   if (IntrinsicID == Intrinsic::thread_pointer) {
10316     // Reads the thread pointer register, used for __builtin_thread_pointer.
10317     if (Subtarget.isPPC64())
10318       return DAG.getRegister(PPC::X13, MVT::i64);
10319     return DAG.getRegister(PPC::R2, MVT::i32);
10320   }
10321 
10322   // If this is a lowered altivec predicate compare, CompareOpc is set to the
10323   // opcode number of the comparison.
10324   int CompareOpc;
10325   bool isDot;
10326   if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget))
10327     return SDValue();    // Don't custom lower most intrinsics.
10328 
10329   // If this is a non-dot comparison, make the VCMP node and we are done.
10330   if (!isDot) {
10331     SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
10332                               Op.getOperand(1), Op.getOperand(2),
10333                               DAG.getConstant(CompareOpc, dl, MVT::i32));
10334     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
10335   }
10336 
10337   // Create the PPCISD altivec 'dot' comparison node.
10338   SDValue Ops[] = {
10339     Op.getOperand(2),  // LHS
10340     Op.getOperand(3),  // RHS
10341     DAG.getConstant(CompareOpc, dl, MVT::i32)
10342   };
10343   EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
10344   SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);
10345 
10346   // Now that we have the comparison, emit a copy from the CR to a GPR.
10347   // This is flagged to the above dot comparison.
10348   SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
10349                                 DAG.getRegister(PPC::CR6, MVT::i32),
10350                                 CompNode.getValue(1));
10351 
10352   // Unpack the result based on how the target uses it.
10353   unsigned BitNo;   // Bit # of CR6.
10354   bool InvertBit;   // Invert result?
10355   switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
  default:  // Can't happen; don't crash on an invalid number, though.
10357   case 0:   // Return the value of the EQ bit of CR6.
10358     BitNo = 0; InvertBit = false;
10359     break;
10360   case 1:   // Return the inverted value of the EQ bit of CR6.
10361     BitNo = 0; InvertBit = true;
10362     break;
10363   case 2:   // Return the value of the LT bit of CR6.
10364     BitNo = 2; InvertBit = false;
10365     break;
10366   case 3:   // Return the inverted value of the LT bit of CR6.
10367     BitNo = 2; InvertBit = true;
10368     break;
10369   }
10370 
10371   // Shift the bit into the low position.
10372   Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
10373                       DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
10374   // Isolate the bit.
10375   Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
10376                       DAG.getConstant(1, dl, MVT::i32));
10377 
10378   // If we are supposed to, toggle the bit.
10379   if (InvertBit)
10380     Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
10381                         DAG.getConstant(1, dl, MVT::i32));
10382   return Flags;
10383 }
10384 
10385 SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
10386                                                SelectionDAG &DAG) const {
10387   // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to
10388   // the beginning of the argument list.
10389   int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
10390   SDLoc DL(Op);
10391   switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) {
10392   case Intrinsic::ppc_cfence: {
10393     assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
10394     assert(Subtarget.isPPC64() && "Only 64-bit is supported for now.");
10395     return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other,
10396                                       DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
10397                                                   Op.getOperand(ArgStart + 1)),
10398                                       Op.getOperand(0)),
10399                    0);
10400   }
10401   default:
10402     break;
10403   }
10404   return SDValue();
10405 }
10406 
10407 // Lower scalar BSWAP64 to xxbrd.
10408 SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const {
10409   SDLoc dl(Op);
10410   // MTVSRDD
10411   Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0),
10412                    Op.getOperand(0));
10413   // XXBRD
10414   Op = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Op);
10415   // MFVSRD
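  // Both vector elements now hold the same byte-swapped value; on little
  // endian, element 1 is the one sitting in the doubleword that MFVSRD
  // reads, so pick the index that can map onto a direct move.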
10416   int VectorIndex = 0;
10417   if (Subtarget.isLittleEndian())
10418     VectorIndex = 1;
10419   Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op,
10420                    DAG.getTargetConstant(VectorIndex, dl, MVT::i32));
10421   return Op;
10422 }
10423 
10424 // ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be
10425 // compared to a value that is atomically loaded (atomic loads zero-extend).
10426 SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op,
10427                                                 SelectionDAG &DAG) const {
10428   assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP &&
10429          "Expecting an atomic compare-and-swap here.");
10430   SDLoc dl(Op);
10431   auto *AtomicNode = cast<AtomicSDNode>(Op.getNode());
10432   EVT MemVT = AtomicNode->getMemoryVT();
10433   if (MemVT.getSizeInBits() >= 32)
10434     return Op;
10435 
10436   SDValue CmpOp = Op.getOperand(2);
10437   // If this is already correctly zero-extended, leave it alone.
10438   auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits());
10439   if (DAG.MaskedValueIsZero(CmpOp, HighBits))
10440     return Op;
10441 
10442   // Clear the high bits of the compare operand.
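  // MaskVal is 0xFF for i8 and 0xFFFF for i16.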
10443   unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1;
10444   SDValue NewCmpOp =
10445     DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp,
10446                 DAG.getConstant(MaskVal, dl, MVT::i32));
10447 
10448   // Replace the existing compare operand with the properly zero-extended one.
10449   SmallVector<SDValue, 4> Ops;
10450   for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
10451     Ops.push_back(AtomicNode->getOperand(i));
10452   Ops[2] = NewCmpOp;
10453   MachineMemOperand *MMO = AtomicNode->getMemOperand();
10454   SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other);
10455   auto NodeTy =
10456     (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16;
10457   return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO);
10458 }
10459 
10460 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
10461                                                  SelectionDAG &DAG) const {
10462   SDLoc dl(Op);
10463   // Create a stack slot that is 16-byte aligned.
10464   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
10465   int FrameIdx = MFI.CreateStackObject(16, Align(16), false);
10466   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10467   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
10468 
10469   // Store the input value into Value#0 of the stack slot.
10470   SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
10471                                MachinePointerInfo());
10472   // Load it out.
10473   return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
10474 }
10475 
10476 SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
10477                                                   SelectionDAG &DAG) const {
10478   assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
10479          "Should only be called for ISD::INSERT_VECTOR_ELT");
10480 
10481   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));
10482   // We have legal lowering for constant indices but not for variable ones.
10483   if (!C)
10484     return SDValue();
10485 
10486   EVT VT = Op.getValueType();
10487   SDLoc dl(Op);
10488   SDValue V1 = Op.getOperand(0);
10489   SDValue V2 = Op.getOperand(1);
10490   // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types.
10491   if (VT == MVT::v8i16 || VT == MVT::v16i8) {
10492     SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2);
10493     unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8;
10494     unsigned InsertAtElement = C->getZExtValue();
10495     unsigned InsertAtByte = InsertAtElement * BytesInEachElement;
10496     if (Subtarget.isLittleEndian()) {
10497       InsertAtByte = (16 - BytesInEachElement) - InsertAtByte;
10498     }
10499     return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz,
10500                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
10501   }
10502   return Op;
10503 }
10504 
10505 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
10506   SDLoc dl(Op);
10507   if (Op.getValueType() == MVT::v4i32) {
10508     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
10509 
10510     SDValue Zero = getCanonicalConstSplat(0, 1, MVT::v4i32, DAG, dl);
    // vrlw/vslw use only the low 5 bits of each shift element, so the
    // splatted -16 acts as a +16 shift amount.
10512     SDValue Neg16 = getCanonicalConstSplat(-16, 4, MVT::v4i32, DAG, dl);
10513     SDValue RHSSwap =   // = vrlw RHS, 16
10514       BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
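    // Writing each 32-bit lane as a = aH * 2^16 + aL and b = bH * 2^16 + bL,
    // the low 32 bits of a * b are aL*bL + ((aL*bH + aH*bL) << 16). vmulouh
    // below produces the aL*bL terms, and vmsumuhm on the half-swapped RHS
    // accumulates aL*bH + aH*bL before the final shift and add.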
10515 
10516     // Shrinkify inputs to v8i16.
10517     LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
10518     RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
10519     RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);
10520 
10521     // Low parts multiplied together, generating 32-bit results (we ignore the
10522     // top parts).
10523     SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
10524                                         LHS, RHS, DAG, dl, MVT::v4i32);
10525 
10526     SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
10527                                       LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
10528     // Shift the high parts up 16 bits.
10529     HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
10530                               Neg16, DAG, dl);
10531     return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
10532   } else if (Op.getValueType() == MVT::v16i8) {
10533     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
10534     bool isLittleEndian = Subtarget.isLittleEndian();
10535 
10536     // Multiply the even 8-bit parts, producing 16-bit sums.
10537     SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
10538                                            LHS, RHS, DAG, dl, MVT::v8i16);
10539     EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);
10540 
10541     // Multiply the odd 8-bit parts, producing 16-bit sums.
10542     SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
10543                                           LHS, RHS, DAG, dl, MVT::v8i16);
10544     OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);
10545 
10546     // Merge the results together.  Because vmuleub and vmuloub are
10547     // instructions with a big-endian bias, we must reverse the
10548     // element numbering and reverse the meaning of "odd" and "even"
10549     // when generating little endian code.
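    // On big endian the low byte of each 16-bit product is the odd-numbered
    // byte, so the mask selects bytes 1, 17, 3, 19, ... of the two partial
    // results.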
10550     int Ops[16];
10551     for (unsigned i = 0; i != 8; ++i) {
10552       if (isLittleEndian) {
10553         Ops[i*2  ] = 2*i;
10554         Ops[i*2+1] = 2*i+16;
10555       } else {
10556         Ops[i*2  ] = 2*i+1;
10557         Ops[i*2+1] = 2*i+1+16;
10558       }
10559     }
10560     if (isLittleEndian)
10561       return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
10562     else
10563       return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
10564   } else {
10565     llvm_unreachable("Unknown mul to lower!");
10566   }
10567 }
10568 
10569 SDValue PPCTargetLowering::LowerABS(SDValue Op, SelectionDAG &DAG) const {
10570 
10571   assert(Op.getOpcode() == ISD::ABS && "Should only be called for ISD::ABS");
10572 
10573   EVT VT = Op.getValueType();
10574   assert(VT.isVector() &&
10575          "Only set vector abs as custom, scalar abs shouldn't reach here!");
10576   assert((VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
10577           VT == MVT::v16i8) &&
10578          "Unexpected vector element type!");
10579   assert((VT != MVT::v2i64 || Subtarget.hasP8Altivec()) &&
10580          "Current subtarget doesn't support smax v2i64!");
10581 
10582   // For vector abs, it can be lowered to:
10583   // abs x
10584   // ==>
10585   // y = -x
10586   // smax(x, y)
10587 
10588   SDLoc dl(Op);
10589   SDValue X = Op.getOperand(0);
10590   SDValue Zero = DAG.getConstant(0, dl, VT);
10591   SDValue Y = DAG.getNode(ISD::SUB, dl, VT, Zero, X);
10592 
  // The SMAX patch (https://reviews.llvm.org/D47332) hasn't landed yet, so
  // use the intrinsics for now.
  // TODO: Use ISD::SMAX directly once the SMAX patch lands.
10596   Intrinsic::ID BifID = Intrinsic::ppc_altivec_vmaxsw;
10597   if (VT == MVT::v2i64)
10598     BifID = Intrinsic::ppc_altivec_vmaxsd;
10599   else if (VT == MVT::v8i16)
10600     BifID = Intrinsic::ppc_altivec_vmaxsh;
10601   else if (VT == MVT::v16i8)
10602     BifID = Intrinsic::ppc_altivec_vmaxsb;
10603 
10604   return BuildIntrinsicOp(BifID, X, Y, DAG, dl, VT);
10605 }
10606 
// Custom lowering for fpext v2f32 to v2f64
10608 SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
10609 
10610   assert(Op.getOpcode() == ISD::FP_EXTEND &&
10611          "Should only be called for ISD::FP_EXTEND");
10612 
10613   // FIXME: handle extends from half precision float vectors on P9.
10614   // We only want to custom lower an extend from v2f32 to v2f64.
10615   if (Op.getValueType() != MVT::v2f64 ||
10616       Op.getOperand(0).getValueType() != MVT::v2f32)
10617     return SDValue();
10618 
10619   SDLoc dl(Op);
10620   SDValue Op0 = Op.getOperand(0);
10621 
10622   switch (Op0.getOpcode()) {
10623   default:
10624     return SDValue();
10625   case ISD::EXTRACT_SUBVECTOR: {
10626     assert(Op0.getNumOperands() == 2 &&
10627            isa<ConstantSDNode>(Op0->getOperand(1)) &&
10628            "Node should have 2 operands with second one being a constant!");
10629 
10630     if (Op0.getOperand(0).getValueType() != MVT::v4f32)
10631       return SDValue();
10632 
10633     // Custom lower is only done for high or low doubleword.
10634     int Idx = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
10635     if (Idx % 2 != 0)
10636       return SDValue();
10637 
10638     // Since input is v4f32, at this point Idx is either 0 or 2.
10639     // Shift to get the doubleword position we want.
10640     int DWord = Idx >> 1;
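    // e.g. Idx == 2 selects doubleword 1 on big endian; little endian then
    // flips it to doubleword 0.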
10641 
10642     // High and low word positions are different on little endian.
10643     if (Subtarget.isLittleEndian())
10644       DWord ^= 0x1;
10645 
10646     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64,
10647                        Op0.getOperand(0), DAG.getConstant(DWord, dl, MVT::i32));
10648   }
10649   case ISD::FADD:
10650   case ISD::FMUL:
10651   case ISD::FSUB: {
10652     SDValue NewLoad[2];
10653     for (unsigned i = 0, ie = Op0.getNumOperands(); i != ie; ++i) {
      // Ensure both inputs are loads.
10655       SDValue LdOp = Op0.getOperand(i);
10656       if (LdOp.getOpcode() != ISD::LOAD)
10657         return SDValue();
10658       // Generate new load node.
10659       LoadSDNode *LD = cast<LoadSDNode>(LdOp);
10660       SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
10661       NewLoad[i] = DAG.getMemIntrinsicNode(
10662           PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
10663           LD->getMemoryVT(), LD->getMemOperand());
10664     }
10665     SDValue NewOp =
10666         DAG.getNode(Op0.getOpcode(), SDLoc(Op0), MVT::v4f32, NewLoad[0],
10667                     NewLoad[1], Op0.getNode()->getFlags());
10668     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewOp,
10669                        DAG.getConstant(0, dl, MVT::i32));
10670   }
10671   case ISD::LOAD: {
10672     LoadSDNode *LD = cast<LoadSDNode>(Op0);
10673     SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
10674     SDValue NewLd = DAG.getMemIntrinsicNode(
10675         PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
10676         LD->getMemoryVT(), LD->getMemOperand());
10677     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewLd,
10678                        DAG.getConstant(0, dl, MVT::i32));
10679   }
10680   }
10681   llvm_unreachable("ERROR:Should return for all cases within swtich.");
10682 }
10683 
10684 /// LowerOperation - Provide custom lowering hooks for some operations.
10685 ///
10686 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
10687   switch (Op.getOpcode()) {
10688   default: llvm_unreachable("Wasn't expecting to be able to lower this!");
10689   case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
10690   case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
10691   case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
10692   case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
10693   case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
10694   case ISD::SETCC:              return LowerSETCC(Op, DAG);
10695   case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
10696   case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
10697 
10698   // Variable argument lowering.
10699   case ISD::VASTART:            return LowerVASTART(Op, DAG);
10700   case ISD::VAARG:              return LowerVAARG(Op, DAG);
10701   case ISD::VACOPY:             return LowerVACOPY(Op, DAG);
10702 
10703   case ISD::STACKRESTORE:       return LowerSTACKRESTORE(Op, DAG);
10704   case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
10705   case ISD::GET_DYNAMIC_AREA_OFFSET:
10706     return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
10707 
10708   // Exception handling lowering.
10709   case ISD::EH_DWARF_CFA:       return LowerEH_DWARF_CFA(Op, DAG);
10710   case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
10711   case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);
10712 
10713   case ISD::LOAD:               return LowerLOAD(Op, DAG);
10714   case ISD::STORE:              return LowerSTORE(Op, DAG);
10715   case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
10716   case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
10717   case ISD::STRICT_FP_TO_UINT:
10718   case ISD::STRICT_FP_TO_SINT:
10719   case ISD::FP_TO_UINT:
10720   case ISD::FP_TO_SINT:         return LowerFP_TO_INT(Op, DAG, SDLoc(Op));
10721   case ISD::STRICT_UINT_TO_FP:
10722   case ISD::STRICT_SINT_TO_FP:
10723   case ISD::UINT_TO_FP:
10724   case ISD::SINT_TO_FP:         return LowerINT_TO_FP(Op, DAG);
10725   case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);
10726 
10727   // Lower 64-bit shifts.
10728   case ISD::SHL_PARTS:          return LowerSHL_PARTS(Op, DAG);
10729   case ISD::SRL_PARTS:          return LowerSRL_PARTS(Op, DAG);
10730   case ISD::SRA_PARTS:          return LowerSRA_PARTS(Op, DAG);
10731 
10732   case ISD::FSHL:               return LowerFunnelShift(Op, DAG);
10733   case ISD::FSHR:               return LowerFunnelShift(Op, DAG);
10734 
10735   // Vector-related lowering.
10736   case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
10737   case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
10738   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
10739   case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
10740   case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
10741   case ISD::MUL:                return LowerMUL(Op, DAG);
10742   case ISD::ABS:                return LowerABS(Op, DAG);
10743   case ISD::FP_EXTEND:          return LowerFP_EXTEND(Op, DAG);
10744   case ISD::ROTL:               return LowerROTL(Op, DAG);
10745 
10746   // For counter-based loop handling.
10747   case ISD::INTRINSIC_W_CHAIN:  return SDValue();
10748 
10749   case ISD::BITCAST:            return LowerBITCAST(Op, DAG);
10750 
10751   // Frame & Return address.
10752   case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
10753   case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
10754 
10755   case ISD::INTRINSIC_VOID:
10756     return LowerINTRINSIC_VOID(Op, DAG);
10757   case ISD::BSWAP:
10758     return LowerBSWAP(Op, DAG);
10759   case ISD::ATOMIC_CMP_SWAP:
10760     return LowerATOMIC_CMP_SWAP(Op, DAG);
10761   }
10762 }
10763 
10764 void PPCTargetLowering::LowerOperationWrapper(SDNode *N,
10765                                               SmallVectorImpl<SDValue> &Results,
10766                                               SelectionDAG &DAG) const {
10767   SDValue Res = LowerOperation(SDValue(N, 0), DAG);
10768 
10769   if (!Res.getNode())
10770     return;
10771 
10772   // Take the return value as-is if original node has only one result.
10773   if (N->getNumValues() == 1) {
10774     Results.push_back(Res);
10775     return;
10776   }
10777 
10778   // New node should have the same number of results.
10779   assert((N->getNumValues() == Res->getNumValues()) &&
10780       "Lowering returned the wrong number of results!");
10781 
10782   for (unsigned i = 0; i < N->getNumValues(); ++i)
10783     Results.push_back(Res.getValue(i));
10784 }
10785 
10786 void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
10787                                            SmallVectorImpl<SDValue>&Results,
10788                                            SelectionDAG &DAG) const {
10789   SDLoc dl(N);
10790   switch (N->getOpcode()) {
10791   default:
10792     llvm_unreachable("Do not know how to custom type legalize this operation!");
10793   case ISD::READCYCLECOUNTER: {
10794     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
10795     SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0));
10796 
10797     Results.push_back(
10798         DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, RTB, RTB.getValue(1)));
10799     Results.push_back(RTB.getValue(2));
10800     break;
10801   }
10802   case ISD::INTRINSIC_W_CHAIN: {
10803     if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
10804         Intrinsic::loop_decrement)
10805       break;
10806 
10807     assert(N->getValueType(0) == MVT::i1 &&
10808            "Unexpected result type for CTR decrement intrinsic");
10809     EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
10810                                  N->getValueType(0));
10811     SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
10812     SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
10813                                  N->getOperand(1));
10814 
10815     Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt));
10816     Results.push_back(NewInt.getValue(1));
10817     break;
10818   }
10819   case ISD::VAARG: {
10820     if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
10821       return;
10822 
10823     EVT VT = N->getValueType(0);
10824 
10825     if (VT == MVT::i64) {
10826       SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);
10827 
10828       Results.push_back(NewNode);
10829       Results.push_back(NewNode.getValue(1));
10830     }
10831     return;
10832   }
10833   case ISD::STRICT_FP_TO_SINT:
10834   case ISD::STRICT_FP_TO_UINT:
10835   case ISD::FP_TO_SINT:
10836   case ISD::FP_TO_UINT:
10837     // LowerFP_TO_INT() can only handle f32 and f64.
10838     if (N->getOperand(N->isStrictFPOpcode() ? 1 : 0).getValueType() ==
10839         MVT::ppcf128)
10840       return;
10841     Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
10842     return;
10843   case ISD::TRUNCATE: {
10844     if (!N->getValueType(0).isVector())
10845       return;
10846     SDValue Lowered = LowerTRUNCATEVector(SDValue(N, 0), DAG);
10847     if (Lowered)
10848       Results.push_back(Lowered);
10849     return;
10850   }
10851   case ISD::BITCAST:
10852     // Don't handle bitcast here.
10853     return;
10854   case ISD::FP_EXTEND:
10855     SDValue Lowered = LowerFP_EXTEND(SDValue(N, 0), DAG);
10856     if (Lowered)
10857       Results.push_back(Lowered);
10858     return;
10859   }
10860 }
10861 
10862 //===----------------------------------------------------------------------===//
10863 //  Other Lowering Code
10864 //===----------------------------------------------------------------------===//
10865 
10866 static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
10867   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
10868   Function *Func = Intrinsic::getDeclaration(M, Id);
10869   return Builder.CreateCall(Func, {});
10870 }
10871 
// The mappings for emitLeadingFence/emitTrailingFence are taken from
// http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
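// In short: a full sync before sequentially consistent accesses, an lwsync
// before anything release or stronger, and (in emitTrailingFence below) an
// lwsync or a load-dependent cfence after acquire-or-stronger operations.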
10874 Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
10875                                                  Instruction *Inst,
10876                                                  AtomicOrdering Ord) const {
10877   if (Ord == AtomicOrdering::SequentiallyConsistent)
10878     return callIntrinsic(Builder, Intrinsic::ppc_sync);
10879   if (isReleaseOrStronger(Ord))
10880     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
10881   return nullptr;
10882 }
10883 
10884 Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
10885                                                   Instruction *Inst,
10886                                                   AtomicOrdering Ord) const {
10887   if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
10888     // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
10889     // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
10890     // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
10891     if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
10892       return Builder.CreateCall(
10893           Intrinsic::getDeclaration(
10894               Builder.GetInsertBlock()->getParent()->getParent(),
10895               Intrinsic::ppc_cfence, {Inst->getType()}),
10896           {Inst});
10897     // FIXME: Can use isync for rmw operation.
10898     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
10899   }
10900   return nullptr;
10901 }
10902 
10903 MachineBasicBlock *
10904 PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
10905                                     unsigned AtomicSize,
10906                                     unsigned BinOpcode,
10907                                     unsigned CmpOpcode,
10908                                     unsigned CmpPred) const {
10909   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
10910   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10911 
10912   auto LoadMnemonic = PPC::LDARX;
10913   auto StoreMnemonic = PPC::STDCX;
10914   switch (AtomicSize) {
10915   default:
10916     llvm_unreachable("Unexpected size of atomic entity");
10917   case 1:
10918     LoadMnemonic = PPC::LBARX;
10919     StoreMnemonic = PPC::STBCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "Partword atomics unsupported; call this only with size >= 4");
10921     break;
10922   case 2:
10923     LoadMnemonic = PPC::LHARX;
10924     StoreMnemonic = PPC::STHCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "Partword atomics unsupported; call this only with size >= 4");
10926     break;
10927   case 4:
10928     LoadMnemonic = PPC::LWARX;
10929     StoreMnemonic = PPC::STWCX;
10930     break;
10931   case 8:
10932     LoadMnemonic = PPC::LDARX;
10933     StoreMnemonic = PPC::STDCX;
10934     break;
10935   }
10936 
10937   const BasicBlock *LLVM_BB = BB->getBasicBlock();
10938   MachineFunction *F = BB->getParent();
10939   MachineFunction::iterator It = ++BB->getIterator();
10940 
10941   Register dest = MI.getOperand(0).getReg();
10942   Register ptrA = MI.getOperand(1).getReg();
10943   Register ptrB = MI.getOperand(2).getReg();
10944   Register incr = MI.getOperand(3).getReg();
10945   DebugLoc dl = MI.getDebugLoc();
10946 
10947   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
10948   MachineBasicBlock *loop2MBB =
10949     CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
10950   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
10951   F->insert(It, loopMBB);
10952   if (CmpOpcode)
10953     F->insert(It, loop2MBB);
10954   F->insert(It, exitMBB);
10955   exitMBB->splice(exitMBB->begin(), BB,
10956                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
10957   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
10958 
10959   MachineRegisterInfo &RegInfo = F->getRegInfo();
  Register TmpReg = (!BinOpcode) ? incr :
    RegInfo.createVirtualRegister(AtomicSize == 8 ? &PPC::G8RCRegClass
                                                  : &PPC::GPRCRegClass);
10963 
10964   //  thisMBB:
10965   //   ...
10966   //   fallthrough --> loopMBB
10967   BB->addSuccessor(loopMBB);
10968 
10969   //  loopMBB:
10970   //   l[wd]arx dest, ptr
10971   //   add r0, dest, incr
10972   //   st[wd]cx. r0, ptr
10973   //   bne- loopMBB
10974   //   fallthrough --> exitMBB
10975 
10976   // For max/min...
10977   //  loopMBB:
10978   //   l[wd]arx dest, ptr
10979   //   cmpl?[wd] incr, dest
10980   //   bgt exitMBB
10981   //  loop2MBB:
10982   //   st[wd]cx. dest, ptr
10983   //   bne- loopMBB
10984   //   fallthrough --> exitMBB
10985 
10986   BB = loopMBB;
10987   BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
10988     .addReg(ptrA).addReg(ptrB);
10989   if (BinOpcode)
10990     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
10991   if (CmpOpcode) {
10992     // Signed comparisons of byte or halfword values must be sign-extended.
10993     if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
10994       Register ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
10995       BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
10996               ExtReg).addReg(dest);
10997       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
10998         .addReg(incr).addReg(ExtReg);
10999     } else
11000       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
11001         .addReg(incr).addReg(dest);
11002 
11003     BuildMI(BB, dl, TII->get(PPC::BCC))
11004       .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
11005     BB->addSuccessor(loop2MBB);
11006     BB->addSuccessor(exitMBB);
11007     BB = loop2MBB;
11008   }
11009   BuildMI(BB, dl, TII->get(StoreMnemonic))
11010     .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
11011   BuildMI(BB, dl, TII->get(PPC::BCC))
11012     .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
11013   BB->addSuccessor(loopMBB);
11014   BB->addSuccessor(exitMBB);
11015 
11016   //  exitMBB:
11017   //   ...
11018   BB = exitMBB;
11019   return BB;
11020 }
11021 
11022 MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary(
11023     MachineInstr &MI, MachineBasicBlock *BB,
11024     bool is8bit, // operation
11025     unsigned BinOpcode, unsigned CmpOpcode, unsigned CmpPred) const {
  // If we support part-word atomic mnemonics, just use them.
11027   if (Subtarget.hasPartwordAtomics())
11028     return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, CmpOpcode,
11029                             CmpPred);
11030 
11031   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
11032   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  // In 64-bit mode we have to use 64 bits for addresses, even though the
  // lwarx/stwcx. accesses are 32 bits wide.  With the 32-bit atomics we can
  // use address registers without caring whether they're 32 or 64 bits, but
  // here we're doing actual arithmetic on the addresses.
11037   bool is64bit = Subtarget.isPPC64();
11038   bool isLittleEndian = Subtarget.isLittleEndian();
11039   unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
11040 
11041   const BasicBlock *LLVM_BB = BB->getBasicBlock();
11042   MachineFunction *F = BB->getParent();
11043   MachineFunction::iterator It = ++BB->getIterator();
11044 
11045   Register dest = MI.getOperand(0).getReg();
11046   Register ptrA = MI.getOperand(1).getReg();
11047   Register ptrB = MI.getOperand(2).getReg();
11048   Register incr = MI.getOperand(3).getReg();
11049   DebugLoc dl = MI.getDebugLoc();
11050 
11051   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
11052   MachineBasicBlock *loop2MBB =
11053       CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
11054   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11055   F->insert(It, loopMBB);
11056   if (CmpOpcode)
11057     F->insert(It, loop2MBB);
11058   F->insert(It, exitMBB);
11059   exitMBB->splice(exitMBB->begin(), BB,
11060                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
11061   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11062 
11063   MachineRegisterInfo &RegInfo = F->getRegInfo();
11064   const TargetRegisterClass *RC =
11065       is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11066   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
11067 
11068   Register PtrReg = RegInfo.createVirtualRegister(RC);
11069   Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
11070   Register ShiftReg =
11071       isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
11072   Register Incr2Reg = RegInfo.createVirtualRegister(GPRC);
11073   Register MaskReg = RegInfo.createVirtualRegister(GPRC);
11074   Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
11075   Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
11076   Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
11077   Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
11078   Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
11079   Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
11080   Register Ptr1Reg;
11081   Register TmpReg =
11082       (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC);
11083 
11084   //  thisMBB:
11085   //   ...
11086   //   fallthrough --> loopMBB
11087   BB->addSuccessor(loopMBB);
11088 
11089   // The 4-byte load must be aligned, while a char or short may be
11090   // anywhere in the word.  Hence all this nasty bookkeeping code.
11091   //   add ptr1, ptrA, ptrB [copy if ptrA==0]
11092   //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
11093   //   xori shift, shift1, 24 [16]
11094   //   rlwinm ptr, ptr1, 0, 0, 29
11095   //   slw incr2, incr, shift
11096   //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
11097   //   slw mask, mask2, shift
11098   //  loopMBB:
11099   //   lwarx tmpDest, ptr
11100   //   add tmp, tmpDest, incr2
11101   //   andc tmp2, tmpDest, mask
11102   //   and tmp3, tmp, mask
11103   //   or tmp4, tmp3, tmp2
11104   //   stwcx. tmp4, ptr
11105   //   bne- loopMBB
11106   //   fallthrough --> exitMBB
11107   //   srw dest, tmpDest, shift
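  // For example, a byte at offset 2 within its aligned word on little endian
  // gets shift1 = (2 & 3) * 8 = 16, so incr and mask are shifted left 16 bits
  // to line up with the byte's lane before the lwarx/stwcx. loop.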
11108   if (ptrA != ZeroReg) {
11109     Ptr1Reg = RegInfo.createVirtualRegister(RC);
11110     BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
11111         .addReg(ptrA)
11112         .addReg(ptrB);
11113   } else {
11114     Ptr1Reg = ptrB;
11115   }
  // We need to use the 32-bit subregister to avoid a register class mismatch
  // in 64-bit mode.
11118   BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
11119       .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
11120       .addImm(3)
11121       .addImm(27)
11122       .addImm(is8bit ? 28 : 27);
11123   if (!isLittleEndian)
11124     BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
11125         .addReg(Shift1Reg)
11126         .addImm(is8bit ? 24 : 16);
11127   if (is64bit)
11128     BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
11129         .addReg(Ptr1Reg)
11130         .addImm(0)
11131         .addImm(61);
11132   else
11133     BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
11134         .addReg(Ptr1Reg)
11135         .addImm(0)
11136         .addImm(0)
11137         .addImm(29);
11138   BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg).addReg(incr).addReg(ShiftReg);
11139   if (is8bit)
11140     BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
11141   else {
11142     BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
11143     BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
11144         .addReg(Mask3Reg)
11145         .addImm(65535);
11146   }
11147   BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
11148       .addReg(Mask2Reg)
11149       .addReg(ShiftReg);
11150 
11151   BB = loopMBB;
11152   BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
11153       .addReg(ZeroReg)
11154       .addReg(PtrReg);
11155   if (BinOpcode)
11156     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
11157         .addReg(Incr2Reg)
11158         .addReg(TmpDestReg);
11159   BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
11160       .addReg(TmpDestReg)
11161       .addReg(MaskReg);
11162   BuildMI(BB, dl, TII->get(PPC::AND), Tmp3Reg).addReg(TmpReg).addReg(MaskReg);
11163   if (CmpOpcode) {
11164     // For unsigned comparisons, we can directly compare the shifted values.
11165     // For signed comparisons we shift and sign extend.
11166     Register SReg = RegInfo.createVirtualRegister(GPRC);
11167     BuildMI(BB, dl, TII->get(PPC::AND), SReg)
11168         .addReg(TmpDestReg)
11169         .addReg(MaskReg);
11170     unsigned ValueReg = SReg;
11171     unsigned CmpReg = Incr2Reg;
11172     if (CmpOpcode == PPC::CMPW) {
11173       ValueReg = RegInfo.createVirtualRegister(GPRC);
11174       BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
11175           .addReg(SReg)
11176           .addReg(ShiftReg);
11177       Register ValueSReg = RegInfo.createVirtualRegister(GPRC);
11178       BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
11179           .addReg(ValueReg);
11180       ValueReg = ValueSReg;
11181       CmpReg = incr;
11182     }
11183     BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
11184         .addReg(CmpReg)
11185         .addReg(ValueReg);
11186     BuildMI(BB, dl, TII->get(PPC::BCC))
11187         .addImm(CmpPred)
11188         .addReg(PPC::CR0)
11189         .addMBB(exitMBB);
11190     BB->addSuccessor(loop2MBB);
11191     BB->addSuccessor(exitMBB);
11192     BB = loop2MBB;
11193   }
11194   BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg).addReg(Tmp3Reg).addReg(Tmp2Reg);
11195   BuildMI(BB, dl, TII->get(PPC::STWCX))
11196       .addReg(Tmp4Reg)
11197       .addReg(ZeroReg)
11198       .addReg(PtrReg);
11199   BuildMI(BB, dl, TII->get(PPC::BCC))
11200       .addImm(PPC::PRED_NE)
11201       .addReg(PPC::CR0)
11202       .addMBB(loopMBB);
11203   BB->addSuccessor(loopMBB);
11204   BB->addSuccessor(exitMBB);
11205 
11206   //  exitMBB:
11207   //   ...
11208   BB = exitMBB;
11209   BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
11210       .addReg(TmpDestReg)
11211       .addReg(ShiftReg);
11212   return BB;
11213 }
11214 
11215 llvm::MachineBasicBlock *
11216 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
11217                                     MachineBasicBlock *MBB) const {
11218   DebugLoc DL = MI.getDebugLoc();
11219   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11220   const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
11221 
11222   MachineFunction *MF = MBB->getParent();
11223   MachineRegisterInfo &MRI = MF->getRegInfo();
11224 
11225   const BasicBlock *BB = MBB->getBasicBlock();
11226   MachineFunction::iterator I = ++MBB->getIterator();
11227 
11228   Register DstReg = MI.getOperand(0).getReg();
11229   const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
11230   assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
11231   Register mainDstReg = MRI.createVirtualRegister(RC);
11232   Register restoreDstReg = MRI.createVirtualRegister(RC);
11233 
11234   MVT PVT = getPointerTy(MF->getDataLayout());
11235   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
11236          "Invalid Pointer Size!");
11237   // For v = setjmp(buf), we generate
11238   //
11239   // thisMBB:
11240   //  SjLjSetup mainMBB
11241   //  bl mainMBB
11242   //  v_restore = 1
11243   //  b sinkMBB
11244   //
11245   // mainMBB:
11246   //  buf[LabelOffset] = LR
11247   //  v_main = 0
11248   //
11249   // sinkMBB:
11250   //  v = phi(main, restore)
11251   //
11252 
11253   MachineBasicBlock *thisMBB = MBB;
11254   MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
11255   MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
11256   MF->insert(I, mainMBB);
11257   MF->insert(I, sinkMBB);
11258 
11259   MachineInstrBuilder MIB;
11260 
11261   // Transfer the remainder of BB and its successor edges to sinkMBB.
11262   sinkMBB->splice(sinkMBB->begin(), MBB,
11263                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
11264   sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
11265 
11266   // Note that the structure of the jmp_buf used here is not compatible
11267   // with that used by libc, and is not designed to be. Specifically, it
11268   // stores only those 'reserved' registers that LLVM does not otherwise
11269   // understand how to spill. Also, by convention, by the time this
11270   // intrinsic is called, Clang has already stored the frame address in the
11271   // first slot of the buffer and stack address in the third. Following the
11272   // X86 target code, we'll store the jump address in the second slot. We also
11273   // need to save the TOC pointer (R2) to handle jumps between shared
11274   // libraries, and that will be stored in the fourth slot. The thread
11275   // identifier (R13) is not affected.
11276 
11277   // thisMBB:
11278   const int64_t LabelOffset = 1 * PVT.getStoreSize();
11279   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
11280   const int64_t BPOffset    = 4 * PVT.getStoreSize();
11281 
  // Prepare the IP (jump address) in a register.
11283   const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
11284   Register LabelReg = MRI.createVirtualRegister(PtrRC);
11285   Register BufReg = MI.getOperand(1).getReg();
11286 
11287   if (Subtarget.is64BitELFABI()) {
11288     setUsesTOCBasePtr(*MBB->getParent());
11289     MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
11290               .addReg(PPC::X2)
11291               .addImm(TOCOffset)
11292               .addReg(BufReg)
11293               .cloneMemRefs(MI);
11294   }
11295 
  // Naked functions never have a base pointer, and so we use r1. For all
  // other functions, this decision must be deferred until prologue/epilogue
  // insertion (PEI).
11298   unsigned BaseReg;
11299   if (MF->getFunction().hasFnAttribute(Attribute::Naked))
11300     BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
11301   else
11302     BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;
11303 
11304   MIB = BuildMI(*thisMBB, MI, DL,
11305                 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
11306             .addReg(BaseReg)
11307             .addImm(BPOffset)
11308             .addReg(BufReg)
11309             .cloneMemRefs(MI);
11310 
11311   // Setup
11312   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
11313   MIB.addRegMask(TRI->getNoPreservedMask());
11314 
11315   BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);
11316 
11317   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
11318           .addMBB(mainMBB);
11319   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);
11320 
11321   thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
11322   thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());
11323 
11324   // mainMBB:
11325   //  mainDstReg = 0
11326   MIB =
11327       BuildMI(mainMBB, DL,
11328               TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
11329 
11330   // Store IP
11331   if (Subtarget.isPPC64()) {
11332     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
11333             .addReg(LabelReg)
11334             .addImm(LabelOffset)
11335             .addReg(BufReg);
11336   } else {
11337     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
11338             .addReg(LabelReg)
11339             .addImm(LabelOffset)
11340             .addReg(BufReg);
11341   }
11342   MIB.cloneMemRefs(MI);
11343 
11344   BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
11345   mainMBB->addSuccessor(sinkMBB);
11346 
11347   // sinkMBB:
11348   BuildMI(*sinkMBB, sinkMBB->begin(), DL,
11349           TII->get(PPC::PHI), DstReg)
11350     .addReg(mainDstReg).addMBB(mainMBB)
11351     .addReg(restoreDstReg).addMBB(thisMBB);
11352 
11353   MI.eraseFromParent();
11354   return sinkMBB;
11355 }
11356 
11357 MachineBasicBlock *
11358 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
11359                                      MachineBasicBlock *MBB) const {
11360   DebugLoc DL = MI.getDebugLoc();
11361   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11362 
11363   MachineFunction *MF = MBB->getParent();
11364   MachineRegisterInfo &MRI = MF->getRegInfo();
11365 
11366   MVT PVT = getPointerTy(MF->getDataLayout());
11367   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
11368          "Invalid Pointer Size!");
11369 
11370   const TargetRegisterClass *RC =
11371     (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11372   Register Tmp = MRI.createVirtualRegister(RC);
  // Since FP is only written here and never read, it can be treated as a GPR.
11374   unsigned FP  = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
11375   unsigned SP  = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
11376   unsigned BP =
11377       (PVT == MVT::i64)
11378           ? PPC::X30
11379           : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
11380                                                               : PPC::R30);
11381 
11382   MachineInstrBuilder MIB;
11383 
11384   const int64_t LabelOffset = 1 * PVT.getStoreSize();
11385   const int64_t SPOffset    = 2 * PVT.getStoreSize();
11386   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
11387   const int64_t BPOffset    = 4 * PVT.getStoreSize();
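  // In summary, the buffer layout in pointer-sized slots is: 0 = frame
  // pointer, 1 = jump address (IP), 2 = stack pointer, 3 = TOC, 4 = base
  // pointer.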
11388 
11389   Register BufReg = MI.getOperand(0).getReg();
11390 
11391   // Reload FP (the jumped-to function may not have had a
11392   // frame pointer, and if so, then its r31 will be restored
11393   // as necessary).
11394   if (PVT == MVT::i64) {
11395     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
11396             .addImm(0)
11397             .addReg(BufReg);
11398   } else {
11399     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
11400             .addImm(0)
11401             .addReg(BufReg);
11402   }
11403   MIB.cloneMemRefs(MI);
11404 
11405   // Reload IP
11406   if (PVT == MVT::i64) {
11407     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
11408             .addImm(LabelOffset)
11409             .addReg(BufReg);
11410   } else {
11411     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
11412             .addImm(LabelOffset)
11413             .addReg(BufReg);
11414   }
11415   MIB.cloneMemRefs(MI);
11416 
11417   // Reload SP
11418   if (PVT == MVT::i64) {
11419     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
11420             .addImm(SPOffset)
11421             .addReg(BufReg);
11422   } else {
11423     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
11424             .addImm(SPOffset)
11425             .addReg(BufReg);
11426   }
11427   MIB.cloneMemRefs(MI);
11428 
11429   // Reload BP
11430   if (PVT == MVT::i64) {
11431     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
11432             .addImm(BPOffset)
11433             .addReg(BufReg);
11434   } else {
11435     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
11436             .addImm(BPOffset)
11437             .addReg(BufReg);
11438   }
11439   MIB.cloneMemRefs(MI);
11440 
11441   // Reload TOC
11442   if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
11443     setUsesTOCBasePtr(*MBB->getParent());
11444     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
11445               .addImm(TOCOffset)
11446               .addReg(BufReg)
11447               .cloneMemRefs(MI);
11448   }
11449 
11450   // Jump
11451   BuildMI(*MBB, MI, DL,
11452           TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
11453   BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));
11454 
11455   MI.eraseFromParent();
11456   return MBB;
11457 }
11458 
11459 bool PPCTargetLowering::hasInlineStackProbe(MachineFunction &MF) const {
11460   // If the function specifically requests inline stack probes, emit them.
11461   if (MF.getFunction().hasFnAttribute("probe-stack"))
11462     return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
11463            "inline-asm";
11464   return false;
11465 }
11466 
11467 unsigned PPCTargetLowering::getStackProbeSize(MachineFunction &MF) const {
11468   const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
11469   unsigned StackAlign = TFI->getStackAlignment();
11470   assert(StackAlign >= 1 && isPowerOf2_32(StackAlign) &&
11471          "Unexpected stack alignment");
11472   // The default stack probe size is 4096 if the function has no
11473   // stack-probe-size attribute.
11474   unsigned StackProbeSize = 4096;
11475   const Function &Fn = MF.getFunction();
11476   if (Fn.hasFnAttribute("stack-probe-size"))
11477     Fn.getFnAttribute("stack-probe-size")
11478         .getValueAsString()
11479         .getAsInteger(0, StackProbeSize);
11480   // Round down to the stack alignment.
11481   StackProbeSize &= ~(StackAlign - 1);
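  // For example, with a 16-byte stack alignment, a "stack-probe-size" of 1000
  // rounds down to 992; a value smaller than the alignment yields StackAlign
  // itself.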
11482   return StackProbeSize ? StackProbeSize : StackAlign;
11483 }
11484 
// Lower dynamic stack allocation with probing. `emitProbedAlloca` is split
// into three phases. In the first phase, it uses the pseudo instruction
// PREPARE_PROBED_ALLOCA to get the future result of the actual FramePointer
// and FinalStackPtr. In the second phase, it generates a loop for probing
// blocks. At last, it uses the pseudo instruction DYNAREAOFFSET to get the
// future result of MaxCallFrameSize so that it can calculate the correct
// data area pointer.
11491 MachineBasicBlock *
11492 PPCTargetLowering::emitProbedAlloca(MachineInstr &MI,
11493                                     MachineBasicBlock *MBB) const {
11494   const bool isPPC64 = Subtarget.isPPC64();
11495   MachineFunction *MF = MBB->getParent();
11496   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11497   DebugLoc DL = MI.getDebugLoc();
11498   const unsigned ProbeSize = getStackProbeSize(*MF);
11499   const BasicBlock *ProbedBB = MBB->getBasicBlock();
11500   MachineRegisterInfo &MRI = MF->getRegInfo();
  // The CFG for stack probing looks like this:
11502   //         +-----+
11503   //         | MBB |
11504   //         +--+--+
11505   //            |
11506   //       +----v----+
11507   //  +--->+ TestMBB +---+
11508   //  |    +----+----+   |
11509   //  |         |        |
11510   //  |   +-----v----+   |
11511   //  +---+ BlockMBB |   |
11512   //      +----------+   |
11513   //                     |
11514   //       +---------+   |
11515   //       | TailMBB +<--+
11516   //       +---------+
  // In MBB, calculate the previous frame pointer and the final stack pointer.
  // In TestMBB, test whether SP equals the final stack pointer; if so, jump
  // to TailMBB. In BlockMBB, update SP atomically and jump back to TestMBB.
  // TailMBB is spliced via \p MI.
11521   MachineBasicBlock *TestMBB = MF->CreateMachineBasicBlock(ProbedBB);
11522   MachineBasicBlock *TailMBB = MF->CreateMachineBasicBlock(ProbedBB);
11523   MachineBasicBlock *BlockMBB = MF->CreateMachineBasicBlock(ProbedBB);
11524 
11525   MachineFunction::iterator MBBIter = ++MBB->getIterator();
11526   MF->insert(MBBIter, TestMBB);
11527   MF->insert(MBBIter, BlockMBB);
11528   MF->insert(MBBIter, TailMBB);
11529 
11530   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
11531   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
11532 
11533   Register DstReg = MI.getOperand(0).getReg();
11534   Register NegSizeReg = MI.getOperand(1).getReg();
11535   Register SPReg = isPPC64 ? PPC::X1 : PPC::R1;
11536   Register FinalStackPtr = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11537   Register FramePointer = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11538   Register ActualNegSizeReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11539 
  // Since the value of NegSizeReg might be realigned during prologue/epilogue
  // insertion, insert a PREPARE_PROBED_ALLOCA pseudo instruction to get the
  // actual FramePointer and NegSize.
11543   unsigned ProbeOpc;
11544   if (!MRI.hasOneNonDBGUse(NegSizeReg))
11545     ProbeOpc =
11546         isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_64 : PPC::PREPARE_PROBED_ALLOCA_32;
11547   else
    // By using PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG, ActualNegSizeReg and
    // NegSizeReg will be allocated to the same physical register, avoiding a
    // redundant copy when the only use of NegSizeReg is the current MI, which
    // is about to be replaced by PREPARE_PROBED_ALLOCA.
11552     ProbeOpc = isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_64
11553                        : PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_32;
11554   BuildMI(*MBB, {MI}, DL, TII->get(ProbeOpc), FramePointer)
11555       .addDef(ActualNegSizeReg)
11556       .addReg(NegSizeReg)
11557       .add(MI.getOperand(2))
11558       .add(MI.getOperand(3));
11559 
  // Calculate the final stack pointer, which equals SP + ActualNegSize.
11561   BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4),
11562           FinalStackPtr)
11563       .addReg(SPReg)
11564       .addReg(ActualNegSizeReg);
11565 
11566   // Materialize a scratch register for update.
11567   int64_t NegProbeSize = -(int64_t)ProbeSize;
11568   assert(isInt<32>(NegProbeSize) && "Unhandled probe size!");
11569   Register ScratchReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
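  // A probe size that fits in 16 bits can be materialized with a single li;
  // otherwise use the usual lis/ori pair (lis sets the sign-extended high
  // halfword and ori fills in the low 16 bits).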
11570   if (!isInt<16>(NegProbeSize)) {
11571     Register TempReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11572     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LIS8 : PPC::LIS), TempReg)
11573         .addImm(NegProbeSize >> 16);
11574     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ORI8 : PPC::ORI),
11575             ScratchReg)
11576         .addReg(TempReg)
11577         .addImm(NegProbeSize & 0xFFFF);
11578   } else
11579     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LI8 : PPC::LI), ScratchReg)
11580         .addImm(NegProbeSize);
11581 
11582   {
    // Probe the leading residual part.
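    // NegMod = ActualNegSize - (ActualNegSize / NegProbeSize) * NegProbeSize,
    // i.e. the (non-positive) remainder. The stdux/stwux below both probes
    // the touched page and advances SP by that remainder, leaving the rest of
    // the allocation an exact multiple of ProbeSize.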
11584     Register Div = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11585     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::DIVD : PPC::DIVW), Div)
11586         .addReg(ActualNegSizeReg)
11587         .addReg(ScratchReg);
11588     Register Mul = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11589     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::MULLD : PPC::MULLW), Mul)
11590         .addReg(Div)
11591         .addReg(ScratchReg);
11592     Register NegMod = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11593     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::SUBF8 : PPC::SUBF), NegMod)
11594         .addReg(Mul)
11595         .addReg(ActualNegSizeReg);
11596     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
11597         .addReg(FramePointer)
11598         .addReg(SPReg)
11599         .addReg(NegMod);
11600   }
11601 
11602   {
    // The remaining part should be a multiple of ProbeSize.
11604     Register CmpResult = MRI.createVirtualRegister(&PPC::CRRCRegClass);
11605     BuildMI(TestMBB, DL, TII->get(isPPC64 ? PPC::CMPD : PPC::CMPW), CmpResult)
11606         .addReg(SPReg)
11607         .addReg(FinalStackPtr);
11608     BuildMI(TestMBB, DL, TII->get(PPC::BCC))
11609         .addImm(PPC::PRED_EQ)
11610         .addReg(CmpResult)
11611         .addMBB(TailMBB);
11612     TestMBB->addSuccessor(BlockMBB);
11613     TestMBB->addSuccessor(TailMBB);
11614   }
11615 
11616   {
11617     // Touch the block.
11618     // |P...|P...|P...
11619     BuildMI(BlockMBB, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
11620         .addReg(FramePointer)
11621         .addReg(SPReg)
11622         .addReg(ScratchReg);
11623     BuildMI(BlockMBB, DL, TII->get(PPC::B)).addMBB(TestMBB);
11624     BlockMBB->addSuccessor(TestMBB);
11625   }
11626 
  // The calculation of MaxCallFrameSize is deferred to prologue/epilogue
  // insertion; use the DYNAREAOFFSET pseudo instruction to get the future
  // result.
11629   Register MaxCallFrameSizeReg =
11630       MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11631   BuildMI(TailMBB, DL,
11632           TII->get(isPPC64 ? PPC::DYNAREAOFFSET8 : PPC::DYNAREAOFFSET),
11633           MaxCallFrameSizeReg)
11634       .add(MI.getOperand(2))
11635       .add(MI.getOperand(3));
11636   BuildMI(TailMBB, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4), DstReg)
11637       .addReg(SPReg)
11638       .addReg(MaxCallFrameSizeReg);
11639 
11640   // Splice instructions after MI to TailMBB.
11641   TailMBB->splice(TailMBB->end(), MBB,
11642                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
11643   TailMBB->transferSuccessorsAndUpdatePHIs(MBB);
11644   MBB->addSuccessor(TestMBB);
11645 
11646   // Delete the pseudo instruction.
11647   MI.eraseFromParent();
11648 
11649   ++NumDynamicAllocaProbed;
11650   return TailMBB;
11651 }
11652 
11653 MachineBasicBlock *
11654 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
11655                                                MachineBasicBlock *BB) const {
11656   if (MI.getOpcode() == TargetOpcode::STACKMAP ||
11657       MI.getOpcode() == TargetOpcode::PATCHPOINT) {
11658     if (Subtarget.is64BitELFABI() &&
11659         MI.getOpcode() == TargetOpcode::PATCHPOINT &&
11660         !Subtarget.isUsingPCRelativeCalls()) {
11661       // Call lowering should have added an r2 operand to indicate a dependence
      // on the TOC base pointer value. It can't, however, because there is no
11663       // way to mark the dependence as implicit there, and so the stackmap code
11664       // will confuse it with a regular operand. Instead, add the dependence
11665       // here.
11666       MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
11667     }
11668 
11669     return emitPatchPoint(MI, BB);
11670   }
11671 
11672   if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
11673       MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
11674     return emitEHSjLjSetJmp(MI, BB);
11675   } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
11676              MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
11677     return emitEHSjLjLongJmp(MI, BB);
11678   }
11679 
11680   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11681 
11682   // To "insert" these instructions we actually have to insert their
11683   // control-flow patterns.
11684   const BasicBlock *LLVM_BB = BB->getBasicBlock();
11685   MachineFunction::iterator It = ++BB->getIterator();
11686 
11687   MachineFunction *F = BB->getParent();
11688 
11689   if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
11690       MI.getOpcode() == PPC::SELECT_CC_I8 || MI.getOpcode() == PPC::SELECT_I4 ||
11691       MI.getOpcode() == PPC::SELECT_I8) {
11692     SmallVector<MachineOperand, 2> Cond;
11693     if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
11694         MI.getOpcode() == PPC::SELECT_CC_I8)
11695       Cond.push_back(MI.getOperand(4));
11696     else
11697       Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
11698     Cond.push_back(MI.getOperand(1));
11699 
11700     DebugLoc dl = MI.getDebugLoc();
11701     TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
11702                       MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
11703   } else if (MI.getOpcode() == PPC::SELECT_CC_F4 ||
11704              MI.getOpcode() == PPC::SELECT_CC_F8 ||
11705              MI.getOpcode() == PPC::SELECT_CC_F16 ||
11706              MI.getOpcode() == PPC::SELECT_CC_VRRC ||
11707              MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
11708              MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
11709              MI.getOpcode() == PPC::SELECT_CC_VSRC ||
11710              MI.getOpcode() == PPC::SELECT_CC_SPE4 ||
11711              MI.getOpcode() == PPC::SELECT_CC_SPE ||
11712              MI.getOpcode() == PPC::SELECT_F4 ||
11713              MI.getOpcode() == PPC::SELECT_F8 ||
11714              MI.getOpcode() == PPC::SELECT_F16 ||
11715              MI.getOpcode() == PPC::SELECT_SPE ||
11716              MI.getOpcode() == PPC::SELECT_SPE4 ||
11717              MI.getOpcode() == PPC::SELECT_VRRC ||
11718              MI.getOpcode() == PPC::SELECT_VSFRC ||
11719              MI.getOpcode() == PPC::SELECT_VSSRC ||
11720              MI.getOpcode() == PPC::SELECT_VSRC) {
11721     // The incoming instruction knows the destination vreg to set, the
11722     // condition code register to branch on, the true/false values to
11723     // select between, and a branch opcode to use.
11724 
11725     //  thisMBB:
11726     //  ...
11727     //   TrueVal = ...
11728     //   cmpTY ccX, r1, r2
11729     //   bCC copy1MBB
11730     //   fallthrough --> copy0MBB
11731     MachineBasicBlock *thisMBB = BB;
11732     MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
11733     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
11734     DebugLoc dl = MI.getDebugLoc();
11735     F->insert(It, copy0MBB);
11736     F->insert(It, sinkMBB);
11737 
11738     // Transfer the remainder of BB and its successor edges to sinkMBB.
11739     sinkMBB->splice(sinkMBB->begin(), BB,
11740                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11741     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
11742 
11743     // Next, add the true and fallthrough blocks as its successors.
11744     BB->addSuccessor(copy0MBB);
11745     BB->addSuccessor(sinkMBB);
11746 
11747     if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 ||
11748         MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 ||
11749         MI.getOpcode() == PPC::SELECT_F16 ||
11750         MI.getOpcode() == PPC::SELECT_SPE4 ||
11751         MI.getOpcode() == PPC::SELECT_SPE ||
11752         MI.getOpcode() == PPC::SELECT_VRRC ||
11753         MI.getOpcode() == PPC::SELECT_VSFRC ||
11754         MI.getOpcode() == PPC::SELECT_VSSRC ||
11755         MI.getOpcode() == PPC::SELECT_VSRC) {
11756       BuildMI(BB, dl, TII->get(PPC::BC))
11757           .addReg(MI.getOperand(1).getReg())
11758           .addMBB(sinkMBB);
11759     } else {
11760       unsigned SelectPred = MI.getOperand(4).getImm();
11761       BuildMI(BB, dl, TII->get(PPC::BCC))
11762           .addImm(SelectPred)
11763           .addReg(MI.getOperand(1).getReg())
11764           .addMBB(sinkMBB);
11765     }
11766 
11767     //  copy0MBB:
11768     //   %FalseValue = ...
11769     //   # fallthrough to sinkMBB
11770     BB = copy0MBB;
11771 
11772     // Update machine-CFG edges
11773     BB->addSuccessor(sinkMBB);
11774 
11775     //  sinkMBB:
11776     //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
11777     //  ...
11778     BB = sinkMBB;
11779     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg())
11780         .addReg(MI.getOperand(3).getReg())
11781         .addMBB(copy0MBB)
11782         .addReg(MI.getOperand(2).getReg())
11783         .addMBB(thisMBB);
11784   } else if (MI.getOpcode() == PPC::ReadTB) {
11785     // To read the 64-bit time-base register on a 32-bit target, we read the
11786     // two halves. Should the counter have wrapped while it was being read, we
11787     // need to try again.
11788     // ...
11789     // readLoop:
11790     // mfspr Rx,TBU # load from TBU
11791     // mfspr Ry,TB  # load from TB
11792     // mfspr Rz,TBU # load from TBU
11793     // cmpw crX,Rx,Rz # check if 'old'='new'
11794     // bne readLoop   # branch if they're not equal
11795     // ...
11796 
11797     MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
11798     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
11799     DebugLoc dl = MI.getDebugLoc();
11800     F->insert(It, readMBB);
11801     F->insert(It, sinkMBB);
11802 
11803     // Transfer the remainder of BB and its successor edges to sinkMBB.
11804     sinkMBB->splice(sinkMBB->begin(), BB,
11805                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11806     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
11807 
11808     BB->addSuccessor(readMBB);
11809     BB = readMBB;
11810 
11811     MachineRegisterInfo &RegInfo = F->getRegInfo();
11812     Register ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
11813     Register LoReg = MI.getOperand(0).getReg();
11814     Register HiReg = MI.getOperand(1).getReg();
11815 
11816     BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
11817     BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
11818     BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269);
11819 
11820     Register CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
11821 
11822     BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
11823         .addReg(HiReg)
11824         .addReg(ReadAgainReg);
11825     BuildMI(BB, dl, TII->get(PPC::BCC))
11826         .addImm(PPC::PRED_NE)
11827         .addReg(CmpReg)
11828         .addMBB(readMBB);
11829 
11830     BB->addSuccessor(readMBB);
11831     BB->addSuccessor(sinkMBB);
11832   } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
11833     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
11834   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
11835     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
11836   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
11837     BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4);
11838   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
11839     BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8);
11840 
11841   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
11842     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
11843   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
11844     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
11845   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
11846     BB = EmitAtomicBinary(MI, BB, 4, PPC::AND);
11847   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
11848     BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8);
11849 
11850   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
11851     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
11852   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
11853     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
11854   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
11855     BB = EmitAtomicBinary(MI, BB, 4, PPC::OR);
11856   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
11857     BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8);
11858 
11859   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
11860     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
11861   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
11862     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
11863   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
11864     BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR);
11865   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
11866     BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8);
11867 
11868   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
11869     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
11870   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
11871     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
11872   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
11873     BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND);
11874   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
11875     BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8);
11876 
11877   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
11878     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
11879   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
11880     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
11881   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
11882     BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
11883   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
11884     BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);
11885 
11886   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
11887     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE);
11888   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
11889     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE);
11890   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
11891     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE);
11892   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
11893     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE);
11894 
11895   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
11896     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE);
11897   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
11898     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE);
11899   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
11900     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE);
11901   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
11902     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE);
11903 
11904   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
11905     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE);
11906   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
11907     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE);
11908   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
11909     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE);
11910   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
11911     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE);
11912 
11913   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
11914     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE);
11915   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
11916     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE);
11917   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
11918     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE);
11919   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
11920     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE);
11921 
11922   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
11923     BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
11924   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
11925     BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
11926   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
11927     BB = EmitAtomicBinary(MI, BB, 4, 0);
11928   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
11929     BB = EmitAtomicBinary(MI, BB, 8, 0);
11930   else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
11931            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
11932            (Subtarget.hasPartwordAtomics() &&
11933             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
11934            (Subtarget.hasPartwordAtomics() &&
11935             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
11936     bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
11937 
11938     auto LoadMnemonic = PPC::LDARX;
11939     auto StoreMnemonic = PPC::STDCX;
11940     switch (MI.getOpcode()) {
11941     default:
11942       llvm_unreachable("Compare and swap of unknown size");
11943     case PPC::ATOMIC_CMP_SWAP_I8:
11944       LoadMnemonic = PPC::LBARX;
11945       StoreMnemonic = PPC::STBCX;
      assert(Subtarget.hasPartwordAtomics() &&
             "No support for partword atomics.");
11947       break;
11948     case PPC::ATOMIC_CMP_SWAP_I16:
11949       LoadMnemonic = PPC::LHARX;
11950       StoreMnemonic = PPC::STHCX;
      assert(Subtarget.hasPartwordAtomics() &&
             "No support for partword atomics.");
11952       break;
11953     case PPC::ATOMIC_CMP_SWAP_I32:
11954       LoadMnemonic = PPC::LWARX;
11955       StoreMnemonic = PPC::STWCX;
11956       break;
11957     case PPC::ATOMIC_CMP_SWAP_I64:
11958       LoadMnemonic = PPC::LDARX;
11959       StoreMnemonic = PPC::STDCX;
11960       break;
11961     }
11962     Register dest = MI.getOperand(0).getReg();
11963     Register ptrA = MI.getOperand(1).getReg();
11964     Register ptrB = MI.getOperand(2).getReg();
11965     Register oldval = MI.getOperand(3).getReg();
11966     Register newval = MI.getOperand(4).getReg();
11967     DebugLoc dl = MI.getDebugLoc();
11968 
11969     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
11970     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
11971     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
11972     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11973     F->insert(It, loop1MBB);
11974     F->insert(It, loop2MBB);
11975     F->insert(It, midMBB);
11976     F->insert(It, exitMBB);
11977     exitMBB->splice(exitMBB->begin(), BB,
11978                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11979     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11980 
11981     //  thisMBB:
11982     //   ...
    //   fallthrough --> loop1MBB
11984     BB->addSuccessor(loop1MBB);
11985 
11986     // loop1MBB:
11987     //   l[bhwd]arx dest, ptr
11988     //   cmp[wd] dest, oldval
11989     //   bne- midMBB
11990     // loop2MBB:
11991     //   st[bhwd]cx. newval, ptr
    //   bne- loop1MBB
11993     //   b exitBB
11994     // midMBB:
11995     //   st[bhwd]cx. dest, ptr
11996     // exitBB:
11997     BB = loop1MBB;
11998     BuildMI(BB, dl, TII->get(LoadMnemonic), dest).addReg(ptrA).addReg(ptrB);
11999     BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
12000         .addReg(oldval)
12001         .addReg(dest);
12002     BuildMI(BB, dl, TII->get(PPC::BCC))
12003         .addImm(PPC::PRED_NE)
12004         .addReg(PPC::CR0)
12005         .addMBB(midMBB);
12006     BB->addSuccessor(loop2MBB);
12007     BB->addSuccessor(midMBB);
12008 
12009     BB = loop2MBB;
12010     BuildMI(BB, dl, TII->get(StoreMnemonic))
12011         .addReg(newval)
12012         .addReg(ptrA)
12013         .addReg(ptrB);
12014     BuildMI(BB, dl, TII->get(PPC::BCC))
12015         .addImm(PPC::PRED_NE)
12016         .addReg(PPC::CR0)
12017         .addMBB(loop1MBB);
12018     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
12019     BB->addSuccessor(loop1MBB);
12020     BB->addSuccessor(exitMBB);
12021 
12022     BB = midMBB;
12023     BuildMI(BB, dl, TII->get(StoreMnemonic))
12024         .addReg(dest)
12025         .addReg(ptrA)
12026         .addReg(ptrB);
12027     BB->addSuccessor(exitMBB);
12028 
12029     //  exitMBB:
12030     //   ...
12031     BB = exitMBB;
12032   } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
12033              MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
12034     // We must use 64-bit registers for addresses when targeting 64-bit,
12035     // since we're actually doing arithmetic on them.  Other registers
12036     // can be 32-bit.
12037     bool is64bit = Subtarget.isPPC64();
12038     bool isLittleEndian = Subtarget.isLittleEndian();
12039     bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
12040 
12041     Register dest = MI.getOperand(0).getReg();
12042     Register ptrA = MI.getOperand(1).getReg();
12043     Register ptrB = MI.getOperand(2).getReg();
12044     Register oldval = MI.getOperand(3).getReg();
12045     Register newval = MI.getOperand(4).getReg();
12046     DebugLoc dl = MI.getDebugLoc();
12047 
12048     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
12049     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
12050     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
12051     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
12052     F->insert(It, loop1MBB);
12053     F->insert(It, loop2MBB);
12054     F->insert(It, midMBB);
12055     F->insert(It, exitMBB);
12056     exitMBB->splice(exitMBB->begin(), BB,
12057                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
12058     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
12059 
12060     MachineRegisterInfo &RegInfo = F->getRegInfo();
12061     const TargetRegisterClass *RC =
12062         is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
12063     const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
12064 
12065     Register PtrReg = RegInfo.createVirtualRegister(RC);
12066     Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
12067     Register ShiftReg =
12068         isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
12069     Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
12070     Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
12071     Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
12072     Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
12073     Register MaskReg = RegInfo.createVirtualRegister(GPRC);
12074     Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
12075     Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
12076     Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
12077     Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
12078     Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
12079     Register Ptr1Reg;
12080     Register TmpReg = RegInfo.createVirtualRegister(GPRC);
12081     Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
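    // In this backend's modeling, ZERO/ZERO8 stand for r0 in the RA slot of
    // indexed memory operations, where the architecture reads it as the
    // constant 0.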
12082     //  thisMBB:
12083     //   ...
    //   fallthrough --> loop1MBB
12085     BB->addSuccessor(loop1MBB);
12086 
12087     // The 4-byte load must be aligned, while a char or short may be
12088     // anywhere in the word.  Hence all this nasty bookkeeping code.
12089     //   add ptr1, ptrA, ptrB [copy if ptrA==0]
12090     //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
12091     //   xori shift, shift1, 24 [16]
12092     //   rlwinm ptr, ptr1, 0, 0, 29
12093     //   slw newval2, newval, shift
    //   slw oldval2, oldval, shift
12095     //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
12096     //   slw mask, mask2, shift
12097     //   and newval3, newval2, mask
12098     //   and oldval3, oldval2, mask
12099     // loop1MBB:
12100     //   lwarx tmpDest, ptr
12101     //   and tmp, tmpDest, mask
12102     //   cmpw tmp, oldval3
12103     //   bne- midMBB
12104     // loop2MBB:
12105     //   andc tmp2, tmpDest, mask
12106     //   or tmp4, tmp2, newval3
12107     //   stwcx. tmp4, ptr
12108     //   bne- loop1MBB
12109     //   b exitBB
12110     // midMBB:
12111     //   stwcx. tmpDest, ptr
12112     // exitBB:
12113     //   srw dest, tmpDest, shift
12114     if (ptrA != ZeroReg) {
12115       Ptr1Reg = RegInfo.createVirtualRegister(RC);
12116       BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
12117           .addReg(ptrA)
12118           .addReg(ptrB);
12119     } else {
12120       Ptr1Reg = ptrB;
12121     }
12122 
    // We need to use a 32-bit subregister here to avoid a register class
    // mismatch in 64-bit mode.
12125     BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
12126         .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
12127         .addImm(3)
12128         .addImm(27)
12129         .addImm(is8bit ? 28 : 27);
12130     if (!isLittleEndian)
12131       BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
12132           .addReg(Shift1Reg)
12133           .addImm(is8bit ? 24 : 16);
12134     if (is64bit)
12135       BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
12136           .addReg(Ptr1Reg)
12137           .addImm(0)
12138           .addImm(61);
12139     else
12140       BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
12141           .addReg(Ptr1Reg)
12142           .addImm(0)
12143           .addImm(0)
12144           .addImm(29);
12145     BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
12146         .addReg(newval)
12147         .addReg(ShiftReg);
12148     BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
12149         .addReg(oldval)
12150         .addReg(ShiftReg);
12151     if (is8bit)
12152       BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
12153     else {
12154       BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
12155       BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
12156           .addReg(Mask3Reg)
12157           .addImm(65535);
12158     }
12159     BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
12160         .addReg(Mask2Reg)
12161         .addReg(ShiftReg);
12162     BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
12163         .addReg(NewVal2Reg)
12164         .addReg(MaskReg);
12165     BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
12166         .addReg(OldVal2Reg)
12167         .addReg(MaskReg);
12168 
12169     BB = loop1MBB;
12170     BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
12171         .addReg(ZeroReg)
12172         .addReg(PtrReg);
12173     BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
12174         .addReg(TmpDestReg)
12175         .addReg(MaskReg);
12176     BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
12177         .addReg(TmpReg)
12178         .addReg(OldVal3Reg);
12179     BuildMI(BB, dl, TII->get(PPC::BCC))
12180         .addImm(PPC::PRED_NE)
12181         .addReg(PPC::CR0)
12182         .addMBB(midMBB);
12183     BB->addSuccessor(loop2MBB);
12184     BB->addSuccessor(midMBB);
12185 
12186     BB = loop2MBB;
12187     BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
12188         .addReg(TmpDestReg)
12189         .addReg(MaskReg);
12190     BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
12191         .addReg(Tmp2Reg)
12192         .addReg(NewVal3Reg);
12193     BuildMI(BB, dl, TII->get(PPC::STWCX))
12194         .addReg(Tmp4Reg)
12195         .addReg(ZeroReg)
12196         .addReg(PtrReg);
12197     BuildMI(BB, dl, TII->get(PPC::BCC))
12198         .addImm(PPC::PRED_NE)
12199         .addReg(PPC::CR0)
12200         .addMBB(loop1MBB);
12201     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
12202     BB->addSuccessor(loop1MBB);
12203     BB->addSuccessor(exitMBB);
12204 
12205     BB = midMBB;
12206     BuildMI(BB, dl, TII->get(PPC::STWCX))
12207         .addReg(TmpDestReg)
12208         .addReg(ZeroReg)
12209         .addReg(PtrReg);
12210     BB->addSuccessor(exitMBB);
12211 
12212     //  exitMBB:
12213     //   ...
12214     BB = exitMBB;
12215     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
12216         .addReg(TmpReg)
12217         .addReg(ShiftReg);
12218   } else if (MI.getOpcode() == PPC::FADDrtz) {
12219     // This pseudo performs an FADD with rounding mode temporarily forced
12220     // to round-to-zero.  We emit this via custom inserter since the FPSCR
12221     // is not modeled at the SelectionDAG level.
12222     Register Dest = MI.getOperand(0).getReg();
12223     Register Src1 = MI.getOperand(1).getReg();
12224     Register Src2 = MI.getOperand(2).getReg();
12225     DebugLoc dl = MI.getDebugLoc();
12226 
12227     MachineRegisterInfo &RegInfo = F->getRegInfo();
12228     Register MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
12229 
12230     // Save FPSCR value.
12231     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);
12232 
12233     // Set rounding mode to round-to-zero.
12234     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1))
12235         .addImm(31)
12236         .addReg(PPC::RM, RegState::ImplicitDefine);
12237 
12238     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0))
12239         .addImm(30)
12240         .addReg(PPC::RM, RegState::ImplicitDefine);
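    // Together, mtfsb1 31 and mtfsb0 30 set the RN field (FPSCR bits 62:63,
    // numbered 30:31 by the mtfsb instructions) to 0b01, i.e. round to zero.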
12241 
12242     // Perform addition.
12243     auto MIB = BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest)
12244                    .addReg(Src1)
12245                    .addReg(Src2);
12246     if (MI.getFlag(MachineInstr::NoFPExcept))
12247       MIB.setMIFlag(MachineInstr::NoFPExcept);
12248 
12249     // Restore FPSCR value.
12250     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
12251   } else if (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
12252              MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT ||
12253              MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
12254              MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) {
12255     unsigned Opcode = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
12256                        MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8)
12257                           ? PPC::ANDI8_rec
12258                           : PPC::ANDI_rec;
12259     bool IsEQ = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
12260                  MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8);
12261 
12262     MachineRegisterInfo &RegInfo = F->getRegInfo();
12263     Register Dest = RegInfo.createVirtualRegister(
12264         Opcode == PPC::ANDI_rec ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);
12265 
12266     DebugLoc Dl = MI.getDebugLoc();
12267     BuildMI(*BB, MI, Dl, TII->get(Opcode), Dest)
12268         .addReg(MI.getOperand(1).getReg())
12269         .addImm(1);
12270     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12271             MI.getOperand(0).getReg())
12272         .addReg(IsEQ ? PPC::CR0EQ : PPC::CR0GT);
12273   } else if (MI.getOpcode() == PPC::TCHECK_RET) {
12274     DebugLoc Dl = MI.getDebugLoc();
12275     MachineRegisterInfo &RegInfo = F->getRegInfo();
12276     Register CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
12277     BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
12278     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12279             MI.getOperand(0).getReg())
12280         .addReg(CRReg);
12281   } else if (MI.getOpcode() == PPC::TBEGIN_RET) {
12282     DebugLoc Dl = MI.getDebugLoc();
12283     unsigned Imm = MI.getOperand(1).getImm();
12284     BuildMI(*BB, MI, Dl, TII->get(PPC::TBEGIN)).addImm(Imm);
12285     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12286             MI.getOperand(0).getReg())
12287         .addReg(PPC::CR0EQ);
12288   } else if (MI.getOpcode() == PPC::SETRNDi) {
12289     DebugLoc dl = MI.getDebugLoc();
12290     Register OldFPSCRReg = MI.getOperand(0).getReg();
12291 
12292     // Save FPSCR value.
12293     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
12294 
    // The floating-point rounding mode is in bits 62:63 of FPSCR and has
12296     // the following settings:
12297     //   00 Round to nearest
12298     //   01 Round to 0
12299     //   10 Round to +inf
12300     //   11 Round to -inf
12301 
    // When the operand is an immediate, use its two least significant bits to
    // set bits 62:63 of FPSCR.
12304     unsigned Mode = MI.getOperand(1).getImm();
12305     BuildMI(*BB, MI, dl, TII->get((Mode & 1) ? PPC::MTFSB1 : PPC::MTFSB0))
12306         .addImm(31)
12307         .addReg(PPC::RM, RegState::ImplicitDefine);
12308 
12309     BuildMI(*BB, MI, dl, TII->get((Mode & 2) ? PPC::MTFSB1 : PPC::MTFSB0))
12310         .addImm(30)
12311         .addReg(PPC::RM, RegState::ImplicitDefine);
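    // For example, Mode = 2 emits mtfsb0 31 and mtfsb1 30, setting RN to 0b10
    // (round to +inf) per the table above.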
12312   } else if (MI.getOpcode() == PPC::SETRND) {
12313     DebugLoc dl = MI.getDebugLoc();
12314 
    // Copy a register from F8RCRegClass (SrcReg) to G8RCRegClass (DestReg),
    // or vice versa. If the target doesn't have direct moves (i.e. the
    // mtvsrd/mfvsrd instructions), we have to go through the stack to do the
    // conversion instead.
12320     auto copyRegFromG8RCOrF8RC = [&] (unsigned DestReg, unsigned SrcReg) {
12321       if (Subtarget.hasDirectMove()) {
12322         BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), DestReg)
12323           .addReg(SrcReg);
12324       } else {
12325         // Use stack to do the register copy.
12326         unsigned StoreOp = PPC::STD, LoadOp = PPC::LFD;
12327         MachineRegisterInfo &RegInfo = F->getRegInfo();
12328         const TargetRegisterClass *RC = RegInfo.getRegClass(SrcReg);
12329         if (RC == &PPC::F8RCRegClass) {
          // Copy register from F8RCRegClass to G8RCRegClass.
12331           assert((RegInfo.getRegClass(DestReg) == &PPC::G8RCRegClass) &&
12332                  "Unsupported RegClass.");
12333 
12334           StoreOp = PPC::STFD;
12335           LoadOp = PPC::LD;
12336         } else {
          // Copy register from G8RCRegClass to F8RCRegClass.
12338           assert((RegInfo.getRegClass(SrcReg) == &PPC::G8RCRegClass) &&
12339                  (RegInfo.getRegClass(DestReg) == &PPC::F8RCRegClass) &&
12340                  "Unsupported RegClass.");
12341         }
12342 
12343         MachineFrameInfo &MFI = F->getFrameInfo();
12344         int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
12345 
12346         MachineMemOperand *MMOStore = F->getMachineMemOperand(
12347             MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
12348             MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx),
12349             MFI.getObjectAlign(FrameIdx));
12350 
12351         // Store the SrcReg into the stack.
12352         BuildMI(*BB, MI, dl, TII->get(StoreOp))
12353           .addReg(SrcReg)
12354           .addImm(0)
12355           .addFrameIndex(FrameIdx)
12356           .addMemOperand(MMOStore);
12357 
12358         MachineMemOperand *MMOLoad = F->getMachineMemOperand(
12359             MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
12360             MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
12361             MFI.getObjectAlign(FrameIdx));
12362 
        // Load DestReg from the stack slot where SrcReg was stored, which
        // completes the register class conversion from SrcReg's class to
        // DestReg's class.
12366         BuildMI(*BB, MI, dl, TII->get(LoadOp), DestReg)
12367           .addImm(0)
12368           .addFrameIndex(FrameIdx)
12369           .addMemOperand(MMOLoad);
12370       }
12371     };
12372 
12373     Register OldFPSCRReg = MI.getOperand(0).getReg();
12374 
12375     // Save FPSCR value.
12376     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
12377 
    // When the operand is a GPRC register, use its two least significant bits
    // and the mtfsf instruction to set bits 62:63 of FPSCR.
12380     //
12381     // copy OldFPSCRTmpReg, OldFPSCRReg
12382     // (INSERT_SUBREG ExtSrcReg, (IMPLICIT_DEF ImDefReg), SrcOp, 1)
12383     // rldimi NewFPSCRTmpReg, ExtSrcReg, OldFPSCRReg, 0, 62
12384     // copy NewFPSCRReg, NewFPSCRTmpReg
12385     // mtfsf 255, NewFPSCRReg
12386     MachineOperand SrcOp = MI.getOperand(1);
12387     MachineRegisterInfo &RegInfo = F->getRegInfo();
12388     Register OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12389 
12390     copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg);
12391 
12392     Register ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12393     Register ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12394 
    // The first operand of INSERT_SUBREG must be a register that has
    // subregisters; since we only care about its register class, an
    // IMPLICIT_DEF register is sufficient.
12398     BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), ImDefReg);
12399     BuildMI(*BB, MI, dl, TII->get(PPC::INSERT_SUBREG), ExtSrcReg)
12400       .addReg(ImDefReg)
12401       .add(SrcOp)
12402       .addImm(1);
12403 
12404     Register NewFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12405     BuildMI(*BB, MI, dl, TII->get(PPC::RLDIMI), NewFPSCRTmpReg)
12406       .addReg(OldFPSCRTmpReg)
12407       .addReg(ExtSrcReg)
12408       .addImm(0)
12409       .addImm(62);
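    // With SH = 0 and MB = 62, rldimi inserts under the mask covering bits
    // 62:63, so only the RN field of the saved FPSCR image is replaced by the
    // low two bits of the source operand.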
12410 
12411     Register NewFPSCRReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
12412     copyRegFromG8RCOrF8RC(NewFPSCRReg, NewFPSCRTmpReg);
12413 
    // The mask 255 selects all eight 4-bit FPSCR fields, i.e. bits 32:63 of
    // NewFPSCRReg are written to bits 32:63 of FPSCR.
12416     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF))
12417       .addImm(255)
12418       .addReg(NewFPSCRReg)
12419       .addImm(0)
12420       .addImm(0);
12421   } else if (MI.getOpcode() == PPC::SETFLM) {
12422     DebugLoc Dl = MI.getDebugLoc();
12423 
    // The result of setflm is the previous FPSCR content, so save it first.
12425     Register OldFPSCRReg = MI.getOperand(0).getReg();
12426     BuildMI(*BB, MI, Dl, TII->get(PPC::MFFS), OldFPSCRReg);
12427 
    // Put bits 32:63 of NewFPSCRReg into the FPSCR.
12429     Register NewFPSCRReg = MI.getOperand(1).getReg();
12430     BuildMI(*BB, MI, Dl, TII->get(PPC::MTFSF))
12431         .addImm(255)
12432         .addReg(NewFPSCRReg)
12433         .addImm(0)
12434         .addImm(0);
12435   } else if (MI.getOpcode() == PPC::PROBED_ALLOCA_32 ||
12436              MI.getOpcode() == PPC::PROBED_ALLOCA_64) {
12437     return emitProbedAlloca(MI, BB);
12438   } else {
12439     llvm_unreachable("Unexpected instr type to insert");
12440   }
12441 
12442   MI.eraseFromParent(); // The pseudo instruction is gone now.
12443   return BB;
12444 }
12445 
12446 //===----------------------------------------------------------------------===//
12447 // Target Optimization Hooks
12448 //===----------------------------------------------------------------------===//
12449 
12450 static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {
  // For the estimates, convergence is quadratic, so we essentially double the
  // number of correct digits after every iteration. For both FRE and FRSQRTE,
  // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(),
  // it is 2^-14. IEEE float has 23 fraction bits and double has 52.
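  // Hence, without hasRecipPrec(), three steps refine 2^-5 through 2^-10 and
  // 2^-20 to 2^-40, enough for f32; one extra step reaches 2^-80 for f64.
  // With hasRecipPrec(), one step refines 2^-14 to 2^-28 (enough for f32) and
  // a second step reaches 2^-56 (enough for f64).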
12455   int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
12456   if (VT.getScalarType() == MVT::f64)
12457     RefinementSteps++;
12458   return RefinementSteps;
12459 }
12460 
12461 SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
12462                                            int Enabled, int &RefinementSteps,
12463                                            bool &UseOneConstNR,
12464                                            bool Reciprocal) const {
12465   EVT VT = Operand.getValueType();
12466   if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
12467       (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
12468       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
12469       (VT == MVT::v2f64 && Subtarget.hasVSX())) {
12470     if (RefinementSteps == ReciprocalEstimate::Unspecified)
12471       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
12472 
12473     // The Newton-Raphson computation with a single constant does not provide
12474     // enough accuracy on some CPUs.
12475     UseOneConstNR = !Subtarget.needsTwoConstNR();
12476     return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
12477   }
12478   return SDValue();
12479 }
12480 
12481 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
12482                                             int Enabled,
12483                                             int &RefinementSteps) const {
12484   EVT VT = Operand.getValueType();
12485   if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
12486       (VT == MVT::f64 && Subtarget.hasFRE()) ||
12487       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
12488       (VT == MVT::v2f64 && Subtarget.hasVSX())) {
12489     if (RefinementSteps == ReciprocalEstimate::Unspecified)
12490       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
12491     return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
12492   }
12493   return SDValue();
12494 }
12495 
12496 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
12497   // Note: This functionality is used only when unsafe-fp-math is enabled, and
12498   // on cores with reciprocal estimates (which are used when unsafe-fp-math is
12499   // enabled for division), this functionality is redundant with the default
12500   // combiner logic (once the division -> reciprocal/multiply transformation
12501   // has taken place). As a result, this matters more for older cores than for
12502   // newer ones.
12503 
  // Combine multiple FDIVs with the same divisor into multiple FMULs by the
  // reciprocal if there are two or more FDIVs (for embedded cores with only
  // one FP pipeline) or three or more FDIVs (for generic OOO cores).
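  // For example, on a generic core, the three divisions in "a/c; b/c; d/c"
  // are worth rewriting as "t = 1.0/c; a*t; b*t; d*t".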
12507   switch (Subtarget.getCPUDirective()) {
12508   default:
12509     return 3;
12510   case PPC::DIR_440:
12511   case PPC::DIR_A2:
12512   case PPC::DIR_E500:
12513   case PPC::DIR_E500mc:
12514   case PPC::DIR_E5500:
12515     return 2;
12516   }
12517 }
12518 
12519 // isConsecutiveLSLoc needs to work even if all adds have not yet been
12520 // collapsed, and so we need to look through chains of them.
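// For example, for (add (add %x, 8), 16) this accumulates Base = %x and
// Offset = 24.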
static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
                                      int64_t &Offset, SelectionDAG &DAG) {
12523   if (DAG.isBaseWithConstantOffset(Loc)) {
12524     Base = Loc.getOperand(0);
12525     Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
12526 
12527     // The base might itself be a base plus an offset, and if so, accumulate
12528     // that as well.
12529     getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG);
12530   }
12531 }
12532 
12533 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
12534                             unsigned Bytes, int Dist,
12535                             SelectionDAG &DAG) {
12536   if (VT.getSizeInBits() / 8 != Bytes)
12537     return false;
12538 
12539   SDValue BaseLoc = Base->getBasePtr();
12540   if (Loc.getOpcode() == ISD::FrameIndex) {
12541     if (BaseLoc.getOpcode() != ISD::FrameIndex)
12542       return false;
12543     const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
12544     int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
12545     int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
12546     int FS  = MFI.getObjectSize(FI);
12547     int BFS = MFI.getObjectSize(BFI);
12548     if (FS != BFS || FS != (int)Bytes) return false;
12549     return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
12550   }
12551 
12552   SDValue Base1 = Loc, Base2 = BaseLoc;
12553   int64_t Offset1 = 0, Offset2 = 0;
12554   getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);
12555   getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);
12556   if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
12557     return true;
12558 
12559   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12560   const GlobalValue *GV1 = nullptr;
12561   const GlobalValue *GV2 = nullptr;
12562   Offset1 = 0;
12563   Offset2 = 0;
12564   bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
12565   bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
12566   if (isGA1 && isGA2 && GV1 == GV2)
12567     return Offset1 == (Offset2 + Dist*Bytes);
12568   return false;
12569 }
12570 
12571 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
12572 // not enforce equality of the chain operands.
12573 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
12574                             unsigned Bytes, int Dist,
12575                             SelectionDAG &DAG) {
12576   if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
12577     EVT VT = LS->getMemoryVT();
12578     SDValue Loc = LS->getBasePtr();
12579     return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
12580   }
12581 
12582   if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
12583     EVT VT;
12584     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12585     default: return false;
12586     case Intrinsic::ppc_altivec_lvx:
12587     case Intrinsic::ppc_altivec_lvxl:
12588     case Intrinsic::ppc_vsx_lxvw4x:
12589     case Intrinsic::ppc_vsx_lxvw4x_be:
12590       VT = MVT::v4i32;
12591       break;
12592     case Intrinsic::ppc_vsx_lxvd2x:
12593     case Intrinsic::ppc_vsx_lxvd2x_be:
12594       VT = MVT::v2f64;
12595       break;
12596     case Intrinsic::ppc_altivec_lvebx:
12597       VT = MVT::i8;
12598       break;
12599     case Intrinsic::ppc_altivec_lvehx:
12600       VT = MVT::i16;
12601       break;
12602     case Intrinsic::ppc_altivec_lvewx:
12603       VT = MVT::i32;
12604       break;
12605     }
12606 
12607     return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
12608   }
12609 
12610   if (N->getOpcode() == ISD::INTRINSIC_VOID) {
12611     EVT VT;
12612     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12613     default: return false;
12614     case Intrinsic::ppc_altivec_stvx:
12615     case Intrinsic::ppc_altivec_stvxl:
12616     case Intrinsic::ppc_vsx_stxvw4x:
12617       VT = MVT::v4i32;
12618       break;
12619     case Intrinsic::ppc_vsx_stxvd2x:
12620       VT = MVT::v2f64;
12621       break;
12622     case Intrinsic::ppc_vsx_stxvw4x_be:
12623       VT = MVT::v4i32;
12624       break;
12625     case Intrinsic::ppc_vsx_stxvd2x_be:
12626       VT = MVT::v2f64;
12627       break;
12628     case Intrinsic::ppc_altivec_stvebx:
12629       VT = MVT::i8;
12630       break;
12631     case Intrinsic::ppc_altivec_stvehx:
12632       VT = MVT::i16;
12633       break;
12634     case Intrinsic::ppc_altivec_stvewx:
12635       VT = MVT::i32;
12636       break;
12637     }
12638 
12639     return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
12640   }
12641 
12642   return false;
12643 }
12644 
// Return true if there is a nearby consecutive load to the one provided
// (regardless of alignment). We search up and down the chain, looking through
// token factors and other loads (but nothing else). As a result, a return
// value of true indicates that it is safe to create a new consecutive load
// adjacent to the load provided.
12650 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
12651   SDValue Chain = LD->getChain();
12652   EVT VT = LD->getMemoryVT();
12653 
12654   SmallSet<SDNode *, 16> LoadRoots;
12655   SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
12656   SmallSet<SDNode *, 16> Visited;
12657 
12658   // First, search up the chain, branching to follow all token-factor operands.
12659   // If we find a consecutive load, then we're done, otherwise, record all
12660   // nodes just above the top-level loads and token factors.
12661   while (!Queue.empty()) {
12662     SDNode *ChainNext = Queue.pop_back_val();
12663     if (!Visited.insert(ChainNext).second)
12664       continue;
12665 
12666     if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
12667       if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
12668         return true;
12669 
12670       if (!Visited.count(ChainLD->getChain().getNode()))
12671         Queue.push_back(ChainLD->getChain().getNode());
12672     } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
12673       for (const SDUse &O : ChainNext->ops())
12674         if (!Visited.count(O.getNode()))
12675           Queue.push_back(O.getNode());
12676     } else
12677       LoadRoots.insert(ChainNext);
12678   }
12679 
  // Second, search down the chain, starting from the top-level nodes recorded
  // in the first phase. These top-level nodes are the nodes just above all
  // loads and token factors. Starting with their uses, recursively look
  // through all loads (just the chain uses) and token factors to find a
  // consecutive load.
12685   Visited.clear();
12686   Queue.clear();
12687 
12688   for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
12689        IE = LoadRoots.end(); I != IE; ++I) {
12690     Queue.push_back(*I);
12691 
12692     while (!Queue.empty()) {
12693       SDNode *LoadRoot = Queue.pop_back_val();
12694       if (!Visited.insert(LoadRoot).second)
12695         continue;
12696 
12697       if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
12698         if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
12699           return true;
12700 
12701       for (SDNode::use_iterator UI = LoadRoot->use_begin(),
12702            UE = LoadRoot->use_end(); UI != UE; ++UI)
12703         if (((isa<MemSDNode>(*UI) &&
12704             cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
12705             UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
12706           Queue.push_back(*UI);
12707     }
12708   }
12709 
12710   return false;
12711 }
12712 
/// This function is called when we have proved that a SETCC node can be
/// replaced by subtraction (and other supporting instructions) so that the
/// result of the comparison is kept in a GPR instead of a CR. This function
/// is purely for codegen purposes and has some flags to guide the codegen
/// process.
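/// For example, with i32 operands zero-extended to i64, "x <u y" is exactly
/// the sign bit of (zext(x) - zext(y)), i.e. (zext(x) - zext(y)) >> 63, since
/// the subtraction cannot wrap.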
12717 static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement,
12718                                      bool Swap, SDLoc &DL, SelectionDAG &DAG) {
12719   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
12720 
12721   // Zero extend the operands to the largest legal integer. Originally, they
12722   // must be of a strictly smaller size.
12723   auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0),
12724                          DAG.getConstant(Size, DL, MVT::i32));
12725   auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1),
12726                          DAG.getConstant(Size, DL, MVT::i32));
12727 
  // Swap if needed, depending on the condition code.
12729   if (Swap)
12730     std::swap(Op0, Op1);
12731 
12732   // Subtract extended integers.
12733   auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1);
12734 
  // Move the sign bit to the least significant position and zero out the
  // rest. Now the least significant bit carries the result of the original
  // comparison.
12737   auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode,
12738                              DAG.getConstant(Size - 1, DL, MVT::i32));
12739   auto Final = Shifted;
12740 
  // Complement the result if needed, based on the condition code.
12742   if (Complement)
12743     Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted,
12744                         DAG.getConstant(1, DL, MVT::i64));
12745 
12746   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final);
12747 }
12748 
12749 SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
12750                                                   DAGCombinerInfo &DCI) const {
12751   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
12752 
12753   SelectionDAG &DAG = DCI.DAG;
12754   SDLoc DL(N);
12755 
  // The size of the integers being compared plays a critical role in the
  // following analysis, so we prefer to do this when all types are legal.
12758   if (!DCI.isAfterLegalizeDAG())
12759     return SDValue();
12760 
  // If all users of the SETCC extend its value to a legal integer type, then
  // we replace the SETCC with a subtraction.
12763   for (SDNode::use_iterator UI = N->use_begin(),
12764        UE = N->use_end(); UI != UE; ++UI) {
12765     if (UI->getOpcode() != ISD::ZERO_EXTEND)
12766       return SDValue();
12767   }
12768 
12769   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
12770   auto OpSize = N->getOperand(0).getValueSizeInBits();
12771 
12772   unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();
12773 
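  // With Sub(a, b) = (zext(a) - zext(b)) >> (Size - 1), the unsigned
  // predicates map to subtractions as follows:
  //   x <u y   ==  Sub(x, y)          (no swap, no complement)
  //   x <=u y  ==  Sub(y, x) ^ 1      (swap and complement)
  //   x >u y   ==  Sub(y, x)          (swap only)
  //   x >=u y  ==  Sub(x, y) ^ 1      (complement only)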
12774   if (OpSize < Size) {
12775     switch (CC) {
12776     default: break;
12777     case ISD::SETULT:
12778       return generateEquivalentSub(N, Size, false, false, DL, DAG);
12779     case ISD::SETULE:
12780       return generateEquivalentSub(N, Size, true, true, DL, DAG);
12781     case ISD::SETUGT:
12782       return generateEquivalentSub(N, Size, false, true, DL, DAG);
12783     case ISD::SETUGE:
12784       return generateEquivalentSub(N, Size, true, false, DL, DAG);
12785     }
12786   }
12787 
12788   return SDValue();
12789 }
12790 
12791 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
12792                                                   DAGCombinerInfo &DCI) const {
12793   SelectionDAG &DAG = DCI.DAG;
12794   SDLoc dl(N);
12795 
12796   assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
12797   // If we're tracking CR bits, we need to be careful that we don't have:
12798   //   trunc(binary-ops(zext(x), zext(y)))
12799   // or
  //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...))
12801   // such that we're unnecessarily moving things into GPRs when it would be
12802   // better to keep them in CR bits.
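  // For example, (trunc (and (zext i1 %a to i32), (zext i1 %b to i32)) to i1)
  // can simply stay in CR bits as (and i1 %a, %b).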
12803 
12804   // Note that trunc here can be an actual i1 trunc, or can be the effective
12805   // truncation that comes from a setcc or select_cc.
12806   if (N->getOpcode() == ISD::TRUNCATE &&
12807       N->getValueType(0) != MVT::i1)
12808     return SDValue();
12809 
12810   if (N->getOperand(0).getValueType() != MVT::i32 &&
12811       N->getOperand(0).getValueType() != MVT::i64)
12812     return SDValue();
12813 
12814   if (N->getOpcode() == ISD::SETCC ||
12815       N->getOpcode() == ISD::SELECT_CC) {
    // If we're looking at a comparison, then we need to make sure that the
    // high bits (all except for the first) don't affect the result.
12818     ISD::CondCode CC =
12819       cast<CondCodeSDNode>(N->getOperand(
12820         N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
12821     unsigned OpBits = N->getOperand(0).getValueSizeInBits();
12822 
12823     if (ISD::isSignedIntSetCC(CC)) {
12824       if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
12825           DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
12826         return SDValue();
12827     } else if (ISD::isUnsignedIntSetCC(CC)) {
12828       if (!DAG.MaskedValueIsZero(N->getOperand(0),
12829                                  APInt::getHighBitsSet(OpBits, OpBits-1)) ||
12830           !DAG.MaskedValueIsZero(N->getOperand(1),
12831                                  APInt::getHighBitsSet(OpBits, OpBits-1)))
12832         return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
12833                                              : SDValue());
12834     } else {
12835       // This is neither a signed nor an unsigned comparison, just make sure
12836       // that the high bits are equal.
12837       KnownBits Op1Known = DAG.computeKnownBits(N->getOperand(0));
12838       KnownBits Op2Known = DAG.computeKnownBits(N->getOperand(1));
12839 
12840       // We don't really care about what is known about the first bit (if
12841       // anything), so clear it in all masks prior to comparing them.
12842       Op1Known.Zero.clearBit(0); Op1Known.One.clearBit(0);
12843       Op2Known.Zero.clearBit(0); Op2Known.One.clearBit(0);
12844 
12845       if (Op1Known.Zero != Op2Known.Zero || Op1Known.One != Op2Known.One)
12846         return SDValue();
12847     }
12848   }
12849 
  // We now know that the higher-order bits are irrelevant; we just need to
  // make sure that all of the intermediate operations are bit operations, and
  // all inputs are extensions.
12853   if (N->getOperand(0).getOpcode() != ISD::AND &&
12854       N->getOperand(0).getOpcode() != ISD::OR  &&
12855       N->getOperand(0).getOpcode() != ISD::XOR &&
12856       N->getOperand(0).getOpcode() != ISD::SELECT &&
12857       N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
12858       N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
12859       N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
12860       N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
12861       N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
12862     return SDValue();
12863 
12864   if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
12865       N->getOperand(1).getOpcode() != ISD::AND &&
12866       N->getOperand(1).getOpcode() != ISD::OR  &&
12867       N->getOperand(1).getOpcode() != ISD::XOR &&
12868       N->getOperand(1).getOpcode() != ISD::SELECT &&
12869       N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
12870       N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
12871       N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
12872       N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
12873       N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
12874     return SDValue();
12875 
12876   SmallVector<SDValue, 4> Inputs;
12877   SmallVector<SDValue, 8> BinOps, PromOps;
12878   SmallPtrSet<SDNode *, 16> Visited;
12879 
12880   for (unsigned i = 0; i < 2; ++i) {
12881     if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
12882           N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
12883           N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
12884           N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
12885         isa<ConstantSDNode>(N->getOperand(i)))
12886       Inputs.push_back(N->getOperand(i));
12887     else
12888       BinOps.push_back(N->getOperand(i));
12889 
12890     if (N->getOpcode() == ISD::TRUNCATE)
12891       break;
12892   }
12893 
12894   // Visit all inputs, collect all binary operations (and, or, xor and
12895   // select) that are all fed by extensions.
12896   while (!BinOps.empty()) {
12897     SDValue BinOp = BinOps.back();
12898     BinOps.pop_back();
12899 
12900     if (!Visited.insert(BinOp.getNode()).second)
12901       continue;
12902 
12903     PromOps.push_back(BinOp);
12904 
12905     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
12906       // The condition of the select is not promoted.
12907       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
12908         continue;
12909       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
12910         continue;
12911 
12912       if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
12913             BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
12914             BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
12915            BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
12916           isa<ConstantSDNode>(BinOp.getOperand(i))) {
12917         Inputs.push_back(BinOp.getOperand(i));
12918       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
12919                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
12920                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
12921                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
12922                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
12923                  BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
12924                  BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
12925                  BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
12926                  BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
12927         BinOps.push_back(BinOp.getOperand(i));
12928       } else {
12929         // We have an input that is not an extension or another binary
12930         // operation; we'll abort this transformation.
12931         return SDValue();
12932       }
12933     }
12934   }
12935 
12936   // Make sure that this is a self-contained cluster of operations (which
12937   // is not quite the same thing as saying that everything has only one
12938   // use).
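  // (If an input or intermediate value had a user outside this cluster,
  // rewriting it to type i1 would change the type observed by that outside
  // user, so we must give up.)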
12939   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12940     if (isa<ConstantSDNode>(Inputs[i]))
12941       continue;
12942 
12943     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
12944                               UE = Inputs[i].getNode()->use_end();
12945          UI != UE; ++UI) {
12946       SDNode *User = *UI;
12947       if (User != N && !Visited.count(User))
12948         return SDValue();
12949 
      // Make sure that we're not going to promote the non-output-value
      // operand(s) of SELECT or SELECT_CC.
12952       // FIXME: Although we could sometimes handle this, and it does occur in
12953       // practice that one of the condition inputs to the select is also one of
12954       // the outputs, we currently can't deal with this.
12955       if (User->getOpcode() == ISD::SELECT) {
12956         if (User->getOperand(0) == Inputs[i])
12957           return SDValue();
12958       } else if (User->getOpcode() == ISD::SELECT_CC) {
12959         if (User->getOperand(0) == Inputs[i] ||
12960             User->getOperand(1) == Inputs[i])
12961           return SDValue();
12962       }
12963     }
12964   }
12965 
12966   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
12967     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
12968                               UE = PromOps[i].getNode()->use_end();
12969          UI != UE; ++UI) {
12970       SDNode *User = *UI;
12971       if (User != N && !Visited.count(User))
12972         return SDValue();
12973 
      // Make sure that we're not going to promote the non-output-value
      // operand(s) of SELECT or SELECT_CC.
12976       // FIXME: Although we could sometimes handle this, and it does occur in
12977       // practice that one of the condition inputs to the select is also one of
12978       // the outputs, we currently can't deal with this.
12979       if (User->getOpcode() == ISD::SELECT) {
12980         if (User->getOperand(0) == PromOps[i])
12981           return SDValue();
12982       } else if (User->getOpcode() == ISD::SELECT_CC) {
12983         if (User->getOperand(0) == PromOps[i] ||
12984             User->getOperand(1) == PromOps[i])
12985           return SDValue();
12986       }
12987     }
12988   }
12989 
12990   // Replace all inputs with the extension operand.
12991   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12992     // Constants may have users outside the cluster of to-be-promoted nodes,
12993     // and so we need to replace those as we do the promotions.
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
12998   }
12999 
13000   std::list<HandleSDNode> PromOpHandles;
13001   for (auto &PromOp : PromOps)
13002     PromOpHandles.emplace_back(PromOp);
13003 
  // Replace all operations (each is the same as before, but now has an i1
  // return type). DAG.getNode will validate that the types of
13006   // a binary operator match, so go through the list in reverse so that
13007   // we've likely promoted both operands first. Any intermediate truncations or
13008   // extensions disappear.
13009   while (!PromOpHandles.empty()) {
13010     SDValue PromOp = PromOpHandles.back().getValue();
13011     PromOpHandles.pop_back();
13012 
13013     if (PromOp.getOpcode() == ISD::TRUNCATE ||
13014         PromOp.getOpcode() == ISD::SIGN_EXTEND ||
13015         PromOp.getOpcode() == ISD::ZERO_EXTEND ||
13016         PromOp.getOpcode() == ISD::ANY_EXTEND) {
13017       if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
13018           PromOp.getOperand(0).getValueType() != MVT::i1) {
13019         // The operand is not yet ready (see comment below).
13020         PromOpHandles.emplace_front(PromOp);
13021         continue;
13022       }
13023 
13024       SDValue RepValue = PromOp.getOperand(0);
13025       if (isa<ConstantSDNode>(RepValue))
13026         RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);
13027 
13028       DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
13029       continue;
13030     }
13031 
13032     unsigned C;
13033     switch (PromOp.getOpcode()) {
13034     default:             C = 0; break;
13035     case ISD::SELECT:    C = 1; break;
13036     case ISD::SELECT_CC: C = 2; break;
13037     }
13038 
13039     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
13040          PromOp.getOperand(C).getValueType() != MVT::i1) ||
13041         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
13042          PromOp.getOperand(C+1).getValueType() != MVT::i1)) {
13043       // The to-be-promoted operands of this node have not yet been
13044       // promoted (this should be rare because we're going through the
13045       // list backward, but if one of the operands has several users in
13046       // this cluster of to-be-promoted nodes, it is possible).
13047       PromOpHandles.emplace_front(PromOp);
13048       continue;
13049     }
13050 
13051     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
13052                                 PromOp.getNode()->op_end());
13053 
13054     // If there are any constant inputs, make sure they're replaced now.
13055     for (unsigned i = 0; i < 2; ++i)
13056       if (isa<ConstantSDNode>(Ops[C+i]))
13057         Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]);
13058 
13059     DAG.ReplaceAllUsesOfValueWith(PromOp,
13060       DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
13061   }
13062 
13063   // Now we're left with the initial truncation itself.
13064   if (N->getOpcode() == ISD::TRUNCATE)
13065     return N->getOperand(0);
13066 
13067   // Otherwise, this is a comparison. The operands to be compared have just
13068   // changed type (to i1), but everything else is the same.
13069   return SDValue(N, 0);
13070 }
13071 
13072 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
13073                                                   DAGCombinerInfo &DCI) const {
13074   SelectionDAG &DAG = DCI.DAG;
13075   SDLoc dl(N);
13076 
13077   // If we're tracking CR bits, we need to be careful that we don't have:
13078   //   zext(binary-ops(trunc(x), trunc(y)))
13079   // or
  //   zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...))
13081   // such that we're unnecessarily moving things into CR bits that can more
13082   // efficiently stay in GPRs. Note that if we're not certain that the high
13083   // bits are set as required by the final extension, we still may need to do
13084   // some masking to get the proper behavior.
13085 
13086   // This same functionality is important on PPC64 when dealing with
13087   // 32-to-64-bit extensions; these occur often when 32-bit values are used as
13088   // the return values of functions. Because it is so similar, it is handled
13089   // here as well.
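  //
  // For example, (zext (and (trunc i64 %a to i1), (trunc i64 %b to i1)) to
  // i64) can stay in GPRs as (and i64 %a, %b), masked at the end with
  // (and ..., 1) unless the inputs' high bits are already known to be zero.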
13090 
13091   if (N->getValueType(0) != MVT::i32 &&
13092       N->getValueType(0) != MVT::i64)
13093     return SDValue();
13094 
13095   if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
13096         (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
13097     return SDValue();
13098 
13099   if (N->getOperand(0).getOpcode() != ISD::AND &&
13100       N->getOperand(0).getOpcode() != ISD::OR  &&
13101       N->getOperand(0).getOpcode() != ISD::XOR &&
13102       N->getOperand(0).getOpcode() != ISD::SELECT &&
13103       N->getOperand(0).getOpcode() != ISD::SELECT_CC)
13104     return SDValue();
13105 
13106   SmallVector<SDValue, 4> Inputs;
13107   SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
13108   SmallPtrSet<SDNode *, 16> Visited;
13109 
13110   // Visit all inputs, collect all binary operations (and, or, xor and
13111   // select) that are all fed by truncations.
13112   while (!BinOps.empty()) {
13113     SDValue BinOp = BinOps.back();
13114     BinOps.pop_back();
13115 
13116     if (!Visited.insert(BinOp.getNode()).second)
13117       continue;
13118 
13119     PromOps.push_back(BinOp);
13120 
13121     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
13122       // The condition of the select is not promoted.
13123       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
13124         continue;
13125       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
13126         continue;
13127 
13128       if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
13129           isa<ConstantSDNode>(BinOp.getOperand(i))) {
13130         Inputs.push_back(BinOp.getOperand(i));
13131       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
13132                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
13133                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
13134                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
13135                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
13136         BinOps.push_back(BinOp.getOperand(i));
13137       } else {
13138         // We have an input that is not a truncation or another binary
13139         // operation; we'll abort this transformation.
13140         return SDValue();
13141       }
13142     }
13143   }
13144 
  // For each select, record the operands that must be truncated when the
  // select is promoted, because the operand is actually part of the
  // to-be-promoted set.
13147   DenseMap<SDNode *, EVT> SelectTruncOp[2];
13148 
13149   // Make sure that this is a self-contained cluster of operations (which
13150   // is not quite the same thing as saying that everything has only one
13151   // use).
13152   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13153     if (isa<ConstantSDNode>(Inputs[i]))
13154       continue;
13155 
13156     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
13157                               UE = Inputs[i].getNode()->use_end();
13158          UI != UE; ++UI) {
13159       SDNode *User = *UI;
13160       if (User != N && !Visited.count(User))
13161         return SDValue();
13162 
      // If we're going to promote the non-output-value operand(s) of SELECT
      // or SELECT_CC, record them for truncation.
13165       if (User->getOpcode() == ISD::SELECT) {
13166         if (User->getOperand(0) == Inputs[i])
13167           SelectTruncOp[0].insert(std::make_pair(User,
13168                                     User->getOperand(0).getValueType()));
13169       } else if (User->getOpcode() == ISD::SELECT_CC) {
13170         if (User->getOperand(0) == Inputs[i])
13171           SelectTruncOp[0].insert(std::make_pair(User,
13172                                     User->getOperand(0).getValueType()));
13173         if (User->getOperand(1) == Inputs[i])
13174           SelectTruncOp[1].insert(std::make_pair(User,
13175                                     User->getOperand(1).getValueType()));
13176       }
13177     }
13178   }
13179 
13180   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
13181     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
13182                               UE = PromOps[i].getNode()->use_end();
13183          UI != UE; ++UI) {
13184       SDNode *User = *UI;
13185       if (User != N && !Visited.count(User))
13186         return SDValue();
13187 
      // If we're going to promote the non-output-value operand(s) of SELECT
      // or SELECT_CC, record them for truncation.
13190       if (User->getOpcode() == ISD::SELECT) {
13191         if (User->getOperand(0) == PromOps[i])
13192           SelectTruncOp[0].insert(std::make_pair(User,
13193                                     User->getOperand(0).getValueType()));
13194       } else if (User->getOpcode() == ISD::SELECT_CC) {
13195         if (User->getOperand(0) == PromOps[i])
13196           SelectTruncOp[0].insert(std::make_pair(User,
13197                                     User->getOperand(0).getValueType()));
13198         if (User->getOperand(1) == PromOps[i])
13199           SelectTruncOp[1].insert(std::make_pair(User,
13200                                     User->getOperand(1).getValueType()));
13201       }
13202     }
13203   }
13204 
13205   unsigned PromBits = N->getOperand(0).getValueSizeInBits();
13206   bool ReallyNeedsExt = false;
13207   if (N->getOpcode() != ISD::ANY_EXTEND) {
    // If any of the inputs is not already sign/zero extended as required, we
    // will still need to do that extension at the end.
13210     for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13211       if (isa<ConstantSDNode>(Inputs[i]))
13212         continue;
13213 
13214       unsigned OpBits =
13215         Inputs[i].getOperand(0).getValueSizeInBits();
13216       assert(PromBits < OpBits && "Truncation not to a smaller bit count?");
13217 
13218       if ((N->getOpcode() == ISD::ZERO_EXTEND &&
13219            !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
13220                                   APInt::getHighBitsSet(OpBits,
13221                                                         OpBits-PromBits))) ||
13222           (N->getOpcode() == ISD::SIGN_EXTEND &&
13223            DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
13224              (OpBits-(PromBits-1)))) {
13225         ReallyNeedsExt = true;
13226         break;
13227       }
13228     }
13229   }
13230 
13231   // Replace all inputs, either with the truncation operand, or a
13232   // truncation or extension to the final output type.
13233   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13234     // Constant inputs need to be replaced with the to-be-promoted nodes that
13235     // use them because they might have users outside of the cluster of
13236     // promoted nodes.
13237     if (isa<ConstantSDNode>(Inputs[i]))
13238       continue;
13239 
13240     SDValue InSrc = Inputs[i].getOperand(0);
13241     if (Inputs[i].getValueType() == N->getValueType(0))
13242       DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
13243     else if (N->getOpcode() == ISD::SIGN_EXTEND)
13244       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13245         DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
13246     else if (N->getOpcode() == ISD::ZERO_EXTEND)
13247       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13248         DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
13249     else
13250       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13251         DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
13252   }
13253 
13254   std::list<HandleSDNode> PromOpHandles;
13255   for (auto &PromOp : PromOps)
13256     PromOpHandles.emplace_back(PromOp);
13257 
13258   // Replace all operations (these are all the same, but have a different
13259   // (promoted) return type). DAG.getNode will validate that the types of
13260   // a binary operator match, so go through the list in reverse so that
13261   // we've likely promoted both operands first.
13262   while (!PromOpHandles.empty()) {
13263     SDValue PromOp = PromOpHandles.back().getValue();
13264     PromOpHandles.pop_back();
13265 
13266     unsigned C;
13267     switch (PromOp.getOpcode()) {
13268     default:             C = 0; break;
13269     case ISD::SELECT:    C = 1; break;
13270     case ISD::SELECT_CC: C = 2; break;
13271     }
13272 
13273     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
13274          PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
13275         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
13276          PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) {
13277       // The to-be-promoted operands of this node have not yet been
13278       // promoted (this should be rare because we're going through the
13279       // list backward, but if one of the operands has several users in
13280       // this cluster of to-be-promoted nodes, it is possible).
13281       PromOpHandles.emplace_front(PromOp);
13282       continue;
13283     }
13284 
13285     // For SELECT and SELECT_CC nodes, we do a similar check for any
13286     // to-be-promoted comparison inputs.
13287     if (PromOp.getOpcode() == ISD::SELECT ||
13288         PromOp.getOpcode() == ISD::SELECT_CC) {
13289       if ((SelectTruncOp[0].count(PromOp.getNode()) &&
13290            PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
13291           (SelectTruncOp[1].count(PromOp.getNode()) &&
13292            PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
13293         PromOpHandles.emplace_front(PromOp);
13294         continue;
13295       }
13296     }
13297 
13298     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
13299                                 PromOp.getNode()->op_end());
13300 
13301     // If this node has constant inputs, then they'll need to be promoted here.
13302     for (unsigned i = 0; i < 2; ++i) {
13303       if (!isa<ConstantSDNode>(Ops[C+i]))
13304         continue;
13305       if (Ops[C+i].getValueType() == N->getValueType(0))
13306         continue;
13307 
13308       if (N->getOpcode() == ISD::SIGN_EXTEND)
13309         Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13310       else if (N->getOpcode() == ISD::ZERO_EXTEND)
13311         Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13312       else
13313         Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13314     }
13315 
13316     // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
13317     // truncate them again to the original value type.
13318     if (PromOp.getOpcode() == ISD::SELECT ||
13319         PromOp.getOpcode() == ISD::SELECT_CC) {
13320       auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
13321       if (SI0 != SelectTruncOp[0].end())
13322         Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
13323       auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
13324       if (SI1 != SelectTruncOp[1].end())
13325         Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
13326     }
13327 
13328     DAG.ReplaceAllUsesOfValueWith(PromOp,
13329       DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
13330   }
13331 
13332   // Now we're left with the initial extension itself.
13333   if (!ReallyNeedsExt)
13334     return N->getOperand(0);
13335 
  // To zero extend, just mask off everything except for the low PromBits bits
  // (just the first bit in the i1 case).
13338   if (N->getOpcode() == ISD::ZERO_EXTEND)
13339     return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
13340                        DAG.getConstant(APInt::getLowBitsSet(
13341                                          N->getValueSizeInBits(0), PromBits),
13342                                        dl, N->getValueType(0)));
13343 
13344   assert(N->getOpcode() == ISD::SIGN_EXTEND &&
13345          "Invalid extension type");
13346   EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
13347   SDValue ShiftCst =
13348       DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
13349   return DAG.getNode(
13350       ISD::SRA, dl, N->getValueType(0),
13351       DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
13352       ShiftCst);
13353 }
13354 
13355 SDValue PPCTargetLowering::combineSetCC(SDNode *N,
13356                                         DAGCombinerInfo &DCI) const {
13357   assert(N->getOpcode() == ISD::SETCC &&
13358          "Should be called with a SETCC node");
13359 
13360   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
13361   if (CC == ISD::SETNE || CC == ISD::SETEQ) {
13362     SDValue LHS = N->getOperand(0);
13363     SDValue RHS = N->getOperand(1);
13364 
13365     // If there is a '0 - y' pattern, canonicalize the pattern to the RHS.
13366     if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
13367         LHS.hasOneUse())
13368       std::swap(LHS, RHS);
13369 
13370     // x == 0-y --> x+y == 0
13371     // x != 0-y --> x+y != 0
13372     if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
13373         RHS.hasOneUse()) {
13374       SDLoc DL(N);
13375       SelectionDAG &DAG = DCI.DAG;
13376       EVT VT = N->getValueType(0);
13377       EVT OpVT = LHS.getValueType();
13378       SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
13379       return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
13380     }
13381   }
13382 
13383   return DAGCombineTruncBoolExt(N, DCI);
13384 }
13385 
13386 // Is this an extending load from an f32 to an f64?
13387 static bool isFPExtLoad(SDValue Op) {
13388   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()))
13389     return LD->getExtensionType() == ISD::EXTLOAD &&
13390       Op.getValueType() == MVT::f64;
13391   return false;
13392 }
13393 
/// Reduces the number of fp-to-int conversions when building a vector.
13395 ///
13396 /// If this vector is built out of floating to integer conversions,
13397 /// transform it to a vector built out of floating point values followed by a
13398 /// single floating to integer conversion of the vector.
13399 /// Namely  (build_vector (fptosi $A), (fptosi $B), ...)
13400 /// becomes (fptosi (build_vector ($A, $B, ...)))
13401 SDValue PPCTargetLowering::
13402 combineElementTruncationToVectorTruncation(SDNode *N,
13403                                            DAGCombinerInfo &DCI) const {
13404   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13405          "Should be called with a BUILD_VECTOR node");
13406 
13407   SelectionDAG &DAG = DCI.DAG;
13408   SDLoc dl(N);
13409 
13410   SDValue FirstInput = N->getOperand(0);
13411   assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
13412          "The input operand must be an fp-to-int conversion.");
13413 
  // This combine happens after legalization so the fp_to_[su]i nodes are
  // already converted to PPCISD nodes.
13416   unsigned FirstConversion = FirstInput.getOperand(0).getOpcode();
13417   if (FirstConversion == PPCISD::FCTIDZ ||
13418       FirstConversion == PPCISD::FCTIDUZ ||
13419       FirstConversion == PPCISD::FCTIWZ ||
13420       FirstConversion == PPCISD::FCTIWUZ) {
13421     bool IsSplat = true;
13422     bool Is32Bit = FirstConversion == PPCISD::FCTIWZ ||
13423       FirstConversion == PPCISD::FCTIWUZ;
13424     EVT SrcVT = FirstInput.getOperand(0).getValueType();
13425     SmallVector<SDValue, 4> Ops;
13426     EVT TargetVT = N->getValueType(0);
13427     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
13428       SDValue NextOp = N->getOperand(i);
13429       if (NextOp.getOpcode() != PPCISD::MFVSR)
13430         return SDValue();
13431       unsigned NextConversion = NextOp.getOperand(0).getOpcode();
13432       if (NextConversion != FirstConversion)
13433         return SDValue();
      // If we are converting to 32-bit integers, we need to add an FP_ROUND.
      // This is not valid if the input was originally double precision. It is
      // also not profitable to do unless this is an extending load, in which
      // case doing this combine will allow us to combine consecutive loads.
13438       if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0)))
13439         return SDValue();
13440       if (N->getOperand(i) != FirstInput)
13441         IsSplat = false;
13442     }
13443 
13444     // If this is a splat, we leave it as-is since there will be only a single
13445     // fp-to-int conversion followed by a splat of the integer. This is better
13446     // for 32-bit and smaller ints and neutral for 64-bit ints.
13447     if (IsSplat)
13448       return SDValue();
13449 
13450     // Now that we know we have the right type of node, get its operands
13451     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
13452       SDValue In = N->getOperand(i).getOperand(0);
13453       if (Is32Bit) {
13454         // For 32-bit values, we need to add an FP_ROUND node (if we made it
13455         // here, we know that all inputs are extending loads so this is safe).
13456         if (In.isUndef())
13457           Ops.push_back(DAG.getUNDEF(SrcVT));
13458         else {
13459           SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl,
13460                                       MVT::f32, In.getOperand(0),
13461                                       DAG.getIntPtrConstant(1, dl));
13462           Ops.push_back(Trunc);
13463         }
13464       } else
13465         Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
13466     }
13467 
13468     unsigned Opcode;
13469     if (FirstConversion == PPCISD::FCTIDZ ||
13470         FirstConversion == PPCISD::FCTIWZ)
13471       Opcode = ISD::FP_TO_SINT;
13472     else
13473       Opcode = ISD::FP_TO_UINT;
13474 
13475     EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;
13476     SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
13477     return DAG.getNode(Opcode, dl, TargetVT, BV);
13478   }
13479   return SDValue();
13480 }
13481 
13482 /// Reduce the number of loads when building a vector.
13483 ///
13484 /// Building a vector out of multiple loads can be converted to a load
13485 /// of the vector type if the loads are consecutive. If the loads are
13486 /// consecutive but in descending order, a shuffle is added at the end
13487 /// to reorder the vector.
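/// For example, (build_vector (load a), (load a+4), (load a+8), (load a+12))
/// of v4i32 becomes a single v4i32 load from a; if the loads are instead in
/// descending order, the wide load is followed by a reversing shuffle.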
13488 static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) {
13489   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13490          "Should be called with a BUILD_VECTOR node");
13491 
13492   SDLoc dl(N);
13493 
  // Return early for non-byte-sized types, as they can't be consecutive.
13495   if (!N->getValueType(0).getVectorElementType().isByteSized())
13496     return SDValue();
13497 
13498   bool InputsAreConsecutiveLoads = true;
13499   bool InputsAreReverseConsecutive = true;
13500   unsigned ElemSize = N->getValueType(0).getScalarType().getStoreSize();
13501   SDValue FirstInput = N->getOperand(0);
13502   bool IsRoundOfExtLoad = false;
13503 
13504   if (FirstInput.getOpcode() == ISD::FP_ROUND &&
13505       FirstInput.getOperand(0).getOpcode() == ISD::LOAD) {
13506     LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0));
13507     IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD;
13508   }
13509   // Not a build vector of (possibly fp_rounded) loads.
13510   if ((!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) ||
13511       N->getNumOperands() == 1)
13512     return SDValue();
13513 
13514   for (int i = 1, e = N->getNumOperands(); i < e; ++i) {
13515     // If any inputs are fp_round(extload), they all must be.
13516     if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND)
13517       return SDValue();
13518 
13519     SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) :
13520       N->getOperand(i);
13521     if (NextInput.getOpcode() != ISD::LOAD)
13522       return SDValue();
13523 
13524     SDValue PreviousInput =
13525       IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1);
13526     LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput);
13527     LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput);
13528 
13529     // If any inputs are fp_round(extload), they all must be.
13530     if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD)
13531       return SDValue();
13532 
13533     if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG))
13534       InputsAreConsecutiveLoads = false;
13535     if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG))
13536       InputsAreReverseConsecutive = false;
13537 
13538     // Exit early if the loads are neither consecutive nor reverse consecutive.
13539     if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
13540       return SDValue();
13541   }
13542 
13543   assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
13544          "The loads cannot be both consecutive and reverse consecutive.");
13545 
13546   SDValue FirstLoadOp =
13547     IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput;
13548   SDValue LastLoadOp =
13549     IsRoundOfExtLoad ? N->getOperand(N->getNumOperands()-1).getOperand(0) :
13550                        N->getOperand(N->getNumOperands()-1);
13551 
13552   LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
13553   LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
13554   if (InputsAreConsecutiveLoads) {
13555     assert(LD1 && "Input needs to be a LoadSDNode.");
13556     return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
13557                        LD1->getBasePtr(), LD1->getPointerInfo(),
13558                        LD1->getAlignment());
13559   }
13560   if (InputsAreReverseConsecutive) {
13561     assert(LDL && "Input needs to be a LoadSDNode.");
13562     SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
13563                                LDL->getBasePtr(), LDL->getPointerInfo(),
13564                                LDL->getAlignment());
13565     SmallVector<int, 16> Ops;
13566     for (int i = N->getNumOperands() - 1; i >= 0; i--)
13567       Ops.push_back(i);
13568 
13569     return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
13570                                 DAG.getUNDEF(N->getValueType(0)), Ops);
13571   }
13572   return SDValue();
13573 }
13574 
// This function adds the vector_shuffle needed to get the elements of the
// vector extract into the positions specified by the CorrectElems encoding.
13578 static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
13579                                       SDValue Input, uint64_t Elems,
13580                                       uint64_t CorrectElems) {
13581   SDLoc dl(N);
13582 
13583   unsigned NumElems = Input.getValueType().getVectorNumElements();
13584   SmallVector<int, 16> ShuffleMask(NumElems, -1);
13585 
  // Knowing the element indices being extracted from the original vector and
  // the order in which they're being inserted, just put them at the element
  // indices required for the instruction.
13589   for (unsigned i = 0; i < N->getNumOperands(); i++) {
13590     if (DAG.getDataLayout().isLittleEndian())
13591       ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
13592     else
13593       ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
13594     CorrectElems = CorrectElems >> 8;
13595     Elems = Elems >> 8;
13596   }
13597 
13598   SDValue Shuffle =
13599       DAG.getVectorShuffle(Input.getValueType(), dl, Input,
13600                            DAG.getUNDEF(Input.getValueType()), ShuffleMask);
13601 
13602   EVT VT = N->getValueType(0);
13603   SDValue Conv = DAG.getBitcast(VT, Shuffle);
13604 
13605   EVT ExtVT = EVT::getVectorVT(*DAG.getContext(),
13606                                Input.getValueType().getVectorElementType(),
13607                                VT.getVectorNumElements());
13608   return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Conv,
13609                      DAG.getValueType(ExtVT));
13610 }
13611 
13612 // Look for build vector patterns where input operands come from sign
13613 // extended vector_extract elements of specific indices. If the correct indices
13614 // aren't used, add a vector shuffle to fix up the indices and create
13615 // SIGN_EXTEND_INREG node which selects the vector sign extend instructions
13616 // during instruction selection.
13617 static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
13618   // This array encodes the indices that the vector sign extend instructions
13619   // extract from when extending from one type to another for both BE and LE.
  // The right nibble of each byte corresponds to the LE indices,
  // and the left nibble of each byte corresponds to the BE indices.
13622   // For example: 0x3074B8FC  byte->word
13623   // For LE: the allowed indices are: 0x0,0x4,0x8,0xC
13624   // For BE: the allowed indices are: 0x3,0x7,0xB,0xF
13625   // For example: 0x000070F8  byte->double word
13626   // For LE: the allowed indices are: 0x0,0x8
13627   // For BE: the allowed indices are: 0x7,0xF
13628   uint64_t TargetElems[] = {
13629       0x3074B8FC, // b->w
13630       0x000070F8, // b->d
13631       0x10325476, // h->w
13632       0x00003074, // h->d
13633       0x00001032, // w->d
13634   };
13635 
13636   uint64_t Elems = 0;
13637   int Index;
13638   SDValue Input;
13639 
13640   auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
13641     if (!Op)
13642       return false;
13643     if (Op.getOpcode() != ISD::SIGN_EXTEND &&
13644         Op.getOpcode() != ISD::SIGN_EXTEND_INREG)
13645       return false;
13646 
13647     // A SIGN_EXTEND_INREG might be fed by an ANY_EXTEND to produce a value
13648     // of the right width.
13649     SDValue Extract = Op.getOperand(0);
13650     if (Extract.getOpcode() == ISD::ANY_EXTEND)
13651       Extract = Extract.getOperand(0);
13652     if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
13653       return false;
13654 
13655     ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
13656     if (!ExtOp)
13657       return false;
13658 
13659     Index = ExtOp->getZExtValue();
13660     if (Input && Input != Extract.getOperand(0))
13661       return false;
13662 
13663     if (!Input)
13664       Input = Extract.getOperand(0);
13665 
13666     Elems = Elems << 8;
13667     Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
13668     Elems |= Index;
13669 
13670     return true;
13671   };
13672 
  // If the build vector operands aren't sign-extended vector extracts
  // of the same input vector, then return.
13675   for (unsigned i = 0; i < N->getNumOperands(); i++) {
13676     if (!isSExtOfVecExtract(N->getOperand(i))) {
13677       return SDValue();
13678     }
13679   }
13680 
  // If the vector extract indices are not correct, add the appropriate
  // vector_shuffle.
13683   int TgtElemArrayIdx;
13684   int InputSize = Input.getValueType().getScalarSizeInBits();
13685   int OutputSize = N->getValueType(0).getScalarSizeInBits();
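  // The sum of the input and output scalar sizes uniquely identifies the
  // extension kind: 8+32=40 (b->w), 8+64=72 (b->d), 16+32=48 (h->w),
  // 16+64=80 (h->d), 32+64=96 (w->d).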
13686   if (InputSize + OutputSize == 40)
13687     TgtElemArrayIdx = 0;
13688   else if (InputSize + OutputSize == 72)
13689     TgtElemArrayIdx = 1;
13690   else if (InputSize + OutputSize == 48)
13691     TgtElemArrayIdx = 2;
13692   else if (InputSize + OutputSize == 80)
13693     TgtElemArrayIdx = 3;
13694   else if (InputSize + OutputSize == 96)
13695     TgtElemArrayIdx = 4;
13696   else
13697     return SDValue();
13698 
13699   uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
13700   CorrectElems = DAG.getDataLayout().isLittleEndian()
13701                      ? CorrectElems & 0x0F0F0F0F0F0F0F0F
13702                      : CorrectElems & 0xF0F0F0F0F0F0F0F0;
13703   if (Elems != CorrectElems) {
13704     return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
13705   }
13706 
13707   // Regular lowering will catch cases where a shuffle is not needed.
13708   return SDValue();
13709 }
13710 
13711 // Look for the pattern of a load from a narrow width to i128, feeding
13712 // into a BUILD_VECTOR of v1i128. Replace this sequence with a PPCISD node
13713 // (LXVRZX). This node represents a zero extending load that will be matched
13714 // to the Load VSX Vector Rightmost instructions.
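// Roughly, the DAG pattern being matched is:
//   (v1i128 (build_vector (i128 (zextload <i8/i16/i32/i64> ptr))))
// which is rewritten as:
//   (v1i128 (PPCISD::LXVRZX chain, ptr, width-in-bits))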
13715 static SDValue combineBVZEXTLOAD(SDNode *N, SelectionDAG &DAG) {
13716   SDLoc DL(N);
13717 
13718   // This combine is only eligible for a BUILD_VECTOR of v1i128.
13719   if (N->getValueType(0) != MVT::v1i128)
13720     return SDValue();
13721 
13722   SDValue Operand = N->getOperand(0);
13723   // Proceed with the transformation if the operand to the BUILD_VECTOR
13724   // is a load instruction.
13725   if (Operand.getOpcode() != ISD::LOAD)
13726     return SDValue();
13727 
  // The opcode check above guarantees that this cast succeeds.
  LoadSDNode *LD = cast<LoadSDNode>(Operand);
13729   EVT MemoryType = LD->getMemoryVT();
13730 
  // This transformation is only valid if we are loading either a byte,
  // halfword, word, or doubleword.
13733   bool ValidLDType = MemoryType == MVT::i8 || MemoryType == MVT::i16 ||
13734                      MemoryType == MVT::i32 || MemoryType == MVT::i64;
13735 
13736   // Ensure that the load from the narrow width is being zero extended to i128.
13737   if (!ValidLDType ||
13738       (LD->getExtensionType() != ISD::ZEXTLOAD &&
13739        LD->getExtensionType() != ISD::EXTLOAD))
13740     return SDValue();
13741 
13742   SDValue LoadOps[] = {
13743       LD->getChain(), LD->getBasePtr(),
13744       DAG.getIntPtrConstant(MemoryType.getScalarSizeInBits(), DL)};
13745 
13746   return DAG.getMemIntrinsicNode(PPCISD::LXVRZX, DL,
13747                                  DAG.getVTList(MVT::v1i128, MVT::Other),
13748                                  LoadOps, MemoryType, LD->getMemOperand());
13749 }
13750 
13751 SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
13752                                                  DAGCombinerInfo &DCI) const {
13753   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13754          "Should be called with a BUILD_VECTOR node");
13755 
13756   SelectionDAG &DAG = DCI.DAG;
13757   SDLoc dl(N);
13758 
13759   if (!Subtarget.hasVSX())
13760     return SDValue();
13761 
13762   // The target independent DAG combiner will leave a build_vector of
13763   // float-to-int conversions intact. We can generate MUCH better code for
13764   // a float-to-int conversion of a vector of floats.
13765   SDValue FirstInput = N->getOperand(0);
13766   if (FirstInput.getOpcode() == PPCISD::MFVSR) {
13767     SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
13768     if (Reduced)
13769       return Reduced;
13770   }
13771 
13772   // If we're building a vector out of consecutive loads, just load that
13773   // vector type.
13774   SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG);
13775   if (Reduced)
13776     return Reduced;
13777 
13778   // If we're building a vector out of extended elements from another vector
13779   // we have P9 vector integer extend instructions. The code assumes legal
13780   // input types (i.e. it can't handle things like v4i16) so do not run before
13781   // legalization.
13782   if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) {
13783     Reduced = combineBVOfVecSExt(N, DAG);
13784     if (Reduced)
13785       return Reduced;
13786   }
13787 
13788   // On Power10, the Load VSX Vector Rightmost instructions can be utilized
13789   // if this is a BUILD_VECTOR of v1i128, and if the operand to the BUILD_VECTOR
13790   // is a load from <valid narrow width> to i128.
13791   if (Subtarget.isISA3_1()) {
13792     SDValue BVOfZLoad = combineBVZEXTLOAD(N, DAG);
13793     if (BVOfZLoad)
13794       return BVOfZLoad;
13795   }
13796 
13797   if (N->getValueType(0) != MVT::v2f64)
13798     return SDValue();
13799 
13800   // Looking for:
13801   // (build_vector ([su]int_to_fp (extractelt 0)), [su]int_to_fp (extractelt 1))
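  // (or extracts of elements 2 and 3), which is turned into a single
  // PPCISD::[SU]INT_VEC_TO_FP of the v4i32 source vector plus the index of
  // the doubleword subvector holding the two elements.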
13802   if (FirstInput.getOpcode() != ISD::SINT_TO_FP &&
13803       FirstInput.getOpcode() != ISD::UINT_TO_FP)
13804     return SDValue();
13805   if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
13806       N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
13807     return SDValue();
13808   if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
13809     return SDValue();
13810 
13811   SDValue Ext1 = FirstInput.getOperand(0);
13812   SDValue Ext2 = N->getOperand(1).getOperand(0);
  if (Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
13815     return SDValue();
13816 
13817   ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
13818   ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
13819   if (!Ext1Op || !Ext2Op)
13820     return SDValue();
13821   if (Ext1.getOperand(0).getValueType() != MVT::v4i32 ||
13822       Ext1.getOperand(0) != Ext2.getOperand(0))
13823     return SDValue();
13824 
13825   int FirstElem = Ext1Op->getZExtValue();
13826   int SecondElem = Ext2Op->getZExtValue();
13827   int SubvecIdx;
13828   if (FirstElem == 0 && SecondElem == 1)
13829     SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
13830   else if (FirstElem == 2 && SecondElem == 3)
13831     SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
13832   else
13833     return SDValue();
13834 
13835   SDValue SrcVec = Ext1.getOperand(0);
13836   auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
13837     PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
13838   return DAG.getNode(NodeType, dl, MVT::v2f64,
13839                      SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
13840 }
13841 
13842 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
13843                                               DAGCombinerInfo &DCI) const {
13844   assert((N->getOpcode() == ISD::SINT_TO_FP ||
13845           N->getOpcode() == ISD::UINT_TO_FP) &&
13846          "Need an int -> FP conversion node here");
13847 
13848   if (useSoftFloat() || !Subtarget.has64BitSupport())
13849     return SDValue();
13850 
13851   SelectionDAG &DAG = DCI.DAG;
13852   SDLoc dl(N);
13853   SDValue Op(N, 0);
13854 
  // Don't handle ppc_fp128 here, or conversions from integer types the
  // hardware cannot handle directly (only i8 through i64 sources are valid).
13857   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
13858     return SDValue();
13859   if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
13860       Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
13861     return SDValue();
13862 
13863   SDValue FirstOperand(Op.getOperand(0));
13864   bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
13865     (FirstOperand.getValueType() == MVT::i8 ||
13866      FirstOperand.getValueType() == MVT::i16);
13867   if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
13868     bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
13869     bool DstDouble = Op.getValueType() == MVT::f64;
13870     unsigned ConvOp = Signed ?
13871       (DstDouble ? PPCISD::FCFID  : PPCISD::FCFIDS) :
13872       (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
13873     SDValue WidthConst =
13874       DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
13875                             dl, false);
13876     LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
13877     SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
13878     SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
13879                                          DAG.getVTList(MVT::f64, MVT::Other),
13880                                          Ops, MVT::i8, LDN->getMemOperand());
13881 
13882     // For signed conversion, we need to sign-extend the value in the VSR
13883     if (Signed) {
13884       SDValue ExtOps[] = { Ld, WidthConst };
13885       SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
13886       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
13887     } else
13888       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
13889   }
13890 
  // For i32 intermediate values, unfortunately, the conversion functions
  // leave the upper 32 bits of the value undefined. Within the set of
  // scalar instructions, we have no method for zero- or sign-extending the
  // value. Thus, we cannot handle i32 intermediate values here.
13896   if (Op.getOperand(0).getValueType() == MVT::i32)
13897     return SDValue();
13898 
13899   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
13900          "UINT_TO_FP is supported only with FPCVT");
13901 
13902   // If we have FCFIDS, then use it when converting to single-precision.
13903   // Otherwise, convert to double-precision and then round.
13904   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
13905                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
13906                                                             : PPCISD::FCFIDS)
13907                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
13908                                                             : PPCISD::FCFID);
13909   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
13910                   ? MVT::f32
13911                   : MVT::f64;
13912 
  // If we're converting from a float to an int and back to a float again,
  // then we don't need the store/load pair at all.
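  // For example, (f64 (sint_to_fp (fp_to_sint f64 %x))) becomes
  // (f64 (PPCISD::FCFID (PPCISD::FCTIDZ %x))), keeping the value in a
  // floating-point register throughout.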
13915   if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
13916        Subtarget.hasFPCVT()) ||
13917       (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
13918     SDValue Src = Op.getOperand(0).getOperand(0);
13919     if (Src.getValueType() == MVT::f32) {
13920       Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
13921       DCI.AddToWorklist(Src.getNode());
13922     } else if (Src.getValueType() != MVT::f64) {
13923       // Make sure that we don't pick up a ppc_fp128 source value.
13924       return SDValue();
13925     }
13926 
13927     unsigned FCTOp =
13928       Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
13929                                                         PPCISD::FCTIDUZ;
13930 
13931     SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
13932     SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);
13933 
13934     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
13935       FP = DAG.getNode(ISD::FP_ROUND, dl,
13936                        MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
13937       DCI.AddToWorklist(FP.getNode());
13938     }
13939 
13940     return FP;
13941   }
13942 
13943   return SDValue();
13944 }
13945 
13946 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
13947 // builtins) into loads with swaps.
13948 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
13949                                               DAGCombinerInfo &DCI) const {
13950   SelectionDAG &DAG = DCI.DAG;
13951   SDLoc dl(N);
13952   SDValue Chain;
13953   SDValue Base;
13954   MachineMemOperand *MMO;
13955 
13956   switch (N->getOpcode()) {
13957   default:
13958     llvm_unreachable("Unexpected opcode for little endian VSX load");
13959   case ISD::LOAD: {
13960     LoadSDNode *LD = cast<LoadSDNode>(N);
13961     Chain = LD->getChain();
13962     Base = LD->getBasePtr();
13963     MMO = LD->getMemOperand();
13964     // If the MMO suggests this isn't a load of a full vector, leave
13965     // things alone.  For a built-in, we have to make the change for
    // correctness, so if there is a size problem, it will be a bug.
13967     if (MMO->getSize() < 16)
13968       return SDValue();
13969     break;
13970   }
13971   case ISD::INTRINSIC_W_CHAIN: {
13972     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
13973     Chain = Intrin->getChain();
13974     // Similarly to the store case below, Intrin->getBasePtr() doesn't get
13975     // us what we want. Get operand 2 instead.
13976     Base = Intrin->getOperand(2);
13977     MMO = Intrin->getMemOperand();
13978     break;
13979   }
13980   }
13981 
13982   MVT VecTy = N->getValueType(0).getSimpleVT();
13983 
13984   // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is
13985   // aligned and the type is a vector with elements up to 4 bytes
13986   if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
13987       VecTy.getScalarSizeInBits() <= 32) {
13988     return SDValue();
13989   }
13990 
13991   SDValue LoadOps[] = { Chain, Base };
13992   SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
13993                                          DAG.getVTList(MVT::v2f64, MVT::Other),
13994                                          LoadOps, MVT::v2f64, MMO);
13995 
13996   DCI.AddToWorklist(Load.getNode());
13997   Chain = Load.getValue(1);
13998   SDValue Swap = DAG.getNode(
13999       PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load);
14000   DCI.AddToWorklist(Swap.getNode());
14001 
14002   // Add a bitcast if the resulting load type doesn't match v2f64.
14003   if (VecTy != MVT::v2f64) {
14004     SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap);
14005     DCI.AddToWorklist(N.getNode());
14006     // Package {bitcast value, swap's chain} to match Load's shape.
14007     return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other),
14008                        N, Swap.getValue(1));
14009   }
14010 
14011   return Swap;
14012 }
14013 
14014 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
14015 // builtins) into stores with swaps.
14016 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
14017                                                DAGCombinerInfo &DCI) const {
14018   SelectionDAG &DAG = DCI.DAG;
14019   SDLoc dl(N);
14020   SDValue Chain;
14021   SDValue Base;
14022   unsigned SrcOpnd;
14023   MachineMemOperand *MMO;
14024 
14025   switch (N->getOpcode()) {
14026   default:
14027     llvm_unreachable("Unexpected opcode for little endian VSX store");
14028   case ISD::STORE: {
14029     StoreSDNode *ST = cast<StoreSDNode>(N);
14030     Chain = ST->getChain();
14031     Base = ST->getBasePtr();
14032     MMO = ST->getMemOperand();
14033     SrcOpnd = 1;
14034     // If the MMO suggests this isn't a store of a full vector, leave
14035     // things alone.  For a built-in, we have to make the change for
    // correctness, so if there is a size problem, it will be a bug.
14037     if (MMO->getSize() < 16)
14038       return SDValue();
14039     break;
14040   }
14041   case ISD::INTRINSIC_VOID: {
14042     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
14043     Chain = Intrin->getChain();
14044     // Intrin->getBasePtr() oddly does not get what we want.
14045     Base = Intrin->getOperand(3);
14046     MMO = Intrin->getMemOperand();
14047     SrcOpnd = 2;
14048     break;
14049   }
14050   }
14051 
14052   SDValue Src = N->getOperand(SrcOpnd);
14053   MVT VecTy = Src.getValueType().getSimpleVT();
14054 
  // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the store is
  // aligned and the type is a vector with elements up to 4 bytes.
14057   if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
14058       VecTy.getScalarSizeInBits() <= 32) {
14059     return SDValue();
14060   }
14061 
  // All stores are done as v2f64, with a bitcast added where needed.
14063   if (VecTy != MVT::v2f64) {
14064     Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
14065     DCI.AddToWorklist(Src.getNode());
14066   }
14067 
14068   SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
14069                              DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
14070   DCI.AddToWorklist(Swap.getNode());
14071   Chain = Swap.getValue(1);
14072   SDValue StoreOps[] = { Chain, Swap, Base };
14073   SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
14074                                           DAG.getVTList(MVT::Other),
14075                                           StoreOps, VecTy, MMO);
14076   DCI.AddToWorklist(Store.getNode());
14077   return Store;
14078 }
14079 
14080 // Handle DAG combine for STORE (FP_TO_INT F).
14081 SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
14082                                                DAGCombinerInfo &DCI) const {
14083 
14084   SelectionDAG &DAG = DCI.DAG;
14085   SDLoc dl(N);
14086   unsigned Opcode = N->getOperand(1).getOpcode();
14087 
14088   assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT)
14089          && "Not a FP_TO_INT Instruction!");
14090 
14091   SDValue Val = N->getOperand(1).getOperand(0);
14092   EVT Op1VT = N->getOperand(1).getValueType();
14093   EVT ResVT = Val.getValueType();
14094 
14095   // Floating point types smaller than 32 bits are not legal on Power.
14096   if (ResVT.getScalarSizeInBits() < 32)
14097     return SDValue();
14098 
14099   // Only perform combine for conversion to i64/i32 or power9 i16/i8.
14100   bool ValidTypeForStoreFltAsInt =
14101         (Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
14102          (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));
14103 
14104   if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Vector() ||
14105       cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
14106     return SDValue();
14107 
14108   // Extend f32 values to f64
14109   if (ResVT.getScalarSizeInBits() == 32) {
14110     Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
14111     DCI.AddToWorklist(Val.getNode());
14112   }
14113 
14114   // Set signed or unsigned conversion opcode.
14115   unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT) ?
14116                           PPCISD::FP_TO_SINT_IN_VSR :
14117                           PPCISD::FP_TO_UINT_IN_VSR;
14118 
14119   Val = DAG.getNode(ConvOpcode,
14120                     dl, ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val);
14121   DCI.AddToWorklist(Val.getNode());
14122 
14123   // Set number of bytes being converted.
14124   unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8;
14125   SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2),
14126                     DAG.getIntPtrConstant(ByteSize, dl, false),
14127                     DAG.getValueType(Op1VT) };
14128 
14129   Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl,
14130           DAG.getVTList(MVT::Other), Ops,
14131           cast<StoreSDNode>(N)->getMemoryVT(),
14132           cast<StoreSDNode>(N)->getMemOperand());
14133 
14134   DCI.AddToWorklist(Val.getNode());
14135   return Val;
14136 }
14137 
14138 static bool isAlternatingShuffMask(const ArrayRef<int> &Mask, int NumElts) {
  // Check that the source of the element keeps flipping between the two
  // input vectors (i.e. Mask[i] < NumElts -> Mask[i+1] >= NumElts).
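  // For example, <0,16,1,17,2,18,3,19> alternates between the two inputs,
  // while <0,1,16,17,...> does not.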
14141   bool PrevElemFromFirstVec = Mask[0] < NumElts;
14142   for (int i = 1, e = Mask.size(); i < e; i++) {
14143     if (PrevElemFromFirstVec && Mask[i] < NumElts)
14144       return false;
14145     if (!PrevElemFromFirstVec && Mask[i] >= NumElts)
14146       return false;
14147     PrevElemFromFirstVec = !PrevElemFromFirstVec;
14148   }
14149   return true;
14150 }
14151 
14152 static bool isSplatBV(SDValue Op) {
14153   if (Op.getOpcode() != ISD::BUILD_VECTOR)
14154     return false;
14155   SDValue FirstOp;
14156 
14157   // Find first non-undef input.
14158   for (int i = 0, e = Op.getNumOperands(); i < e; i++) {
14159     FirstOp = Op.getOperand(i);
14160     if (!FirstOp.isUndef())
14161       break;
14162   }
14163 
14164   // All inputs are undef or the same as the first non-undef input.
14165   for (int i = 1, e = Op.getNumOperands(); i < e; i++)
14166     if (Op.getOperand(i) != FirstOp && !Op.getOperand(i).isUndef())
14167       return false;
14168   return true;
14169 }
14170 
14171 static SDValue isScalarToVec(SDValue Op) {
14172   if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
14173     return Op;
14174   if (Op.getOpcode() != ISD::BITCAST)
14175     return SDValue();
14176   Op = Op.getOperand(0);
14177   if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
14178     return Op;
14179   return SDValue();
14180 }
14181 
14182 static void fixupShuffleMaskForPermutedSToV(SmallVectorImpl<int> &ShuffV,
14183                                             int LHSMaxIdx, int RHSMinIdx,
14184                                             int RHSMaxIdx, int HalfVec) {
14185   for (int i = 0, e = ShuffV.size(); i < e; i++) {
14186     int Idx = ShuffV[i];
14187     if ((Idx >= 0 && Idx < LHSMaxIdx) || (Idx >= RHSMinIdx && Idx < RHSMaxIdx))
14188       ShuffV[i] += HalfVec;
14189   }
14191 }
14192 
14193 // Replace a SCALAR_TO_VECTOR with a SCALAR_TO_VECTOR_PERMUTED except if
14194 // the original is:
14195 // (<n x Ty> (scalar_to_vector (Ty (extract_elt <n x Ty> %a, C))))
14196 // In such a case, just change the shuffle mask to extract the element
14197 // from the permuted index.
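// For example, for a v4i32 the code below places the extracted element at
// index NumElts / 2 = 2, so (scalar_to_vector (extract_elt %a, C)) becomes
// a shuffle of %a with the mask <-1,-1,C,-1>.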
14198 static SDValue getSToVPermuted(SDValue OrigSToV, SelectionDAG &DAG) {
14199   SDLoc dl(OrigSToV);
14200   EVT VT = OrigSToV.getValueType();
14201   assert(OrigSToV.getOpcode() == ISD::SCALAR_TO_VECTOR &&
14202          "Expecting a SCALAR_TO_VECTOR here");
14203   SDValue Input = OrigSToV.getOperand(0);
14204 
14205   if (Input.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
14206     ConstantSDNode *Idx = dyn_cast<ConstantSDNode>(Input.getOperand(1));
14207     SDValue OrigVector = Input.getOperand(0);
14208 
14209     // Can't handle non-const element indices or different vector types
14210     // for the input to the extract and the output of the scalar_to_vector.
14211     if (Idx && VT == OrigVector.getValueType()) {
14212       SmallVector<int, 16> NewMask(VT.getVectorNumElements(), -1);
14213       NewMask[VT.getVectorNumElements() / 2] = Idx->getZExtValue();
14214       return DAG.getVectorShuffle(VT, dl, OrigVector, OrigVector, NewMask);
14215     }
14216   }
14217   return DAG.getNode(PPCISD::SCALAR_TO_VECTOR_PERMUTED, dl, VT,
14218                      OrigSToV.getOperand(0));
14219 }
14220 
14221 // On little endian subtargets, combine shuffles such as:
14222 // vector_shuffle<16,1,17,3,18,5,19,7,20,9,21,11,22,13,23,15>, <zero>, %b
14223 // into:
14224 // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7>, <zero>, %b
14225 // because the latter can be matched to a single instruction merge.
14226 // Furthermore, SCALAR_TO_VECTOR on little endian always involves a permute
14227 // to put the value into element zero. Adjust the shuffle mask so that the
14228 // vector can remain in permuted form (to prevent a swap prior to a shuffle).
14229 SDValue PPCTargetLowering::combineVectorShuffle(ShuffleVectorSDNode *SVN,
14230                                                 SelectionDAG &DAG) const {
14231   SDValue LHS = SVN->getOperand(0);
14232   SDValue RHS = SVN->getOperand(1);
14233   auto Mask = SVN->getMask();
14234   int NumElts = LHS.getValueType().getVectorNumElements();
14235   SDValue Res(SVN, 0);
14236   SDLoc dl(SVN);
14237 
14238   // None of these combines are useful on big endian systems since the ISA
14239   // already has a big endian bias.
14240   if (!Subtarget.isLittleEndian() || !Subtarget.hasVSX())
14241     return Res;
14242 
14243   // If this is not a shuffle of a shuffle and the first element comes from
14244   // the second vector, canonicalize to the commuted form. This will make it
14245   // more likely to match one of the single instruction patterns.
14246   if (Mask[0] >= NumElts && LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
14247       RHS.getOpcode() != ISD::VECTOR_SHUFFLE) {
14248     std::swap(LHS, RHS);
14249     Res = DAG.getCommutedVectorShuffle(*SVN);
14250     Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
14251   }
14252 
14253   // Adjust the shuffle mask if either input vector comes from a
14254   // SCALAR_TO_VECTOR and keep the respective input vector in permuted
14255   // form (to prevent the need for a swap).
14256   SmallVector<int, 16> ShuffV(Mask.begin(), Mask.end());
14257   SDValue SToVLHS = isScalarToVec(LHS);
14258   SDValue SToVRHS = isScalarToVec(RHS);
14259   if (SToVLHS || SToVRHS) {
14260     int NumEltsIn = SToVLHS ? SToVLHS.getValueType().getVectorNumElements()
14261                             : SToVRHS.getValueType().getVectorNumElements();
14262     int NumEltsOut = ShuffV.size();
14263 
14264     // Initially assume that neither input is permuted. These will be adjusted
14265     // accordingly if either input is.
14266     int LHSMaxIdx = -1;
14267     int RHSMinIdx = -1;
14268     int RHSMaxIdx = -1;
14269     int HalfVec = LHS.getValueType().getVectorNumElements() / 2;
14270 
14271     // Get the permuted scalar to vector nodes for the source(s) that come from
14272     // ISD::SCALAR_TO_VECTOR.
14273     if (SToVLHS) {
14274       // Set up the values for the shuffle vector fixup.
14275       LHSMaxIdx = NumEltsOut / NumEltsIn;
14276       SToVLHS = getSToVPermuted(SToVLHS, DAG);
14277       if (SToVLHS.getValueType() != LHS.getValueType())
14278         SToVLHS = DAG.getBitcast(LHS.getValueType(), SToVLHS);
14279       LHS = SToVLHS;
14280     }
14281     if (SToVRHS) {
14282       RHSMinIdx = NumEltsOut;
14283       RHSMaxIdx = NumEltsOut / NumEltsIn + RHSMinIdx;
14284       SToVRHS = getSToVPermuted(SToVRHS, DAG);
14285       if (SToVRHS.getValueType() != RHS.getValueType())
14286         SToVRHS = DAG.getBitcast(RHS.getValueType(), SToVRHS);
14287       RHS = SToVRHS;
14288     }
14289 
14290     // Fix up the shuffle mask to reflect where the desired element actually is.
14291     // The minimum and maximum indices that correspond to element zero for both
14292     // the LHS and RHS are computed and will control which shuffle mask entries
14293     // are to be changed. For example, if the RHS is permuted, any shuffle mask
14294     // entries in the range [RHSMinIdx,RHSMaxIdx) will be incremented by
14295     // HalfVec to refer to the corresponding element in the permuted vector.
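    // For example, with v4i32 operands HalfVec is 2; if the LHS came from a
    // SCALAR_TO_VECTOR, LHSMaxIdx is 1 and a mask entry of 0 becomes 2,
    // matching the slot the permuted scalar now occupies.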
14296     fixupShuffleMaskForPermutedSToV(ShuffV, LHSMaxIdx, RHSMinIdx, RHSMaxIdx,
14297                                     HalfVec);
14298     Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
14299 
14300     // We may have simplified away the shuffle. We won't be able to do anything
14301     // further with it here.
14302     if (!isa<ShuffleVectorSDNode>(Res))
14303       return Res;
14304     Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
14305   }
14306 
14307   // The common case after we commuted the shuffle is that the RHS is a splat
14308   // and we have elements coming in from the splat at indices that are not
14309   // conducive to using a merge.
14310   // Example:
14311   // vector_shuffle<0,17,1,19,2,21,3,23,4,25,5,27,6,29,7,31> t1, <zero>
14312   if (!isSplatBV(RHS))
14313     return Res;
14314 
14315   // We are looking for a mask such that all even elements are from
14316   // one vector and all odd elements from the other.
14317   if (!isAlternatingShuffMask(Mask, NumElts))
14318     return Res;
14319 
14320   // Adjust the mask so we are pulling in the same index from the splat
14321   // as the index from the interesting vector in consecutive elements.
14322   // Example (even elements from first vector):
14323   // vector_shuffle<0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23> t1, <zero>
14324   if (Mask[0] < NumElts)
14325     for (int i = 1, e = Mask.size(); i < e; i += 2)
14326       ShuffV[i] = (ShuffV[i - 1] + NumElts);
14327   // Example (odd elements from first vector):
14328   // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7> t1, <zero>
14329   else
14330     for (int i = 0, e = Mask.size(); i < e; i += 2)
14331       ShuffV[i] = (ShuffV[i + 1] + NumElts);
14332 
14333   // If the RHS has undefs, we need to remove them since we may have created
14334   // a shuffle that adds those instead of the splat value.
14335   SDValue SplatVal = cast<BuildVectorSDNode>(RHS.getNode())->getSplatValue();
14336   RHS = DAG.getSplatBuildVector(RHS.getValueType(), dl, SplatVal);
14337 
14338   Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
14339   return Res;
14340 }
14341 
14342 SDValue PPCTargetLowering::combineVReverseMemOP(ShuffleVectorSDNode *SVN,
14343                                                 LSBaseSDNode *LSBase,
14344                                                 DAGCombinerInfo &DCI) const {
14345   assert((ISD::isNormalLoad(LSBase) || ISD::isNormalStore(LSBase)) &&
14346         "Not a reverse memop pattern!");
14347 
14348   auto IsElementReverse = [](const ShuffleVectorSDNode *SVN) -> bool {
14349     auto Mask = SVN->getMask();
14350     int i = 0;
14351     auto I = Mask.rbegin();
14352     auto E = Mask.rend();
14353 
14354     for (; I != E; ++I) {
14355       if (*I != i)
14356         return false;
14357       i++;
14358     }
14359     return true;
14360   };
14361 
14362   SelectionDAG &DAG = DCI.DAG;
14363   EVT VT = SVN->getValueType(0);
14364 
14365   if (!isTypeLegal(VT) || !Subtarget.isLittleEndian() || !Subtarget.hasVSX())
14366     return SDValue();
14367 
  // Before P9, the PPCVSXSwapRemoval pass hacks the element order (see the
  // comment in PPCVSXSwapRemoval.cpp). This combine conflicts with that
  // optimization, so we don't perform it on those subtargets.
14371   if (!Subtarget.hasP9Vector())
14372     return SDValue();
14373 
  if (!IsElementReverse(SVN))
14375     return SDValue();
14376 
14377   if (LSBase->getOpcode() == ISD::LOAD) {
14378     SDLoc dl(SVN);
14379     SDValue LoadOps[] = {LSBase->getChain(), LSBase->getBasePtr()};
14380     return DAG.getMemIntrinsicNode(
14381         PPCISD::LOAD_VEC_BE, dl, DAG.getVTList(VT, MVT::Other), LoadOps,
14382         LSBase->getMemoryVT(), LSBase->getMemOperand());
14383   }
14384 
14385   if (LSBase->getOpcode() == ISD::STORE) {
14386     SDLoc dl(LSBase);
14387     SDValue StoreOps[] = {LSBase->getChain(), SVN->getOperand(0),
14388                           LSBase->getBasePtr()};
14389     return DAG.getMemIntrinsicNode(
14390         PPCISD::STORE_VEC_BE, dl, DAG.getVTList(MVT::Other), StoreOps,
14391         LSBase->getMemoryVT(), LSBase->getMemOperand());
14392   }
14393 
14394   llvm_unreachable("Expected a load or store node here");
14395 }
14396 
14397 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
14398                                              DAGCombinerInfo &DCI) const {
14399   SelectionDAG &DAG = DCI.DAG;
14400   SDLoc dl(N);
14401   switch (N->getOpcode()) {
14402   default: break;
14403   case ISD::ADD:
14404     return combineADD(N, DCI);
14405   case ISD::SHL:
14406     return combineSHL(N, DCI);
14407   case ISD::SRA:
14408     return combineSRA(N, DCI);
14409   case ISD::SRL:
14410     return combineSRL(N, DCI);
14411   case ISD::MUL:
14412     return combineMUL(N, DCI);
14413   case ISD::FMA:
14414   case PPCISD::FNMSUB:
14415     return combineFMALike(N, DCI);
14416   case PPCISD::SHL:
14417     if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
14418         return N->getOperand(0);
14419     break;
14420   case PPCISD::SRL:
14421     if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
14422         return N->getOperand(0);
14423     break;
14424   case PPCISD::SRA:
14425     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
14426       if (C->isNullValue() ||   //  0 >>s V -> 0.
14427           C->isAllOnesValue())    // -1 >>s V -> -1.
14428         return N->getOperand(0);
14429     }
14430     break;
14431   case ISD::SIGN_EXTEND:
14432   case ISD::ZERO_EXTEND:
14433   case ISD::ANY_EXTEND:
14434     return DAGCombineExtBoolTrunc(N, DCI);
14435   case ISD::TRUNCATE:
14436     return combineTRUNCATE(N, DCI);
14437   case ISD::SETCC:
14438     if (SDValue CSCC = combineSetCC(N, DCI))
14439       return CSCC;
14440     LLVM_FALLTHROUGH;
14441   case ISD::SELECT_CC:
14442     return DAGCombineTruncBoolExt(N, DCI);
14443   case ISD::SINT_TO_FP:
14444   case ISD::UINT_TO_FP:
14445     return combineFPToIntToFP(N, DCI);
14446   case ISD::VECTOR_SHUFFLE:
14447     if (ISD::isNormalLoad(N->getOperand(0).getNode())) {
14448       LSBaseSDNode* LSBase = cast<LSBaseSDNode>(N->getOperand(0));
14449       return combineVReverseMemOP(cast<ShuffleVectorSDNode>(N), LSBase, DCI);
14450     }
14451     return combineVectorShuffle(cast<ShuffleVectorSDNode>(N), DCI.DAG);
14452   case ISD::STORE: {
14453 
14454     EVT Op1VT = N->getOperand(1).getValueType();
14455     unsigned Opcode = N->getOperand(1).getOpcode();
14456 
14457     if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) {
      SDValue Val = combineStoreFPToInt(N, DCI);
14459       if (Val)
14460         return Val;
14461     }
14462 
14463     if (Opcode == ISD::VECTOR_SHUFFLE && ISD::isNormalStore(N)) {
14464       ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N->getOperand(1));
      SDValue Val = combineVReverseMemOP(SVN, cast<LSBaseSDNode>(N), DCI);
14466       if (Val)
14467         return Val;
14468     }
14469 
14470     // Turn STORE (BSWAP) -> sthbrx/stwbrx.
14471     if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&
14472         N->getOperand(1).getNode()->hasOneUse() &&
14473         (Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
14474          (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {
14475 
      // STBRX can only handle simple types and it makes no sense to store
      // fewer than two bytes in byte-reversed order.
14478       EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
14479       if (mVT.isExtended() || mVT.getSizeInBits() < 16)
14480         break;
14481 
14482       SDValue BSwapOp = N->getOperand(1).getOperand(0);
14483       // Do an any-extend to 32-bits if this is a half-word input.
14484       if (BSwapOp.getValueType() == MVT::i16)
14485         BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
14486 
      // If the type of the BSWAP operand is wider than the stored memory
      // width, it needs to be shifted right before the STBRX.
14489       if (Op1VT.bitsGT(mVT)) {
14490         int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
14491         BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
14492                               DAG.getConstant(Shift, dl, MVT::i32));
14493         // Need to truncate if this is a bswap of i64 stored as i32/i16.
14494         if (Op1VT == MVT::i64)
14495           BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
14496       }
14497 
14498       SDValue Ops[] = {
14499         N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
14500       };
14501       return
14502         DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
14503                                 Ops, cast<StoreSDNode>(N)->getMemoryVT(),
14504                                 cast<StoreSDNode>(N)->getMemOperand());
14505     }
14506 
14507     // STORE Constant:i32<0>  ->  STORE<trunc to i32> Constant:i64<0>
14508     // So it can increase the chance of CSE constant construction.
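    // For example, an i32 store of zero and an i64 store of zero can then
    // share a single materialized zero constant.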
14509     if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
14510         isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
      // Need to sign-extend to 64 bits to handle negative values.
14512       EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
14513       uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
14514                                     MemVT.getSizeInBits());
14515       SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);
14516 
14517       // DAG.getTruncStore() can't be used here because it doesn't accept
14518       // the general (base + offset) addressing mode.
14519       // So we use UpdateNodeOperands and setTruncatingStore instead.
14520       DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
14521                              N->getOperand(3));
14522       cast<StoreSDNode>(N)->setTruncatingStore(true);
14523       return SDValue(N, 0);
14524     }
14525 
    // For little endian, VSX stores require generating xxswapd/stxvd2x.
14527     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
14528     if (Op1VT.isSimple()) {
14529       MVT StoreVT = Op1VT.getSimpleVT();
14530       if (Subtarget.needsSwapsForVSXMemOps() &&
14531           (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
14532            StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
14533         return expandVSXStoreForLE(N, DCI);
14534     }
14535     break;
14536   }
14537   case ISD::LOAD: {
14538     LoadSDNode *LD = cast<LoadSDNode>(N);
14539     EVT VT = LD->getValueType(0);
14540 
14541     // For little endian, VSX loads require generating lxvd2x/xxswapd.
14542     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
14543     if (VT.isSimple()) {
14544       MVT LoadVT = VT.getSimpleVT();
14545       if (Subtarget.needsSwapsForVSXMemOps() &&
14546           (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
14547            LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
14548         return expandVSXLoadForLE(N, DCI);
14549     }
14550 
14551     // We sometimes end up with a 64-bit integer load, from which we extract
14552     // two single-precision floating-point numbers. This happens with
14553     // std::complex<float>, and other similar structures, because of the way we
14554     // canonicalize structure copies. However, if we lack direct moves,
14555     // then the final bitcasts from the extracted integer values to the
14556     // floating-point numbers turn into store/load pairs. Even with direct moves,
14557     // just loading the two floating-point numbers is likely better.
14558     auto ReplaceTwoFloatLoad = [&]() {
14559       if (VT != MVT::i64)
14560         return false;
14561 
14562       if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
14563           LD->isVolatile())
14564         return false;
14565 
14566       //  We're looking for a sequence like this:
14567       //  t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64
14568       //      t16: i64 = srl t13, Constant:i32<32>
14569       //    t17: i32 = truncate t16
14570       //  t18: f32 = bitcast t17
14571       //    t19: i32 = truncate t13
14572       //  t20: f32 = bitcast t19
14573 
14574       if (!LD->hasNUsesOfValue(2, 0))
14575         return false;
14576 
14577       auto UI = LD->use_begin();
14578       while (UI.getUse().getResNo() != 0) ++UI;
14579       SDNode *Trunc = *UI++;
14580       while (UI.getUse().getResNo() != 0) ++UI;
14581       SDNode *RightShift = *UI;
14582       if (Trunc->getOpcode() != ISD::TRUNCATE)
14583         std::swap(Trunc, RightShift);
14584 
14585       if (Trunc->getOpcode() != ISD::TRUNCATE ||
14586           Trunc->getValueType(0) != MVT::i32 ||
14587           !Trunc->hasOneUse())
14588         return false;
14589       if (RightShift->getOpcode() != ISD::SRL ||
14590           !isa<ConstantSDNode>(RightShift->getOperand(1)) ||
14591           RightShift->getConstantOperandVal(1) != 32 ||
14592           !RightShift->hasOneUse())
14593         return false;
14594 
14595       SDNode *Trunc2 = *RightShift->use_begin();
14596       if (Trunc2->getOpcode() != ISD::TRUNCATE ||
14597           Trunc2->getValueType(0) != MVT::i32 ||
14598           !Trunc2->hasOneUse())
14599         return false;
14600 
14601       SDNode *Bitcast = *Trunc->use_begin();
14602       SDNode *Bitcast2 = *Trunc2->use_begin();
14603 
14604       if (Bitcast->getOpcode() != ISD::BITCAST ||
14605           Bitcast->getValueType(0) != MVT::f32)
14606         return false;
14607       if (Bitcast2->getOpcode() != ISD::BITCAST ||
14608           Bitcast2->getValueType(0) != MVT::f32)
14609         return false;
14610 
14611       if (Subtarget.isLittleEndian())
14612         std::swap(Bitcast, Bitcast2);
14613 
14614       // Bitcast has the second float (in memory-layout order) and Bitcast2
14615       // has the first one.
14616 
14617       SDValue BasePtr = LD->getBasePtr();
14618       if (LD->isIndexed()) {
14619         assert(LD->getAddressingMode() == ISD::PRE_INC &&
14620                "Non-pre-inc AM on PPC?");
14621         BasePtr =
14622           DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
14623                       LD->getOffset());
14624       }
14625 
14626       auto MMOFlags =
14627           LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
14628       SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
14629                                       LD->getPointerInfo(), LD->getAlignment(),
14630                                       MMOFlags, LD->getAAInfo());
14631       SDValue AddPtr =
14632         DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
14633                     BasePtr, DAG.getIntPtrConstant(4, dl));
14634       SDValue FloatLoad2 = DAG.getLoad(
14635           MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
14636           LD->getPointerInfo().getWithOffset(4),
14637           MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo());
14638 
14639       if (LD->isIndexed()) {
14640         // Note that DAGCombine should re-form any pre-increment load(s) from
14641         // what is produced here if that makes sense.
14642         DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr);
14643       }
14644 
14645       DCI.CombineTo(Bitcast2, FloatLoad);
14646       DCI.CombineTo(Bitcast, FloatLoad2);
14647 
14648       DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 2 : 1),
14649                                     SDValue(FloatLoad2.getNode(), 1));
14650       return true;
14651     };
14652 
14653     if (ReplaceTwoFloatLoad())
14654       return SDValue(N, 0);
14655 
14656     EVT MemVT = LD->getMemoryVT();
14657     Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
14658     Align ABIAlignment = DAG.getDataLayout().getABITypeAlign(Ty);
14659     if (LD->isUnindexed() && VT.isVector() &&
14660         ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
14661           // P8 and later hardware should just use LOAD.
14662           !Subtarget.hasP8Vector() &&
14663           (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
14664            VT == MVT::v4f32))) &&
14665         LD->getAlign() < ABIAlignment) {
14666       // This is a type-legal unaligned Altivec load.
14667       SDValue Chain = LD->getChain();
14668       SDValue Ptr = LD->getBasePtr();
14669       bool isLittleEndian = Subtarget.isLittleEndian();
14670 
14671       // This implements the loading of unaligned vectors as described in
14672       // the venerable Apple Velocity Engine overview. Specifically:
14673       // https://developer.apple.com/hardwaredrivers/ve/alignment.html
14674       // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
14675       //
14676       // The general idea is to expand a sequence of one or more unaligned
14677       // loads into an alignment-based permutation-control instruction (lvsl
14678       // or lvsr), a series of regular vector loads (which always truncate
14679       // their input address to an aligned address), and a series of
14680       // permutations.  The results of these permutations are the requested
14681       // loaded values.  The trick is that the last "extra" load is not taken
14682       // from the address you might suspect (sizeof(vector) bytes after the
14683       // last requested load), but rather sizeof(vector) - 1 bytes after the
14684       // last requested vector. The point of this is to avoid a page fault if
14685       // the base address happened to be aligned. This works because if the
14686       // base address is aligned, then adding less than a full vector length
14687       // will cause the last vector in the sequence to be (re)loaded.
14688       // Otherwise, the next vector will be fetched as you might suspect was
14689       // necessary.
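      //
      // As a rough sketch, on big endian systems the expansion below
      // amounts to:
      //   lvsl  vMask, 0, rPtr      ; permute control from low address bits
      //   lvx   v1, 0, rPtr         ; aligned load at the base address
      //   addi  rPtr2, rPtr, 15     ; sizeof(vector) - 1 past the base
      //                             ; (or 16; see IncValue below)
      //   lvx   v2, 0, rPtr2        ; the "extra" aligned load
      //   vperm vResult, v1, v2, vMask
      // On little endian, lvsr is used and the vperm inputs are swapped.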
14690 
14691       // We might be able to reuse the permutation generation from
14692       // a different base address offset from this one by an aligned amount.
14693       // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
14694       // optimization later.
14695       Intrinsic::ID Intr, IntrLD, IntrPerm;
14696       MVT PermCntlTy, PermTy, LDTy;
14697       Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr
14698                             : Intrinsic::ppc_altivec_lvsl;
14699       IntrLD = Intrinsic::ppc_altivec_lvx;
14700       IntrPerm = Intrinsic::ppc_altivec_vperm;
14701       PermCntlTy = MVT::v16i8;
14702       PermTy = MVT::v4i32;
14703       LDTy = MVT::v4i32;
14704 
14705       SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);
14706 
14707       // Create the new MMO for the new base load. It is like the original MMO,
14708       // but represents an area in memory almost twice the vector size centered
14709       // on the original address. If the address is unaligned, we might start
14710       // reading up to (sizeof(vector)-1) bytes below the address of the
14711       // original unaligned load.
14712       MachineFunction &MF = DAG.getMachineFunction();
14713       MachineMemOperand *BaseMMO =
14714         MF.getMachineMemOperand(LD->getMemOperand(),
14715                                 -(long)MemVT.getStoreSize()+1,
14716                                 2*MemVT.getStoreSize()-1);
14717 
14718       // Create the new base load.
14719       SDValue LDXIntID =
14720           DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout()));
14721       SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
14722       SDValue BaseLoad =
14723         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
14724                                 DAG.getVTList(PermTy, MVT::Other),
14725                                 BaseLoadOps, LDTy, BaseMMO);
14726 
14727       // Note that the value of IncOffset (which is provided to the next
14728       // load's pointer info offset value, and thus used to calculate the
14729       // alignment), and the value of IncValue (which is actually used to
14730       // increment the pointer value) are different! This is because we
14731       // require the next load to appear to be aligned, even though it
14732       // is actually offset from the base pointer by a lesser amount.
14733       int IncOffset = VT.getSizeInBits() / 8;
14734       int IncValue = IncOffset;
14735 
14736       // Walk (both up and down) the chain looking for another load at the real
14737       // (aligned) offset (the alignment of the other load does not matter in
14738       // this case). If found, then do not use the offset reduction trick, as
14739       // that will prevent the loads from being later combined (as they would
14740       // otherwise be duplicates).
14741       if (!findConsecutiveLoad(LD, DAG))
14742         --IncValue;
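      // For a 16-byte vector this leaves IncValue at 15, placing the extra
      // load sizeof(vector) - 1 bytes past the base as described above.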
14743 
14744       SDValue Increment =
14745           DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout()));
14746       Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
14747 
14748       MachineMemOperand *ExtraMMO =
14749         MF.getMachineMemOperand(LD->getMemOperand(),
14750                                 1, 2*MemVT.getStoreSize()-1);
14751       SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
14752       SDValue ExtraLoad =
14753         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
14754                                 DAG.getVTList(PermTy, MVT::Other),
14755                                 ExtraLoadOps, LDTy, ExtraMMO);
14756 
14757       SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
14758         BaseLoad.getValue(1), ExtraLoad.getValue(1));
14759 
14760       // Because vperm has a big-endian bias, we must reverse the order
14761       // of the input vectors and complement the permute control vector
14762       // when generating little endian code.  We have already handled the
14763       // latter by using lvsr instead of lvsl, so just reverse BaseLoad
14764       // and ExtraLoad here.
14765       SDValue Perm;
14766       if (isLittleEndian)
14767         Perm = BuildIntrinsicOp(IntrPerm,
14768                                 ExtraLoad, BaseLoad, PermCntl, DAG, dl);
14769       else
14770         Perm = BuildIntrinsicOp(IntrPerm,
14771                                 BaseLoad, ExtraLoad, PermCntl, DAG, dl);
14772 
14773       if (VT != PermTy)
14774         Perm = Subtarget.hasAltivec()
14775                    ? DAG.getNode(ISD::BITCAST, dl, VT, Perm)
14776                    : DAG.getNode(ISD::FP_ROUND, dl, VT, Perm,
14777                                  DAG.getTargetConstant(1, dl, MVT::i64));
14778                                // second argument is 1 because this rounding
14779                                // is always exact.
14780 
14781       // The output of the permutation is our loaded result, the TokenFactor is
14782       // our new chain.
14783       DCI.CombineTo(N, Perm, TF);
14784       return SDValue(N, 0);
14785     }
14786     }
14787     break;
14788     case ISD::INTRINSIC_WO_CHAIN: {
14789       bool isLittleEndian = Subtarget.isLittleEndian();
14790       unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
14791       Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
14792                                            : Intrinsic::ppc_altivec_lvsl);
14793       if (IID == Intr && N->getOperand(1)->getOpcode() == ISD::ADD) {
14794         SDValue Add = N->getOperand(1);
14795 
14796         int Bits = 4 /* 16 byte alignment */;
14797 
14798         if (DAG.MaskedValueIsZero(Add->getOperand(1),
14799                                   APInt::getAllOnesValue(Bits /* alignment */)
14800                                       .zext(Add.getScalarValueSizeInBits()))) {
14801           SDNode *BasePtr = Add->getOperand(0).getNode();
14802           for (SDNode::use_iterator UI = BasePtr->use_begin(),
14803                                     UE = BasePtr->use_end();
14804                UI != UE; ++UI) {
14805             if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
14806                 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() ==
14807                     IID) {
14808               // We've found another LVSL/LVSR, and this address is an aligned
14809               // multiple of that one. The results will be the same, so use the
14810               // one we've just found instead.
14811 
14812               return SDValue(*UI, 0);
14813             }
14814           }
14815         }
14816 
14817         if (isa<ConstantSDNode>(Add->getOperand(1))) {
14818           SDNode *BasePtr = Add->getOperand(0).getNode();
14819           for (SDNode::use_iterator UI = BasePtr->use_begin(),
14820                UE = BasePtr->use_end(); UI != UE; ++UI) {
14821             if (UI->getOpcode() == ISD::ADD &&
14822                 isa<ConstantSDNode>(UI->getOperand(1)) &&
14823                 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
14824                  cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
14825                 (1ULL << Bits) == 0) {
14826               SDNode *OtherAdd = *UI;
14827               for (SDNode::use_iterator VI = OtherAdd->use_begin(),
14828                    VE = OtherAdd->use_end(); VI != VE; ++VI) {
14829                 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
14830                     cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) {
14831                   return SDValue(*VI, 0);
14832                 }
14833               }
14834             }
14835           }
14836         }
14837       }
14838 
      // Combine vmaxsw/h/b(a, a's negation) to abs(a).
      // This exposes the vabsduw/h/b opportunity downstream.
14841       if (!DCI.isAfterLegalizeDAG() && Subtarget.hasP9Altivec() &&
14842           (IID == Intrinsic::ppc_altivec_vmaxsw ||
14843            IID == Intrinsic::ppc_altivec_vmaxsh ||
14844            IID == Intrinsic::ppc_altivec_vmaxsb)) {
14845         SDValue V1 = N->getOperand(1);
14846         SDValue V2 = N->getOperand(2);
14847         if ((V1.getSimpleValueType() == MVT::v4i32 ||
14848              V1.getSimpleValueType() == MVT::v8i16 ||
14849              V1.getSimpleValueType() == MVT::v16i8) &&
14850             V1.getSimpleValueType() == V2.getSimpleValueType()) {
14851           // (0-a, a)
14852           if (V1.getOpcode() == ISD::SUB &&
14853               ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
14854               V1.getOperand(1) == V2) {
14855             return DAG.getNode(ISD::ABS, dl, V2.getValueType(), V2);
14856           }
14857           // (a, 0-a)
14858           if (V2.getOpcode() == ISD::SUB &&
14859               ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
14860               V2.getOperand(1) == V1) {
14861             return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
14862           }
14863           // (x-y, y-x)
14864           if (V1.getOpcode() == ISD::SUB && V2.getOpcode() == ISD::SUB &&
14865               V1.getOperand(0) == V2.getOperand(1) &&
14866               V1.getOperand(1) == V2.getOperand(0)) {
14867             return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
14868           }
14869         }
14870       }
14871     }
14872 
14873     break;
14874   case ISD::INTRINSIC_W_CHAIN:
14875     // For little endian, VSX loads require generating lxvd2x/xxswapd.
14876     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
14877     if (Subtarget.needsSwapsForVSXMemOps()) {
14878       switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
14879       default:
14880         break;
14881       case Intrinsic::ppc_vsx_lxvw4x:
14882       case Intrinsic::ppc_vsx_lxvd2x:
14883         return expandVSXLoadForLE(N, DCI);
14884       }
14885     }
14886     break;
14887   case ISD::INTRINSIC_VOID:
14888     // For little endian, VSX stores require generating xxswapd/stxvd2x.
14889     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
14890     if (Subtarget.needsSwapsForVSXMemOps()) {
14891       switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
14892       default:
14893         break;
14894       case Intrinsic::ppc_vsx_stxvw4x:
14895       case Intrinsic::ppc_vsx_stxvd2x:
14896         return expandVSXStoreForLE(N, DCI);
14897       }
14898     }
14899     break;
14900   case ISD::BSWAP:
14901     // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
14902     if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
14903         N->getOperand(0).hasOneUse() &&
14904         (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
14905          (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
14906           N->getValueType(0) == MVT::i64))) {
14907       SDValue Load = N->getOperand(0);
14908       LoadSDNode *LD = cast<LoadSDNode>(Load);
14909       // Create the byte-swapping load.
14910       SDValue Ops[] = {
14911         LD->getChain(),    // Chain
14912         LD->getBasePtr(),  // Ptr
14913         DAG.getValueType(N->getValueType(0)) // VT
14914       };
14915       SDValue BSLoad =
14916         DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
14917                                 DAG.getVTList(N->getValueType(0) == MVT::i64 ?
14918                                               MVT::i64 : MVT::i32, MVT::Other),
14919                                 Ops, LD->getMemoryVT(), LD->getMemOperand());
14920 
14921       // If this is an i16 load, insert the truncate.
14922       SDValue ResVal = BSLoad;
14923       if (N->getValueType(0) == MVT::i16)
14924         ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
14925 
14926       // First, combine the bswap away.  This makes the value produced by the
14927       // load dead.
14928       DCI.CombineTo(N, ResVal);
14929 
      // Next, combine the load away; we give it a bogus result value but a
      // real chain result. The result value is dead because the bswap is dead.
14932       DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
14933 
14934       // Return N so it doesn't get rechecked!
14935       return SDValue(N, 0);
14936     }
14937     break;
14938   case PPCISD::VCMP:
14939     // If a VCMPo node already exists with exactly the same operands as this
14940     // node, use its result instead of this node (VCMPo computes both a CR6 and
14941     // a normal output).
14942     //
14943     if (!N->getOperand(0).hasOneUse() &&
14944         !N->getOperand(1).hasOneUse() &&
14945         !N->getOperand(2).hasOneUse()) {
14946 
14947       // Scan all of the users of the LHS, looking for VCMPo's that match.
14948       SDNode *VCMPoNode = nullptr;
14949 
14950       SDNode *LHSN = N->getOperand(0).getNode();
14951       for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
14952            UI != E; ++UI)
14953         if (UI->getOpcode() == PPCISD::VCMPo &&
14954             UI->getOperand(1) == N->getOperand(1) &&
14955             UI->getOperand(2) == N->getOperand(2) &&
14956             UI->getOperand(0) == N->getOperand(0)) {
14957           VCMPoNode = *UI;
14958           break;
14959         }
14960 
      // If there is no VCMPo node, or if its flag result is unused, don't
      // transform this.
14963       if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
14964         break;
14965 
14966       // Look at the (necessarily single) use of the flag value.  If it has a
14967       // chain, this transformation is more complex.  Note that multiple things
14968       // could use the value result, which we should ignore.
14969       SDNode *FlagUser = nullptr;
14970       for (SDNode::use_iterator UI = VCMPoNode->use_begin();
14971            FlagUser == nullptr; ++UI) {
14972         assert(UI != VCMPoNode->use_end() && "Didn't find user!");
14973         SDNode *User = *UI;
14974         for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
14975           if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {
14976             FlagUser = User;
14977             break;
14978           }
14979         }
14980       }
14981 
14982       // If the user is a MFOCRF instruction, we know this is safe.
14983       // Otherwise we give up for right now.
14984       if (FlagUser->getOpcode() == PPCISD::MFOCRF)
14985         return SDValue(VCMPoNode, 0);
14986     }
14987     break;
14988   case ISD::BRCOND: {
14989     SDValue Cond = N->getOperand(1);
14990     SDValue Target = N->getOperand(2);
14991 
14992     if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
14993         cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
14994           Intrinsic::loop_decrement) {
14995 
14996       // We now need to make the intrinsic dead (it cannot be instruction
14997       // selected).
14998       DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0));
14999       assert(Cond.getNode()->hasOneUse() &&
15000              "Counter decrement has more than one use");
15001 
15002       return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other,
15003                          N->getOperand(0), Target);
15004     }
15005   }
15006   break;
15007   case ISD::BR_CC: {
    // If this is a branch on an altivec predicate comparison, lower this so
    // that we don't have to do an MFOCRF: instead, branch directly on CR6. This
    // lowering is done pre-legalize, because the legalizer lowers the predicate
    // compare down to code that is difficult to reassemble.
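    // As a sketch, a branch such as
    //   (br_cc seteq, (intrinsic vcmpeqfp.p ..., a, b), 1, dest)
    // becomes a PPCISD::VCMPo whose CR6 result feeds a COND_BRANCH directly.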
15012     ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
15013     SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);
15014 
    // Sometimes the promoted value of the intrinsic is ANDed with some non-zero
    // value. If so, look through the AND to get to the intrinsic.
15017     if (LHS.getOpcode() == ISD::AND &&
15018         LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
15019         cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() ==
15020           Intrinsic::loop_decrement &&
15021         isa<ConstantSDNode>(LHS.getOperand(1)) &&
15022         !isNullConstant(LHS.getOperand(1)))
15023       LHS = LHS.getOperand(0);
15024 
15025     if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
15026         cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
15027           Intrinsic::loop_decrement &&
15028         isa<ConstantSDNode>(RHS)) {
15029       assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
15030              "Counter decrement comparison is not EQ or NE");
15031 
15032       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
15033       bool isBDNZ = (CC == ISD::SETEQ && Val) ||
15034                     (CC == ISD::SETNE && !Val);
15035 
15036       // We now need to make the intrinsic dead (it cannot be instruction
15037       // selected).
15038       DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
15039       assert(LHS.getNode()->hasOneUse() &&
15040              "Counter decrement has more than one use");
15041 
15042       return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
15043                          N->getOperand(0), N->getOperand(4));
15044     }
15045 
15046     int CompareOpc;
15047     bool isDot;
15048 
15049     if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
15050         isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
15051         getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
15052       assert(isDot && "Can't compare against a vector result!");
15053 
15054       // If this is a comparison against something other than 0/1, then we know
15055       // that the condition is never/always true.
15056       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
15057       if (Val != 0 && Val != 1) {
15058         if (CC == ISD::SETEQ)      // Cond never true, remove branch.
15059           return N->getOperand(0);
15060         // Always !=, turn it into an unconditional branch.
15061         return DAG.getNode(ISD::BR, dl, MVT::Other,
15062                            N->getOperand(0), N->getOperand(4));
15063       }
15064 
15065       bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
15066 
15067       // Create the PPCISD altivec 'dot' comparison node.
15068       SDValue Ops[] = {
15069         LHS.getOperand(2),  // LHS of compare
15070         LHS.getOperand(3),  // RHS of compare
15071         DAG.getConstant(CompareOpc, dl, MVT::i32)
15072       };
15073       EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
15074       SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);
15075 
15076       // Unpack the result based on how the target uses it.
15077       PPC::Predicate CompOpc;
15078       switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
15079       default:  // Can't happen, don't crash on invalid number though.
15080       case 0:   // Branch on the value of the EQ bit of CR6.
15081         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
15082         break;
15083       case 1:   // Branch on the inverted value of the EQ bit of CR6.
15084         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
15085         break;
15086       case 2:   // Branch on the value of the LT bit of CR6.
15087         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
15088         break;
15089       case 3:   // Branch on the inverted value of the LT bit of CR6.
15090         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
15091         break;
15092       }
15093 
15094       return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
15095                          DAG.getConstant(CompOpc, dl, MVT::i32),
15096                          DAG.getRegister(PPC::CR6, MVT::i32),
15097                          N->getOperand(4), CompNode.getValue(1));
15098     }
15099     break;
15100   }
15101   case ISD::BUILD_VECTOR:
15102     return DAGCombineBuildVector(N, DCI);
15103   case ISD::ABS:
15104     return combineABS(N, DCI);
15105   case ISD::VSELECT:
15106     return combineVSelect(N, DCI);
15107   }
15108 
15109   return SDValue();
15110 }
15111 
15112 SDValue
15113 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
15114                                  SelectionDAG &DAG,
15115                                  SmallVectorImpl<SDNode *> &Created) const {
15116   // fold (sdiv X, pow2)
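  // For example, (sdiv X, 8) is expected to lower to roughly (a sketch):
  //   srawi r4, r3, 3
  //   addze r3, r4
  // and a negative power-of-two divisor additionally negates the result.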
15117   EVT VT = N->getValueType(0);
15118   if (VT == MVT::i64 && !Subtarget.isPPC64())
15119     return SDValue();
15120   if ((VT != MVT::i32 && VT != MVT::i64) ||
15121       !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
15122     return SDValue();
15123 
15124   SDLoc DL(N);
15125   SDValue N0 = N->getOperand(0);
15126 
15127   bool IsNegPow2 = (-Divisor).isPowerOf2();
15128   unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
15129   SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);
15130 
15131   SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
15132   Created.push_back(Op.getNode());
15133 
15134   if (IsNegPow2) {
15135     Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
15136     Created.push_back(Op.getNode());
15137   }
15138 
15139   return Op;
15140 }
15141 
15142 //===----------------------------------------------------------------------===//
15143 // Inline Assembly Support
15144 //===----------------------------------------------------------------------===//
15145 
15146 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
15147                                                       KnownBits &Known,
15148                                                       const APInt &DemandedElts,
15149                                                       const SelectionDAG &DAG,
15150                                                       unsigned Depth) const {
15151   Known.resetAll();
15152   switch (Op.getOpcode()) {
15153   default: break;
15154   case PPCISD::LBRX: {
15155     // lhbrx is known to have the top bits cleared out.
15156     if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
15157       Known.Zero = 0xFFFF0000;
15158     break;
15159   }
15160   case ISD::INTRINSIC_WO_CHAIN: {
15161     switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
15162     default: break;
15163     case Intrinsic::ppc_altivec_vcmpbfp_p:
15164     case Intrinsic::ppc_altivec_vcmpeqfp_p:
15165     case Intrinsic::ppc_altivec_vcmpequb_p:
15166     case Intrinsic::ppc_altivec_vcmpequh_p:
15167     case Intrinsic::ppc_altivec_vcmpequw_p:
15168     case Intrinsic::ppc_altivec_vcmpequd_p:
15169     case Intrinsic::ppc_altivec_vcmpgefp_p:
15170     case Intrinsic::ppc_altivec_vcmpgtfp_p:
15171     case Intrinsic::ppc_altivec_vcmpgtsb_p:
15172     case Intrinsic::ppc_altivec_vcmpgtsh_p:
15173     case Intrinsic::ppc_altivec_vcmpgtsw_p:
15174     case Intrinsic::ppc_altivec_vcmpgtsd_p:
15175     case Intrinsic::ppc_altivec_vcmpgtub_p:
15176     case Intrinsic::ppc_altivec_vcmpgtuh_p:
15177     case Intrinsic::ppc_altivec_vcmpgtuw_p:
15178     case Intrinsic::ppc_altivec_vcmpgtud_p:
15179       Known.Zero = ~1U;  // All bits but the low one are known to be zero.
15180       break;
15181     }
15182   }
15183   }
15184 }
15185 
15186 Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
15187   switch (Subtarget.getCPUDirective()) {
15188   default: break;
15189   case PPC::DIR_970:
15190   case PPC::DIR_PWR4:
15191   case PPC::DIR_PWR5:
15192   case PPC::DIR_PWR5X:
15193   case PPC::DIR_PWR6:
15194   case PPC::DIR_PWR6X:
15195   case PPC::DIR_PWR7:
15196   case PPC::DIR_PWR8:
15197   case PPC::DIR_PWR9:
15198   case PPC::DIR_PWR10:
15199   case PPC::DIR_PWR_FUTURE: {
15200     if (!ML)
15201       break;
15202 
15203     if (!DisableInnermostLoopAlign32) {
      // If the nested loop is an innermost loop, prefer a 32-byte alignment
      // so that we can reduce cache misses and branch-prediction misses.
15206       // Actual alignment of the loop will depend on the hotness check and other
15207       // logic in alignBlocks.
15208       if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
15209         return Align(32);
15210     }
15211 
15212     const PPCInstrInfo *TII = Subtarget.getInstrInfo();
15213 
15214     // For small loops (between 5 and 8 instructions), align to a 32-byte
15215     // boundary so that the entire loop fits in one instruction-cache line.
15216     uint64_t LoopSize = 0;
15217     for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
15218       for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
15219         LoopSize += TII->getInstSizeInBytes(*J);
15220         if (LoopSize > 32)
15221           break;
15222       }
15223 
15224     if (LoopSize > 16 && LoopSize <= 32)
15225       return Align(32);
15226 
15227     break;
15228   }
15229   }
15230 
15231   return TargetLowering::getPrefLoopAlignment(ML);
15232 }
15233 
15234 /// getConstraintType - Given a constraint, return the type of
15235 /// constraint it is for this target.
15236 PPCTargetLowering::ConstraintType
15237 PPCTargetLowering::getConstraintType(StringRef Constraint) const {
15238   if (Constraint.size() == 1) {
15239     switch (Constraint[0]) {
15240     default: break;
15241     case 'b':
15242     case 'r':
15243     case 'f':
15244     case 'd':
15245     case 'v':
15246     case 'y':
15247       return C_RegisterClass;
15248     case 'Z':
15249       // FIXME: While Z does indicate a memory constraint, it specifically
15250       // indicates an r+r address (used in conjunction with the 'y' modifier
15251       // in the replacement string). Currently, we're forcing the base
15252       // register to be r0 in the asm printer (which is interpreted as zero)
15253       // and forming the complete address in the second register. This is
15254       // suboptimal.
15255       return C_Memory;
15256     }
15257   } else if (Constraint == "wc") { // individual CR bits.
15258     return C_RegisterClass;
15259   } else if (Constraint == "wa" || Constraint == "wd" ||
15260              Constraint == "wf" || Constraint == "ws" ||
15261              Constraint == "wi" || Constraint == "ww") {
15262     return C_RegisterClass; // VSX registers.
15263   }
15264   return TargetLowering::getConstraintType(Constraint);
15265 }
15266 
15267 /// Examine constraint type and operand type and determine a weight value.
15268 /// This object must already have been set up with the operand type
15269 /// and the current alternative constraint selected.
15270 TargetLowering::ConstraintWeight
15271 PPCTargetLowering::getSingleConstraintMatchWeight(
15272     AsmOperandInfo &info, const char *constraint) const {
15273   ConstraintWeight weight = CW_Invalid;
15274   Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
15277   if (!CallOperandVal)
15278     return CW_Default;
15279   Type *type = CallOperandVal->getType();
15280 
15281   // Look at the constraint type.
15282   if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
15283     return CW_Register; // an individual CR bit.
15284   else if ((StringRef(constraint) == "wa" ||
15285             StringRef(constraint) == "wd" ||
15286             StringRef(constraint) == "wf") &&
15287            type->isVectorTy())
15288     return CW_Register;
15289   else if (StringRef(constraint) == "wi" && type->isIntegerTy(64))
    return CW_Register; // just holds 64-bit integer data.
15291   else if (StringRef(constraint) == "ws" && type->isDoubleTy())
15292     return CW_Register;
15293   else if (StringRef(constraint) == "ww" && type->isFloatTy())
15294     return CW_Register;
15295 
15296   switch (*constraint) {
15297   default:
15298     weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
15299     break;
15300   case 'b':
15301     if (type->isIntegerTy())
15302       weight = CW_Register;
15303     break;
15304   case 'f':
15305     if (type->isFloatTy())
15306       weight = CW_Register;
15307     break;
15308   case 'd':
15309     if (type->isDoubleTy())
15310       weight = CW_Register;
15311     break;
15312   case 'v':
15313     if (type->isVectorTy())
15314       weight = CW_Register;
15315     break;
15316   case 'y':
15317     weight = CW_Register;
15318     break;
15319   case 'Z':
15320     weight = CW_Memory;
15321     break;
15322   }
15323   return weight;
15324 }
15325 
15326 std::pair<unsigned, const TargetRegisterClass *>
15327 PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
15328                                                 StringRef Constraint,
15329                                                 MVT VT) const {
15330   if (Constraint.size() == 1) {
15331     // GCC RS6000 Constraint Letters
15332     switch (Constraint[0]) {
15333     case 'b':   // R1-R31
15334       if (VT == MVT::i64 && Subtarget.isPPC64())
15335         return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
15336       return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
15337     case 'r':   // R0-R31
15338       if (VT == MVT::i64 && Subtarget.isPPC64())
15339         return std::make_pair(0U, &PPC::G8RCRegClass);
15340       return std::make_pair(0U, &PPC::GPRCRegClass);
    // 'd' and 'f' constraints are both defined to be "the floating point
    // registers", where one is for 32-bit and the other for 64-bit. We don't
    // care much here, so just give them all the same register classes.
15344     case 'd':
15345     case 'f':
15346       if (Subtarget.hasSPE()) {
15347         if (VT == MVT::f32 || VT == MVT::i32)
15348           return std::make_pair(0U, &PPC::GPRCRegClass);
15349         if (VT == MVT::f64 || VT == MVT::i64)
15350           return std::make_pair(0U, &PPC::SPERCRegClass);
15351       } else {
15352         if (VT == MVT::f32 || VT == MVT::i32)
15353           return std::make_pair(0U, &PPC::F4RCRegClass);
15354         if (VT == MVT::f64 || VT == MVT::i64)
15355           return std::make_pair(0U, &PPC::F8RCRegClass);
15356       }
15357       break;
15358     case 'v':
15359       if (Subtarget.hasAltivec())
15360         return std::make_pair(0U, &PPC::VRRCRegClass);
15361       break;
15362     case 'y':   // crrc
15363       return std::make_pair(0U, &PPC::CRRCRegClass);
15364     }
15365   } else if (Constraint == "wc" && Subtarget.useCRBits()) {
15366     // An individual CR bit.
15367     return std::make_pair(0U, &PPC::CRBITRCRegClass);
  } else if ((Constraint == "wa" || Constraint == "wd" ||
              Constraint == "wf" || Constraint == "wi") &&
             Subtarget.hasVSX()) {
15371     return std::make_pair(0U, &PPC::VSRCRegClass);
15372   } else if ((Constraint == "ws" || Constraint == "ww") && Subtarget.hasVSX()) {
15373     if (VT == MVT::f32 && Subtarget.hasP8Vector())
15374       return std::make_pair(0U, &PPC::VSSRCRegClass);
15375     else
15376       return std::make_pair(0U, &PPC::VSFRCRegClass);
15377   }
15378 
15379   // If we name a VSX register, we can't defer to the base class because it
15380   // will not recognize the correct register (their names will be VSL{0-31}
15381   // and V{0-31} so they won't match). So we match them here.
15382   if (Constraint.size() > 3 && Constraint[1] == 'v' && Constraint[2] == 's') {
15383     int VSNum = atoi(Constraint.data() + 3);
15384     assert(VSNum >= 0 && VSNum <= 63 &&
15385            "Attempted to access a vsr out of range");
15386     if (VSNum < 32)
15387       return std::make_pair(PPC::VSL0 + VSNum, &PPC::VSRCRegClass);
15388     return std::make_pair(PPC::V0 + VSNum - 32, &PPC::VSRCRegClass);
15389   }
15390   std::pair<unsigned, const TargetRegisterClass *> R =
15391       TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
15392 
15393   // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
15394   // (which we call X[0-9]+). If a 64-bit value has been requested, and a
15395   // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent
15396   // register.
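  // For example (illustrative), on PPC64 an explicit "{r4}" constraint on a
  // 64-bit operand first resolves to R4 and is upgraded here to X4.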
15397   // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
15398   // the AsmName field from *RegisterInfo.td, then this would not be necessary.
15399   if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
15400       PPC::GPRCRegClass.contains(R.first))
15401     return std::make_pair(TRI->getMatchingSuperReg(R.first,
15402                             PPC::sub_32, &PPC::G8RCRegClass),
15403                           &PPC::G8RCRegClass);
15404 
15405   // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
15406   if (!R.second && StringRef("{cc}").equals_lower(Constraint)) {
15407     R.first = PPC::CR0;
15408     R.second = &PPC::CRRCRegClass;
15409   }
15410 
15411   return R;
15412 }
15413 
15414 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
15415 /// vector.  If it is invalid, don't add anything to Ops.
15416 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
15417                                                      std::string &Constraint,
15418                                                      std::vector<SDValue>&Ops,
15419                                                      SelectionDAG &DAG) const {
15420   SDValue Result;
15421 
15422   // Only support length 1 constraints.
15423   if (Constraint.length() > 1) return;
15424 
15425   char Letter = Constraint[0];
15426   switch (Letter) {
15427   default: break;
15428   case 'I':
15429   case 'J':
15430   case 'K':
15431   case 'L':
15432   case 'M':
15433   case 'N':
15434   case 'O':
15435   case 'P': {
15436     ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
15437     if (!CST) return; // Must be an immediate to match.
15438     SDLoc dl(Op);
15439     int64_t Value = CST->getSExtValue();
15440     EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
15441                          // numbers are printed as such.
15442     switch (Letter) {
15443     default: llvm_unreachable("Unknown constraint letter!");
15444     case 'I':  // "I" is a signed 16-bit constant.
15445       if (isInt<16>(Value))
15446         Result = DAG.getTargetConstant(Value, dl, TCVT);
15447       break;
15448     case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
15449       if (isShiftedUInt<16, 16>(Value))
15450         Result = DAG.getTargetConstant(Value, dl, TCVT);
15451       break;
15452     case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
15453       if (isShiftedInt<16, 16>(Value))
15454         Result = DAG.getTargetConstant(Value, dl, TCVT);
15455       break;
15456     case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
15457       if (isUInt<16>(Value))
15458         Result = DAG.getTargetConstant(Value, dl, TCVT);
15459       break;
15460     case 'M':  // "M" is a constant that is greater than 31.
15461       if (Value > 31)
15462         Result = DAG.getTargetConstant(Value, dl, TCVT);
15463       break;
15464     case 'N':  // "N" is a positive constant that is an exact power of two.
15465       if (Value > 0 && isPowerOf2_64(Value))
15466         Result = DAG.getTargetConstant(Value, dl, TCVT);
15467       break;
15468     case 'O':  // "O" is the constant zero.
15469       if (Value == 0)
15470         Result = DAG.getTargetConstant(Value, dl, TCVT);
15471       break;
15472     case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
15473       if (isInt<16>(-Value))
15474         Result = DAG.getTargetConstant(Value, dl, TCVT);
15475       break;
15476     }
15477     break;
15478   }
15479   }
15480 
15481   if (Result.getNode()) {
15482     Ops.push_back(Result);
15483     return;
15484   }
15485 
15486   // Handle standard constraint letters.
15487   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
15488 }
15489 
15490 // isLegalAddressingMode - Return true if the addressing mode represented
15491 // by AM is legal for this target, for a load/store of the specified type.
15492 bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
15493                                               const AddrMode &AM, Type *Ty,
15494                                               unsigned AS,
15495                                               Instruction *I) const {
  // Vector-type r+i forms are supported since Power9, as the DQ form. We don't
  // check the offset against the DQ-form requirement (off % 16 == 0) because,
  // on PowerPC, the imm form is preferred and the offset can be adjusted to
  // use it later, in the PPCLoopInstrFormPrep pass. Also, in LSR a single
  // LSRUse checks legality using only its min and max offsets, so we should be
  // a little aggressive here and accept the other offsets for that LSRUse.
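  // For example (a sketch), a v4i32 access at base+20 is accepted here even
  // though the DQ form requires offset % 16 == 0; PPCLoopInstrFormPrep may
  // later rewrite the base so that the displacement becomes DQ-compatible.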
15502   if (Ty->isVectorTy() && AM.BaseOffs != 0 && !Subtarget.hasP9Vector())
15503     return false;
15504 
15505   // PPC allows a sign-extended 16-bit immediate field.
15506   if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
15507     return false;
15508 
15509   // No global is ever allowed as a base.
15510   if (AM.BaseGV)
15511     return false;
15512 
  // PPC only supports r+r addressing.
15514   switch (AM.Scale) {
15515   case 0:  // "r+i" or just "i", depending on HasBaseReg.
15516     break;
15517   case 1:
15518     if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
15519       return false;
15520     // Otherwise we have r+r or r+i.
15521     break;
15522   case 2:
15523     if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r  or  2*r+i is not allowed.
15524       return false;
15525     // Allow 2*r as r+r.
15526     break;
15527   default:
15528     // No other scales are supported.
15529     return false;
15530   }
15531 
15532   return true;
15533 }
15534 
15535 SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
15536                                            SelectionDAG &DAG) const {
15537   MachineFunction &MF = DAG.getMachineFunction();
15538   MachineFrameInfo &MFI = MF.getFrameInfo();
15539   MFI.setReturnAddressIsTaken(true);
15540 
15541   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
15542     return SDValue();
15543 
15544   SDLoc dl(Op);
15545   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
15546 
15547   // Make sure the function does not optimize away the store of the RA to
15548   // the stack.
15549   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
15550   FuncInfo->setLRStoreRequired();
15551   bool isPPC64 = Subtarget.isPPC64();
15552   auto PtrVT = getPointerTy(MF.getDataLayout());
15553 
15554   if (Depth > 0) {
15555     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
15556     SDValue Offset =
15557         DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
15558                         isPPC64 ? MVT::i64 : MVT::i32);
15559     return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
15560                        DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
15561                        MachinePointerInfo());
15562   }
15563 
15564   // Just load the return address off the stack.
15565   SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
15566   return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
15567                      MachinePointerInfo());
15568 }
15569 
15570 SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
15571                                           SelectionDAG &DAG) const {
15572   SDLoc dl(Op);
15573   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
15574 
15575   MachineFunction &MF = DAG.getMachineFunction();
15576   MachineFrameInfo &MFI = MF.getFrameInfo();
15577   MFI.setFrameAddressIsTaken(true);
15578 
15579   EVT PtrVT = getPointerTy(MF.getDataLayout());
15580   bool isPPC64 = PtrVT == MVT::i64;
15581 
15582   // Naked functions never have a frame pointer, and so we use r1. For all
15583   // other functions, this decision must be delayed until during PEI.
15584   unsigned FrameReg;
15585   if (MF.getFunction().hasFnAttribute(Attribute::Naked))
15586     FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
15587   else
15588     FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;
15589 
15590   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
15591                                          PtrVT);
15592   while (Depth--)
15593     FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
15594                             FrameAddr, MachinePointerInfo());
15595   return FrameAddr;
15596 }
15597 
15598 // FIXME? Maybe this could be a TableGen attribute on some registers and
15599 // this table could be generated automatically from RegInfo.
15600 Register PPCTargetLowering::getRegisterByName(const char* RegName, LLT VT,
15601                                               const MachineFunction &MF) const {
15602   bool isPPC64 = Subtarget.isPPC64();
15603 
15604   bool is64Bit = isPPC64 && VT == LLT::scalar(64);
15605   if (!is64Bit && VT != LLT::scalar(32))
15606     report_fatal_error("Invalid register global variable type");
15607 
15608   Register Reg = StringSwitch<Register>(RegName)
15609                      .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
15610                      .Case("r2", isPPC64 ? Register() : PPC::R2)
15611                      .Case("r13", (is64Bit ? PPC::X13 : PPC::R13))
15612                      .Default(Register());
15613 
15614   if (Reg)
15615     return Reg;
15616   report_fatal_error("Invalid register name global variable");
15617 }
15618 
15619 bool PPCTargetLowering::isAccessedAsGotIndirect(SDValue GA) const {
  // The 32-bit SVR4 ABI accesses everything as got-indirect.
15621   if (Subtarget.is32BitELFABI())
15622     return true;
15623 
15624   // AIX accesses everything indirectly through the TOC, which is similar to
15625   // the GOT.
15626   if (Subtarget.isAIXABI())
15627     return true;
15628 
15629   CodeModel::Model CModel = getTargetMachine().getCodeModel();
  // Under the small and large code models, module locals are accessed
  // indirectly by loading their address from the .toc/.got.
15632   if (CModel == CodeModel::Small || CModel == CodeModel::Large)
15633     return true;
15634 
15635   // JumpTable and BlockAddress are accessed as got-indirect.
15636   if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA))
15637     return true;
15638 
15639   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA))
15640     return Subtarget.isGVIndirectSymbol(G->getGlobal());
15641 
15642   return false;
15643 }
15644 
15645 bool
15646 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
15647   // The PowerPC target isn't yet aware of offsets.
15648   return false;
15649 }
15650 
15651 bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
15652                                            const CallInst &I,
15653                                            MachineFunction &MF,
15654                                            unsigned Intrinsic) const {
15655   switch (Intrinsic) {
15656   case Intrinsic::ppc_altivec_lvx:
15657   case Intrinsic::ppc_altivec_lvxl:
15658   case Intrinsic::ppc_altivec_lvebx:
15659   case Intrinsic::ppc_altivec_lvehx:
15660   case Intrinsic::ppc_altivec_lvewx:
15661   case Intrinsic::ppc_vsx_lxvd2x:
15662   case Intrinsic::ppc_vsx_lxvw4x: {
15663     EVT VT;
15664     switch (Intrinsic) {
15665     case Intrinsic::ppc_altivec_lvebx:
15666       VT = MVT::i8;
15667       break;
15668     case Intrinsic::ppc_altivec_lvehx:
15669       VT = MVT::i16;
15670       break;
15671     case Intrinsic::ppc_altivec_lvewx:
15672       VT = MVT::i32;
15673       break;
15674     case Intrinsic::ppc_vsx_lxvd2x:
15675       VT = MVT::v2f64;
15676       break;
15677     default:
15678       VT = MVT::v4i32;
15679       break;
15680     }
15681 
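    // These intrinsics ignore the low-order address bits, so conservatively
    // report every byte the access might touch:
    // [ptr - (size - 1), ptr + size - 1].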
15682     Info.opc = ISD::INTRINSIC_W_CHAIN;
15683     Info.memVT = VT;
15684     Info.ptrVal = I.getArgOperand(0);
15685     Info.offset = -VT.getStoreSize()+1;
15686     Info.size = 2*VT.getStoreSize()-1;
15687     Info.align = Align(1);
15688     Info.flags = MachineMemOperand::MOLoad;
15689     return true;
15690   }
15691   case Intrinsic::ppc_altivec_stvx:
15692   case Intrinsic::ppc_altivec_stvxl:
15693   case Intrinsic::ppc_altivec_stvebx:
15694   case Intrinsic::ppc_altivec_stvehx:
15695   case Intrinsic::ppc_altivec_stvewx:
15696   case Intrinsic::ppc_vsx_stxvd2x:
15697   case Intrinsic::ppc_vsx_stxvw4x: {
15698     EVT VT;
15699     switch (Intrinsic) {
15700     case Intrinsic::ppc_altivec_stvebx:
15701       VT = MVT::i8;
15702       break;
15703     case Intrinsic::ppc_altivec_stvehx:
15704       VT = MVT::i16;
15705       break;
15706     case Intrinsic::ppc_altivec_stvewx:
15707       VT = MVT::i32;
15708       break;
15709     case Intrinsic::ppc_vsx_stxvd2x:
15710       VT = MVT::v2f64;
15711       break;
15712     default:
15713       VT = MVT::v4i32;
15714       break;
15715     }
15716 
15717     Info.opc = ISD::INTRINSIC_VOID;
15718     Info.memVT = VT;
15719     Info.ptrVal = I.getArgOperand(1);
15720     Info.offset = -VT.getStoreSize()+1;
15721     Info.size = 2*VT.getStoreSize()-1;
15722     Info.align = Align(1);
15723     Info.flags = MachineMemOperand::MOStore;
15724     return true;
15725   }
15726   default:
15727     break;
15728   }
15729 
15730   return false;
15731 }
15732 
/// Returns EVT::Other if the type should be determined using generic
/// target-independent logic.
15735 EVT PPCTargetLowering::getOptimalMemOpType(
15736     const MemOp &Op, const AttributeList &FuncAttributes) const {
15737   if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
    // We should use Altivec/VSX loads and stores when available. For unaligned
    // addresses, VSX loads are only fast starting with the P8.
15740     if (Subtarget.hasAltivec() && Op.size() >= 16 &&
15741         (Op.isAligned(Align(16)) ||
15742          ((Op.isMemset() && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
15743       return MVT::v4i32;
15744   }
15745 
15746   if (Subtarget.isPPC64()) {
15747     return MVT::i64;
15748   }
15749 
15750   return MVT::i32;
15751 }
15752 
15753 /// Returns true if it is beneficial to convert a load of a constant
15754 /// to just the constant itself.
15755 bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
15756                                                           Type *Ty) const {
15757   assert(Ty->isIntegerTy());
15758 
15759   unsigned BitSize = Ty->getPrimitiveSizeInBits();
15760   return !(BitSize == 0 || BitSize > 64);
15761 }
15762 
15763 bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
15764   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
15765     return false;
15766   unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
15767   unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
15768   return NumBits1 == 64 && NumBits2 == 32;
15769 }
15770 
15771 bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
15772   if (!VT1.isInteger() || !VT2.isInteger())
15773     return false;
15774   unsigned NumBits1 = VT1.getSizeInBits();
15775   unsigned NumBits2 = VT2.getSizeInBits();
15776   return NumBits1 == 64 && NumBits2 == 32;
15777 }
15778 
15779 bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
15780   // Generally speaking, zexts are not free, but they are free when they can be
15781   // folded with other operations.
15782   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
15783     EVT MemVT = LD->getMemoryVT();
15784     if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
15785          (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
15786         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
15787          LD->getExtensionType() == ISD::ZEXTLOAD))
15788       return true;
15789   }
15790 
15791   // FIXME: Add other cases...
15792   //  - 32-bit shifts with a zext to i64
15793   //  - zext after ctlz, bswap, etc.
15794   //  - zext after and by a constant mask
15795 
15796   return TargetLowering::isZExtFree(Val, VT2);
15797 }
15798 
15799 bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
15800   assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
15801          "invalid fpext types");
15802   // Extending to float128 is not free.
15803   if (DestVT == MVT::f128)
15804     return false;
15805   return true;
15806 }
15807 
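// cmpwi/cmpdi take a signed 16-bit immediate and cmplwi/cmpldi an unsigned
// one, so a comparison immediate is legal if it fits either form.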
15808 bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
15809   return isInt<16>(Imm) || isUInt<16>(Imm);
15810 }
15811 
15812 bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
15813   return isInt<16>(Imm) || isUInt<16>(Imm);
15814 }
15815 
15816 bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
15817                                                        unsigned,
15818                                                        unsigned,
15819                                                        MachineMemOperand::Flags,
15820                                                        bool *Fast) const {
15821   if (DisablePPCUnaligned)
15822     return false;
15823 
  // PowerPC supports unaligned memory access for simple non-vector types.
  // Although accessing unaligned addresses is not as efficient as accessing
  // aligned addresses, it is generally more efficient than manual expansion,
  // and it generally only traps to software emulation when crossing page
  // boundaries.
15829 
15830   if (!VT.isSimple())
15831     return false;
15832 
15833   if (VT.isFloatingPoint() && !VT.isVector() &&
15834       !Subtarget.allowsUnalignedFPAccess())
15835     return false;
15836 
15837   if (VT.getSimpleVT().isVector()) {
15838     if (Subtarget.hasVSX()) {
15839       if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
15840           VT != MVT::v4f32 && VT != MVT::v4i32)
15841         return false;
15842     } else {
15843       return false;
15844     }
15845   }
15846 
15847   if (VT == MVT::ppcf128)
15848     return false;
15849 
15850   if (Fast)
15851     *Fast = true;
15852 
15853   return true;
15854 }
15855 
15856 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
15857                                                    EVT VT) const {
15858   return isFMAFasterThanFMulAndFAdd(
15859       MF.getFunction(), VT.getTypeForEVT(MF.getFunction().getContext()));
15860 }
15861 
15862 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const Function &F,
15863                                                    Type *Ty) const {
15864   switch (Ty->getScalarType()->getTypeID()) {
15865   case Type::FloatTyID:
15866   case Type::DoubleTyID:
15867     return true;
15868   case Type::FP128TyID:
15869     return Subtarget.hasP9Vector();
15870   default:
15871     return false;
15872   }
15873 }
15874 
15875 // FIXME: add more patterns which are not profitable to hoist.
15876 bool PPCTargetLowering::isProfitableToHoist(Instruction *I) const {
15877   if (!I->hasOneUse())
15878     return true;
15879 
15880   Instruction *User = I->user_back();
  assert(User && "A single-use instruction with no uses.");
15882 
15883   switch (I->getOpcode()) {
15884   case Instruction::FMul: {
    // Don't break FMA; PowerPC prefers FMA.
15886     if (User->getOpcode() != Instruction::FSub &&
15887         User->getOpcode() != Instruction::FAdd)
15888       return true;
15889 
15890     const TargetOptions &Options = getTargetMachine().Options;
15891     const Function *F = I->getFunction();
15892     const DataLayout &DL = F->getParent()->getDataLayout();
15893     Type *Ty = User->getOperand(0)->getType();
15894 
15895     return !(
15896         isFMAFasterThanFMulAndFAdd(*F, Ty) &&
15897         isOperationLegalOrCustom(ISD::FMA, getValueType(DL, Ty)) &&
15898         (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath));
15899   }
15900   case Instruction::Load: {
    // Don't break the "store (load float*)" pattern; it will be combined to
    // "store (load int32)" by a later InstCombine pass (see
    // combineLoadToOperationType). On PowerPC, loading a floating-point value
    // takes more cycles than loading a 32-bit integer.
15905     LoadInst *LI = cast<LoadInst>(I);
    // For loads that combineLoadToOperationType leaves alone, such as ordered
    // loads, it should be profitable to hoist them.
    // A swifterror load can only be of pointer-to-pointer type, so the later
    // type check gets rid of this case.
15910     if (!LI->isUnordered())
15911       return true;
15912 
15913     if (User->getOpcode() != Instruction::Store)
15914       return true;
15915 
15916     if (I->getType()->getTypeID() != Type::FloatTyID)
15917       return true;
15918 
15919     return false;
15920   }
15921   default:
15922     return true;
15923   }
15924   return true;
15925 }
15926 
15927 const MCPhysReg *
15928 PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
15929   // LR is a callee-save register, but we must treat it as clobbered by any call
15930   // site. Hence we include LR in the scratch registers, which are in turn added
15931   // as implicit-defs for stackmaps and patchpoints. The same reasoning applies
15932   // to CTR, which is used by any indirect call.
15933   static const MCPhysReg ScratchRegs[] = {
15934     PPC::X12, PPC::LR8, PPC::CTR8, 0
15935   };
15936 
15937   return ScratchRegs;
15938 }
15939 
15940 Register PPCTargetLowering::getExceptionPointerRegister(
15941     const Constant *PersonalityFn) const {
15942   return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
15943 }
15944 
15945 Register PPCTargetLowering::getExceptionSelectorRegister(
15946     const Constant *PersonalityFn) const {
15947   return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
15948 }
15949 
15950 bool
15951 PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
15952                      EVT VT , unsigned DefinedValues) const {
15953   if (VT == MVT::v2i64)
15954     return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves
15955 
15956   if (Subtarget.hasVSX())
15957     return true;
15958 
15959   return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
15960 }
15961 
15962 Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
15963   if (DisableILPPref || Subtarget.enableMachineScheduler())
15964     return TargetLowering::getSchedulingPreference(N);
15965 
15966   return Sched::ILP;
15967 }
15968 
15969 // Create a fast isel object.
15970 FastISel *
15971 PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
15972                                   const TargetLibraryInfo *LibInfo) const {
15973   return PPC::createFastISel(FuncInfo, LibInfo);
15974 }
15975 
15976 // 'Inverted' means the FMA opcode after negating one multiplicand.
15977 // For example, (fma -a b c) = (fnmsub a b c)
15978 static unsigned invertFMAOpcode(unsigned Opc) {
15979   switch (Opc) {
15980   default:
15981     llvm_unreachable("Invalid FMA opcode for PowerPC!");
15982   case ISD::FMA:
15983     return PPCISD::FNMSUB;
15984   case PPCISD::FNMSUB:
15985     return ISD::FMA;
15986   }
15987 }
15988 
15989 SDValue PPCTargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
15990                                                 bool LegalOps, bool OptForSize,
15991                                                 NegatibleCost &Cost,
15992                                                 unsigned Depth) const {
15993   if (Depth > SelectionDAG::MaxRecursionDepth)
15994     return SDValue();
15995 
15996   unsigned Opc = Op.getOpcode();
15997   EVT VT = Op.getValueType();
15998   SDNodeFlags Flags = Op.getNode()->getFlags();
15999 
16000   switch (Opc) {
16001   case PPCISD::FNMSUB:
16002     if (!Op.hasOneUse() || !isTypeLegal(VT))
16003       break;
16004 
16005     const TargetOptions &Options = getTargetMachine().Options;
16006     SDValue N0 = Op.getOperand(0);
16007     SDValue N1 = Op.getOperand(1);
16008     SDValue N2 = Op.getOperand(2);
16009     SDLoc Loc(Op);
16010 
16011     NegatibleCost N2Cost = NegatibleCost::Expensive;
16012     SDValue NegN2 =
16013         getNegatedExpression(N2, DAG, LegalOps, OptForSize, N2Cost, Depth + 1);
16014 
16015     if (!NegN2)
16016       return SDValue();
16017 
16018     // (fneg (fnmsub a b c)) => (fnmsub (fneg a) b (fneg c))
16019     // (fneg (fnmsub a b c)) => (fnmsub a (fneg b) (fneg c))
    // These transformations may change the sign of zero. For example,
    // -(-ab-(-c)) = -0 while -(-(ab-c)) = +0 when a = b = c = 1.
16022     if (Flags.hasNoSignedZeros() || Options.NoSignedZerosFPMath) {
      // Try to choose the cheaper one to negate.
16024       NegatibleCost N0Cost = NegatibleCost::Expensive;
16025       SDValue NegN0 = getNegatedExpression(N0, DAG, LegalOps, OptForSize,
16026                                            N0Cost, Depth + 1);
16027 
16028       NegatibleCost N1Cost = NegatibleCost::Expensive;
16029       SDValue NegN1 = getNegatedExpression(N1, DAG, LegalOps, OptForSize,
16030                                            N1Cost, Depth + 1);
16031 
16032       if (NegN0 && N0Cost <= N1Cost) {
16033         Cost = std::min(N0Cost, N2Cost);
16034         return DAG.getNode(Opc, Loc, VT, NegN0, N1, NegN2, Flags);
16035       } else if (NegN1) {
16036         Cost = std::min(N1Cost, N2Cost);
16037         return DAG.getNode(Opc, Loc, VT, N0, NegN1, NegN2, Flags);
16038       }
16039     }
16040 
16041     // (fneg (fnmsub a b c)) => (fma a b (fneg c))
16042     if (isOperationLegal(ISD::FMA, VT)) {
16043       Cost = N2Cost;
16044       return DAG.getNode(ISD::FMA, Loc, VT, N0, N1, NegN2, Flags);
16045     }
16046 
16047     break;
16048   }
16049 
16050   return TargetLowering::getNegatedExpression(Op, DAG, LegalOps, OptForSize,
16051                                               Cost, Depth);
16052 }
16053 
16054 // Override to enable LOAD_STACK_GUARD lowering on Linux.
16055 bool PPCTargetLowering::useLoadStackGuardNode() const {
16056   if (!Subtarget.isTargetLinux())
16057     return TargetLowering::useLoadStackGuardNode();
16058   return true;
16059 }
16060 
// Override to omit the stack-protector guard declarations on Linux, where the
// guard is accessed via LOAD_STACK_GUARD rather than a module-level global.
16062 void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
16063   if (!Subtarget.isTargetLinux())
16064     return TargetLowering::insertSSPDeclarations(M);
16065 }
16066 
16067 bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
16068                                      bool ForCodeSize) const {
16069   if (!VT.isSimple() || !Subtarget.hasVSX())
16070     return false;
16071 
16072   switch(VT.getSimpleVT().SimpleTy) {
16073   default:
16074     // For FP types that are currently not supported by PPC backend, return
16075     // false. Examples: f16, f80.
16076     return false;
16077   case MVT::f32:
16078   case MVT::f64:
16079     if (Subtarget.hasPrefixInstrs()) {
16080       // With prefixed instructions, we can materialize anything that can be
16081       // represented with a 32-bit immediate, not just positive zero.
16082       APFloat APFloatOfImm = Imm;
16083       return convertToNonDenormSingle(APFloatOfImm);
16084     }
16085     LLVM_FALLTHROUGH;
16086   case MVT::ppcf128:
16087     return Imm.isPosZero();
16088   }
16089 }
16090 
16091 // For vector shift operation op, fold
16092 // (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
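// For example, for a legal v4i32 shift (a sketch):
//   (shl x, (and y, 31)) --> (PPCISD::SHL x, y)
// since the underlying vslw instruction already interprets each element's
// shift amount modulo 32.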
16093 static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
16094                                   SelectionDAG &DAG) {
16095   SDValue N0 = N->getOperand(0);
16096   SDValue N1 = N->getOperand(1);
16097   EVT VT = N0.getValueType();
16098   unsigned OpSizeInBits = VT.getScalarSizeInBits();
16099   unsigned Opcode = N->getOpcode();
16100   unsigned TargetOpcode;
16101 
16102   switch (Opcode) {
16103   default:
16104     llvm_unreachable("Unexpected shift operation");
16105   case ISD::SHL:
16106     TargetOpcode = PPCISD::SHL;
16107     break;
16108   case ISD::SRL:
16109     TargetOpcode = PPCISD::SRL;
16110     break;
16111   case ISD::SRA:
16112     TargetOpcode = PPCISD::SRA;
16113     break;
16114   }
16115 
16116   if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
16117       N1->getOpcode() == ISD::AND)
16118     if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
16119       if (Mask->getZExtValue() == OpSizeInBits - 1)
16120         return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));
16121 
16122   return SDValue();
16123 }
16124 
16125 SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
16126   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
16127     return Value;
16128 
16129   SDValue N0 = N->getOperand(0);
16130   ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
16131   if (!Subtarget.isISA3_0() ||
16132       N0.getOpcode() != ISD::SIGN_EXTEND ||
16133       N0.getOperand(0).getValueType() != MVT::i32 ||
16134       CN1 == nullptr || N->getValueType(0) != MVT::i64)
16135     return SDValue();
16136 
  // We can't save an operation here if the value is already sign-extended, and
  // in that case the existing shift is easier to combine.
16139   SDValue ExtsSrc = N0.getOperand(0);
16140   if (ExtsSrc.getOpcode() == ISD::TRUNCATE &&
16141       ExtsSrc.getOperand(0).getOpcode() == ISD::AssertSext)
16142     return SDValue();
16143 
16144   SDLoc DL(N0);
16145   SDValue ShiftBy = SDValue(CN1, 0);
  // We want the shift amount to be i32 on the extswsli, but the shift amount
  // could be i64.
16148   if (ShiftBy.getValueType() == MVT::i64)
16149     ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32);
16150 
16151   return DCI.DAG.getNode(PPCISD::EXTSWSLI, DL, MVT::i64, N0->getOperand(0),
16152                          ShiftBy);
16153 }
16154 
16155 SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
16156   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
16157     return Value;
16158 
16159   return SDValue();
16160 }
16161 
16162 SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
16163   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
16164     return Value;
16165 
16166   return SDValue();
16167 }
16168 
16169 // Transform (add X, (zext(setne Z, C))) -> (addze X, (addic (addi Z, -C), -1))
16170 // Transform (add X, (zext(sete  Z, C))) -> (addze X, (subfic (addi Z, -C), 0))
// When C is zero, the expression (addi Z, -C) simplifies to Z.
16172 // Requirement: -C in [-32768, 32767], X and Z are MVT::i64 types
16173 static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG,
16174                                  const PPCSubtarget &Subtarget) {
16175   if (!Subtarget.isPPC64())
16176     return SDValue();
16177 
16178   SDValue LHS = N->getOperand(0);
16179   SDValue RHS = N->getOperand(1);
16180 
16181   auto isZextOfCompareWithConstant = [](SDValue Op) {
16182     if (Op.getOpcode() != ISD::ZERO_EXTEND || !Op.hasOneUse() ||
16183         Op.getValueType() != MVT::i64)
16184       return false;
16185 
16186     SDValue Cmp = Op.getOperand(0);
16187     if (Cmp.getOpcode() != ISD::SETCC || !Cmp.hasOneUse() ||
16188         Cmp.getOperand(0).getValueType() != MVT::i64)
16189       return false;
16190 
16191     if (auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1))) {
16192       int64_t NegConstant = 0 - Constant->getSExtValue();
      // Due to the limitations of the addi instruction,
      // -C is required to be in [-32768, 32767].
16195       return isInt<16>(NegConstant);
16196     }
16197 
16198     return false;
16199   };
16200 
16201   bool LHSHasPattern = isZextOfCompareWithConstant(LHS);
16202   bool RHSHasPattern = isZextOfCompareWithConstant(RHS);
16203 
16204   // If there is a pattern, canonicalize a zext operand to the RHS.
16205   if (LHSHasPattern && !RHSHasPattern)
16206     std::swap(LHS, RHS);
16207   else if (!LHSHasPattern && !RHSHasPattern)
16208     return SDValue();
16209 
16210   SDLoc DL(N);
16211   SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Glue);
16212   SDValue Cmp = RHS.getOperand(0);
16213   SDValue Z = Cmp.getOperand(0);
16214   auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1));
16215 
  assert(Constant && "Constant should not be a null pointer.");
16217   int64_t NegConstant = 0 - Constant->getSExtValue();
16218 
16219   switch(cast<CondCodeSDNode>(Cmp.getOperand(2))->get()) {
16220   default: break;
16221   case ISD::SETNE: {
16222     //                                 when C == 0
16223     //                             --> addze X, (addic Z, -1).carry
16224     //                            /
16225     // add X, (zext(setne Z, C))--
16226     //                            \    when -32768 <= -C <= 32767 && C != 0
16227     //                             --> addze X, (addic (addi Z, -C), -1).carry
16228     SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
16229                               DAG.getConstant(NegConstant, DL, MVT::i64));
16230     SDValue AddOrZ = NegConstant != 0 ? Add : Z;
16231     SDValue Addc = DAG.getNode(ISD::ADDC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
16232                                AddOrZ, DAG.getConstant(-1ULL, DL, MVT::i64));
16233     return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
16234                        SDValue(Addc.getNode(), 1));
16235     }
16236   case ISD::SETEQ: {
16237     //                                 when C == 0
16238     //                             --> addze X, (subfic Z, 0).carry
16239     //                            /
16240     // add X, (zext(sete  Z, C))--
16241     //                            \    when -32768 <= -C <= 32767 && C != 0
16242     //                             --> addze X, (subfic (addi Z, -C), 0).carry
16243     SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
16244                               DAG.getConstant(NegConstant, DL, MVT::i64));
16245     SDValue AddOrZ = NegConstant != 0 ? Add : Z;
16246     SDValue Subc = DAG.getNode(ISD::SUBC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
16247                                DAG.getConstant(0, DL, MVT::i64), AddOrZ);
16248     return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
16249                        SDValue(Subc.getNode(), 1));
16250     }
16251   }
16252 
16253   return SDValue();
16254 }
16255 
16256 // Transform
16257 // (add C1, (MAT_PCREL_ADDR GlobalAddr+C2)) to
16258 // (MAT_PCREL_ADDR GlobalAddr+(C1+C2))
16259 // In this case both C1 and C2 must be known constants.
16260 // C1+C2 must fit into a 34 bit signed integer.
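// For example (illustrative):
//   (add (MAT_PCREL_ADDR GlobalAddr+8), 16)
//     --> (MAT_PCREL_ADDR GlobalAddr+24)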
16261 static SDValue combineADDToMAT_PCREL_ADDR(SDNode *N, SelectionDAG &DAG,
16262                                           const PPCSubtarget &Subtarget) {
16263   if (!Subtarget.isUsingPCRelativeCalls())
16264     return SDValue();
16265 
  // Check both operand 0 and operand 1 of the ADD node for the PCRel node.
  // If we find that node, try to cast the global address and the constant.
16268   SDValue LHS = N->getOperand(0);
16269   SDValue RHS = N->getOperand(1);
16270 
16271   if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
16272     std::swap(LHS, RHS);
16273 
16274   if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
16275     return SDValue();
16276 
16277   // Operand zero of PPCISD::MAT_PCREL_ADDR is the GA node.
16278   GlobalAddressSDNode *GSDN = dyn_cast<GlobalAddressSDNode>(LHS.getOperand(0));
16279   ConstantSDNode* ConstNode = dyn_cast<ConstantSDNode>(RHS);
16280 
16281   // Check that both casts succeeded.
16282   if (!GSDN || !ConstNode)
16283     return SDValue();
16284 
16285   int64_t NewOffset = GSDN->getOffset() + ConstNode->getSExtValue();
16286   SDLoc DL(GSDN);
16287 
16288   // The signed int offset needs to fit in 34 bits.
16289   if (!isInt<34>(NewOffset))
16290     return SDValue();
16291 
16292   // The new global address is a copy of the old global address except
16293   // that it has the updated Offset.
16294   SDValue GA =
16295       DAG.getTargetGlobalAddress(GSDN->getGlobal(), DL, GSDN->getValueType(0),
16296                                  NewOffset, GSDN->getTargetFlags());
16297   SDValue MatPCRel =
16298       DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, GSDN->getValueType(0), GA);
16299   return MatPCRel;
16300 }
16301 
16302 SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {
16303   if (auto Value = combineADDToADDZE(N, DCI.DAG, Subtarget))
16304     return Value;
16305 
16306   if (auto Value = combineADDToMAT_PCREL_ADDR(N, DCI.DAG, Subtarget))
16307     return Value;
16308 
16309   return SDValue();
16310 }
16311 
16312 // Detect TRUNCATE operations on bitcasts of float128 values.
16313 // What we are looking for here is the situtation where we extract a subset
16314 // of bits from a 128 bit float.
16315 // This can be of two forms:
16316 // 1) BITCAST of f128 feeding TRUNCATE
16317 // 2) BITCAST of f128 feeding SRL (a shift) feeding TRUNCATE
16318 // The reason this is required is because we do not have a legal i128 type
16319 // and so we want to prevent having to store the f128 and then reload part
16320 // of it.
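// For example (illustrative only):
//   (trunc i64 (bitcast i128 (f128 X)))           extracts the low 64 bits
//   (trunc i64 (srl (bitcast i128 (f128 X)), 64)) extracts the high 64 bits
// Both become an EXTRACT_VECTOR_ELT of (bitcast v2i64 X).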
SDValue PPCTargetLowering::combineTRUNCATE(SDNode *N,
                                           DAGCombinerInfo &DCI) const {
  // If we are using CRBits then try that first.
  if (Subtarget.useCRBits()) {
    // Check if CRBits did anything and return that if it did.
    if (SDValue CRTruncValue = DAGCombineTruncBoolExt(N, DCI))
      return CRTruncValue;
  }

  SDLoc dl(N);
  SDValue Op0 = N->getOperand(0);

  // fold (truncate (abs (sub (zext a), (zext b)))) -> (vabsd a, b)
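  // This is safe because the inputs are zero-extended, so the absolute
  // difference always fits back into the narrower element type.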
  if (Subtarget.hasP9Altivec() && Op0.getOpcode() == ISD::ABS) {
    EVT VT = N->getValueType(0);
    if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
      return SDValue();
    SDValue Sub = Op0.getOperand(0);
    if (Sub.getOpcode() == ISD::SUB) {
      SDValue SubOp0 = Sub.getOperand(0);
      SDValue SubOp1 = Sub.getOperand(1);
      if ((SubOp0.getOpcode() == ISD::ZERO_EXTEND) &&
          (SubOp1.getOpcode() == ISD::ZERO_EXTEND)) {
        return DCI.DAG.getNode(PPCISD::VABSD, dl, VT, SubOp0.getOperand(0),
                               SubOp1.getOperand(0),
                               DCI.DAG.getTargetConstant(0, dl, MVT::i32));
      }
    }
  }

  // Looking for a truncate of i128 to i64.
  if (Op0.getValueType() != MVT::i128 || N->getValueType(0) != MVT::i64)
    return SDValue();

  int EltToExtract = DCI.DAG.getDataLayout().isBigEndian() ? 1 : 0;
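  // (A plain truncate takes the low 64 bits, which is v2i64 element 1 on
  // big-endian targets and element 0 on little-endian ones; the SRL-by-64
  // case below selects the other element.)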

  // SRL feeding TRUNCATE.
  if (Op0.getOpcode() == ISD::SRL) {
    ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
    // The right shift has to be by 64 bits.
    if (!ConstNode || ConstNode->getZExtValue() != 64)
      return SDValue();

    // Switch the element number to extract.
    EltToExtract = EltToExtract ? 0 : 1;
    // Update Op0 past the SRL.
    Op0 = Op0.getOperand(0);
  }

  // BITCAST feeding a TRUNCATE possibly via SRL.
  if (Op0.getOpcode() == ISD::BITCAST &&
      Op0.getValueType() == MVT::i128 &&
      Op0.getOperand(0).getValueType() == MVT::f128) {
    SDValue Bitcast = DCI.DAG.getBitcast(MVT::v2i64, Op0.getOperand(0));
    return DCI.DAG.getNode(
        ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Bitcast,
        DCI.DAG.getTargetConstant(EltToExtract, dl, MVT::i32));
  }
  return SDValue();
}

SDValue PPCTargetLowering::combineMUL(SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  ConstantSDNode *ConstOpOrElement = isConstOrConstSplat(N->getOperand(1));
  if (!ConstOpOrElement)
    return SDValue();

  // An imul is usually smaller than the alternative sequence for a legal type.
  if (DAG.getMachineFunction().getFunction().hasMinSize() &&
      isOperationLegal(ISD::MUL, N->getValueType(0)))
    return SDValue();

  auto IsProfitable = [this](bool IsNeg, bool IsAddOne, EVT VT) -> bool {
    switch (this->Subtarget.getCPUDirective()) {
    default:
      // TODO: enhance the condition for subtargets before pwr8.
      return false;
    case PPC::DIR_PWR8:
      //  type        mul     add    shl
      // scalar        4       1      1
      // vector        7       2      2
      return true;
    case PPC::DIR_PWR9:
    case PPC::DIR_PWR10:
    case PPC::DIR_PWR_FUTURE:
      //  type        mul     add    shl
      // scalar        5       2      2
      // vector        7       2      2

      // The table above shows the cycle counts of the relevant operations.
      // Since mul costs 5 (scalar) / 7 (vector) cycles while add, sub and
      // shl each cost 2 for both scalar and vector types, two-instruction
      // patterns (add/sub + shl, 4 cycles total) are always profitable.
      // Three-instruction patterns such as
      // (mul x, -(2^N + 1)) => -(add (shl x, N), x), i.e. sub + add + shl,
      // total 6 cycles, which only beats the 7-cycle vector mul.
      // So do the three-instruction form for vector types only.
      return IsAddOne && IsNeg ? VT.isVector() : true;
    }
  };

  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  const APInt &MulAmt = ConstOpOrElement->getAPIntValue();
  bool IsNeg = MulAmt.isNegative();
  APInt MulAmtAbs = MulAmt.abs();

  if ((MulAmtAbs - 1).isPowerOf2()) {
    // (mul x, 2^N + 1) => (add (shl x, N), x)
    // (mul x, -(2^N + 1)) => -(add (shl x, N), x)
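    // For example, with an illustrative multiplier of 9 == 2^3 + 1:
    //   (mul x, 9)  => (add (shl x, 3), x)
    //   (mul x, -9) => (sub 0, (add (shl x, 3), x))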

    if (!IsProfitable(IsNeg, true, VT))
      return SDValue();

    SDValue Op0 = N->getOperand(0);
    SDValue Op1 =
        DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                    DAG.getConstant((MulAmtAbs - 1).logBase2(), DL, VT));
    SDValue Res = DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);

    if (!IsNeg)
      return Res;

    return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
  } else if ((MulAmtAbs + 1).isPowerOf2()) {
    // (mul x, 2^N - 1) => (sub (shl x, N), x)
    // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
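    // For example, with an illustrative multiplier of 7 == 2^3 - 1:
    //   (mul x, 7)  => (sub (shl x, 3), x)
    //   (mul x, -7) => (sub x, (shl x, 3))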

    if (!IsProfitable(IsNeg, false, VT))
      return SDValue();

    SDValue Op0 = N->getOperand(0);
    SDValue Op1 =
        DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                    DAG.getConstant((MulAmtAbs + 1).logBase2(), DL, VT));

    if (!IsNeg)
      return DAG.getNode(ISD::SUB, DL, VT, Op1, Op0);
    else
      return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);

  } else {
    return SDValue();
  }
}

// Combine an FMA-like op (such as fnmsub) with fnegs into the matching
// opposite op. Do this in the combiner since we need to check SD flags and
// other subtarget features.
SDValue PPCTargetLowering::combineFMALike(SDNode *N,
                                          DAGCombinerInfo &DCI) const {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue N2 = N->getOperand(2);
  SDNodeFlags Flags = N->getFlags();
  EVT VT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;
  const TargetOptions &Options = getTargetMachine().Options;
  unsigned Opc = N->getOpcode();
  bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
  bool LegalOps = !DCI.isBeforeLegalizeOps();
  SDLoc Loc(N);

  if (!isOperationLegal(ISD::FMA, VT))
    return SDValue();

  // Allowing the transformation to FNMSUB may change the sign of zero when
  // a*b-c == 0, since (fnmsub a b c) yields -0.0 while c-a*b yields +0.0.
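  // For example, with a == 1.0, b == 1.0, c == 1.0 the exact result is zero:
  // (fnmsub a b c) = -(a*b - c) = -0.0, whereas c - a*b = +0.0.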
  if (!Flags.hasNoSignedZeros() && !Options.NoSignedZerosFPMath)
    return SDValue();

  // (fma (fneg a) b c) => (fnmsub a b c)
  // (fnmsub (fneg a) b c) => (fma a b c)
  if (SDValue NegN0 = getCheaperNegatedExpression(N0, DAG, LegalOps, CodeSize))
    return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, NegN0, N1, N2, Flags);

  // (fma a (fneg b) c) => (fnmsub a b c)
  // (fnmsub a (fneg b) c) => (fma a b c)
  if (SDValue NegN1 = getCheaperNegatedExpression(N1, DAG, LegalOps, CodeSize))
    return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, N0, NegN1, N2, Flags);

  return SDValue();
}

bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  // Only duplicate to increase tail-calls for the 64-bit SysV ABIs.
  if (!Subtarget.is64BitELFABI())
    return false;

  // If not a tail call then no need to proceed.
  if (!CI->isTailCall())
    return false;

  // If sibling calls have been disabled and tail-calls aren't guaranteed,
  // there is no reason to duplicate.
  auto &TM = getTargetMachine();
  if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
    return false;

  // Can't tail call a function called indirectly, or if it has variadic args.
  const Function *Callee = CI->getCalledFunction();
  if (!Callee || Callee->isVarArg())
    return false;

  // Make sure the callee and caller calling conventions are eligible for tco.
  const Function *Caller = CI->getParent()->getParent();
  if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
                                           CI->getCallingConv()))
    return false;

  // If the function is local then we have a good chance at tail-calling it.
  return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
}

bool PPCTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
  if (!Subtarget.hasVSX())
    return false;
  if (Subtarget.hasP9Vector() && VT == MVT::f128)
    return true;
  return VT == MVT::f32 || VT == MVT::f64 ||
         VT == MVT::v4f32 || VT == MVT::v2f64;
}

bool PPCTargetLowering::
isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
  const Value *Mask = AndI.getOperand(1);
  // If the mask is suitable for andi. or andis. we should sink the and.
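  // For example (illustrative masks): 0xFFFF fits andi., 0xFFFF0000 fits
  // andis., while 0xFF00FF fits neither, so sinking would not pay off.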
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
    // Can't handle constants wider than 64 bits.
    if (CI->getBitWidth() > 64)
      return false;
    int64_t ConstVal = CI->getZExtValue();
    return isUInt<16>(ConstVal) ||
           (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
  }

  // For non-constant masks, we can always use the record-form and.
  return true;
}

// Transform (abs (sub (zext a), (zext b))) to (vabsd a b 0)
// Transform (abs (sub (zext a), (zext_invec b))) to (vabsd a b 0)
// Transform (abs (sub (zext_invec a), (zext_invec b))) to (vabsd a b 0)
// Transform (abs (sub (zext_invec a), (zext b))) to (vabsd a b 0)
// Transform (abs (sub a, b)) to (vabsd a b 1) if a and b are of type v4i32
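// The third vabsd operand selects the lowering: 0 yields the plain unsigned
// absolute-difference instruction, while 1 (v4i32 only) first biases both
// inputs with xvnegsp.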
SDValue PPCTargetLowering::combineABS(SDNode *N, DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::ABS) && "Need ABS node here");
  assert(Subtarget.hasP9Altivec() &&
         "Only combine this when P9 altivec supported!");
  EVT VT = N->getValueType(0);
  if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  if (N->getOperand(0).getOpcode() == ISD::SUB) {
    // This applies even for signed integer types, since zero-extended
    // inputs are known to be non-negative when read as signed integers.
    unsigned SubOpcd0 = N->getOperand(0)->getOperand(0).getOpcode();
    unsigned SubOpcd1 = N->getOperand(0)->getOperand(1).getOpcode();
    if ((SubOpcd0 == ISD::ZERO_EXTEND ||
         SubOpcd0 == ISD::ZERO_EXTEND_VECTOR_INREG) &&
        (SubOpcd1 == ISD::ZERO_EXTEND ||
         SubOpcd1 == ISD::ZERO_EXTEND_VECTOR_INREG)) {
      return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
                         N->getOperand(0)->getOperand(0),
                         N->getOperand(0)->getOperand(1),
                         DAG.getTargetConstant(0, dl, MVT::i32));
    }

    // For type v4i32, it can be optimized with xvnegsp + vabsduw.
    if (N->getOperand(0).getValueType() == MVT::v4i32 &&
        N->getOperand(0).hasOneUse()) {
      return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
                         N->getOperand(0)->getOperand(0),
                         N->getOperand(0)->getOperand(1),
                         DAG.getTargetConstant(1, dl, MVT::i32));
    }
  }

  return SDValue();
}

// For type v4i32/v8i16/v16i8, transform
// from (vselect (setcc a, b, setugt), (sub a, b), (sub b, a)) to (vabsd a, b)
// from (vselect (setcc a, b, setuge), (sub a, b), (sub b, a)) to (vabsd a, b)
// from (vselect (setcc a, b, setult), (sub b, a), (sub a, b)) to (vabsd a, b)
// from (vselect (setcc a, b, setule), (sub b, a), (sub a, b)) to (vabsd a, b)
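// In all four cases the selected value is the lane-wise unsigned absolute
// difference |a - b|, which is exactly what vabsd computes.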
SDValue PPCTargetLowering::combineVSelect(SDNode *N,
                                          DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::VSELECT) && "Need VSELECT node here");
  assert(Subtarget.hasP9Altivec() &&
         "Only combine this when P9 altivec supported!");

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Cond = N->getOperand(0);
  SDValue TrueOpnd = N->getOperand(1);
  SDValue FalseOpnd = N->getOperand(2);
  EVT VT = N->getOperand(1).getValueType();

  if (Cond.getOpcode() != ISD::SETCC || TrueOpnd.getOpcode() != ISD::SUB ||
      FalseOpnd.getOpcode() != ISD::SUB)
    return SDValue();

  // ABSD is only available for type v4i32/v8i16/v16i8.
  if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
    return SDValue();

  // Require at least one operand to have a single use, so the combine saves
  // at least one dependent computation.
  if (!(Cond.hasOneUse() || TrueOpnd.hasOneUse() || FalseOpnd.hasOneUse()))
    return SDValue();

  ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

  // Can only handle unsigned comparisons here.
  switch (CC) {
  default:
    return SDValue();
  case ISD::SETUGT:
  case ISD::SETUGE:
    break;
  case ISD::SETULT:
  case ISD::SETULE:
    std::swap(TrueOpnd, FalseOpnd);
    break;
  }

  SDValue CmpOpnd1 = Cond.getOperand(0);
  SDValue CmpOpnd2 = Cond.getOperand(1);

  // SETCC CmpOpnd1 CmpOpnd2 cond
  // TrueOpnd = CmpOpnd1 - CmpOpnd2
  // FalseOpnd = CmpOpnd2 - CmpOpnd1
  if (TrueOpnd.getOperand(0) == CmpOpnd1 &&
      TrueOpnd.getOperand(1) == CmpOpnd2 &&
      FalseOpnd.getOperand(0) == CmpOpnd2 &&
      FalseOpnd.getOperand(1) == CmpOpnd1) {
    return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(1).getValueType(),
                       CmpOpnd1, CmpOpnd2,
                       DAG.getTargetConstant(0, dl, MVT::i32));
  }

  return SDValue();
}