1 //===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the PPCISelLowering class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "PPCISelLowering.h"
14 #include "MCTargetDesc/PPCPredicates.h"
15 #include "PPC.h"
16 #include "PPCCCState.h"
17 #include "PPCCallingConv.h"
18 #include "PPCFrameLowering.h"
19 #include "PPCInstrInfo.h"
20 #include "PPCMachineFunctionInfo.h"
21 #include "PPCPerfectShuffle.h"
22 #include "PPCRegisterInfo.h"
23 #include "PPCSubtarget.h"
24 #include "PPCTargetMachine.h"
25 #include "llvm/ADT/APFloat.h"
26 #include "llvm/ADT/APInt.h"
27 #include "llvm/ADT/ArrayRef.h"
28 #include "llvm/ADT/DenseMap.h"
29 #include "llvm/ADT/None.h"
30 #include "llvm/ADT/STLExtras.h"
31 #include "llvm/ADT/SmallPtrSet.h"
32 #include "llvm/ADT/SmallSet.h"
33 #include "llvm/ADT/SmallVector.h"
34 #include "llvm/ADT/Statistic.h"
35 #include "llvm/ADT/StringRef.h"
36 #include "llvm/ADT/StringSwitch.h"
37 #include "llvm/CodeGen/CallingConvLower.h"
38 #include "llvm/CodeGen/ISDOpcodes.h"
39 #include "llvm/CodeGen/MachineBasicBlock.h"
40 #include "llvm/CodeGen/MachineFrameInfo.h"
41 #include "llvm/CodeGen/MachineFunction.h"
42 #include "llvm/CodeGen/MachineInstr.h"
43 #include "llvm/CodeGen/MachineInstrBuilder.h"
44 #include "llvm/CodeGen/MachineJumpTableInfo.h"
45 #include "llvm/CodeGen/MachineLoopInfo.h"
46 #include "llvm/CodeGen/MachineMemOperand.h"
47 #include "llvm/CodeGen/MachineModuleInfo.h"
48 #include "llvm/CodeGen/MachineOperand.h"
49 #include "llvm/CodeGen/MachineRegisterInfo.h"
50 #include "llvm/CodeGen/RuntimeLibcalls.h"
51 #include "llvm/CodeGen/SelectionDAG.h"
52 #include "llvm/CodeGen/SelectionDAGNodes.h"
53 #include "llvm/CodeGen/TargetInstrInfo.h"
54 #include "llvm/CodeGen/TargetLowering.h"
55 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
56 #include "llvm/CodeGen/TargetRegisterInfo.h"
57 #include "llvm/CodeGen/ValueTypes.h"
58 #include "llvm/IR/CallingConv.h"
59 #include "llvm/IR/Constant.h"
60 #include "llvm/IR/Constants.h"
61 #include "llvm/IR/DataLayout.h"
62 #include "llvm/IR/DebugLoc.h"
63 #include "llvm/IR/DerivedTypes.h"
64 #include "llvm/IR/Function.h"
65 #include "llvm/IR/GlobalValue.h"
66 #include "llvm/IR/IRBuilder.h"
67 #include "llvm/IR/Instructions.h"
68 #include "llvm/IR/Intrinsics.h"
69 #include "llvm/IR/IntrinsicsPowerPC.h"
70 #include "llvm/IR/Module.h"
71 #include "llvm/IR/Type.h"
72 #include "llvm/IR/Use.h"
73 #include "llvm/IR/Value.h"
74 #include "llvm/MC/MCContext.h"
75 #include "llvm/MC/MCExpr.h"
76 #include "llvm/MC/MCRegisterInfo.h"
77 #include "llvm/MC/MCSectionXCOFF.h"
78 #include "llvm/MC/MCSymbolXCOFF.h"
79 #include "llvm/Support/AtomicOrdering.h"
80 #include "llvm/Support/BranchProbability.h"
81 #include "llvm/Support/Casting.h"
82 #include "llvm/Support/CodeGen.h"
83 #include "llvm/Support/CommandLine.h"
84 #include "llvm/Support/Compiler.h"
85 #include "llvm/Support/Debug.h"
86 #include "llvm/Support/ErrorHandling.h"
87 #include "llvm/Support/Format.h"
88 #include "llvm/Support/KnownBits.h"
89 #include "llvm/Support/MachineValueType.h"
90 #include "llvm/Support/MathExtras.h"
91 #include "llvm/Support/raw_ostream.h"
92 #include "llvm/Target/TargetMachine.h"
93 #include "llvm/Target/TargetOptions.h"
94 #include <algorithm>
95 #include <cassert>
96 #include <cstdint>
97 #include <iterator>
98 #include <list>
99 #include <utility>
100 #include <vector>
101 
102 using namespace llvm;
103 
104 #define DEBUG_TYPE "ppc-lowering"
105 
106 static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
107 cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);
108 
109 static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
110 cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden);
111 
112 static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
113 cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);
114 
115 static cl::opt<bool> DisableSCO("disable-ppc-sco",
116 cl::desc("disable sibling call optimization on ppc"), cl::Hidden);
117 
118 static cl::opt<bool> DisableInnermostLoopAlign32("disable-ppc-innermost-loop-align32",
119 cl::desc("don't always align innermost loop to 32 bytes on ppc"), cl::Hidden);
120 
121 static cl::opt<bool> UseAbsoluteJumpTables("ppc-use-absolute-jumptables",
122 cl::desc("use absolute jump tables on ppc"), cl::Hidden);
123 
124 static cl::opt<bool> EnablePPCPCRelTLS(
125     "enable-ppc-pcrel-tls",
126     cl::desc("enable the use of PC relative memops in TLS instructions on PPC"),
127     cl::Hidden);
128 
129 STATISTIC(NumTailCalls, "Number of tail calls");
130 STATISTIC(NumSiblingCalls, "Number of sibling calls");
131 STATISTIC(ShufflesHandledWithVPERM, "Number of shuffles lowered to a VPERM");
132 STATISTIC(NumDynamicAllocaProbed, "Number of dynamic stack allocation probed");
133 
134 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int);
135 
136 static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl);
137 
138 // FIXME: Remove this once the bug has been fixed!
139 extern cl::opt<bool> ANDIGlueBug;
140 
141 PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
142                                      const PPCSubtarget &STI)
143     : TargetLowering(TM), Subtarget(STI) {
144   // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
145   // arguments are at least 4/8 bytes aligned.
146   bool isPPC64 = Subtarget.isPPC64();
147   setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4));
148 
149   // Set up the register classes.
150   addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
151   if (!useSoftFloat()) {
152     if (hasSPE()) {
153       addRegisterClass(MVT::f32, &PPC::GPRCRegClass);
154       addRegisterClass(MVT::f64, &PPC::SPERCRegClass);
155     } else {
156       addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
157       addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
158     }
159   }
160 
161   // Match BITREVERSE to customized fast code sequence in the td file.
162   setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
163   setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
164 
165   // Sub-word ATOMIC_CMP_SWAP need to ensure that the input is zero-extended.
166   setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
167 
168   // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
169   for (MVT VT : MVT::integer_valuetypes()) {
170     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
171     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
172   }
173 
174   if (Subtarget.isISA3_0()) {
175     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Legal);
176     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Legal);
177     setTruncStoreAction(MVT::f64, MVT::f16, Legal);
178     setTruncStoreAction(MVT::f32, MVT::f16, Legal);
179   } else {
180     // No extending loads from f16 or HW conversions back and forth.
181     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
182     setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
183     setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
184     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
185     setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
186     setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
187     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
188     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
189   }
190 
191   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
192 
193   // PowerPC has pre-inc load and store's.
194   setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
195   setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
196   setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
197   setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
198   setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
199   setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
200   setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
201   setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
202   setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
203   setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
204   if (!Subtarget.hasSPE()) {
205     setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
206     setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
207     setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
208     setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
209   }
210 
211   // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
212   const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
213   for (MVT VT : ScalarIntVTs) {
214     setOperationAction(ISD::ADDC, VT, Legal);
215     setOperationAction(ISD::ADDE, VT, Legal);
216     setOperationAction(ISD::SUBC, VT, Legal);
217     setOperationAction(ISD::SUBE, VT, Legal);
218   }
219 
220   if (Subtarget.useCRBits()) {
221     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
222 
223     if (isPPC64 || Subtarget.hasFPCVT()) {
224       setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
225       AddPromotedToType (ISD::SINT_TO_FP, MVT::i1,
226                          isPPC64 ? MVT::i64 : MVT::i32);
227       setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
228       AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
229                         isPPC64 ? MVT::i64 : MVT::i32);
230     } else {
231       setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
232       setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
233     }
234 
235     // PowerPC does not support direct load/store of condition registers.
236     setOperationAction(ISD::LOAD, MVT::i1, Custom);
237     setOperationAction(ISD::STORE, MVT::i1, Custom);
238 
239     // FIXME: Remove this once the ANDI glue bug is fixed:
240     if (ANDIGlueBug)
241       setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
242 
243     for (MVT VT : MVT::integer_valuetypes()) {
244       setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
245       setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
246       setTruncStoreAction(VT, MVT::i1, Expand);
247     }
248 
249     addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
250   }
251 
252   // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
253   // PPC (the libcall is not available).
254   setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
255   setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);
256 
257   // We do not currently implement these libm ops for PowerPC.
258   setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
259   setOperationAction(ISD::FCEIL,  MVT::ppcf128, Expand);
260   setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
261   setOperationAction(ISD::FRINT,  MVT::ppcf128, Expand);
262   setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
263   setOperationAction(ISD::FREM, MVT::ppcf128, Expand);
264 
265   // PowerPC has no SREM/UREM instructions unless we are on P9
266   // On P9 we may use a hardware instruction to compute the remainder.
267   // When the result of both the remainder and the division is required it is
268   // more efficient to compute the remainder from the result of the division
269   // rather than use the remainder instruction. The instructions are legalized
270   // directly because the DivRemPairsPass performs the transformation at the IR
271   // level.
272   if (Subtarget.isISA3_0()) {
273     setOperationAction(ISD::SREM, MVT::i32, Legal);
274     setOperationAction(ISD::UREM, MVT::i32, Legal);
275     setOperationAction(ISD::SREM, MVT::i64, Legal);
276     setOperationAction(ISD::UREM, MVT::i64, Legal);
277   } else {
278     setOperationAction(ISD::SREM, MVT::i32, Expand);
279     setOperationAction(ISD::UREM, MVT::i32, Expand);
280     setOperationAction(ISD::SREM, MVT::i64, Expand);
281     setOperationAction(ISD::UREM, MVT::i64, Expand);
282   }
283 
284   // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
285   setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
286   setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
287   setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
288   setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
289   setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
290   setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
291   setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
292   setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
293 
294   // Handle constrained floating-point operations of scalar.
295   // TODO: Handle SPE specific operation.
296   setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
297   setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
298   setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
299   setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
300   setOperationAction(ISD::STRICT_FMA, MVT::f32, Legal);
301   setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
302 
303   setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
304   setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
305   setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
306   setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);
307   setOperationAction(ISD::STRICT_FMA, MVT::f64, Legal);
308   if (Subtarget.hasVSX())
309     setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f64, Legal);
310 
311   if (Subtarget.hasFSQRT()) {
312     setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
313     setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);
314   }
315 
316   if (Subtarget.hasFPRND()) {
317     setOperationAction(ISD::STRICT_FFLOOR, MVT::f32, Legal);
318     setOperationAction(ISD::STRICT_FCEIL,  MVT::f32, Legal);
319     setOperationAction(ISD::STRICT_FTRUNC, MVT::f32, Legal);
320     setOperationAction(ISD::STRICT_FROUND, MVT::f32, Legal);
321 
322     setOperationAction(ISD::STRICT_FFLOOR, MVT::f64, Legal);
323     setOperationAction(ISD::STRICT_FCEIL,  MVT::f64, Legal);
324     setOperationAction(ISD::STRICT_FTRUNC, MVT::f64, Legal);
325     setOperationAction(ISD::STRICT_FROUND, MVT::f64, Legal);
326   }
327 
328   // We don't support sin/cos/sqrt/fmod/pow
329   setOperationAction(ISD::FSIN , MVT::f64, Expand);
330   setOperationAction(ISD::FCOS , MVT::f64, Expand);
331   setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
332   setOperationAction(ISD::FREM , MVT::f64, Expand);
333   setOperationAction(ISD::FPOW , MVT::f64, Expand);
334   setOperationAction(ISD::FSIN , MVT::f32, Expand);
335   setOperationAction(ISD::FCOS , MVT::f32, Expand);
336   setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
337   setOperationAction(ISD::FREM , MVT::f32, Expand);
338   setOperationAction(ISD::FPOW , MVT::f32, Expand);
339   if (Subtarget.hasSPE()) {
340     setOperationAction(ISD::FMA  , MVT::f64, Expand);
341     setOperationAction(ISD::FMA  , MVT::f32, Expand);
342   } else {
343     setOperationAction(ISD::FMA  , MVT::f64, Legal);
344     setOperationAction(ISD::FMA  , MVT::f32, Legal);
345   }
346 
347   if (Subtarget.hasSPE())
348     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
349 
350   setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
351 
352   // If we're enabling GP optimizations, use hardware square root
353   if (!Subtarget.hasFSQRT() &&
354       !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
355         Subtarget.hasFRE()))
356     setOperationAction(ISD::FSQRT, MVT::f64, Expand);
357 
358   if (!Subtarget.hasFSQRT() &&
359       !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
360         Subtarget.hasFRES()))
361     setOperationAction(ISD::FSQRT, MVT::f32, Expand);
362 
363   if (Subtarget.hasFCPSGN()) {
364     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
365     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
366   } else {
367     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
368     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
369   }
370 
371   if (Subtarget.hasFPRND()) {
372     setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
373     setOperationAction(ISD::FCEIL,  MVT::f64, Legal);
374     setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
375     setOperationAction(ISD::FROUND, MVT::f64, Legal);
376 
377     setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
378     setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
379     setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
380     setOperationAction(ISD::FROUND, MVT::f32, Legal);
381   }
382 
383   // PowerPC does not have BSWAP, but we can use vector BSWAP instruction xxbrd
384   // to speed up scalar BSWAP64.
385   // CTPOP or CTTZ were introduced in P8/P9 respectively
386   setOperationAction(ISD::BSWAP, MVT::i32  , Expand);
387   if (Subtarget.hasP9Vector())
388     setOperationAction(ISD::BSWAP, MVT::i64  , Custom);
389   else
390     setOperationAction(ISD::BSWAP, MVT::i64  , Expand);
391   if (Subtarget.isISA3_0()) {
392     setOperationAction(ISD::CTTZ , MVT::i32  , Legal);
393     setOperationAction(ISD::CTTZ , MVT::i64  , Legal);
394   } else {
395     setOperationAction(ISD::CTTZ , MVT::i32  , Expand);
396     setOperationAction(ISD::CTTZ , MVT::i64  , Expand);
397   }
398 
399   if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
400     setOperationAction(ISD::CTPOP, MVT::i32  , Legal);
401     setOperationAction(ISD::CTPOP, MVT::i64  , Legal);
402   } else {
403     setOperationAction(ISD::CTPOP, MVT::i32  , Expand);
404     setOperationAction(ISD::CTPOP, MVT::i64  , Expand);
405   }
406 
407   // PowerPC does not have ROTR
408   setOperationAction(ISD::ROTR, MVT::i32   , Expand);
409   setOperationAction(ISD::ROTR, MVT::i64   , Expand);
410 
411   if (!Subtarget.useCRBits()) {
412     // PowerPC does not have Select
413     setOperationAction(ISD::SELECT, MVT::i32, Expand);
414     setOperationAction(ISD::SELECT, MVT::i64, Expand);
415     setOperationAction(ISD::SELECT, MVT::f32, Expand);
416     setOperationAction(ISD::SELECT, MVT::f64, Expand);
417   }
418 
419   // PowerPC wants to turn select_cc of FP into fsel when possible.
420   setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
421   setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
422 
423   // PowerPC wants to optimize integer setcc a bit
424   if (!Subtarget.useCRBits())
425     setOperationAction(ISD::SETCC, MVT::i32, Custom);
426 
427   if (Subtarget.hasFPU()) {
428     setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Legal);
429     setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Legal);
430     setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Legal);
431 
432     setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal);
433     setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal);
434     setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Legal);
435   }
436 
437   // PowerPC does not have BRCOND which requires SetCC
438   if (!Subtarget.useCRBits())
439     setOperationAction(ISD::BRCOND, MVT::Other, Expand);
440 
441   setOperationAction(ISD::BR_JT,  MVT::Other, Expand);
442 
443   if (Subtarget.hasSPE()) {
444     // SPE has built-in conversions
445     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Legal);
446     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Legal);
447     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Legal);
448     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
449     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
450     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
451   } else {
452     // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
453     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
454 
455     // PowerPC does not have [U|S]INT_TO_FP
456     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
457     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
458   }
459 
460   if (Subtarget.hasDirectMove() && isPPC64) {
461     setOperationAction(ISD::BITCAST, MVT::f32, Legal);
462     setOperationAction(ISD::BITCAST, MVT::i32, Legal);
463     setOperationAction(ISD::BITCAST, MVT::i64, Legal);
464     setOperationAction(ISD::BITCAST, MVT::f64, Legal);
465     if (TM.Options.UnsafeFPMath) {
466       setOperationAction(ISD::LRINT, MVT::f64, Legal);
467       setOperationAction(ISD::LRINT, MVT::f32, Legal);
468       setOperationAction(ISD::LLRINT, MVT::f64, Legal);
469       setOperationAction(ISD::LLRINT, MVT::f32, Legal);
470       setOperationAction(ISD::LROUND, MVT::f64, Legal);
471       setOperationAction(ISD::LROUND, MVT::f32, Legal);
472       setOperationAction(ISD::LLROUND, MVT::f64, Legal);
473       setOperationAction(ISD::LLROUND, MVT::f32, Legal);
474     }
475   } else {
476     setOperationAction(ISD::BITCAST, MVT::f32, Expand);
477     setOperationAction(ISD::BITCAST, MVT::i32, Expand);
478     setOperationAction(ISD::BITCAST, MVT::i64, Expand);
479     setOperationAction(ISD::BITCAST, MVT::f64, Expand);
480   }
481 
482   // We cannot sextinreg(i1).  Expand to shifts.
483   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
484 
485   // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
486   // SjLj exception handling but a light-weight setjmp/longjmp replacement to
487   // support continuation, user-level threading, and etc.. As a result, no
488   // other SjLj exception interfaces are implemented and please don't build
489   // your own exception handling based on them.
490   // LLVM/Clang supports zero-cost DWARF exception handling.
491   setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
492   setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
493 
494   // We want to legalize GlobalAddress and ConstantPool nodes into the
495   // appropriate instructions to materialize the address.
496   setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
497   setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
498   setOperationAction(ISD::BlockAddress,  MVT::i32, Custom);
499   setOperationAction(ISD::ConstantPool,  MVT::i32, Custom);
500   setOperationAction(ISD::JumpTable,     MVT::i32, Custom);
501   setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
502   setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
503   setOperationAction(ISD::BlockAddress,  MVT::i64, Custom);
504   setOperationAction(ISD::ConstantPool,  MVT::i64, Custom);
505   setOperationAction(ISD::JumpTable,     MVT::i64, Custom);
506 
507   // TRAP is legal.
508   setOperationAction(ISD::TRAP, MVT::Other, Legal);
509 
510   // TRAMPOLINE is custom lowered.
511   setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
512   setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
513 
514   // VASTART needs to be custom lowered to use the VarArgsFrameIndex
515   setOperationAction(ISD::VASTART           , MVT::Other, Custom);
516 
517   if (Subtarget.is64BitELFABI()) {
518     // VAARG always uses double-word chunks, so promote anything smaller.
519     setOperationAction(ISD::VAARG, MVT::i1, Promote);
520     AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
521     setOperationAction(ISD::VAARG, MVT::i8, Promote);
522     AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
523     setOperationAction(ISD::VAARG, MVT::i16, Promote);
524     AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
525     setOperationAction(ISD::VAARG, MVT::i32, Promote);
526     AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
527     setOperationAction(ISD::VAARG, MVT::Other, Expand);
528   } else if (Subtarget.is32BitELFABI()) {
529     // VAARG is custom lowered with the 32-bit SVR4 ABI.
530     setOperationAction(ISD::VAARG, MVT::Other, Custom);
531     setOperationAction(ISD::VAARG, MVT::i64, Custom);
532   } else
533     setOperationAction(ISD::VAARG, MVT::Other, Expand);
534 
535   // VACOPY is custom lowered with the 32-bit SVR4 ABI.
536   if (Subtarget.is32BitELFABI())
537     setOperationAction(ISD::VACOPY            , MVT::Other, Custom);
538   else
539     setOperationAction(ISD::VACOPY            , MVT::Other, Expand);
540 
541   // Use the default implementation.
542   setOperationAction(ISD::VAEND             , MVT::Other, Expand);
543   setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
544   setOperationAction(ISD::STACKRESTORE      , MVT::Other, Custom);
545   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
546   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64  , Custom);
547   setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
548   setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
549   setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
550   setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);
551 
552   // We want to custom lower some of our intrinsics.
553   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
554 
555   // To handle counter-based loop conditions.
556   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);
557 
558   setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
559   setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
560   setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
561   setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
562 
563   // Comparisons that require checking two conditions.
564   if (Subtarget.hasSPE()) {
565     setCondCodeAction(ISD::SETO, MVT::f32, Expand);
566     setCondCodeAction(ISD::SETO, MVT::f64, Expand);
567     setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
568     setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
569   }
570   setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
571   setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
572   setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
573   setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
574   setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
575   setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
576   setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
577   setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
578   setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
579   setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
580   setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
581   setCondCodeAction(ISD::SETONE, MVT::f64, Expand);
582 
583   if (Subtarget.has64BitSupport()) {
584     // They also have instructions for converting between i64 and fp.
585     setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
586     setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
587     setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
588     setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
589     // This is just the low 32 bits of a (signed) fp->i64 conversion.
590     // We cannot do this with Promote because i64 is not a legal type.
591     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
592 
593     if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
594       setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
595   } else {
596     // PowerPC does not have FP_TO_UINT on 32-bit implementations.
597     if (Subtarget.hasSPE()) {
598       setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Legal);
599       setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
600     } else
601       setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
602   }
603 
604   // With the instructions enabled under FPCVT, we can do everything.
605   if (Subtarget.hasFPCVT()) {
606     if (Subtarget.has64BitSupport()) {
607       setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
608       setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
609       setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
610       setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
611     }
612 
613     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
614     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
615     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
616     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
617   }
618 
619   if (Subtarget.use64BitRegs()) {
620     // 64-bit PowerPC implementations can support i64 types directly
621     addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
622     // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
623     setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
624     // 64-bit PowerPC wants to expand i128 shifts itself.
625     setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
626     setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
627     setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
628   } else {
629     // 32-bit PowerPC wants to expand i64 shifts itself.
630     setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
631     setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
632     setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
633   }
634 
635   // PowerPC has better expansions for funnel shifts than the generic
636   // TargetLowering::expandFunnelShift.
637   if (Subtarget.has64BitSupport()) {
638     setOperationAction(ISD::FSHL, MVT::i64, Custom);
639     setOperationAction(ISD::FSHR, MVT::i64, Custom);
640   }
641   setOperationAction(ISD::FSHL, MVT::i32, Custom);
642   setOperationAction(ISD::FSHR, MVT::i32, Custom);
643 
644   if (Subtarget.hasVSX()) {
645     setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
646     setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
647     setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
648     setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
649   }
650 
651   if (Subtarget.hasAltivec()) {
652     for (MVT VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
653       setOperationAction(ISD::SADDSAT, VT, Legal);
654       setOperationAction(ISD::SSUBSAT, VT, Legal);
655       setOperationAction(ISD::UADDSAT, VT, Legal);
656       setOperationAction(ISD::USUBSAT, VT, Legal);
657     }
658     // First set operation action for all vector types to expand. Then we
659     // will selectively turn on ones that can be effectively codegen'd.
660     for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
661       // add/sub are legal for all supported vector VT's.
662       setOperationAction(ISD::ADD, VT, Legal);
663       setOperationAction(ISD::SUB, VT, Legal);
664 
665       // For v2i64, these are only valid with P8Vector. This is corrected after
666       // the loop.
667       if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) {
668         setOperationAction(ISD::SMAX, VT, Legal);
669         setOperationAction(ISD::SMIN, VT, Legal);
670         setOperationAction(ISD::UMAX, VT, Legal);
671         setOperationAction(ISD::UMIN, VT, Legal);
672       }
673       else {
674         setOperationAction(ISD::SMAX, VT, Expand);
675         setOperationAction(ISD::SMIN, VT, Expand);
676         setOperationAction(ISD::UMAX, VT, Expand);
677         setOperationAction(ISD::UMIN, VT, Expand);
678       }
679 
680       if (Subtarget.hasVSX()) {
681         setOperationAction(ISD::FMAXNUM, VT, Legal);
682         setOperationAction(ISD::FMINNUM, VT, Legal);
683       }
684 
685       // Vector instructions introduced in P8
686       if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
687         setOperationAction(ISD::CTPOP, VT, Legal);
688         setOperationAction(ISD::CTLZ, VT, Legal);
689       }
690       else {
691         setOperationAction(ISD::CTPOP, VT, Expand);
692         setOperationAction(ISD::CTLZ, VT, Expand);
693       }
694 
695       // Vector instructions introduced in P9
696       if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
697         setOperationAction(ISD::CTTZ, VT, Legal);
698       else
699         setOperationAction(ISD::CTTZ, VT, Expand);
700 
701       // We promote all shuffles to v16i8.
702       setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
703       AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);
704 
705       // We promote all non-typed operations to v4i32.
706       setOperationAction(ISD::AND   , VT, Promote);
707       AddPromotedToType (ISD::AND   , VT, MVT::v4i32);
708       setOperationAction(ISD::OR    , VT, Promote);
709       AddPromotedToType (ISD::OR    , VT, MVT::v4i32);
710       setOperationAction(ISD::XOR   , VT, Promote);
711       AddPromotedToType (ISD::XOR   , VT, MVT::v4i32);
712       setOperationAction(ISD::LOAD  , VT, Promote);
713       AddPromotedToType (ISD::LOAD  , VT, MVT::v4i32);
714       setOperationAction(ISD::SELECT, VT, Promote);
715       AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
716       setOperationAction(ISD::VSELECT, VT, Legal);
717       setOperationAction(ISD::SELECT_CC, VT, Promote);
718       AddPromotedToType (ISD::SELECT_CC, VT, MVT::v4i32);
719       setOperationAction(ISD::STORE, VT, Promote);
720       AddPromotedToType (ISD::STORE, VT, MVT::v4i32);
721 
722       // No other operations are legal.
723       setOperationAction(ISD::MUL , VT, Expand);
724       setOperationAction(ISD::SDIV, VT, Expand);
725       setOperationAction(ISD::SREM, VT, Expand);
726       setOperationAction(ISD::UDIV, VT, Expand);
727       setOperationAction(ISD::UREM, VT, Expand);
728       setOperationAction(ISD::FDIV, VT, Expand);
729       setOperationAction(ISD::FREM, VT, Expand);
730       setOperationAction(ISD::FNEG, VT, Expand);
731       setOperationAction(ISD::FSQRT, VT, Expand);
732       setOperationAction(ISD::FLOG, VT, Expand);
733       setOperationAction(ISD::FLOG10, VT, Expand);
734       setOperationAction(ISD::FLOG2, VT, Expand);
735       setOperationAction(ISD::FEXP, VT, Expand);
736       setOperationAction(ISD::FEXP2, VT, Expand);
737       setOperationAction(ISD::FSIN, VT, Expand);
738       setOperationAction(ISD::FCOS, VT, Expand);
739       setOperationAction(ISD::FABS, VT, Expand);
740       setOperationAction(ISD::FFLOOR, VT, Expand);
741       setOperationAction(ISD::FCEIL,  VT, Expand);
742       setOperationAction(ISD::FTRUNC, VT, Expand);
743       setOperationAction(ISD::FRINT,  VT, Expand);
744       setOperationAction(ISD::FNEARBYINT, VT, Expand);
745       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
746       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
747       setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
748       setOperationAction(ISD::MULHU, VT, Expand);
749       setOperationAction(ISD::MULHS, VT, Expand);
750       setOperationAction(ISD::UMUL_LOHI, VT, Expand);
751       setOperationAction(ISD::SMUL_LOHI, VT, Expand);
752       setOperationAction(ISD::UDIVREM, VT, Expand);
753       setOperationAction(ISD::SDIVREM, VT, Expand);
754       setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
755       setOperationAction(ISD::FPOW, VT, Expand);
756       setOperationAction(ISD::BSWAP, VT, Expand);
757       setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
758       setOperationAction(ISD::ROTL, VT, Expand);
759       setOperationAction(ISD::ROTR, VT, Expand);
760 
761       for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
762         setTruncStoreAction(VT, InnerVT, Expand);
763         setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
764         setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
765         setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
766       }
767     }
768     setOperationAction(ISD::SELECT_CC, MVT::v4i32, Expand);
769     if (!Subtarget.hasP8Vector()) {
770       setOperationAction(ISD::SMAX, MVT::v2i64, Expand);
771       setOperationAction(ISD::SMIN, MVT::v2i64, Expand);
772       setOperationAction(ISD::UMAX, MVT::v2i64, Expand);
773       setOperationAction(ISD::UMIN, MVT::v2i64, Expand);
774     }
775 
776     for (auto VT : {MVT::v2i64, MVT::v4i32, MVT::v8i16, MVT::v16i8})
777       setOperationAction(ISD::ABS, VT, Custom);
778 
779     // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
780     // with merges, splats, etc.
781     setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
782 
783     // Vector truncates to sub-word integer that fit in an Altivec/VSX register
784     // are cheap, so handle them before they get expanded to scalar.
785     setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
786     setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
787     setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
788     setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
789     setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);
790 
791     setOperationAction(ISD::AND   , MVT::v4i32, Legal);
792     setOperationAction(ISD::OR    , MVT::v4i32, Legal);
793     setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
794     setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
795     setOperationAction(ISD::SELECT, MVT::v4i32,
796                        Subtarget.useCRBits() ? Legal : Expand);
797     setOperationAction(ISD::STORE , MVT::v4i32, Legal);
798     setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
799     setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
800     setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
801     setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
802     setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
803     setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
804     setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
805     setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
806 
807     // Without hasP8Altivec set, v2i64 SMAX isn't available.
808     // But ABS custom lowering requires SMAX support.
809     if (!Subtarget.hasP8Altivec())
810       setOperationAction(ISD::ABS, MVT::v2i64, Expand);
811 
812     // Custom lowering ROTL v1i128 to VECTOR_SHUFFLE v16i8.
813     setOperationAction(ISD::ROTL, MVT::v1i128, Custom);
814     // With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w).
815     if (Subtarget.hasAltivec())
816       for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})
817         setOperationAction(ISD::ROTL, VT, Legal);
818     // With hasP8Altivec set, we can lower ISD::ROTL to vrld.
819     if (Subtarget.hasP8Altivec())
820       setOperationAction(ISD::ROTL, MVT::v2i64, Legal);
821 
822     addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
823     addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
824     addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
825     addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);
826 
827     setOperationAction(ISD::MUL, MVT::v4f32, Legal);
828     setOperationAction(ISD::FMA, MVT::v4f32, Legal);
829 
830     if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
831       setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
832       setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
833     }
834 
835     if (Subtarget.hasP8Altivec())
836       setOperationAction(ISD::MUL, MVT::v4i32, Legal);
837     else
838       setOperationAction(ISD::MUL, MVT::v4i32, Custom);
839 
840     if (Subtarget.isISA3_1()) {
841       setOperationAction(ISD::MUL, MVT::v2i64, Legal);
842       setOperationAction(ISD::MULHS, MVT::v2i64, Legal);
843       setOperationAction(ISD::MULHU, MVT::v2i64, Legal);
844       setOperationAction(ISD::MULHS, MVT::v4i32, Legal);
845       setOperationAction(ISD::MULHU, MVT::v4i32, Legal);
846       setOperationAction(ISD::UDIV, MVT::v2i64, Legal);
847       setOperationAction(ISD::SDIV, MVT::v2i64, Legal);
848       setOperationAction(ISD::UDIV, MVT::v4i32, Legal);
849       setOperationAction(ISD::SDIV, MVT::v4i32, Legal);
850       setOperationAction(ISD::UREM, MVT::v2i64, Legal);
851       setOperationAction(ISD::SREM, MVT::v2i64, Legal);
852       setOperationAction(ISD::UREM, MVT::v4i32, Legal);
853       setOperationAction(ISD::SREM, MVT::v4i32, Legal);
854     }
855 
856     setOperationAction(ISD::MUL, MVT::v8i16, Legal);
857     setOperationAction(ISD::MUL, MVT::v16i8, Custom);
858 
859     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
860     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);
861 
862     setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
863     setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
864     setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
865     setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
866 
867     // Altivec does not contain unordered floating-point compare instructions
868     setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
869     setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
870     setCondCodeAction(ISD::SETO,   MVT::v4f32, Expand);
871     setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);
872 
873     if (Subtarget.hasVSX()) {
874       setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
875       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
876       if (Subtarget.hasP8Vector()) {
877         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
878         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
879       }
880       if (Subtarget.hasDirectMove() && isPPC64) {
881         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
882         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
883         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
884         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
885         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
886         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
887         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
888         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
889       }
890       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
891 
892       // The nearbyint variants are not allowed to raise the inexact exception
893       // so we can only code-gen them with unsafe math.
894       if (TM.Options.UnsafeFPMath) {
895         setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
896         setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
897       }
898 
899       setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
900       setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
901       setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
902       setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
903       setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
904       setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
905       setOperationAction(ISD::FROUND, MVT::f64, Legal);
906       setOperationAction(ISD::FRINT, MVT::f64, Legal);
907 
908       setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
909       setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
910       setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
911       setOperationAction(ISD::FROUND, MVT::f32, Legal);
912       setOperationAction(ISD::FRINT, MVT::f32, Legal);
913 
914       setOperationAction(ISD::MUL, MVT::v2f64, Legal);
915       setOperationAction(ISD::FMA, MVT::v2f64, Legal);
916 
917       setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
918       setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
919 
920       // Share the Altivec comparison restrictions.
921       setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
922       setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
923       setCondCodeAction(ISD::SETO,   MVT::v2f64, Expand);
924       setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);
925 
926       setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
927       setOperationAction(ISD::STORE, MVT::v2f64, Legal);
928 
929       setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);
930 
931       if (Subtarget.hasP8Vector())
932         addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);
933 
934       addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);
935 
936       addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
937       addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
938       addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);
939 
940       if (Subtarget.hasP8Altivec()) {
941         setOperationAction(ISD::SHL, MVT::v2i64, Legal);
942         setOperationAction(ISD::SRA, MVT::v2i64, Legal);
943         setOperationAction(ISD::SRL, MVT::v2i64, Legal);
944 
945         // 128 bit shifts can be accomplished via 3 instructions for SHL and
946         // SRL, but not for SRA because of the instructions available:
947         // VS{RL} and VS{RL}O. However due to direct move costs, it's not worth
948         // doing
949         setOperationAction(ISD::SHL, MVT::v1i128, Expand);
950         setOperationAction(ISD::SRL, MVT::v1i128, Expand);
951         setOperationAction(ISD::SRA, MVT::v1i128, Expand);
952 
953         setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
954       }
955       else {
956         setOperationAction(ISD::SHL, MVT::v2i64, Expand);
957         setOperationAction(ISD::SRA, MVT::v2i64, Expand);
958         setOperationAction(ISD::SRL, MVT::v2i64, Expand);
959 
960         setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
961 
962         // VSX v2i64 only supports non-arithmetic operations.
963         setOperationAction(ISD::ADD, MVT::v2i64, Expand);
964         setOperationAction(ISD::SUB, MVT::v2i64, Expand);
965       }
966 
967       setOperationAction(ISD::SETCC, MVT::v1i128, Expand);
968 
969       setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
970       AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64);
971       setOperationAction(ISD::STORE, MVT::v2i64, Promote);
972       AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64);
973 
974       setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);
975 
976       setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
977       setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
978       setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
979       setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
980 
981       // Custom handling for partial vectors of integers converted to
982       // floating point. We already have optimal handling for v2i32 through
983       // the DAG combine, so those aren't necessary.
984       setOperationAction(ISD::UINT_TO_FP, MVT::v2i8, Custom);
985       setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
986       setOperationAction(ISD::UINT_TO_FP, MVT::v2i16, Custom);
987       setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
988       setOperationAction(ISD::SINT_TO_FP, MVT::v2i8, Custom);
989       setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Custom);
990       setOperationAction(ISD::SINT_TO_FP, MVT::v2i16, Custom);
991       setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
992 
993       setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
994       setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
995       setOperationAction(ISD::FABS, MVT::v4f32, Legal);
996       setOperationAction(ISD::FABS, MVT::v2f64, Legal);
997       setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
998       setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Legal);
999 
1000       if (Subtarget.hasDirectMove())
1001         setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
1002       setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
1003 
1004       // Handle constrained floating-point operations of vector.
1005       // The predictor is `hasVSX` because altivec instruction has
1006       // no exception but VSX vector instruction has.
1007       setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
1008       setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
1009       setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
1010       setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
1011       setOperationAction(ISD::STRICT_FMA, MVT::v4f32, Legal);
1012       setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
1013       setOperationAction(ISD::STRICT_FMAXNUM, MVT::v4f32, Legal);
1014       setOperationAction(ISD::STRICT_FMINNUM, MVT::v4f32, Legal);
1015       setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v4f32, Legal);
1016       setOperationAction(ISD::STRICT_FFLOOR, MVT::v4f32, Legal);
1017       setOperationAction(ISD::STRICT_FCEIL,  MVT::v4f32, Legal);
1018       setOperationAction(ISD::STRICT_FTRUNC, MVT::v4f32, Legal);
1019       setOperationAction(ISD::STRICT_FROUND, MVT::v4f32, Legal);
1020 
1021       setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
1022       setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
1023       setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
1024       setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
1025       setOperationAction(ISD::STRICT_FMA, MVT::v2f64, Legal);
1026       setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
1027       setOperationAction(ISD::STRICT_FMAXNUM, MVT::v2f64, Legal);
1028       setOperationAction(ISD::STRICT_FMINNUM, MVT::v2f64, Legal);
1029       setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v2f64, Legal);
1030       setOperationAction(ISD::STRICT_FFLOOR, MVT::v2f64, Legal);
1031       setOperationAction(ISD::STRICT_FCEIL,  MVT::v2f64, Legal);
1032       setOperationAction(ISD::STRICT_FTRUNC, MVT::v2f64, Legal);
1033       setOperationAction(ISD::STRICT_FROUND, MVT::v2f64, Legal);
1034 
1035       addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
1036     }
1037 
1038     if (Subtarget.hasP8Altivec()) {
1039       addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
1040       addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
1041     }
1042 
1043     if (Subtarget.hasP9Vector()) {
1044       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
1045       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
1046 
1047       // 128 bit shifts can be accomplished via 3 instructions for SHL and
1048       // SRL, but not for SRA because of the instructions available:
1049       // VS{RL} and VS{RL}O.
1050       setOperationAction(ISD::SHL, MVT::v1i128, Legal);
1051       setOperationAction(ISD::SRL, MVT::v1i128, Legal);
1052       setOperationAction(ISD::SRA, MVT::v1i128, Expand);
1053 
1054       addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
1055       setOperationAction(ISD::FADD, MVT::f128, Legal);
1056       setOperationAction(ISD::FSUB, MVT::f128, Legal);
1057       setOperationAction(ISD::FDIV, MVT::f128, Legal);
1058       setOperationAction(ISD::FMUL, MVT::f128, Legal);
1059       setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
1060       // No extending loads to f128 on PPC.
1061       for (MVT FPT : MVT::fp_valuetypes())
1062         setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
1063       setOperationAction(ISD::FMA, MVT::f128, Legal);
1064       setCondCodeAction(ISD::SETULT, MVT::f128, Expand);
1065       setCondCodeAction(ISD::SETUGT, MVT::f128, Expand);
1066       setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand);
1067       setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
1068       setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
1069       setCondCodeAction(ISD::SETONE, MVT::f128, Expand);
1070 
1071       setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
1072       setOperationAction(ISD::FRINT, MVT::f128, Legal);
1073       setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
1074       setOperationAction(ISD::FCEIL, MVT::f128, Legal);
1075       setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
1076       setOperationAction(ISD::FROUND, MVT::f128, Legal);
1077 
1078       setOperationAction(ISD::SELECT, MVT::f128, Expand);
1079       setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
1080       setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
1081       setTruncStoreAction(MVT::f128, MVT::f64, Expand);
1082       setTruncStoreAction(MVT::f128, MVT::f32, Expand);
1083       setOperationAction(ISD::BITCAST, MVT::i128, Custom);
1084       // No implementation for these ops for PowerPC.
1085       setOperationAction(ISD::FSIN, MVT::f128, Expand);
1086       setOperationAction(ISD::FCOS, MVT::f128, Expand);
1087       setOperationAction(ISD::FPOW, MVT::f128, Expand);
1088       setOperationAction(ISD::FPOWI, MVT::f128, Expand);
1089       setOperationAction(ISD::FREM, MVT::f128, Expand);
1090 
1091       // Handle constrained floating-point operations of fp128
1092       setOperationAction(ISD::STRICT_FADD, MVT::f128, Legal);
1093       setOperationAction(ISD::STRICT_FSUB, MVT::f128, Legal);
1094       setOperationAction(ISD::STRICT_FMUL, MVT::f128, Legal);
1095       setOperationAction(ISD::STRICT_FDIV, MVT::f128, Legal);
1096       setOperationAction(ISD::STRICT_FMA, MVT::f128, Legal);
1097       setOperationAction(ISD::STRICT_FSQRT, MVT::f128, Legal);
1098       setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Legal);
1099       setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Legal);
1100       setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
1101       setOperationAction(ISD::STRICT_FRINT, MVT::f128, Legal);
1102       setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f128, Legal);
1103       setOperationAction(ISD::STRICT_FFLOOR, MVT::f128, Legal);
1104       setOperationAction(ISD::STRICT_FCEIL, MVT::f128, Legal);
1105       setOperationAction(ISD::STRICT_FTRUNC, MVT::f128, Legal);
1106       setOperationAction(ISD::STRICT_FROUND, MVT::f128, Legal);
1107       setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
1108       setOperationAction(ISD::BSWAP, MVT::v8i16, Legal);
1109       setOperationAction(ISD::BSWAP, MVT::v4i32, Legal);
1110       setOperationAction(ISD::BSWAP, MVT::v2i64, Legal);
1111       setOperationAction(ISD::BSWAP, MVT::v1i128, Legal);
1112     }
1113 
1114     if (Subtarget.hasP9Altivec()) {
1115       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
1116       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
1117 
1118       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8,  Legal);
1119       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
1120       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
1121       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8,  Legal);
1122       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal);
1123       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
1124       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
1125     }
1126   }
1127 
1128   if (Subtarget.has64BitSupport())
1129     setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
1130 
1131   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);
1132 
1133   if (!isPPC64) {
1134     setOperationAction(ISD::ATOMIC_LOAD,  MVT::i64, Expand);
1135     setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
1136   }
1137 
1138   setBooleanContents(ZeroOrOneBooleanContent);
1139 
1140   if (Subtarget.hasAltivec()) {
1141     // Altivec instructions set fields to all zeros or all ones.
1142     setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
1143   }
1144 
1145   if (!isPPC64) {
1146     // These libcalls are not available in 32-bit.
1147     setLibcallName(RTLIB::SHL_I128, nullptr);
1148     setLibcallName(RTLIB::SRL_I128, nullptr);
1149     setLibcallName(RTLIB::SRA_I128, nullptr);
1150   }
1151 
1152   setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);
1153 
1154   // We have target-specific dag combine patterns for the following nodes:
1155   setTargetDAGCombine(ISD::ADD);
1156   setTargetDAGCombine(ISD::SHL);
1157   setTargetDAGCombine(ISD::SRA);
1158   setTargetDAGCombine(ISD::SRL);
1159   setTargetDAGCombine(ISD::MUL);
1160   setTargetDAGCombine(ISD::FMA);
1161   setTargetDAGCombine(ISD::SINT_TO_FP);
1162   setTargetDAGCombine(ISD::BUILD_VECTOR);
1163   if (Subtarget.hasFPCVT())
1164     setTargetDAGCombine(ISD::UINT_TO_FP);
1165   setTargetDAGCombine(ISD::LOAD);
1166   setTargetDAGCombine(ISD::STORE);
1167   setTargetDAGCombine(ISD::BR_CC);
1168   if (Subtarget.useCRBits())
1169     setTargetDAGCombine(ISD::BRCOND);
1170   setTargetDAGCombine(ISD::BSWAP);
1171   setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1172   setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
1173   setTargetDAGCombine(ISD::INTRINSIC_VOID);
1174 
1175   setTargetDAGCombine(ISD::SIGN_EXTEND);
1176   setTargetDAGCombine(ISD::ZERO_EXTEND);
1177   setTargetDAGCombine(ISD::ANY_EXTEND);
1178 
1179   setTargetDAGCombine(ISD::TRUNCATE);
1180   setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1181 
1182 
1183   if (Subtarget.useCRBits()) {
1184     setTargetDAGCombine(ISD::TRUNCATE);
1185     setTargetDAGCombine(ISD::SETCC);
1186     setTargetDAGCombine(ISD::SELECT_CC);
1187   }
1188 
1189   // Use reciprocal estimates.
1190   if (TM.Options.UnsafeFPMath) {
1191     setTargetDAGCombine(ISD::FDIV);
1192     setTargetDAGCombine(ISD::FSQRT);
1193   }
1194 
1195   if (Subtarget.hasP9Altivec()) {
1196     setTargetDAGCombine(ISD::ABS);
1197     setTargetDAGCombine(ISD::VSELECT);
1198   }
1199 
1200   setLibcallName(RTLIB::LOG_F128, "logf128");
1201   setLibcallName(RTLIB::LOG2_F128, "log2f128");
1202   setLibcallName(RTLIB::LOG10_F128, "log10f128");
1203   setLibcallName(RTLIB::EXP_F128, "expf128");
1204   setLibcallName(RTLIB::EXP2_F128, "exp2f128");
1205   setLibcallName(RTLIB::SIN_F128, "sinf128");
1206   setLibcallName(RTLIB::COS_F128, "cosf128");
1207   setLibcallName(RTLIB::POW_F128, "powf128");
1208   setLibcallName(RTLIB::FMIN_F128, "fminf128");
1209   setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
1210   setLibcallName(RTLIB::POWI_F128, "__powikf2");
1211   setLibcallName(RTLIB::REM_F128, "fmodf128");
1212 
1213   // With 32 condition bits, we don't need to sink (and duplicate) compares
1214   // aggressively in CodeGenPrep.
1215   if (Subtarget.useCRBits()) {
1216     setHasMultipleConditionRegisters();
1217     setJumpIsExpensive();
1218   }
1219 
1220   setMinFunctionAlignment(Align(4));
1221 
1222   switch (Subtarget.getCPUDirective()) {
1223   default: break;
1224   case PPC::DIR_970:
1225   case PPC::DIR_A2:
1226   case PPC::DIR_E500:
1227   case PPC::DIR_E500mc:
1228   case PPC::DIR_E5500:
1229   case PPC::DIR_PWR4:
1230   case PPC::DIR_PWR5:
1231   case PPC::DIR_PWR5X:
1232   case PPC::DIR_PWR6:
1233   case PPC::DIR_PWR6X:
1234   case PPC::DIR_PWR7:
1235   case PPC::DIR_PWR8:
1236   case PPC::DIR_PWR9:
1237   case PPC::DIR_PWR10:
1238   case PPC::DIR_PWR_FUTURE:
1239     setPrefLoopAlignment(Align(16));
1240     setPrefFunctionAlignment(Align(16));
1241     break;
1242   }
1243 
1244   if (Subtarget.enableMachineScheduler())
1245     setSchedulingPreference(Sched::Source);
1246   else
1247     setSchedulingPreference(Sched::Hybrid);
1248 
1249   computeRegisterProperties(STI.getRegisterInfo());
1250 
1251   // The Freescale cores do better with aggressive inlining of memcpy and
1252   // friends. GCC uses same threshold of 128 bytes (= 32 word stores).
1253   if (Subtarget.getCPUDirective() == PPC::DIR_E500mc ||
1254       Subtarget.getCPUDirective() == PPC::DIR_E5500) {
1255     MaxStoresPerMemset = 32;
1256     MaxStoresPerMemsetOptSize = 16;
1257     MaxStoresPerMemcpy = 32;
1258     MaxStoresPerMemcpyOptSize = 8;
1259     MaxStoresPerMemmove = 32;
1260     MaxStoresPerMemmoveOptSize = 8;
1261   } else if (Subtarget.getCPUDirective() == PPC::DIR_A2) {
1262     // The A2 also benefits from (very) aggressive inlining of memcpy and
1263     // friends. The overhead of a the function call, even when warm, can be
1264     // over one hundred cycles.
1265     MaxStoresPerMemset = 128;
1266     MaxStoresPerMemcpy = 128;
1267     MaxStoresPerMemmove = 128;
1268     MaxLoadsPerMemcmp = 128;
1269   } else {
1270     MaxLoadsPerMemcmp = 8;
1271     MaxLoadsPerMemcmpOptSize = 4;
1272   }
1273 
1274   // Let the subtarget (CPU) decide if a predictable select is more expensive
1275   // than the corresponding branch. This information is used in CGP to decide
1276   // when to convert selects into branches.
1277   PredictableSelectIsExpensive = Subtarget.isPredictableSelectIsExpensive();
1278 }
1279 
1280 /// getMaxByValAlign - Helper for getByValTypeAlignment to determine
1281 /// the desired ByVal argument alignment.
1282 static void getMaxByValAlign(Type *Ty, Align &MaxAlign, Align MaxMaxAlign) {
1283   if (MaxAlign == MaxMaxAlign)
1284     return;
1285   if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1286     if (MaxMaxAlign >= 32 &&
1287         VTy->getPrimitiveSizeInBits().getFixedSize() >= 256)
1288       MaxAlign = Align(32);
1289     else if (VTy->getPrimitiveSizeInBits().getFixedSize() >= 128 &&
1290              MaxAlign < 16)
1291       MaxAlign = Align(16);
1292   } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1293     Align EltAlign;
1294     getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
1295     if (EltAlign > MaxAlign)
1296       MaxAlign = EltAlign;
1297   } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1298     for (auto *EltTy : STy->elements()) {
1299       Align EltAlign;
1300       getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
1301       if (EltAlign > MaxAlign)
1302         MaxAlign = EltAlign;
1303       if (MaxAlign == MaxMaxAlign)
1304         break;
1305     }
1306   }
1307 }
1308 
1309 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1310 /// function arguments in the caller parameter area.
1311 unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
1312                                                   const DataLayout &DL) const {
1313   // 16byte and wider vectors are passed on 16byte boundary.
1314   // The rest is 8 on PPC64 and 4 on PPC32 boundary.
1315   Align Alignment = Subtarget.isPPC64() ? Align(8) : Align(4);
1316   if (Subtarget.hasAltivec())
1317     getMaxByValAlign(Ty, Alignment, Align(16));
1318   return Alignment.value();
1319 }
1320 
1321 bool PPCTargetLowering::useSoftFloat() const {
1322   return Subtarget.useSoftFloat();
1323 }
1324 
1325 bool PPCTargetLowering::hasSPE() const {
1326   return Subtarget.hasSPE();
1327 }
1328 
1329 bool PPCTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
1330   return VT.isScalarInteger();
1331 }
1332 
1333 /// isMulhCheaperThanMulShift - Return true if a mulh[s|u] node for a specific
1334 /// type is cheaper than a multiply followed by a shift.
1335 /// This is true for words and doublewords on 64-bit PowerPC.
1336 bool PPCTargetLowering::isMulhCheaperThanMulShift(EVT Type) const {
1337   if (Subtarget.isPPC64() && (isOperationLegal(ISD::MULHS, Type) ||
1338                               isOperationLegal(ISD::MULHU, Type)))
1339     return true;
1340   return TargetLowering::isMulhCheaperThanMulShift(Type);
1341 }
1342 
1343 const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
1344   switch ((PPCISD::NodeType)Opcode) {
1345   case PPCISD::FIRST_NUMBER:    break;
1346   case PPCISD::FSEL:            return "PPCISD::FSEL";
1347   case PPCISD::XSMAXCDP:        return "PPCISD::XSMAXCDP";
1348   case PPCISD::XSMINCDP:        return "PPCISD::XSMINCDP";
1349   case PPCISD::FCFID:           return "PPCISD::FCFID";
1350   case PPCISD::FCFIDU:          return "PPCISD::FCFIDU";
1351   case PPCISD::FCFIDS:          return "PPCISD::FCFIDS";
1352   case PPCISD::FCFIDUS:         return "PPCISD::FCFIDUS";
1353   case PPCISD::FCTIDZ:          return "PPCISD::FCTIDZ";
1354   case PPCISD::FCTIWZ:          return "PPCISD::FCTIWZ";
1355   case PPCISD::FCTIDUZ:         return "PPCISD::FCTIDUZ";
1356   case PPCISD::FCTIWUZ:         return "PPCISD::FCTIWUZ";
1357   case PPCISD::FP_TO_UINT_IN_VSR:
1358                                 return "PPCISD::FP_TO_UINT_IN_VSR,";
1359   case PPCISD::FP_TO_SINT_IN_VSR:
1360                                 return "PPCISD::FP_TO_SINT_IN_VSR";
1361   case PPCISD::FRE:             return "PPCISD::FRE";
1362   case PPCISD::FRSQRTE:         return "PPCISD::FRSQRTE";
1363   case PPCISD::STFIWX:          return "PPCISD::STFIWX";
1364   case PPCISD::VPERM:           return "PPCISD::VPERM";
1365   case PPCISD::XXSPLT:          return "PPCISD::XXSPLT";
1366   case PPCISD::XXSPLTI_SP_TO_DP:
1367     return "PPCISD::XXSPLTI_SP_TO_DP";
1368   case PPCISD::XXSPLTI32DX:
1369     return "PPCISD::XXSPLTI32DX";
1370   case PPCISD::VECINSERT:       return "PPCISD::VECINSERT";
1371   case PPCISD::XXPERMDI:        return "PPCISD::XXPERMDI";
1372   case PPCISD::VECSHL:          return "PPCISD::VECSHL";
1373   case PPCISD::CMPB:            return "PPCISD::CMPB";
1374   case PPCISD::Hi:              return "PPCISD::Hi";
1375   case PPCISD::Lo:              return "PPCISD::Lo";
1376   case PPCISD::TOC_ENTRY:       return "PPCISD::TOC_ENTRY";
1377   case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
1378   case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
1379   case PPCISD::DYNALLOC:        return "PPCISD::DYNALLOC";
1380   case PPCISD::DYNAREAOFFSET:   return "PPCISD::DYNAREAOFFSET";
1381   case PPCISD::PROBED_ALLOCA:   return "PPCISD::PROBED_ALLOCA";
1382   case PPCISD::GlobalBaseReg:   return "PPCISD::GlobalBaseReg";
1383   case PPCISD::SRL:             return "PPCISD::SRL";
1384   case PPCISD::SRA:             return "PPCISD::SRA";
1385   case PPCISD::SHL:             return "PPCISD::SHL";
1386   case PPCISD::SRA_ADDZE:       return "PPCISD::SRA_ADDZE";
1387   case PPCISD::CALL:            return "PPCISD::CALL";
1388   case PPCISD::CALL_NOP:        return "PPCISD::CALL_NOP";
1389   case PPCISD::CALL_NOTOC:      return "PPCISD::CALL_NOTOC";
1390   case PPCISD::MTCTR:           return "PPCISD::MTCTR";
1391   case PPCISD::BCTRL:           return "PPCISD::BCTRL";
1392   case PPCISD::BCTRL_LOAD_TOC:  return "PPCISD::BCTRL_LOAD_TOC";
1393   case PPCISD::RET_FLAG:        return "PPCISD::RET_FLAG";
1394   case PPCISD::READ_TIME_BASE:  return "PPCISD::READ_TIME_BASE";
1395   case PPCISD::EH_SJLJ_SETJMP:  return "PPCISD::EH_SJLJ_SETJMP";
1396   case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
1397   case PPCISD::MFOCRF:          return "PPCISD::MFOCRF";
1398   case PPCISD::MFVSR:           return "PPCISD::MFVSR";
1399   case PPCISD::MTVSRA:          return "PPCISD::MTVSRA";
1400   case PPCISD::MTVSRZ:          return "PPCISD::MTVSRZ";
1401   case PPCISD::SINT_VEC_TO_FP:  return "PPCISD::SINT_VEC_TO_FP";
1402   case PPCISD::UINT_VEC_TO_FP:  return "PPCISD::UINT_VEC_TO_FP";
1403   case PPCISD::SCALAR_TO_VECTOR_PERMUTED:
1404     return "PPCISD::SCALAR_TO_VECTOR_PERMUTED";
1405   case PPCISD::ANDI_rec_1_EQ_BIT:
1406     return "PPCISD::ANDI_rec_1_EQ_BIT";
1407   case PPCISD::ANDI_rec_1_GT_BIT:
1408     return "PPCISD::ANDI_rec_1_GT_BIT";
1409   case PPCISD::VCMP:            return "PPCISD::VCMP";
1410   case PPCISD::VCMPo:           return "PPCISD::VCMPo";
1411   case PPCISD::LBRX:            return "PPCISD::LBRX";
1412   case PPCISD::STBRX:           return "PPCISD::STBRX";
1413   case PPCISD::LFIWAX:          return "PPCISD::LFIWAX";
1414   case PPCISD::LFIWZX:          return "PPCISD::LFIWZX";
1415   case PPCISD::LXSIZX:          return "PPCISD::LXSIZX";
1416   case PPCISD::STXSIX:          return "PPCISD::STXSIX";
1417   case PPCISD::VEXTS:           return "PPCISD::VEXTS";
1418   case PPCISD::LXVD2X:          return "PPCISD::LXVD2X";
1419   case PPCISD::STXVD2X:         return "PPCISD::STXVD2X";
1420   case PPCISD::LOAD_VEC_BE:     return "PPCISD::LOAD_VEC_BE";
1421   case PPCISD::STORE_VEC_BE:    return "PPCISD::STORE_VEC_BE";
1422   case PPCISD::ST_VSR_SCAL_INT:
1423                                 return "PPCISD::ST_VSR_SCAL_INT";
1424   case PPCISD::COND_BRANCH:     return "PPCISD::COND_BRANCH";
1425   case PPCISD::BDNZ:            return "PPCISD::BDNZ";
1426   case PPCISD::BDZ:             return "PPCISD::BDZ";
1427   case PPCISD::MFFS:            return "PPCISD::MFFS";
1428   case PPCISD::FADDRTZ:         return "PPCISD::FADDRTZ";
1429   case PPCISD::TC_RETURN:       return "PPCISD::TC_RETURN";
1430   case PPCISD::CR6SET:          return "PPCISD::CR6SET";
1431   case PPCISD::CR6UNSET:        return "PPCISD::CR6UNSET";
1432   case PPCISD::PPC32_GOT:       return "PPCISD::PPC32_GOT";
1433   case PPCISD::PPC32_PICGOT:    return "PPCISD::PPC32_PICGOT";
1434   case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
1435   case PPCISD::LD_GOT_TPREL_L:  return "PPCISD::LD_GOT_TPREL_L";
1436   case PPCISD::ADD_TLS:         return "PPCISD::ADD_TLS";
1437   case PPCISD::ADDIS_TLSGD_HA:  return "PPCISD::ADDIS_TLSGD_HA";
1438   case PPCISD::ADDI_TLSGD_L:    return "PPCISD::ADDI_TLSGD_L";
1439   case PPCISD::GET_TLS_ADDR:    return "PPCISD::GET_TLS_ADDR";
1440   case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
1441   case PPCISD::ADDIS_TLSLD_HA:  return "PPCISD::ADDIS_TLSLD_HA";
1442   case PPCISD::ADDI_TLSLD_L:    return "PPCISD::ADDI_TLSLD_L";
1443   case PPCISD::GET_TLSLD_ADDR:  return "PPCISD::GET_TLSLD_ADDR";
1444   case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
1445   case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
1446   case PPCISD::ADDI_DTPREL_L:   return "PPCISD::ADDI_DTPREL_L";
1447   case PPCISD::VADD_SPLAT:      return "PPCISD::VADD_SPLAT";
1448   case PPCISD::SC:              return "PPCISD::SC";
1449   case PPCISD::CLRBHRB:         return "PPCISD::CLRBHRB";
1450   case PPCISD::MFBHRBE:         return "PPCISD::MFBHRBE";
1451   case PPCISD::RFEBB:           return "PPCISD::RFEBB";
1452   case PPCISD::XXSWAPD:         return "PPCISD::XXSWAPD";
1453   case PPCISD::SWAP_NO_CHAIN:   return "PPCISD::SWAP_NO_CHAIN";
1454   case PPCISD::VABSD:           return "PPCISD::VABSD";
1455   case PPCISD::BUILD_FP128:     return "PPCISD::BUILD_FP128";
1456   case PPCISD::BUILD_SPE64:     return "PPCISD::BUILD_SPE64";
1457   case PPCISD::EXTRACT_SPE:     return "PPCISD::EXTRACT_SPE";
1458   case PPCISD::EXTSWSLI:        return "PPCISD::EXTSWSLI";
1459   case PPCISD::LD_VSX_LH:       return "PPCISD::LD_VSX_LH";
1460   case PPCISD::FP_EXTEND_HALF:  return "PPCISD::FP_EXTEND_HALF";
1461   case PPCISD::MAT_PCREL_ADDR:  return "PPCISD::MAT_PCREL_ADDR";
1462   case PPCISD::LD_SPLAT:        return "PPCISD::LD_SPLAT";
1463   case PPCISD::FNMSUB:          return "PPCISD::FNMSUB";
1464   }
1465   return nullptr;
1466 }
1467 
1468 EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
1469                                           EVT VT) const {
1470   if (!VT.isVector())
1471     return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;
1472 
1473   return VT.changeVectorElementTypeToInteger();
1474 }
1475 
1476 bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
1477   assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
1478   return true;
1479 }
1480 
1481 //===----------------------------------------------------------------------===//
1482 // Node matching predicates, for use by the tblgen matching code.
1483 //===----------------------------------------------------------------------===//
1484 
1485 /// isFloatingPointZero - Return true if this is 0.0 or -0.0.
1486 static bool isFloatingPointZero(SDValue Op) {
1487   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
1488     return CFP->getValueAPF().isZero();
1489   else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
1490     // Maybe this has already been legalized into the constant pool?
1491     if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
1492       if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
1493         return CFP->getValueAPF().isZero();
1494   }
1495   return false;
1496 }
1497 
1498 /// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
1499 /// true if Op is undef or if it matches the specified value.
1500 static bool isConstantOrUndef(int Op, int Val) {
1501   return Op < 0 || Op == Val;
1502 }
1503 
1504 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
1505 /// VPKUHUM instruction.
1506 /// The ShuffleKind distinguishes between big-endian operations with
1507 /// two different inputs (0), either-endian operations with two identical
1508 /// inputs (1), and little-endian operations with two different inputs (2).
1509 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1510 bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1511                                SelectionDAG &DAG) {
1512   bool IsLE = DAG.getDataLayout().isLittleEndian();
1513   if (ShuffleKind == 0) {
1514     if (IsLE)
1515       return false;
1516     for (unsigned i = 0; i != 16; ++i)
1517       if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
1518         return false;
1519   } else if (ShuffleKind == 2) {
1520     if (!IsLE)
1521       return false;
1522     for (unsigned i = 0; i != 16; ++i)
1523       if (!isConstantOrUndef(N->getMaskElt(i), i*2))
1524         return false;
1525   } else if (ShuffleKind == 1) {
1526     unsigned j = IsLE ? 0 : 1;
1527     for (unsigned i = 0; i != 8; ++i)
1528       if (!isConstantOrUndef(N->getMaskElt(i),    i*2+j) ||
1529           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j))
1530         return false;
1531   }
1532   return true;
1533 }
1534 
1535 /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
1536 /// VPKUWUM instruction.
1537 /// The ShuffleKind distinguishes between big-endian operations with
1538 /// two different inputs (0), either-endian operations with two identical
1539 /// inputs (1), and little-endian operations with two different inputs (2).
1540 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1541 bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1542                                SelectionDAG &DAG) {
1543   bool IsLE = DAG.getDataLayout().isLittleEndian();
1544   if (ShuffleKind == 0) {
1545     if (IsLE)
1546       return false;
1547     for (unsigned i = 0; i != 16; i += 2)
1548       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+2) ||
1549           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+3))
1550         return false;
1551   } else if (ShuffleKind == 2) {
1552     if (!IsLE)
1553       return false;
1554     for (unsigned i = 0; i != 16; i += 2)
1555       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
1556           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1))
1557         return false;
1558   } else if (ShuffleKind == 1) {
1559     unsigned j = IsLE ? 0 : 2;
1560     for (unsigned i = 0; i != 8; i += 2)
1561       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
1562           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
1563           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
1564           !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1))
1565         return false;
1566   }
1567   return true;
1568 }
1569 
1570 /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
1571 /// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
1572 /// current subtarget.
1573 ///
1574 /// The ShuffleKind distinguishes between big-endian operations with
1575 /// two different inputs (0), either-endian operations with two identical
1576 /// inputs (1), and little-endian operations with two different inputs (2).
1577 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1578 bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1579                                SelectionDAG &DAG) {
1580   const PPCSubtarget& Subtarget =
1581       static_cast<const PPCSubtarget&>(DAG.getSubtarget());
1582   if (!Subtarget.hasP8Vector())
1583     return false;
1584 
1585   bool IsLE = DAG.getDataLayout().isLittleEndian();
1586   if (ShuffleKind == 0) {
1587     if (IsLE)
1588       return false;
1589     for (unsigned i = 0; i != 16; i += 4)
1590       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+4) ||
1591           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+5) ||
1592           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+6) ||
1593           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+7))
1594         return false;
1595   } else if (ShuffleKind == 2) {
1596     if (!IsLE)
1597       return false;
1598     for (unsigned i = 0; i != 16; i += 4)
1599       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
1600           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1) ||
1601           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+2) ||
1602           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+3))
1603         return false;
1604   } else if (ShuffleKind == 1) {
1605     unsigned j = IsLE ? 0 : 4;
1606     for (unsigned i = 0; i != 8; i += 4)
1607       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
1608           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
1609           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+j+2) ||
1610           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+j+3) ||
1611           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
1612           !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1) ||
1613           !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
1614           !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
1615         return false;
1616   }
1617   return true;
1618 }
1619 
1620 /// isVMerge - Common function, used to match vmrg* shuffles.
1621 ///
1622 static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
1623                      unsigned LHSStart, unsigned RHSStart) {
1624   if (N->getValueType(0) != MVT::v16i8)
1625     return false;
1626   assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
1627          "Unsupported merge size!");
1628 
1629   for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
1630     for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
1631       if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
1632                              LHSStart+j+i*UnitSize) ||
1633           !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
1634                              RHSStart+j+i*UnitSize))
1635         return false;
1636     }
1637   return true;
1638 }
1639 
1640 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
1641 /// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
1642 /// The ShuffleKind distinguishes between big-endian merges with two
1643 /// different inputs (0), either-endian merges with two identical inputs (1),
1644 /// and little-endian merges with two different inputs (2).  For the latter,
1645 /// the input operands are swapped (see PPCInstrAltivec.td).
1646 bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1647                              unsigned ShuffleKind, SelectionDAG &DAG) {
1648   if (DAG.getDataLayout().isLittleEndian()) {
1649     if (ShuffleKind == 1) // unary
1650       return isVMerge(N, UnitSize, 0, 0);
1651     else if (ShuffleKind == 2) // swapped
1652       return isVMerge(N, UnitSize, 0, 16);
1653     else
1654       return false;
1655   } else {
1656     if (ShuffleKind == 1) // unary
1657       return isVMerge(N, UnitSize, 8, 8);
1658     else if (ShuffleKind == 0) // normal
1659       return isVMerge(N, UnitSize, 8, 24);
1660     else
1661       return false;
1662   }
1663 }
1664 
1665 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
1666 /// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
1667 /// The ShuffleKind distinguishes between big-endian merges with two
1668 /// different inputs (0), either-endian merges with two identical inputs (1),
1669 /// and little-endian merges with two different inputs (2).  For the latter,
1670 /// the input operands are swapped (see PPCInstrAltivec.td).
1671 bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1672                              unsigned ShuffleKind, SelectionDAG &DAG) {
1673   if (DAG.getDataLayout().isLittleEndian()) {
1674     if (ShuffleKind == 1) // unary
1675       return isVMerge(N, UnitSize, 8, 8);
1676     else if (ShuffleKind == 2) // swapped
1677       return isVMerge(N, UnitSize, 8, 24);
1678     else
1679       return false;
1680   } else {
1681     if (ShuffleKind == 1) // unary
1682       return isVMerge(N, UnitSize, 0, 0);
1683     else if (ShuffleKind == 0) // normal
1684       return isVMerge(N, UnitSize, 0, 16);
1685     else
1686       return false;
1687   }
1688 }
1689 
1690 /**
1691  * Common function used to match vmrgew and vmrgow shuffles
1692  *
1693  * The indexOffset determines whether to look for even or odd words in
1694  * the shuffle mask. This is based on the of the endianness of the target
1695  * machine.
1696  *   - Little Endian:
1697  *     - Use offset of 0 to check for odd elements
1698  *     - Use offset of 4 to check for even elements
1699  *   - Big Endian:
1700  *     - Use offset of 0 to check for even elements
1701  *     - Use offset of 4 to check for odd elements
1702  * A detailed description of the vector element ordering for little endian and
1703  * big endian can be found at
1704  * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
1705  * Targeting your applications - what little endian and big endian IBM XL C/C++
1706  * compiler differences mean to you
1707  *
1708  * The mask to the shuffle vector instruction specifies the indices of the
1709  * elements from the two input vectors to place in the result. The elements are
1710  * numbered in array-access order, starting with the first vector. These vectors
1711  * are always of type v16i8, thus each vector will contain 16 elements of size
1712  * 8. More info on the shuffle vector can be found in the
1713  * http://llvm.org/docs/LangRef.html#shufflevector-instruction
1714  * Language Reference.
1715  *
1716  * The RHSStartValue indicates whether the same input vectors are used (unary)
1717  * or two different input vectors are used, based on the following:
1718  *   - If the instruction uses the same vector for both inputs, the range of the
1719  *     indices will be 0 to 15. In this case, the RHSStart value passed should
1720  *     be 0.
1721  *   - If the instruction has two different vectors then the range of the
1722  *     indices will be 0 to 31. In this case, the RHSStart value passed should
1723  *     be 16 (indices 0-15 specify elements in the first vector while indices 16
1724  *     to 31 specify elements in the second vector).
1725  *
1726  * \param[in] N The shuffle vector SD Node to analyze
1727  * \param[in] IndexOffset Specifies whether to look for even or odd elements
1728  * \param[in] RHSStartValue Specifies the starting index for the righthand input
1729  * vector to the shuffle_vector instruction
1730  * \return true iff this shuffle vector represents an even or odd word merge
1731  */
1732 static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
1733                      unsigned RHSStartValue) {
1734   if (N->getValueType(0) != MVT::v16i8)
1735     return false;
1736 
1737   for (unsigned i = 0; i < 2; ++i)
1738     for (unsigned j = 0; j < 4; ++j)
1739       if (!isConstantOrUndef(N->getMaskElt(i*4+j),
1740                              i*RHSStartValue+j+IndexOffset) ||
1741           !isConstantOrUndef(N->getMaskElt(i*4+j+8),
1742                              i*RHSStartValue+j+IndexOffset+8))
1743         return false;
1744   return true;
1745 }
1746 
1747 /**
1748  * Determine if the specified shuffle mask is suitable for the vmrgew or
1749  * vmrgow instructions.
1750  *
1751  * \param[in] N The shuffle vector SD Node to analyze
1752  * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
1753  * \param[in] ShuffleKind Identify the type of merge:
1754  *   - 0 = big-endian merge with two different inputs;
1755  *   - 1 = either-endian merge with two identical inputs;
1756  *   - 2 = little-endian merge with two different inputs (inputs are swapped for
1757  *     little-endian merges).
1758  * \param[in] DAG The current SelectionDAG
1759  * \return true iff this shuffle mask
1760  */
1761 bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
1762                               unsigned ShuffleKind, SelectionDAG &DAG) {
1763   if (DAG.getDataLayout().isLittleEndian()) {
1764     unsigned indexOffset = CheckEven ? 4 : 0;
1765     if (ShuffleKind == 1) // Unary
1766       return isVMerge(N, indexOffset, 0);
1767     else if (ShuffleKind == 2) // swapped
1768       return isVMerge(N, indexOffset, 16);
1769     else
1770       return false;
1771   }
1772   else {
1773     unsigned indexOffset = CheckEven ? 0 : 4;
1774     if (ShuffleKind == 1) // Unary
1775       return isVMerge(N, indexOffset, 0);
1776     else if (ShuffleKind == 0) // Normal
1777       return isVMerge(N, indexOffset, 16);
1778     else
1779       return false;
1780   }
1781   return false;
1782 }
1783 
1784 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
1785 /// amount, otherwise return -1.
1786 /// The ShuffleKind distinguishes between big-endian operations with two
1787 /// different inputs (0), either-endian operations with two identical inputs
1788 /// (1), and little-endian operations with two different inputs (2).  For the
1789 /// latter, the input operands are swapped (see PPCInstrAltivec.td).
1790 int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
1791                              SelectionDAG &DAG) {
1792   if (N->getValueType(0) != MVT::v16i8)
1793     return -1;
1794 
1795   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1796 
1797   // Find the first non-undef value in the shuffle mask.
1798   unsigned i;
1799   for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
1800     /*search*/;
1801 
1802   if (i == 16) return -1;  // all undef.
1803 
1804   // Otherwise, check to see if the rest of the elements are consecutively
1805   // numbered from this value.
1806   unsigned ShiftAmt = SVOp->getMaskElt(i);
1807   if (ShiftAmt < i) return -1;
1808 
1809   ShiftAmt -= i;
1810   bool isLE = DAG.getDataLayout().isLittleEndian();
1811 
1812   if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
1813     // Check the rest of the elements to see if they are consecutive.
1814     for (++i; i != 16; ++i)
1815       if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
1816         return -1;
1817   } else if (ShuffleKind == 1) {
1818     // Check the rest of the elements to see if they are consecutive.
1819     for (++i; i != 16; ++i)
1820       if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
1821         return -1;
1822   } else
1823     return -1;
1824 
1825   if (isLE)
1826     ShiftAmt = 16 - ShiftAmt;
1827 
1828   return ShiftAmt;
1829 }
1830 
1831 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
1832 /// specifies a splat of a single element that is suitable for input to
1833 /// one of the splat operations (VSPLTB/VSPLTH/VSPLTW/XXSPLTW/LXVDSX/etc.).
1834 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
1835   assert(N->getValueType(0) == MVT::v16i8 && isPowerOf2_32(EltSize) &&
1836          EltSize <= 8 && "Can only handle 1,2,4,8 byte element sizes");
1837 
1838   // The consecutive indices need to specify an element, not part of two
1839   // different elements.  So abandon ship early if this isn't the case.
1840   if (N->getMaskElt(0) % EltSize != 0)
1841     return false;
1842 
1843   // This is a splat operation if each element of the permute is the same, and
1844   // if the value doesn't reference the second vector.
1845   unsigned ElementBase = N->getMaskElt(0);
1846 
1847   // FIXME: Handle UNDEF elements too!
1848   if (ElementBase >= 16)
1849     return false;
1850 
1851   // Check that the indices are consecutive, in the case of a multi-byte element
1852   // splatted with a v16i8 mask.
1853   for (unsigned i = 1; i != EltSize; ++i)
1854     if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
1855       return false;
1856 
1857   for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
1858     if (N->getMaskElt(i) < 0) continue;
1859     for (unsigned j = 0; j != EltSize; ++j)
1860       if (N->getMaskElt(i+j) != N->getMaskElt(j))
1861         return false;
1862   }
1863   return true;
1864 }
1865 
1866 /// Check that the mask is shuffling N byte elements. Within each N byte
1867 /// element of the mask, the indices could be either in increasing or
1868 /// decreasing order as long as they are consecutive.
1869 /// \param[in] N the shuffle vector SD Node to analyze
1870 /// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/
1871 /// Word/DoubleWord/QuadWord).
1872 /// \param[in] StepLen the delta indices number among the N byte element, if
1873 /// the mask is in increasing/decreasing order then it is 1/-1.
1874 /// \return true iff the mask is shuffling N byte elements.
1875 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
1876                                    int StepLen) {
1877   assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
1878          "Unexpected element width.");
1879   assert((StepLen == 1 || StepLen == -1) && "Unexpected element width.");
1880 
1881   unsigned NumOfElem = 16 / Width;
1882   unsigned MaskVal[16]; //  Width is never greater than 16
1883   for (unsigned i = 0; i < NumOfElem; ++i) {
1884     MaskVal[0] = N->getMaskElt(i * Width);
1885     if ((StepLen == 1) && (MaskVal[0] % Width)) {
1886       return false;
1887     } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
1888       return false;
1889     }
1890 
1891     for (unsigned int j = 1; j < Width; ++j) {
1892       MaskVal[j] = N->getMaskElt(i * Width + j);
1893       if (MaskVal[j] != MaskVal[j-1] + StepLen) {
1894         return false;
1895       }
1896     }
1897   }
1898 
1899   return true;
1900 }
1901 
1902 bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
1903                           unsigned &InsertAtByte, bool &Swap, bool IsLE) {
1904   if (!isNByteElemShuffleMask(N, 4, 1))
1905     return false;
1906 
1907   // Now we look at mask elements 0,4,8,12
1908   unsigned M0 = N->getMaskElt(0) / 4;
1909   unsigned M1 = N->getMaskElt(4) / 4;
1910   unsigned M2 = N->getMaskElt(8) / 4;
1911   unsigned M3 = N->getMaskElt(12) / 4;
1912   unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
1913   unsigned BigEndianShifts[] = { 3, 0, 1, 2 };
1914 
1915   // Below, let H and L be arbitrary elements of the shuffle mask
1916   // where H is in the range [4,7] and L is in the range [0,3].
1917   // H, 1, 2, 3 or L, 5, 6, 7
1918   if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
1919       (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
1920     ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
1921     InsertAtByte = IsLE ? 12 : 0;
1922     Swap = M0 < 4;
1923     return true;
1924   }
1925   // 0, H, 2, 3 or 4, L, 6, 7
1926   if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
1927       (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
1928     ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
1929     InsertAtByte = IsLE ? 8 : 4;
1930     Swap = M1 < 4;
1931     return true;
1932   }
1933   // 0, 1, H, 3 or 4, 5, L, 7
1934   if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
1935       (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
1936     ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
1937     InsertAtByte = IsLE ? 4 : 8;
1938     Swap = M2 < 4;
1939     return true;
1940   }
1941   // 0, 1, 2, H or 4, 5, 6, L
1942   if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
1943       (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
1944     ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
1945     InsertAtByte = IsLE ? 0 : 12;
1946     Swap = M3 < 4;
1947     return true;
1948   }
1949 
1950   // If both vector operands for the shuffle are the same vector, the mask will
1951   // contain only elements from the first one and the second one will be undef.
1952   if (N->getOperand(1).isUndef()) {
1953     ShiftElts = 0;
1954     Swap = true;
1955     unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
1956     if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
1957       InsertAtByte = IsLE ? 12 : 0;
1958       return true;
1959     }
1960     if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
1961       InsertAtByte = IsLE ? 8 : 4;
1962       return true;
1963     }
1964     if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
1965       InsertAtByte = IsLE ? 4 : 8;
1966       return true;
1967     }
1968     if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
1969       InsertAtByte = IsLE ? 0 : 12;
1970       return true;
1971     }
1972   }
1973 
1974   return false;
1975 }
1976 
1977 bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
1978                                bool &Swap, bool IsLE) {
1979   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
1980   // Ensure each byte index of the word is consecutive.
1981   if (!isNByteElemShuffleMask(N, 4, 1))
1982     return false;
1983 
1984   // Now we look at mask elements 0,4,8,12, which are the beginning of words.
1985   unsigned M0 = N->getMaskElt(0) / 4;
1986   unsigned M1 = N->getMaskElt(4) / 4;
1987   unsigned M2 = N->getMaskElt(8) / 4;
1988   unsigned M3 = N->getMaskElt(12) / 4;
1989 
1990   // If both vector operands for the shuffle are the same vector, the mask will
1991   // contain only elements from the first one and the second one will be undef.
1992   if (N->getOperand(1).isUndef()) {
1993     assert(M0 < 4 && "Indexing into an undef vector?");
1994     if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
1995       return false;
1996 
1997     ShiftElts = IsLE ? (4 - M0) % 4 : M0;
1998     Swap = false;
1999     return true;
2000   }
2001 
2002   // Ensure each word index of the ShuffleVector Mask is consecutive.
2003   if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)
2004     return false;
2005 
2006   if (IsLE) {
2007     if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
2008       // Input vectors don't need to be swapped if the leading element
2009       // of the result is one of the 3 left elements of the second vector
2010       // (or if there is no shift to be done at all).
2011       Swap = false;
2012       ShiftElts = (8 - M0) % 8;
2013     } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
2014       // Input vectors need to be swapped if the leading element
2015       // of the result is one of the 3 left elements of the first vector
2016       // (or if we're shifting by 4 - thereby simply swapping the vectors).
2017       Swap = true;
2018       ShiftElts = (4 - M0) % 4;
2019     }
2020 
2021     return true;
2022   } else {                                          // BE
2023     if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
2024       // Input vectors don't need to be swapped if the leading element
2025       // of the result is one of the 4 elements of the first vector.
2026       Swap = false;
2027       ShiftElts = M0;
2028     } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
2029       // Input vectors need to be swapped if the leading element
2030       // of the result is one of the 4 elements of the right vector.
2031       Swap = true;
2032       ShiftElts = M0 - 4;
2033     }
2034 
2035     return true;
2036   }
2037 }
2038 
2039 bool static isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) {
2040   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2041 
2042   if (!isNByteElemShuffleMask(N, Width, -1))
2043     return false;
2044 
2045   for (int i = 0; i < 16; i += Width)
2046     if (N->getMaskElt(i) != i + Width - 1)
2047       return false;
2048 
2049   return true;
2050 }
2051 
2052 bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) {
2053   return isXXBRShuffleMaskHelper(N, 2);
2054 }
2055 
2056 bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) {
2057   return isXXBRShuffleMaskHelper(N, 4);
2058 }
2059 
2060 bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) {
2061   return isXXBRShuffleMaskHelper(N, 8);
2062 }
2063 
2064 bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) {
2065   return isXXBRShuffleMaskHelper(N, 16);
2066 }
2067 
2068 /// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap
2069 /// if the inputs to the instruction should be swapped and set \p DM to the
2070 /// value for the immediate.
2071 /// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI
2072 /// AND element 0 of the result comes from the first input (LE) or second input
2073 /// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered.
2074 /// \return true iff the given mask of shuffle node \p N is a XXPERMDI shuffle
2075 /// mask.
2076 bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
2077                                bool &Swap, bool IsLE) {
2078   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2079 
2080   // Ensure each byte index of the double word is consecutive.
2081   if (!isNByteElemShuffleMask(N, 8, 1))
2082     return false;
2083 
2084   unsigned M0 = N->getMaskElt(0) / 8;
2085   unsigned M1 = N->getMaskElt(8) / 8;
2086   assert(((M0 | M1) < 4) && "A mask element out of bounds?");
2087 
2088   // If both vector operands for the shuffle are the same vector, the mask will
2089   // contain only elements from the first one and the second one will be undef.
2090   if (N->getOperand(1).isUndef()) {
2091     if ((M0 | M1) < 2) {
2092       DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
2093       Swap = false;
2094       return true;
2095     } else
2096       return false;
2097   }
2098 
2099   if (IsLE) {
2100     if (M0 > 1 && M1 < 2) {
2101       Swap = false;
2102     } else if (M0 < 2 && M1 > 1) {
2103       M0 = (M0 + 2) % 4;
2104       M1 = (M1 + 2) % 4;
2105       Swap = true;
2106     } else
2107       return false;
2108 
2109     // Note: if control flow comes here that means Swap is already set above
2110     DM = (((~M1) & 1) << 1) + ((~M0) & 1);
2111     return true;
2112   } else { // BE
2113     if (M0 < 2 && M1 > 1) {
2114       Swap = false;
2115     } else if (M0 > 1 && M1 < 2) {
2116       M0 = (M0 + 2) % 4;
2117       M1 = (M1 + 2) % 4;
2118       Swap = true;
2119     } else
2120       return false;
2121 
2122     // Note: if control flow comes here that means Swap is already set above
2123     DM = (M0 << 1) + (M1 & 1);
2124     return true;
2125   }
2126 }
2127 
2128 
2129 /// getSplatIdxForPPCMnemonics - Return the splat index as a value that is
2130 /// appropriate for PPC mnemonics (which have a big endian bias - namely
2131 /// elements are counted from the left of the vector register).
2132 unsigned PPC::getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
2133                                          SelectionDAG &DAG) {
2134   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2135   assert(isSplatShuffleMask(SVOp, EltSize));
2136   if (DAG.getDataLayout().isLittleEndian())
2137     return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
2138   else
2139     return SVOp->getMaskElt(0) / EltSize;
2140 }
2141 
2142 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
2143 /// by using a vspltis[bhw] instruction of the specified element size, return
2144 /// the constant being splatted.  The ByteSize field indicates the number of
2145 /// bytes of each element [124] -> [bhw].
2146 SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
2147   SDValue OpVal(nullptr, 0);
2148 
2149   // If ByteSize of the splat is bigger than the element size of the
2150   // build_vector, then we have a case where we are checking for a splat where
2151   // multiple elements of the buildvector are folded together into a single
2152   // logical element of the splat (e.g. "vsplish 1" to splat {0,1}*8).
2153   unsigned EltSize = 16/N->getNumOperands();
2154   if (EltSize < ByteSize) {
2155     unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
2156     SDValue UniquedVals[4];
2157     assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
2158 
2159     // See if all of the elements in the buildvector agree across.
2160     for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2161       if (N->getOperand(i).isUndef()) continue;
2162       // If the element isn't a constant, bail fully out.
2163       if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();
2164 
2165       if (!UniquedVals[i&(Multiple-1)].getNode())
2166         UniquedVals[i&(Multiple-1)] = N->getOperand(i);
2167       else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
2168         return SDValue();  // no match.
2169     }
2170 
2171     // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
2172     // either constant or undef values that are identical for each chunk.  See
2173     // if these chunks can form into a larger vspltis*.
2174 
2175     // Check to see if all of the leading entries are either 0 or -1.  If
2176     // neither, then this won't fit into the immediate field.
2177     bool LeadingZero = true;
2178     bool LeadingOnes = true;
2179     for (unsigned i = 0; i != Multiple-1; ++i) {
2180       if (!UniquedVals[i].getNode()) continue;  // Must have been undefs.
2181 
2182       LeadingZero &= isNullConstant(UniquedVals[i]);
2183       LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
2184     }
2185     // Finally, check the least significant entry.
2186     if (LeadingZero) {
2187       if (!UniquedVals[Multiple-1].getNode())
2188         return DAG.getTargetConstant(0, SDLoc(N), MVT::i32);  // 0,0,0,undef
2189       int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
2190       if (Val < 16)                                   // 0,0,0,4 -> vspltisw(4)
2191         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2192     }
2193     if (LeadingOnes) {
2194       if (!UniquedVals[Multiple-1].getNode())
2195         return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
2196       int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
2197       if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
2198         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2199     }
2200 
2201     return SDValue();
2202   }
2203 
2204   // Check to see if this buildvec has a single non-undef value in its elements.
2205   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2206     if (N->getOperand(i).isUndef()) continue;
2207     if (!OpVal.getNode())
2208       OpVal = N->getOperand(i);
2209     else if (OpVal != N->getOperand(i))
2210       return SDValue();
2211   }
2212 
2213   if (!OpVal.getNode()) return SDValue();  // All UNDEF: use implicit def.
2214 
2215   unsigned ValSizeInBytes = EltSize;
2216   uint64_t Value = 0;
2217   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
2218     Value = CN->getZExtValue();
2219   } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
2220     assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
2221     Value = FloatToBits(CN->getValueAPF().convertToFloat());
2222   }
2223 
2224   // If the splat value is larger than the element value, then we can never do
2225   // this splat.  The only case that we could fit the replicated bits into our
2226   // immediate field for would be zero, and we prefer to use vxor for it.
2227   if (ValSizeInBytes < ByteSize) return SDValue();
2228 
2229   // If the element value is larger than the splat value, check if it consists
2230   // of a repeated bit pattern of size ByteSize.
2231   if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
2232     return SDValue();
2233 
2234   // Properly sign extend the value.
2235   int MaskVal = SignExtend32(Value, ByteSize * 8);
2236 
2237   // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
2238   if (MaskVal == 0) return SDValue();
2239 
2240   // Finally, if this value fits in a 5 bit sext field, return it
2241   if (SignExtend32<5>(MaskVal) == MaskVal)
2242     return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
2243   return SDValue();
2244 }
2245 
2246 /// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift
2247 /// amount, otherwise return -1.
2248 int PPC::isQVALIGNIShuffleMask(SDNode *N) {
2249   EVT VT = N->getValueType(0);
2250   if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1)
2251     return -1;
2252 
2253   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2254 
2255   // Find the first non-undef value in the shuffle mask.
2256   unsigned i;
2257   for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i)
2258     /*search*/;
2259 
2260   if (i == 4) return -1;  // all undef.
2261 
2262   // Otherwise, check to see if the rest of the elements are consecutively
2263   // numbered from this value.
2264   unsigned ShiftAmt = SVOp->getMaskElt(i);
2265   if (ShiftAmt < i) return -1;
2266   ShiftAmt -= i;
2267 
2268   // Check the rest of the elements to see if they are consecutive.
2269   for (++i; i != 4; ++i)
2270     if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
2271       return -1;
2272 
2273   return ShiftAmt;
2274 }
2275 
2276 //===----------------------------------------------------------------------===//
2277 //  Addressing Mode Selection
2278 //===----------------------------------------------------------------------===//
2279 
2280 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit
2281 /// or 64-bit immediate, and if the value can be accurately represented as a
2282 /// sign extension from a 16-bit value.  If so, this returns true and the
2283 /// immediate.
2284 bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
2285   if (!isa<ConstantSDNode>(N))
2286     return false;
2287 
2288   Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
2289   if (N->getValueType(0) == MVT::i32)
2290     return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
2291   else
2292     return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
2293 }
2294 bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
2295   return isIntS16Immediate(Op.getNode(), Imm);
2296 }
2297 
2298 
2299 /// SelectAddressEVXRegReg - Given the specified address, check to see if it can
2300 /// be represented as an indexed [r+r] operation.
2301 bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base,
2302                                                SDValue &Index,
2303                                                SelectionDAG &DAG) const {
2304   for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
2305       UI != E; ++UI) {
2306     if (MemSDNode *Memop = dyn_cast<MemSDNode>(*UI)) {
2307       if (Memop->getMemoryVT() == MVT::f64) {
2308           Base = N.getOperand(0);
2309           Index = N.getOperand(1);
2310           return true;
2311       }
2312     }
2313   }
2314   return false;
2315 }
2316 
2317 /// SelectAddressRegReg - Given the specified addressed, check to see if it
2318 /// can be represented as an indexed [r+r] operation.  Returns false if it
2319 /// can be more efficiently represented as [r+imm]. If \p EncodingAlignment is
2320 /// non-zero and N can be represented by a base register plus a signed 16-bit
2321 /// displacement, make a more precise judgement by checking (displacement % \p
2322 /// EncodingAlignment).
2323 bool PPCTargetLowering::SelectAddressRegReg(
2324     SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG,
2325     MaybeAlign EncodingAlignment) const {
2326   // If we have a PC Relative target flag don't select as [reg+reg]. It will be
2327   // a [pc+imm].
2328   if (SelectAddressPCRel(N, Base))
2329     return false;
2330 
2331   int16_t Imm = 0;
2332   if (N.getOpcode() == ISD::ADD) {
2333     // Is there any SPE load/store (f64), which can't handle 16bit offset?
2334     // SPE load/store can only handle 8-bit offsets.
2335     if (hasSPE() && SelectAddressEVXRegReg(N, Base, Index, DAG))
2336         return true;
2337     if (isIntS16Immediate(N.getOperand(1), Imm) &&
2338         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
2339       return false; // r+i
2340     if (N.getOperand(1).getOpcode() == PPCISD::Lo)
2341       return false;    // r+i
2342 
2343     Base = N.getOperand(0);
2344     Index = N.getOperand(1);
2345     return true;
2346   } else if (N.getOpcode() == ISD::OR) {
2347     if (isIntS16Immediate(N.getOperand(1), Imm) &&
2348         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
2349       return false; // r+i can fold it if we can.
2350 
2351     // If this is an or of disjoint bitfields, we can codegen this as an add
2352     // (for better address arithmetic) if the LHS and RHS of the OR are provably
2353     // disjoint.
2354     KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2355 
2356     if (LHSKnown.Zero.getBoolValue()) {
2357       KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
2358       // If all of the bits are known zero on the LHS or RHS, the add won't
2359       // carry.
2360       if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
2361         Base = N.getOperand(0);
2362         Index = N.getOperand(1);
2363         return true;
2364       }
2365     }
2366   }
2367 
2368   return false;
2369 }
2370 
2371 // If we happen to be doing an i64 load or store into a stack slot that has
2372 // less than a 4-byte alignment, then the frame-index elimination may need to
2373 // use an indexed load or store instruction (because the offset may not be a
2374 // multiple of 4). The extra register needed to hold the offset comes from the
2375 // register scavenger, and it is possible that the scavenger will need to use
2376 // an emergency spill slot. As a result, we need to make sure that a spill slot
2377 // is allocated when doing an i64 load/store into a less-than-4-byte-aligned
2378 // stack slot.
2379 static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
2380   // FIXME: This does not handle the LWA case.
2381   if (VT != MVT::i64)
2382     return;
2383 
2384   // NOTE: We'll exclude negative FIs here, which come from argument
2385   // lowering, because there are no known test cases triggering this problem
2386   // using packed structures (or similar). We can remove this exclusion if
2387   // we find such a test case. The reason why this is so test-case driven is
2388   // because this entire 'fixup' is only to prevent crashes (from the
2389   // register scavenger) on not-really-valid inputs. For example, if we have:
2390   //   %a = alloca i1
2391   //   %b = bitcast i1* %a to i64*
2392   //   store i64* a, i64 b
2393   // then the store should really be marked as 'align 1', but is not. If it
2394   // were marked as 'align 1' then the indexed form would have been
2395   // instruction-selected initially, and the problem this 'fixup' is preventing
2396   // won't happen regardless.
2397   if (FrameIdx < 0)
2398     return;
2399 
2400   MachineFunction &MF = DAG.getMachineFunction();
2401   MachineFrameInfo &MFI = MF.getFrameInfo();
2402 
2403   if (MFI.getObjectAlign(FrameIdx) >= Align(4))
2404     return;
2405 
2406   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2407   FuncInfo->setHasNonRISpills();
2408 }
2409 
2410 /// Returns true if the address N can be represented by a base register plus
2411 /// a signed 16-bit displacement [r+imm], and if it is not better
2412 /// represented as reg+reg.  If \p EncodingAlignment is non-zero, only accept
2413 /// displacements that are multiples of that value.
2414 bool PPCTargetLowering::SelectAddressRegImm(
2415     SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG,
2416     MaybeAlign EncodingAlignment) const {
2417   // FIXME dl should come from parent load or store, not from address
2418   SDLoc dl(N);
2419 
2420   // If we have a PC Relative target flag don't select as [reg+imm]. It will be
2421   // a [pc+imm].
2422   if (SelectAddressPCRel(N, Base))
2423     return false;
2424 
2425   // If this can be more profitably realized as r+r, fail.
2426   if (SelectAddressRegReg(N, Disp, Base, DAG, EncodingAlignment))
2427     return false;
2428 
2429   if (N.getOpcode() == ISD::ADD) {
2430     int16_t imm = 0;
2431     if (isIntS16Immediate(N.getOperand(1), imm) &&
2432         (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
2433       Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2434       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2435         Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2436         fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2437       } else {
2438         Base = N.getOperand(0);
2439       }
2440       return true; // [r+i]
2441     } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
2442       // Match LOAD (ADD (X, Lo(G))).
2443       assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
2444              && "Cannot handle constant offsets yet!");
2445       Disp = N.getOperand(1).getOperand(0);  // The global address.
2446       assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
2447              Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
2448              Disp.getOpcode() == ISD::TargetConstantPool ||
2449              Disp.getOpcode() == ISD::TargetJumpTable);
2450       Base = N.getOperand(0);
2451       return true;  // [&g+r]
2452     }
2453   } else if (N.getOpcode() == ISD::OR) {
2454     int16_t imm = 0;
2455     if (isIntS16Immediate(N.getOperand(1), imm) &&
2456         (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
2457       // If this is an or of disjoint bitfields, we can codegen this as an add
2458       // (for better address arithmetic) if the LHS and RHS of the OR are
2459       // provably disjoint.
2460       KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2461 
2462       if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
2463         // If all of the bits are known zero on the LHS or RHS, the add won't
2464         // carry.
2465         if (FrameIndexSDNode *FI =
2466               dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2467           Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2468           fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2469         } else {
2470           Base = N.getOperand(0);
2471         }
2472         Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2473         return true;
2474       }
2475     }
2476   } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
2477     // Loading from a constant address.
2478 
2479     // If this address fits entirely in a 16-bit sext immediate field, codegen
2480     // this as "d, 0"
2481     int16_t Imm;
2482     if (isIntS16Immediate(CN, Imm) &&
2483         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm))) {
2484       Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
2485       Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2486                              CN->getValueType(0));
2487       return true;
2488     }
2489 
2490     // Handle 32-bit sext immediates with LIS + addr mode.
2491     if ((CN->getValueType(0) == MVT::i32 ||
2492          (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
2493         (!EncodingAlignment ||
2494          isAligned(*EncodingAlignment, CN->getZExtValue()))) {
2495       int Addr = (int)CN->getZExtValue();
2496 
2497       // Otherwise, break this down into an LIS + disp.
2498       Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);
2499 
2500       Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
2501                                    MVT::i32);
2502       unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
2503       Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
2504       return true;
2505     }
2506   }
2507 
2508   Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
2509   if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
2510     Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2511     fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2512   } else
2513     Base = N;
2514   return true;      // [r+0]
2515 }
2516 
2517 /// SelectAddressRegRegOnly - Given the specified addressed, force it to be
2518 /// represented as an indexed [r+r] operation.
2519 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
2520                                                 SDValue &Index,
2521                                                 SelectionDAG &DAG) const {
2522   // Check to see if we can easily represent this as an [r+r] address.  This
2523   // will fail if it thinks that the address is more profitably represented as
2524   // reg+imm, e.g. where imm = 0.
2525   if (SelectAddressRegReg(N, Base, Index, DAG))
2526     return true;
2527 
2528   // If the address is the result of an add, we will utilize the fact that the
2529   // address calculation includes an implicit add.  However, we can reduce
2530   // register pressure if we do not materialize a constant just for use as the
2531   // index register.  We only get rid of the add if it is not an add of a
2532   // value and a 16-bit signed constant and both have a single use.
2533   int16_t imm = 0;
2534   if (N.getOpcode() == ISD::ADD &&
2535       (!isIntS16Immediate(N.getOperand(1), imm) ||
2536        !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
2537     Base = N.getOperand(0);
2538     Index = N.getOperand(1);
2539     return true;
2540   }
2541 
2542   // Otherwise, do it the hard way, using R0 as the base register.
2543   Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2544                          N.getValueType());
2545   Index = N;
2546   return true;
2547 }
2548 
2549 template <typename Ty> static bool isValidPCRelNode(SDValue N) {
2550   Ty *PCRelCand = dyn_cast<Ty>(N);
2551   return PCRelCand && (PCRelCand->getTargetFlags() & PPCII::MO_PCREL_FLAG);
2552 }
2553 
2554 /// Returns true if this address is a PC Relative address.
2555 /// PC Relative addresses are marked with the flag PPCII::MO_PCREL_FLAG
2556 /// or if the node opcode is PPCISD::MAT_PCREL_ADDR.
2557 bool PPCTargetLowering::SelectAddressPCRel(SDValue N, SDValue &Base) const {
2558   // This is a materialize PC Relative node. Always select this as PC Relative.
2559   Base = N;
2560   if (N.getOpcode() == PPCISD::MAT_PCREL_ADDR)
2561     return true;
2562   if (isValidPCRelNode<ConstantPoolSDNode>(N) ||
2563       isValidPCRelNode<GlobalAddressSDNode>(N) ||
2564       isValidPCRelNode<JumpTableSDNode>(N) ||
2565       isValidPCRelNode<BlockAddressSDNode>(N))
2566     return true;
2567   return false;
2568 }
2569 
2570 /// Returns true if we should use a direct load into vector instruction
2571 /// (such as lxsd or lfd), instead of a load into gpr + direct move sequence.
2572 static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget& ST) {
2573 
2574   // If there are any other uses other than scalar to vector, then we should
2575   // keep it as a scalar load -> direct move pattern to prevent multiple
2576   // loads.
2577   LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
2578   if (!LD)
2579     return false;
2580 
2581   EVT MemVT = LD->getMemoryVT();
2582   if (!MemVT.isSimple())
2583     return false;
2584   switch(MemVT.getSimpleVT().SimpleTy) {
2585   case MVT::i64:
2586     break;
2587   case MVT::i32:
2588     if (!ST.hasP8Vector())
2589       return false;
2590     break;
2591   case MVT::i16:
2592   case MVT::i8:
2593     if (!ST.hasP9Vector())
2594       return false;
2595     break;
2596   default:
2597     return false;
2598   }
2599 
2600   SDValue LoadedVal(N, 0);
2601   if (!LoadedVal.hasOneUse())
2602     return false;
2603 
2604   for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end();
2605        UI != UE; ++UI)
2606     if (UI.getUse().get().getResNo() == 0 &&
2607         UI->getOpcode() != ISD::SCALAR_TO_VECTOR &&
2608         UI->getOpcode() != PPCISD::SCALAR_TO_VECTOR_PERMUTED)
2609       return false;
2610 
2611   return true;
2612 }
2613 
2614 /// getPreIndexedAddressParts - returns true by value, base pointer and
2615 /// offset pointer and addressing mode by reference if the node's address
2616 /// can be legally represented as pre-indexed load / store address.
2617 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
2618                                                   SDValue &Offset,
2619                                                   ISD::MemIndexedMode &AM,
2620                                                   SelectionDAG &DAG) const {
2621   if (DisablePPCPreinc) return false;
2622 
2623   bool isLoad = true;
2624   SDValue Ptr;
2625   EVT VT;
2626   unsigned Alignment;
2627   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2628     Ptr = LD->getBasePtr();
2629     VT = LD->getMemoryVT();
2630     Alignment = LD->getAlignment();
2631   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
2632     Ptr = ST->getBasePtr();
2633     VT  = ST->getMemoryVT();
2634     Alignment = ST->getAlignment();
2635     isLoad = false;
2636   } else
2637     return false;
2638 
2639   // Do not generate pre-inc forms for specific loads that feed scalar_to_vector
2640   // instructions because we can fold these into a more efficient instruction
2641   // instead, (such as LXSD).
2642   if (isLoad && usePartialVectorLoads(N, Subtarget)) {
2643     return false;
2644   }
2645 
2646   // PowerPC doesn't have preinc load/store instructions for vectors
2647   if (VT.isVector())
2648     return false;
2649 
2650   if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
2651     // Common code will reject creating a pre-inc form if the base pointer
2652     // is a frame index, or if N is a store and the base pointer is either
2653     // the same as or a predecessor of the value being stored.  Check for
2654     // those situations here, and try with swapped Base/Offset instead.
2655     bool Swap = false;
2656 
2657     if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
2658       Swap = true;
2659     else if (!isLoad) {
2660       SDValue Val = cast<StoreSDNode>(N)->getValue();
2661       if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
2662         Swap = true;
2663     }
2664 
2665     if (Swap)
2666       std::swap(Base, Offset);
2667 
2668     AM = ISD::PRE_INC;
2669     return true;
2670   }
2671 
2672   // LDU/STU can only handle immediates that are a multiple of 4.
2673   if (VT != MVT::i64) {
2674     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, None))
2675       return false;
2676   } else {
2677     // LDU/STU need an address with at least 4-byte alignment.
2678     if (Alignment < 4)
2679       return false;
2680 
2681     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, Align(4)))
2682       return false;
2683   }
2684 
2685   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2686     // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
2687     // sext i32 to i64 when addr mode is r+i.
2688     if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
2689         LD->getExtensionType() == ISD::SEXTLOAD &&
2690         isa<ConstantSDNode>(Offset))
2691       return false;
2692   }
2693 
2694   AM = ISD::PRE_INC;
2695   return true;
2696 }
2697 
2698 //===----------------------------------------------------------------------===//
2699 //  LowerOperation implementation
2700 //===----------------------------------------------------------------------===//
2701 
2702 /// Return true if we should reference labels using a PICBase, set the HiOpFlags
2703 /// and LoOpFlags to the target MO flags.
2704 static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
2705                                unsigned &HiOpFlags, unsigned &LoOpFlags,
2706                                const GlobalValue *GV = nullptr) {
2707   HiOpFlags = PPCII::MO_HA;
2708   LoOpFlags = PPCII::MO_LO;
2709 
2710   // Don't use the pic base if not in PIC relocation model.
2711   if (IsPIC) {
2712     HiOpFlags |= PPCII::MO_PIC_FLAG;
2713     LoOpFlags |= PPCII::MO_PIC_FLAG;
2714   }
2715 }
2716 
2717 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
2718                              SelectionDAG &DAG) {
2719   SDLoc DL(HiPart);
2720   EVT PtrVT = HiPart.getValueType();
2721   SDValue Zero = DAG.getConstant(0, DL, PtrVT);
2722 
2723   SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
2724   SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);
2725 
2726   // With PIC, the first instruction is actually "GR+hi(&G)".
2727   if (isPIC)
2728     Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
2729                      DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);
2730 
2731   // Generate non-pic code that has direct accesses to the constant pool.
2732   // The address of the global is just (hi(&g)+lo(&g)).
2733   return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
2734 }
2735 
2736 static void setUsesTOCBasePtr(MachineFunction &MF) {
2737   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2738   FuncInfo->setUsesTOCBasePtr();
2739 }
2740 
2741 static void setUsesTOCBasePtr(SelectionDAG &DAG) {
2742   setUsesTOCBasePtr(DAG.getMachineFunction());
2743 }
2744 
2745 SDValue PPCTargetLowering::getTOCEntry(SelectionDAG &DAG, const SDLoc &dl,
2746                                        SDValue GA) const {
2747   const bool Is64Bit = Subtarget.isPPC64();
2748   EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
2749   SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT)
2750                         : Subtarget.isAIXABI()
2751                               ? DAG.getRegister(PPC::R2, VT)
2752                               : DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);
2753   SDValue Ops[] = { GA, Reg };
2754   return DAG.getMemIntrinsicNode(
2755       PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
2756       MachinePointerInfo::getGOT(DAG.getMachineFunction()), None,
2757       MachineMemOperand::MOLoad);
2758 }
2759 
2760 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
2761                                              SelectionDAG &DAG) const {
2762   EVT PtrVT = Op.getValueType();
2763   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
2764   const Constant *C = CP->getConstVal();
2765 
2766   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
2767   // The actual address of the GlobalValue is stored in the TOC.
2768   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
2769     if (Subtarget.isUsingPCRelativeCalls()) {
2770       SDLoc DL(CP);
2771       EVT Ty = getPointerTy(DAG.getDataLayout());
2772       SDValue ConstPool = DAG.getTargetConstantPool(
2773           C, Ty, CP->getAlign(), CP->getOffset(), PPCII::MO_PCREL_FLAG);
2774       return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, ConstPool);
2775     }
2776     setUsesTOCBasePtr(DAG);
2777     SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0);
2778     return getTOCEntry(DAG, SDLoc(CP), GA);
2779   }
2780 
2781   unsigned MOHiFlag, MOLoFlag;
2782   bool IsPIC = isPositionIndependent();
2783   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2784 
2785   if (IsPIC && Subtarget.isSVR4ABI()) {
2786     SDValue GA =
2787         DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), PPCII::MO_PIC_FLAG);
2788     return getTOCEntry(DAG, SDLoc(CP), GA);
2789   }
2790 
2791   SDValue CPIHi =
2792       DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOHiFlag);
2793   SDValue CPILo =
2794       DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOLoFlag);
2795   return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG);
2796 }
2797 
2798 // For 64-bit PowerPC, prefer the more compact relative encodings.
2799 // This trades 32 bits per jump table entry for one or two instructions
2800 // on the jump site.
2801 unsigned PPCTargetLowering::getJumpTableEncoding() const {
2802   if (isJumpTableRelative())
2803     return MachineJumpTableInfo::EK_LabelDifference32;
2804 
2805   return TargetLowering::getJumpTableEncoding();
2806 }
2807 
2808 bool PPCTargetLowering::isJumpTableRelative() const {
2809   if (UseAbsoluteJumpTables)
2810     return false;
2811   if (Subtarget.isPPC64() || Subtarget.isAIXABI())
2812     return true;
2813   return TargetLowering::isJumpTableRelative();
2814 }
2815 
2816 SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table,
2817                                                     SelectionDAG &DAG) const {
2818   if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
2819     return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
2820 
2821   switch (getTargetMachine().getCodeModel()) {
2822   case CodeModel::Small:
2823   case CodeModel::Medium:
2824     return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
2825   default:
2826     return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(),
2827                        getPointerTy(DAG.getDataLayout()));
2828   }
2829 }
2830 
2831 const MCExpr *
2832 PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
2833                                                 unsigned JTI,
2834                                                 MCContext &Ctx) const {
2835   if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
2836     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2837 
2838   switch (getTargetMachine().getCodeModel()) {
2839   case CodeModel::Small:
2840   case CodeModel::Medium:
2841     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2842   default:
2843     return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
2844   }
2845 }
2846 
2847 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
2848   EVT PtrVT = Op.getValueType();
2849   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
2850 
2851   // isUsingPCRelativeCalls() returns true when PCRelative is enabled
2852   if (Subtarget.isUsingPCRelativeCalls()) {
2853     SDLoc DL(JT);
2854     EVT Ty = getPointerTy(DAG.getDataLayout());
2855     SDValue GA =
2856         DAG.getTargetJumpTable(JT->getIndex(), Ty, PPCII::MO_PCREL_FLAG);
2857     SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
2858     return MatAddr;
2859   }
2860 
2861   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
2862   // The actual address of the GlobalValue is stored in the TOC.
2863   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
2864     setUsesTOCBasePtr(DAG);
2865     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
2866     return getTOCEntry(DAG, SDLoc(JT), GA);
2867   }
2868 
2869   unsigned MOHiFlag, MOLoFlag;
2870   bool IsPIC = isPositionIndependent();
2871   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2872 
2873   if (IsPIC && Subtarget.isSVR4ABI()) {
2874     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
2875                                         PPCII::MO_PIC_FLAG);
2876     return getTOCEntry(DAG, SDLoc(GA), GA);
2877   }
2878 
2879   SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
2880   SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
2881   return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG);
2882 }
2883 
2884 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
2885                                              SelectionDAG &DAG) const {
2886   EVT PtrVT = Op.getValueType();
2887   BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
2888   const BlockAddress *BA = BASDN->getBlockAddress();
2889 
2890   // isUsingPCRelativeCalls() returns true when PCRelative is enabled
2891   if (Subtarget.isUsingPCRelativeCalls()) {
2892     SDLoc DL(BASDN);
2893     EVT Ty = getPointerTy(DAG.getDataLayout());
2894     SDValue GA = DAG.getTargetBlockAddress(BA, Ty, BASDN->getOffset(),
2895                                            PPCII::MO_PCREL_FLAG);
2896     SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
2897     return MatAddr;
2898   }
2899 
2900   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
2901   // The actual BlockAddress is stored in the TOC.
2902   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
2903     setUsesTOCBasePtr(DAG);
2904     SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
2905     return getTOCEntry(DAG, SDLoc(BASDN), GA);
2906   }
2907 
2908   // 32-bit position-independent ELF stores the BlockAddress in the .got.
2909   if (Subtarget.is32BitELFABI() && isPositionIndependent())
2910     return getTOCEntry(
2911         DAG, SDLoc(BASDN),
2912         DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()));
2913 
2914   unsigned MOHiFlag, MOLoFlag;
2915   bool IsPIC = isPositionIndependent();
2916   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2917   SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
2918   SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
2919   return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG);
2920 }
2921 
2922 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
2923                                               SelectionDAG &DAG) const {
2924   // FIXME: TLS addresses currently use medium model code sequences,
2925   // which is the most useful form.  Eventually support for small and
2926   // large models could be added if users need it, at the cost of
2927   // additional complexity.
2928   if (Subtarget.isUsingPCRelativeCalls() && !EnablePPCPCRelTLS)
2929     report_fatal_error("Thread local storage is not supported with pc-relative"
2930                        " addressing - please compile with -mno-pcrel");
2931   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2932   if (DAG.getTarget().useEmulatedTLS())
2933     return LowerToTLSEmulatedModel(GA, DAG);
2934 
2935   SDLoc dl(GA);
2936   const GlobalValue *GV = GA->getGlobal();
2937   EVT PtrVT = getPointerTy(DAG.getDataLayout());
2938   bool is64bit = Subtarget.isPPC64();
2939   const Module *M = DAG.getMachineFunction().getFunction().getParent();
2940   PICLevel::Level picLevel = M->getPICLevel();
2941 
2942   const TargetMachine &TM = getTargetMachine();
2943   TLSModel::Model Model = TM.getTLSModel(GV);
2944 
2945   if (Model == TLSModel::LocalExec) {
2946     SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
2947                                                PPCII::MO_TPREL_HA);
2948     SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
2949                                                PPCII::MO_TPREL_LO);
2950     SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64)
2951                              : DAG.getRegister(PPC::R2, MVT::i32);
2952 
2953     SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
2954     return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
2955   }
2956 
2957   if (Model == TLSModel::InitialExec) {
2958     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
2959     SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
2960                                                 PPCII::MO_TLS);
2961     SDValue GOTPtr;
2962     if (is64bit) {
2963       setUsesTOCBasePtr(DAG);
2964       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
2965       GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl,
2966                            PtrVT, GOTReg, TGA);
2967     } else {
2968       if (!TM.isPositionIndependent())
2969         GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT);
2970       else if (picLevel == PICLevel::SmallPIC)
2971         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
2972       else
2973         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
2974     }
2975     SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl,
2976                                    PtrVT, TGA, GOTPtr);
2977     return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
2978   }
2979 
2980   if (Model == TLSModel::GeneralDynamic) {
2981     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
2982     SDValue GOTPtr;
2983     if (is64bit) {
2984       setUsesTOCBasePtr(DAG);
2985       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
2986       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
2987                                    GOTReg, TGA);
2988     } else {
2989       if (picLevel == PICLevel::SmallPIC)
2990         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
2991       else
2992         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
2993     }
2994     return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT,
2995                        GOTPtr, TGA, TGA);
2996   }
2997 
2998   if (Model == TLSModel::LocalDynamic) {
2999     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
3000     SDValue GOTPtr;
3001     if (is64bit) {
3002       setUsesTOCBasePtr(DAG);
3003       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3004       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
3005                            GOTReg, TGA);
3006     } else {
3007       if (picLevel == PICLevel::SmallPIC)
3008         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3009       else
3010         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3011     }
3012     SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
3013                                   PtrVT, GOTPtr, TGA, TGA);
3014     SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
3015                                       PtrVT, TLSAddr, TGA);
3016     return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
3017   }
3018 
3019   llvm_unreachable("Unknown TLS model!");
3020 }
3021 
3022 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
3023                                               SelectionDAG &DAG) const {
3024   EVT PtrVT = Op.getValueType();
3025   GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
3026   SDLoc DL(GSDN);
3027   const GlobalValue *GV = GSDN->getGlobal();
3028 
3029   // 64-bit SVR4 ABI & AIX ABI code is always position-independent.
3030   // The actual address of the GlobalValue is stored in the TOC.
3031   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3032     if (Subtarget.isUsingPCRelativeCalls()) {
3033       EVT Ty = getPointerTy(DAG.getDataLayout());
3034       if (isAccessedAsGotIndirect(Op)) {
3035         SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
3036                                                 PPCII::MO_PCREL_FLAG |
3037                                                     PPCII::MO_GOT_FLAG);
3038         SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3039         SDValue Load = DAG.getLoad(MVT::i64, DL, DAG.getEntryNode(), MatPCRel,
3040                                    MachinePointerInfo());
3041         return Load;
3042       } else {
3043         SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
3044                                                 PPCII::MO_PCREL_FLAG);
3045         return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3046       }
3047     }
3048     setUsesTOCBasePtr(DAG);
3049     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
3050     return getTOCEntry(DAG, DL, GA);
3051   }
3052 
3053   unsigned MOHiFlag, MOLoFlag;
3054   bool IsPIC = isPositionIndependent();
3055   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV);
3056 
3057   if (IsPIC && Subtarget.isSVR4ABI()) {
3058     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
3059                                             GSDN->getOffset(),
3060                                             PPCII::MO_PIC_FLAG);
3061     return getTOCEntry(DAG, DL, GA);
3062   }
3063 
3064   SDValue GAHi =
3065     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
3066   SDValue GALo =
3067     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);
3068 
3069   return LowerLabelRef(GAHi, GALo, IsPIC, DAG);
3070 }
3071 
3072 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
3073   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
3074   SDLoc dl(Op);
3075 
3076   if (Op.getValueType() == MVT::v2i64) {
3077     // When the operands themselves are v2i64 values, we need to do something
3078     // special because VSX has no underlying comparison operations for these.
3079     if (Op.getOperand(0).getValueType() == MVT::v2i64) {
3080       // Equality can be handled by casting to the legal type for Altivec
3081       // comparisons, everything else needs to be expanded.
3082       if (CC == ISD::SETEQ || CC == ISD::SETNE) {
3083         return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
3084                  DAG.getSetCC(dl, MVT::v4i32,
3085                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)),
3086                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)),
3087                    CC));
3088       }
3089 
3090       return SDValue();
3091     }
3092 
3093     // We handle most of these in the usual way.
3094     return Op;
3095   }
3096 
3097   // If we're comparing for equality to zero, expose the fact that this is
3098   // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
3099   // fold the new nodes.
3100   if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG))
3101     return V;
3102 
3103   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
3104     // Leave comparisons against 0 and -1 alone for now, since they're usually
3105     // optimized.  FIXME: revisit this when we can custom lower all setcc
3106     // optimizations.
3107     if (C->isAllOnesValue() || C->isNullValue())
3108       return SDValue();
3109   }
3110 
3111   // If we have an integer seteq/setne, turn it into a compare against zero
3112   // by xor'ing the rhs with the lhs, which is faster than setting a
3113   // condition register, reading it back out, and masking the correct bit.  The
3114   // normal approach here uses sub to do this instead of xor.  Using xor exposes
3115   // the result to other bit-twiddling opportunities.
3116   EVT LHSVT = Op.getOperand(0).getValueType();
3117   if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
3118     EVT VT = Op.getValueType();
3119     SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
3120                                 Op.getOperand(1));
3121     return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
3122   }
3123   return SDValue();
3124 }
3125 
3126 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
3127   SDNode *Node = Op.getNode();
3128   EVT VT = Node->getValueType(0);
3129   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3130   SDValue InChain = Node->getOperand(0);
3131   SDValue VAListPtr = Node->getOperand(1);
3132   const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
3133   SDLoc dl(Node);
3134 
3135   assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
3136 
3137   // gpr_index
3138   SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3139                                     VAListPtr, MachinePointerInfo(SV), MVT::i8);
3140   InChain = GprIndex.getValue(1);
3141 
3142   if (VT == MVT::i64) {
3143     // Check if GprIndex is even
3144     SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
3145                                  DAG.getConstant(1, dl, MVT::i32));
3146     SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
3147                                 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
3148     SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
3149                                           DAG.getConstant(1, dl, MVT::i32));
3150     // Align GprIndex to be even if it isn't
3151     GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
3152                            GprIndex);
3153   }
3154 
3155   // fpr index is 1 byte after gpr
3156   SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3157                                DAG.getConstant(1, dl, MVT::i32));
3158 
3159   // fpr
3160   SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3161                                     FprPtr, MachinePointerInfo(SV), MVT::i8);
3162   InChain = FprIndex.getValue(1);
3163 
3164   SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3165                                        DAG.getConstant(8, dl, MVT::i32));
3166 
3167   SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3168                                         DAG.getConstant(4, dl, MVT::i32));
3169 
3170   // areas
3171   SDValue OverflowArea =
3172       DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
3173   InChain = OverflowArea.getValue(1);
3174 
3175   SDValue RegSaveArea =
3176       DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
3177   InChain = RegSaveArea.getValue(1);
3178 
3179   // select overflow_area if index > 8
3180   SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
3181                             DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);
3182 
3183   // adjustment constant gpr_index * 4/8
3184   SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
3185                                     VT.isInteger() ? GprIndex : FprIndex,
3186                                     DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
3187                                                     MVT::i32));
3188 
3189   // OurReg = RegSaveArea + RegConstant
3190   SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
3191                                RegConstant);
3192 
3193   // Floating types are 32 bytes into RegSaveArea
3194   if (VT.isFloatingPoint())
3195     OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
3196                          DAG.getConstant(32, dl, MVT::i32));
3197 
3198   // increase {f,g}pr_index by 1 (or 2 if VT is i64)
3199   SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3200                                    VT.isInteger() ? GprIndex : FprIndex,
3201                                    DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
3202                                                    MVT::i32));
3203 
3204   InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
3205                               VT.isInteger() ? VAListPtr : FprPtr,
3206                               MachinePointerInfo(SV), MVT::i8);
3207 
3208   // determine if we should load from reg_save_area or overflow_area
3209   SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
3210 
3211   // increase overflow_area by 4/8 if gpr/fpr > 8
3212   SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
3213                                           DAG.getConstant(VT.isInteger() ? 4 : 8,
3214                                           dl, MVT::i32));
3215 
3216   OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
3217                              OverflowAreaPlusN);
3218 
3219   InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
3220                               MachinePointerInfo(), MVT::i32);
3221 
3222   return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
3223 }
3224 
3225 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
3226   assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
3227 
3228   // We have to copy the entire va_list struct:
3229   // 2*sizeof(char) + 2 Byte alignment + 2*sizeof(char*) = 12 Byte
3230   return DAG.getMemcpy(Op.getOperand(0), Op, Op.getOperand(1), Op.getOperand(2),
3231                        DAG.getConstant(12, SDLoc(Op), MVT::i32), Align(8),
3232                        false, true, false, MachinePointerInfo(),
3233                        MachinePointerInfo());
3234 }
3235 
3236 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
3237                                                   SelectionDAG &DAG) const {
3238   if (Subtarget.isAIXABI())
3239     report_fatal_error("ADJUST_TRAMPOLINE operation is not supported on AIX.");
3240 
3241   return Op.getOperand(0);
3242 }
3243 
3244 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
3245                                                 SelectionDAG &DAG) const {
3246   if (Subtarget.isAIXABI())
3247     report_fatal_error("INIT_TRAMPOLINE operation is not supported on AIX.");
3248 
3249   SDValue Chain = Op.getOperand(0);
3250   SDValue Trmp = Op.getOperand(1); // trampoline
3251   SDValue FPtr = Op.getOperand(2); // nested function
3252   SDValue Nest = Op.getOperand(3); // 'nest' parameter value
3253   SDLoc dl(Op);
3254 
3255   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3256   bool isPPC64 = (PtrVT == MVT::i64);
3257   Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
3258 
3259   TargetLowering::ArgListTy Args;
3260   TargetLowering::ArgListEntry Entry;
3261 
3262   Entry.Ty = IntPtrTy;
3263   Entry.Node = Trmp; Args.push_back(Entry);
3264 
3265   // TrampSize == (isPPC64 ? 48 : 40);
3266   Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
3267                                isPPC64 ? MVT::i64 : MVT::i32);
3268   Args.push_back(Entry);
3269 
3270   Entry.Node = FPtr; Args.push_back(Entry);
3271   Entry.Node = Nest; Args.push_back(Entry);
3272 
3273   // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
3274   TargetLowering::CallLoweringInfo CLI(DAG);
3275   CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
3276       CallingConv::C, Type::getVoidTy(*DAG.getContext()),
3277       DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args));
3278 
3279   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
3280   return CallResult.second;
3281 }
3282 
3283 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3284   MachineFunction &MF = DAG.getMachineFunction();
3285   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3286   EVT PtrVT = getPointerTy(MF.getDataLayout());
3287 
3288   SDLoc dl(Op);
3289 
3290   if (Subtarget.isPPC64() || Subtarget.isAIXABI()) {
3291     // vastart just stores the address of the VarArgsFrameIndex slot into the
3292     // memory location argument.
3293     SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3294     const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3295     return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
3296                         MachinePointerInfo(SV));
3297   }
3298 
3299   // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
3300   // We suppose the given va_list is already allocated.
3301   //
3302   // typedef struct {
3303   //  char gpr;     /* index into the array of 8 GPRs
3304   //                 * stored in the register save area
3305   //                 * gpr=0 corresponds to r3,
3306   //                 * gpr=1 to r4, etc.
3307   //                 */
3308   //  char fpr;     /* index into the array of 8 FPRs
3309   //                 * stored in the register save area
3310   //                 * fpr=0 corresponds to f1,
3311   //                 * fpr=1 to f2, etc.
3312   //                 */
3313   //  char *overflow_arg_area;
3314   //                /* location on stack that holds
3315   //                 * the next overflow argument
3316   //                 */
3317   //  char *reg_save_area;
3318   //               /* where r3:r10 and f1:f8 (if saved)
3319   //                * are stored
3320   //                */
3321   // } va_list[1];
3322 
3323   SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
3324   SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);
3325   SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
3326                                             PtrVT);
3327   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3328                                  PtrVT);
3329 
3330   uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
3331   SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT);
3332 
3333   uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
3334   SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT);
3335 
3336   uint64_t FPROffset = 1;
3337   SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT);
3338 
3339   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3340 
3341   // Store first byte : number of int regs
3342   SDValue firstStore =
3343       DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1),
3344                         MachinePointerInfo(SV), MVT::i8);
3345   uint64_t nextOffset = FPROffset;
3346   SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
3347                                   ConstFPROffset);
3348 
3349   // Store second byte : number of float regs
3350   SDValue secondStore =
3351       DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
3352                         MachinePointerInfo(SV, nextOffset), MVT::i8);
3353   nextOffset += StackOffset;
3354   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
3355 
3356   // Store second word : arguments given on stack
3357   SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
3358                                     MachinePointerInfo(SV, nextOffset));
3359   nextOffset += FrameOffset;
3360   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
3361 
3362   // Store third word : arguments given in registers
3363   return DAG.getStore(thirdStore, dl, FR, nextPtr,
3364                       MachinePointerInfo(SV, nextOffset));
3365 }
3366 
3367 /// FPR - The set of FP registers that should be allocated for arguments
3368 /// on Darwin and AIX.
3369 static const MCPhysReg FPR[] = {PPC::F1,  PPC::F2,  PPC::F3, PPC::F4, PPC::F5,
3370                                 PPC::F6,  PPC::F7,  PPC::F8, PPC::F9, PPC::F10,
3371                                 PPC::F11, PPC::F12, PPC::F13};
3372 
3373 /// CalculateStackSlotSize - Calculates the size reserved for this argument on
3374 /// the stack.
3375 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
3376                                        unsigned PtrByteSize) {
3377   unsigned ArgSize = ArgVT.getStoreSize();
3378   if (Flags.isByVal())
3379     ArgSize = Flags.getByValSize();
3380 
3381   // Round up to multiples of the pointer size, except for array members,
3382   // which are always packed.
3383   if (!Flags.isInConsecutiveRegs())
3384     ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3385 
3386   return ArgSize;
3387 }
3388 
3389 /// CalculateStackSlotAlignment - Calculates the alignment of this argument
3390 /// on the stack.
3391 static Align CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
3392                                          ISD::ArgFlagsTy Flags,
3393                                          unsigned PtrByteSize) {
3394   Align Alignment(PtrByteSize);
3395 
3396   // Altivec parameters are padded to a 16 byte boundary.
3397   if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3398       ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3399       ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3400       ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3401     Alignment = Align(16);
3402 
3403   // ByVal parameters are aligned as requested.
3404   if (Flags.isByVal()) {
3405     auto BVAlign = Flags.getNonZeroByValAlign();
3406     if (BVAlign > PtrByteSize) {
3407       if (BVAlign.value() % PtrByteSize != 0)
3408         llvm_unreachable(
3409             "ByVal alignment is not a multiple of the pointer size");
3410 
3411       Alignment = BVAlign;
3412     }
3413   }
3414 
3415   // Array members are always packed to their original alignment.
3416   if (Flags.isInConsecutiveRegs()) {
3417     // If the array member was split into multiple registers, the first
3418     // needs to be aligned to the size of the full type.  (Except for
3419     // ppcf128, which is only aligned as its f64 components.)
3420     if (Flags.isSplit() && OrigVT != MVT::ppcf128)
3421       Alignment = Align(OrigVT.getStoreSize());
3422     else
3423       Alignment = Align(ArgVT.getStoreSize());
3424   }
3425 
3426   return Alignment;
3427 }
3428 
3429 /// CalculateStackSlotUsed - Return whether this argument will use its
3430 /// stack slot (instead of being passed in registers).  ArgOffset,
3431 /// AvailableFPRs, and AvailableVRs must hold the current argument
3432 /// position, and will be updated to account for this argument.
3433 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, ISD::ArgFlagsTy Flags,
3434                                    unsigned PtrByteSize, unsigned LinkageSize,
3435                                    unsigned ParamAreaSize, unsigned &ArgOffset,
3436                                    unsigned &AvailableFPRs,
3437                                    unsigned &AvailableVRs) {
3438   bool UseMemory = false;
3439 
3440   // Respect alignment of argument on the stack.
3441   Align Alignment =
3442       CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
3443   ArgOffset = alignTo(ArgOffset, Alignment);
3444   // If there's no space left in the argument save area, we must
3445   // use memory (this check also catches zero-sized arguments).
3446   if (ArgOffset >= LinkageSize + ParamAreaSize)
3447     UseMemory = true;
3448 
3449   // Allocate argument on the stack.
3450   ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
3451   if (Flags.isInConsecutiveRegsLast())
3452     ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3453   // If we overran the argument save area, we must use memory
3454   // (this check catches arguments passed partially in memory)
3455   if (ArgOffset > LinkageSize + ParamAreaSize)
3456     UseMemory = true;
3457 
3458   // However, if the argument is actually passed in an FPR or a VR,
3459   // we don't use memory after all.
3460   if (!Flags.isByVal()) {
3461     if (ArgVT == MVT::f32 || ArgVT == MVT::f64)
3462       if (AvailableFPRs > 0) {
3463         --AvailableFPRs;
3464         return false;
3465       }
3466     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3467         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3468         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3469         ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3470       if (AvailableVRs > 0) {
3471         --AvailableVRs;
3472         return false;
3473       }
3474   }
3475 
3476   return UseMemory;
3477 }
3478 
3479 /// EnsureStackAlignment - Round stack frame size up from NumBytes to
3480 /// ensure minimum alignment required for target.
3481 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
3482                                      unsigned NumBytes) {
3483   return alignTo(NumBytes, Lowering->getStackAlign());
3484 }
3485 
3486 SDValue PPCTargetLowering::LowerFormalArguments(
3487     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3488     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3489     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3490   if (Subtarget.isAIXABI())
3491     return LowerFormalArguments_AIX(Chain, CallConv, isVarArg, Ins, dl, DAG,
3492                                     InVals);
3493   if (Subtarget.is64BitELFABI())
3494     return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3495                                        InVals);
3496   if (Subtarget.is32BitELFABI())
3497     return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3498                                        InVals);
3499 
3500   return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, dl, DAG,
3501                                      InVals);
3502 }
3503 
3504 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
3505     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3506     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3507     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3508 
3509   // 32-bit SVR4 ABI Stack Frame Layout:
3510   //              +-----------------------------------+
3511   //        +-->  |            Back chain             |
3512   //        |     +-----------------------------------+
3513   //        |     | Floating-point register save area |
3514   //        |     +-----------------------------------+
3515   //        |     |    General register save area     |
3516   //        |     +-----------------------------------+
3517   //        |     |          CR save word             |
3518   //        |     +-----------------------------------+
3519   //        |     |         VRSAVE save word          |
3520   //        |     +-----------------------------------+
3521   //        |     |         Alignment padding         |
3522   //        |     +-----------------------------------+
3523   //        |     |     Vector register save area     |
3524   //        |     +-----------------------------------+
3525   //        |     |       Local variable space        |
3526   //        |     +-----------------------------------+
3527   //        |     |        Parameter list area        |
3528   //        |     +-----------------------------------+
3529   //        |     |           LR save word            |
3530   //        |     +-----------------------------------+
3531   // SP-->  +---  |            Back chain             |
3532   //              +-----------------------------------+
3533   //
3534   // Specifications:
3535   //   System V Application Binary Interface PowerPC Processor Supplement
3536   //   AltiVec Technology Programming Interface Manual
3537 
3538   MachineFunction &MF = DAG.getMachineFunction();
3539   MachineFrameInfo &MFI = MF.getFrameInfo();
3540   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3541 
3542   EVT PtrVT = getPointerTy(MF.getDataLayout());
3543   // Potential tail calls could cause overwriting of argument stack slots.
3544   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3545                        (CallConv == CallingConv::Fast));
3546   const Align PtrAlign(4);
3547 
3548   // Assign locations to all of the incoming arguments.
3549   SmallVector<CCValAssign, 16> ArgLocs;
3550   PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
3551                  *DAG.getContext());
3552 
3553   // Reserve space for the linkage area on the stack.
3554   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3555   CCInfo.AllocateStack(LinkageSize, PtrAlign);
3556   if (useSoftFloat())
3557     CCInfo.PreAnalyzeFormalArguments(Ins);
3558 
3559   CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
3560   CCInfo.clearWasPPCF128();
3561 
3562   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3563     CCValAssign &VA = ArgLocs[i];
3564 
3565     // Arguments stored in registers.
3566     if (VA.isRegLoc()) {
3567       const TargetRegisterClass *RC;
3568       EVT ValVT = VA.getValVT();
3569 
3570       switch (ValVT.getSimpleVT().SimpleTy) {
3571         default:
3572           llvm_unreachable("ValVT not supported by formal arguments Lowering");
3573         case MVT::i1:
3574         case MVT::i32:
3575           RC = &PPC::GPRCRegClass;
3576           break;
3577         case MVT::f32:
3578           if (Subtarget.hasP8Vector())
3579             RC = &PPC::VSSRCRegClass;
3580           else if (Subtarget.hasSPE())
3581             RC = &PPC::GPRCRegClass;
3582           else
3583             RC = &PPC::F4RCRegClass;
3584           break;
3585         case MVT::f64:
3586           if (Subtarget.hasVSX())
3587             RC = &PPC::VSFRCRegClass;
3588           else if (Subtarget.hasSPE())
3589             // SPE passes doubles in GPR pairs.
3590             RC = &PPC::GPRCRegClass;
3591           else
3592             RC = &PPC::F8RCRegClass;
3593           break;
3594         case MVT::v16i8:
3595         case MVT::v8i16:
3596         case MVT::v4i32:
3597           RC = &PPC::VRRCRegClass;
3598           break;
3599         case MVT::v4f32:
3600           RC = &PPC::VRRCRegClass;
3601           break;
3602         case MVT::v2f64:
3603         case MVT::v2i64:
3604           RC = &PPC::VRRCRegClass;
3605           break;
3606       }
3607 
3608       SDValue ArgValue;
3609       // Transform the arguments stored in physical registers into
3610       // virtual ones.
3611       if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) {
3612         assert(i + 1 < e && "No second half of double precision argument");
3613         unsigned RegLo = MF.addLiveIn(VA.getLocReg(), RC);
3614         unsigned RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC);
3615         SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32);
3616         SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32);
3617         if (!Subtarget.isLittleEndian())
3618           std::swap (ArgValueLo, ArgValueHi);
3619         ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo,
3620                                ArgValueHi);
3621       } else {
3622         unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3623         ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
3624                                       ValVT == MVT::i1 ? MVT::i32 : ValVT);
3625         if (ValVT == MVT::i1)
3626           ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);
3627       }
3628 
3629       InVals.push_back(ArgValue);
3630     } else {
3631       // Argument stored in memory.
3632       assert(VA.isMemLoc());
3633 
3634       // Get the extended size of the argument type in stack
3635       unsigned ArgSize = VA.getLocVT().getStoreSize();
3636       // Get the actual size of the argument type
3637       unsigned ObjSize = VA.getValVT().getStoreSize();
3638       unsigned ArgOffset = VA.getLocMemOffset();
3639       // Stack objects in PPC32 are right justified.
3640       ArgOffset += ArgSize - ObjSize;
3641       int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, isImmutable);
3642 
3643       // Create load nodes to retrieve arguments from the stack.
3644       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3645       InVals.push_back(
3646           DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo()));
3647     }
3648   }
3649 
3650   // Assign locations to all of the incoming aggregate by value arguments.
3651   // Aggregates passed by value are stored in the local variable space of the
3652   // caller's stack frame, right above the parameter list area.
3653   SmallVector<CCValAssign, 16> ByValArgLocs;
3654   CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
3655                       ByValArgLocs, *DAG.getContext());
3656 
3657   // Reserve stack space for the allocations in CCInfo.
3658   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
3659 
3660   CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);
3661 
3662   // Area that is at least reserved in the caller of this function.
3663   unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
3664   MinReservedArea = std::max(MinReservedArea, LinkageSize);
3665 
3666   // Set the size that is at least reserved in caller of this function.  Tail
3667   // call optimized function's reserved stack space needs to be aligned so that
3668   // taking the difference between two stack areas will result in an aligned
3669   // stack.
3670   MinReservedArea =
3671       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
3672   FuncInfo->setMinReservedArea(MinReservedArea);
3673 
3674   SmallVector<SDValue, 8> MemOps;
3675 
3676   // If the function takes variable number of arguments, make a frame index for
3677   // the start of the first vararg value... for expansion of llvm.va_start.
3678   if (isVarArg) {
3679     static const MCPhysReg GPArgRegs[] = {
3680       PPC::R3, PPC::R4, PPC::R5, PPC::R6,
3681       PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3682     };
3683     const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
3684 
3685     static const MCPhysReg FPArgRegs[] = {
3686       PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
3687       PPC::F8
3688     };
3689     unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
3690 
3691     if (useSoftFloat() || hasSPE())
3692        NumFPArgRegs = 0;
3693 
3694     FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
3695     FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));
3696 
3697     // Make room for NumGPArgRegs and NumFPArgRegs.
3698     int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
3699                 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;
3700 
3701     FuncInfo->setVarArgsStackOffset(
3702       MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
3703                             CCInfo.getNextStackOffset(), true));
3704 
3705     FuncInfo->setVarArgsFrameIndex(
3706         MFI.CreateStackObject(Depth, Align(8), false));
3707     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3708 
3709     // The fixed integer arguments of a variadic function are stored to the
3710     // VarArgsFrameIndex on the stack so that they may be loaded by
3711     // dereferencing the result of va_next.
3712     for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
3713       // Get an existing live-in vreg, or add a new one.
3714       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
3715       if (!VReg)
3716         VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
3717 
3718       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3719       SDValue Store =
3720           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3721       MemOps.push_back(Store);
3722       // Increment the address by four for the next argument to store
3723       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
3724       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3725     }
3726 
3727     // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
3728     // is set.
3729     // The double arguments are stored to the VarArgsFrameIndex
3730     // on the stack.
3731     for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
3732       // Get an existing live-in vreg, or add a new one.
3733       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
3734       if (!VReg)
3735         VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
3736 
3737       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
3738       SDValue Store =
3739           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3740       MemOps.push_back(Store);
3741       // Increment the address by eight for the next argument to store
3742       SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
3743                                          PtrVT);
3744       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3745     }
3746   }
3747 
3748   if (!MemOps.empty())
3749     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3750 
3751   return Chain;
3752 }
3753 
3754 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
3755 // value to MVT::i64 and then truncate to the correct register size.
3756 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
3757                                              EVT ObjectVT, SelectionDAG &DAG,
3758                                              SDValue ArgVal,
3759                                              const SDLoc &dl) const {
3760   if (Flags.isSExt())
3761     ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
3762                          DAG.getValueType(ObjectVT));
3763   else if (Flags.isZExt())
3764     ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
3765                          DAG.getValueType(ObjectVT));
3766 
3767   return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
3768 }
3769 
3770 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
3771     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3772     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3773     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3774   // TODO: add description of PPC stack frame format, or at least some docs.
3775   //
3776   bool isELFv2ABI = Subtarget.isELFv2ABI();
3777   bool isLittleEndian = Subtarget.isLittleEndian();
3778   MachineFunction &MF = DAG.getMachineFunction();
3779   MachineFrameInfo &MFI = MF.getFrameInfo();
3780   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3781 
3782   assert(!(CallConv == CallingConv::Fast && isVarArg) &&
3783          "fastcc not supported on varargs functions");
3784 
3785   EVT PtrVT = getPointerTy(MF.getDataLayout());
3786   // Potential tail calls could cause overwriting of argument stack slots.
3787   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3788                        (CallConv == CallingConv::Fast));
3789   unsigned PtrByteSize = 8;
3790   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3791 
3792   static const MCPhysReg GPR[] = {
3793     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
3794     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
3795   };
3796   static const MCPhysReg VR[] = {
3797     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
3798     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
3799   };
3800 
3801   const unsigned Num_GPR_Regs = array_lengthof(GPR);
3802   const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
3803   const unsigned Num_VR_Regs  = array_lengthof(VR);
3804 
3805   // Do a first pass over the arguments to determine whether the ABI
3806   // guarantees that our caller has allocated the parameter save area
3807   // on its stack frame.  In the ELFv1 ABI, this is always the case;
3808   // in the ELFv2 ABI, it is true if this is a vararg function or if
3809   // any parameter is located in a stack slot.
3810 
3811   bool HasParameterArea = !isELFv2ABI || isVarArg;
3812   unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
3813   unsigned NumBytes = LinkageSize;
3814   unsigned AvailableFPRs = Num_FPR_Regs;
3815   unsigned AvailableVRs = Num_VR_Regs;
3816   for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3817     if (Ins[i].Flags.isNest())
3818       continue;
3819 
3820     if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
3821                                PtrByteSize, LinkageSize, ParamAreaSize,
3822                                NumBytes, AvailableFPRs, AvailableVRs))
3823       HasParameterArea = true;
3824   }
3825 
3826   // Add DAG nodes to load the arguments or copy them out of registers.  On
3827   // entry to a function on PPC, the arguments start after the linkage area,
3828   // although the first ones are often in registers.
3829 
3830   unsigned ArgOffset = LinkageSize;
3831   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
3832   SmallVector<SDValue, 8> MemOps;
3833   Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
3834   unsigned CurArgIdx = 0;
3835   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
3836     SDValue ArgVal;
3837     bool needsLoad = false;
3838     EVT ObjectVT = Ins[ArgNo].VT;
3839     EVT OrigVT = Ins[ArgNo].ArgVT;
3840     unsigned ObjSize = ObjectVT.getStoreSize();
3841     unsigned ArgSize = ObjSize;
3842     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
3843     if (Ins[ArgNo].isOrigArg()) {
3844       std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
3845       CurArgIdx = Ins[ArgNo].getOrigArgIndex();
3846     }
3847     // We re-align the argument offset for each argument, except when using the
3848     // fast calling convention, when we need to make sure we do that only when
3849     // we'll actually use a stack slot.
3850     unsigned CurArgOffset;
3851     Align Alignment;
3852     auto ComputeArgOffset = [&]() {
3853       /* Respect alignment of argument on the stack.  */
3854       Alignment =
3855           CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
3856       ArgOffset = alignTo(ArgOffset, Alignment);
3857       CurArgOffset = ArgOffset;
3858     };
3859 
3860     if (CallConv != CallingConv::Fast) {
3861       ComputeArgOffset();
3862 
3863       /* Compute GPR index associated with argument offset.  */
3864       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
3865       GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
3866     }
3867 
3868     // FIXME the codegen can be much improved in some cases.
3869     // We do not have to keep everything in memory.
3870     if (Flags.isByVal()) {
3871       assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
3872 
3873       if (CallConv == CallingConv::Fast)
3874         ComputeArgOffset();
3875 
3876       // ObjSize is the true size, ArgSize rounded up to multiple of registers.
3877       ObjSize = Flags.getByValSize();
3878       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3879       // Empty aggregate parameters do not take up registers.  Examples:
3880       //   struct { } a;
3881       //   union  { } b;
3882       //   int c[0];
3883       // etc.  However, we have to provide a place-holder in InVals, so
3884       // pretend we have an 8-byte item at the current address for that
3885       // purpose.
3886       if (!ObjSize) {
3887         int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
3888         SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3889         InVals.push_back(FIN);
3890         continue;
3891       }
3892 
3893       // Create a stack object covering all stack doublewords occupied
3894       // by the argument.  If the argument is (fully or partially) on
3895       // the stack, or if the argument is fully in registers but the
3896       // caller has allocated the parameter save anyway, we can refer
3897       // directly to the caller's stack frame.  Otherwise, create a
3898       // local copy in our own frame.
3899       int FI;
3900       if (HasParameterArea ||
3901           ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
3902         FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true);
3903       else
3904         FI = MFI.CreateStackObject(ArgSize, Alignment, false);
3905       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3906 
3907       // Handle aggregates smaller than 8 bytes.
3908       if (ObjSize < PtrByteSize) {
3909         // The value of the object is its address, which differs from the
3910         // address of the enclosing doubleword on big-endian systems.
3911         SDValue Arg = FIN;
3912         if (!isLittleEndian) {
3913           SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
3914           Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
3915         }
3916         InVals.push_back(Arg);
3917 
3918         if (GPR_idx != Num_GPR_Regs) {
3919           unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3920           FuncInfo->addLiveInAttr(VReg, Flags);
3921           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3922           SDValue Store;
3923 
3924           if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
3925             EVT ObjType = (ObjSize == 1 ? MVT::i8 :
3926                            (ObjSize == 2 ? MVT::i16 : MVT::i32));
3927             Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
3928                                       MachinePointerInfo(&*FuncArg), ObjType);
3929           } else {
3930             // For sizes that don't fit a truncating store (3, 5, 6, 7),
3931             // store the whole register as-is to the parameter save area
3932             // slot.
3933             Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
3934                                  MachinePointerInfo(&*FuncArg));
3935           }
3936 
3937           MemOps.push_back(Store);
3938         }
3939         // Whether we copied from a register or not, advance the offset
3940         // into the parameter save area by a full doubleword.
3941         ArgOffset += PtrByteSize;
3942         continue;
3943       }
3944 
3945       // The value of the object is its address, which is the address of
3946       // its first stack doubleword.
3947       InVals.push_back(FIN);
3948 
3949       // Store whatever pieces of the object are in registers to memory.
3950       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
3951         if (GPR_idx == Num_GPR_Regs)
3952           break;
3953 
3954         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3955         FuncInfo->addLiveInAttr(VReg, Flags);
3956         SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3957         SDValue Addr = FIN;
3958         if (j) {
3959           SDValue Off = DAG.getConstant(j, dl, PtrVT);
3960           Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
3961         }
3962         SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
3963                                      MachinePointerInfo(&*FuncArg, j));
3964         MemOps.push_back(Store);
3965         ++GPR_idx;
3966       }
3967       ArgOffset += ArgSize;
3968       continue;
3969     }
3970 
3971     switch (ObjectVT.getSimpleVT().SimpleTy) {
3972     default: llvm_unreachable("Unhandled argument type!");
3973     case MVT::i1:
3974     case MVT::i32:
3975     case MVT::i64:
3976       if (Flags.isNest()) {
3977         // The 'nest' parameter, if any, is passed in R11.
3978         unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
3979         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
3980 
3981         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
3982           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
3983 
3984         break;
3985       }
3986 
3987       // These can be scalar arguments or elements of an integer array type
3988       // passed directly.  Clang may use those instead of "byval" aggregate
3989       // types to avoid forcing arguments to memory unnecessarily.
3990       if (GPR_idx != Num_GPR_Regs) {
3991         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3992         FuncInfo->addLiveInAttr(VReg, Flags);
3993         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
3994 
3995         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
3996           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
3997           // value to MVT::i64 and then truncate to the correct register size.
3998           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
3999       } else {
4000         if (CallConv == CallingConv::Fast)
4001           ComputeArgOffset();
4002 
4003         needsLoad = true;
4004         ArgSize = PtrByteSize;
4005       }
4006       if (CallConv != CallingConv::Fast || needsLoad)
4007         ArgOffset += 8;
4008       break;
4009 
4010     case MVT::f32:
4011     case MVT::f64:
4012       // These can be scalar arguments or elements of a float array type
4013       // passed directly.  The latter are used to implement ELFv2 homogenous
4014       // float aggregates.
4015       if (FPR_idx != Num_FPR_Regs) {
4016         unsigned VReg;
4017 
4018         if (ObjectVT == MVT::f32)
4019           VReg = MF.addLiveIn(FPR[FPR_idx],
4020                               Subtarget.hasP8Vector()
4021                                   ? &PPC::VSSRCRegClass
4022                                   : &PPC::F4RCRegClass);
4023         else
4024           VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
4025                                                 ? &PPC::VSFRCRegClass
4026                                                 : &PPC::F8RCRegClass);
4027 
4028         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4029         ++FPR_idx;
4030       } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
4031         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
4032         // once we support fp <-> gpr moves.
4033 
4034         // This can only ever happen in the presence of f32 array types,
4035         // since otherwise we never run out of FPRs before running out
4036         // of GPRs.
4037         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4038         FuncInfo->addLiveInAttr(VReg, Flags);
4039         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4040 
4041         if (ObjectVT == MVT::f32) {
4042           if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
4043             ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
4044                                  DAG.getConstant(32, dl, MVT::i32));
4045           ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
4046         }
4047 
4048         ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
4049       } else {
4050         if (CallConv == CallingConv::Fast)
4051           ComputeArgOffset();
4052 
4053         needsLoad = true;
4054       }
4055 
4056       // When passing an array of floats, the array occupies consecutive
4057       // space in the argument area; only round up to the next doubleword
4058       // at the end of the array.  Otherwise, each float takes 8 bytes.
4059       if (CallConv != CallingConv::Fast || needsLoad) {
4060         ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
4061         ArgOffset += ArgSize;
4062         if (Flags.isInConsecutiveRegsLast())
4063           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4064       }
4065       break;
4066     case MVT::v4f32:
4067     case MVT::v4i32:
4068     case MVT::v8i16:
4069     case MVT::v16i8:
4070     case MVT::v2f64:
4071     case MVT::v2i64:
4072     case MVT::v1i128:
4073     case MVT::f128:
4074       // These can be scalar arguments or elements of a vector array type
4075       // passed directly.  The latter are used to implement ELFv2 homogenous
4076       // vector aggregates.
4077       if (VR_idx != Num_VR_Regs) {
4078         unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
4079         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4080         ++VR_idx;
4081       } else {
4082         if (CallConv == CallingConv::Fast)
4083           ComputeArgOffset();
4084         needsLoad = true;
4085       }
4086       if (CallConv != CallingConv::Fast || needsLoad)
4087         ArgOffset += 16;
4088       break;
4089     }
4090 
4091     // We need to load the argument to a virtual register if we determined
4092     // above that we ran out of physical registers of the appropriate type.
4093     if (needsLoad) {
4094       if (ObjSize < ArgSize && !isLittleEndian)
4095         CurArgOffset += ArgSize - ObjSize;
4096       int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
4097       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4098       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4099     }
4100 
4101     InVals.push_back(ArgVal);
4102   }
4103 
4104   // Area that is at least reserved in the caller of this function.
4105   unsigned MinReservedArea;
4106   if (HasParameterArea)
4107     MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
4108   else
4109     MinReservedArea = LinkageSize;
4110 
4111   // Set the size that is at least reserved in caller of this function.  Tail
4112   // call optimized functions' reserved stack space needs to be aligned so that
4113   // taking the difference between two stack areas will result in an aligned
4114   // stack.
4115   MinReservedArea =
4116       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4117   FuncInfo->setMinReservedArea(MinReservedArea);
4118 
4119   // If the function takes variable number of arguments, make a frame index for
4120   // the start of the first vararg value... for expansion of llvm.va_start.
4121   // On ELFv2ABI spec, it writes:
4122   // C programs that are intended to be *portable* across different compilers
4123   // and architectures must use the header file <stdarg.h> to deal with variable
4124   // argument lists.
4125   if (isVarArg && MFI.hasVAStart()) {
4126     int Depth = ArgOffset;
4127 
4128     FuncInfo->setVarArgsFrameIndex(
4129       MFI.CreateFixedObject(PtrByteSize, Depth, true));
4130     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4131 
4132     // If this function is vararg, store any remaining integer argument regs
4133     // to their spots on the stack so that they may be loaded by dereferencing
4134     // the result of va_next.
4135     for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4136          GPR_idx < Num_GPR_Regs; ++GPR_idx) {
4137       unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4138       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4139       SDValue Store =
4140           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4141       MemOps.push_back(Store);
4142       // Increment the address by four for the next argument to store
4143       SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
4144       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4145     }
4146   }
4147 
4148   if (!MemOps.empty())
4149     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4150 
4151   return Chain;
4152 }
4153 
4154 SDValue PPCTargetLowering::LowerFormalArguments_Darwin(
4155     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
4156     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4157     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4158   // TODO: add description of PPC stack frame format, or at least some docs.
4159   //
4160   MachineFunction &MF = DAG.getMachineFunction();
4161   MachineFrameInfo &MFI = MF.getFrameInfo();
4162   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
4163 
4164   EVT PtrVT = getPointerTy(MF.getDataLayout());
4165   bool isPPC64 = PtrVT == MVT::i64;
4166   // Potential tail calls could cause overwriting of argument stack slots.
4167   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
4168                        (CallConv == CallingConv::Fast));
4169   unsigned PtrByteSize = isPPC64 ? 8 : 4;
4170   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4171   unsigned ArgOffset = LinkageSize;
4172   // Area that is at least reserved in caller of this function.
4173   unsigned MinReservedArea = ArgOffset;
4174 
4175   static const MCPhysReg GPR_32[] = {           // 32-bit registers.
4176     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
4177     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
4178   };
4179   static const MCPhysReg GPR_64[] = {           // 64-bit registers.
4180     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4181     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4182   };
4183   static const MCPhysReg VR[] = {
4184     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4185     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4186   };
4187 
4188   const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
4189   const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
4190   const unsigned Num_VR_Regs  = array_lengthof( VR);
4191 
4192   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
4193 
4194   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
4195 
4196   // In 32-bit non-varargs functions, the stack space for vectors is after the
4197   // stack space for non-vectors.  We do not use this space unless we have
4198   // too many vectors to fit in registers, something that only occurs in
4199   // constructed examples:), but we have to walk the arglist to figure
4200   // that out...for the pathological case, compute VecArgOffset as the
4201   // start of the vector parameter area.  Computing VecArgOffset is the
4202   // entire point of the following loop.
4203   unsigned VecArgOffset = ArgOffset;
4204   if (!isVarArg && !isPPC64) {
4205     for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
4206          ++ArgNo) {
4207       EVT ObjectVT = Ins[ArgNo].VT;
4208       ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4209 
4210       if (Flags.isByVal()) {
4211         // ObjSize is the true size, ArgSize rounded up to multiple of regs.
4212         unsigned ObjSize = Flags.getByValSize();
4213         unsigned ArgSize =
4214                 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4215         VecArgOffset += ArgSize;
4216         continue;
4217       }
4218 
4219       switch(ObjectVT.getSimpleVT().SimpleTy) {
4220       default: llvm_unreachable("Unhandled argument type!");
4221       case MVT::i1:
4222       case MVT::i32:
4223       case MVT::f32:
4224         VecArgOffset += 4;
4225         break;
4226       case MVT::i64:  // PPC64
4227       case MVT::f64:
4228         // FIXME: We are guaranteed to be !isPPC64 at this point.
4229         // Does MVT::i64 apply?
4230         VecArgOffset += 8;
4231         break;
4232       case MVT::v4f32:
4233       case MVT::v4i32:
4234       case MVT::v8i16:
4235       case MVT::v16i8:
4236         // Nothing to do, we're only looking at Nonvector args here.
4237         break;
4238       }
4239     }
4240   }
4241   // We've found where the vector parameter area in memory is.  Skip the
4242   // first 12 parameters; these don't use that memory.
4243   VecArgOffset = ((VecArgOffset+15)/16)*16;
4244   VecArgOffset += 12*16;
4245 
4246   // Add DAG nodes to load the arguments or copy them out of registers.  On
4247   // entry to a function on PPC, the arguments start after the linkage area,
4248   // although the first ones are often in registers.
4249 
4250   SmallVector<SDValue, 8> MemOps;
4251   unsigned nAltivecParamsAtEnd = 0;
4252   Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
4253   unsigned CurArgIdx = 0;
4254   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
4255     SDValue ArgVal;
4256     bool needsLoad = false;
4257     EVT ObjectVT = Ins[ArgNo].VT;
4258     unsigned ObjSize = ObjectVT.getSizeInBits()/8;
4259     unsigned ArgSize = ObjSize;
4260     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4261     if (Ins[ArgNo].isOrigArg()) {
4262       std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
4263       CurArgIdx = Ins[ArgNo].getOrigArgIndex();
4264     }
4265     unsigned CurArgOffset = ArgOffset;
4266 
4267     // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary.
4268     if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
4269         ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
4270       if (isVarArg || isPPC64) {
4271         MinReservedArea = ((MinReservedArea+15)/16)*16;
4272         MinReservedArea += CalculateStackSlotSize(ObjectVT,
4273                                                   Flags,
4274                                                   PtrByteSize);
4275       } else  nAltivecParamsAtEnd++;
4276     } else
4277       // Calculate min reserved area.
4278       MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
4279                                                 Flags,
4280                                                 PtrByteSize);
4281 
4282     // FIXME the codegen can be much improved in some cases.
4283     // We do not have to keep everything in memory.
4284     if (Flags.isByVal()) {
4285       assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
4286 
4287       // ObjSize is the true size, ArgSize rounded up to multiple of registers.
4288       ObjSize = Flags.getByValSize();
4289       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4290       // Objects of size 1 and 2 are right justified, everything else is
4291       // left justified.  This means the memory address is adjusted forwards.
4292       if (ObjSize==1 || ObjSize==2) {
4293         CurArgOffset = CurArgOffset + (4 - ObjSize);
4294       }
4295       // The value of the object is its address.
4296       int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, false, true);
4297       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4298       InVals.push_back(FIN);
4299       if (ObjSize==1 || ObjSize==2) {
4300         if (GPR_idx != Num_GPR_Regs) {
4301           unsigned VReg;
4302           if (isPPC64)
4303             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4304           else
4305             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4306           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4307           EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16;
4308           SDValue Store =
4309               DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
4310                                 MachinePointerInfo(&*FuncArg), ObjType);
4311           MemOps.push_back(Store);
4312           ++GPR_idx;
4313         }
4314 
4315         ArgOffset += PtrByteSize;
4316 
4317         continue;
4318       }
4319       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
4320         // Store whatever pieces of the object are in registers
4321         // to memory.  ArgOffset will be the address of the beginning
4322         // of the object.
4323         if (GPR_idx != Num_GPR_Regs) {
4324           unsigned VReg;
4325           if (isPPC64)
4326             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4327           else
4328             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4329           int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
4330           SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4331           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4332           SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
4333                                        MachinePointerInfo(&*FuncArg, j));
4334           MemOps.push_back(Store);
4335           ++GPR_idx;
4336           ArgOffset += PtrByteSize;
4337         } else {
4338           ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
4339           break;
4340         }
4341       }
4342       continue;
4343     }
4344 
4345     switch (ObjectVT.getSimpleVT().SimpleTy) {
4346     default: llvm_unreachable("Unhandled argument type!");
4347     case MVT::i1:
4348     case MVT::i32:
4349       if (!isPPC64) {
4350         if (GPR_idx != Num_GPR_Regs) {
4351           unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4352           ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
4353 
4354           if (ObjectVT == MVT::i1)
4355             ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal);
4356 
4357           ++GPR_idx;
4358         } else {
4359           needsLoad = true;
4360           ArgSize = PtrByteSize;
4361         }
4362         // All int arguments reserve stack space in the Darwin ABI.
4363         ArgOffset += PtrByteSize;
4364         break;
4365       }
4366       LLVM_FALLTHROUGH;
4367     case MVT::i64:  // PPC64
4368       if (GPR_idx != Num_GPR_Regs) {
4369         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4370         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4371 
4372         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4373           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4374           // value to MVT::i64 and then truncate to the correct register size.
4375           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4376 
4377         ++GPR_idx;
4378       } else {
4379         needsLoad = true;
4380         ArgSize = PtrByteSize;
4381       }
4382       // All int arguments reserve stack space in the Darwin ABI.
4383       ArgOffset += 8;
4384       break;
4385 
4386     case MVT::f32:
4387     case MVT::f64:
4388       // Every 4 bytes of argument space consumes one of the GPRs available for
4389       // argument passing.
4390       if (GPR_idx != Num_GPR_Regs) {
4391         ++GPR_idx;
4392         if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
4393           ++GPR_idx;
4394       }
4395       if (FPR_idx != Num_FPR_Regs) {
4396         unsigned VReg;
4397 
4398         if (ObjectVT == MVT::f32)
4399           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
4400         else
4401           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);
4402 
4403         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4404         ++FPR_idx;
4405       } else {
4406         needsLoad = true;
4407       }
4408 
4409       // All FP arguments reserve stack space in the Darwin ABI.
4410       ArgOffset += isPPC64 ? 8 : ObjSize;
4411       break;
4412     case MVT::v4f32:
4413     case MVT::v4i32:
4414     case MVT::v8i16:
4415     case MVT::v16i8:
4416       // Note that vector arguments in registers don't reserve stack space,
4417       // except in varargs functions.
4418       if (VR_idx != Num_VR_Regs) {
4419         unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
4420         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4421         if (isVarArg) {
4422           while ((ArgOffset % 16) != 0) {
4423             ArgOffset += PtrByteSize;
4424             if (GPR_idx != Num_GPR_Regs)
4425               GPR_idx++;
4426           }
4427           ArgOffset += 16;
4428           GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
4429         }
4430         ++VR_idx;
4431       } else {
4432         if (!isVarArg && !isPPC64) {
4433           // Vectors go after all the nonvectors.
4434           CurArgOffset = VecArgOffset;
4435           VecArgOffset += 16;
4436         } else {
4437           // Vectors are aligned.
4438           ArgOffset = ((ArgOffset+15)/16)*16;
4439           CurArgOffset = ArgOffset;
4440           ArgOffset += 16;
4441         }
4442         needsLoad = true;
4443       }
4444       break;
4445     }
4446 
4447     // We need to load the argument to a virtual register if we determined above
4448     // that we ran out of physical registers of the appropriate type.
4449     if (needsLoad) {
4450       int FI = MFI.CreateFixedObject(ObjSize,
4451                                      CurArgOffset + (ArgSize - ObjSize),
4452                                      isImmutable);
4453       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4454       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4455     }
4456 
4457     InVals.push_back(ArgVal);
4458   }
4459 
4460   // Allow for Altivec parameters at the end, if needed.
4461   if (nAltivecParamsAtEnd) {
4462     MinReservedArea = ((MinReservedArea+15)/16)*16;
4463     MinReservedArea += 16*nAltivecParamsAtEnd;
4464   }
4465 
4466   // Area that is at least reserved in the caller of this function.
4467   MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);
4468 
4469   // Set the size that is at least reserved in caller of this function.  Tail
4470   // call optimized functions' reserved stack space needs to be aligned so that
4471   // taking the difference between two stack areas will result in an aligned
4472   // stack.
4473   MinReservedArea =
4474       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4475   FuncInfo->setMinReservedArea(MinReservedArea);
4476 
4477   // If the function takes variable number of arguments, make a frame index for
4478   // the start of the first vararg value... for expansion of llvm.va_start.
4479   if (isVarArg) {
4480     int Depth = ArgOffset;
4481 
4482     FuncInfo->setVarArgsFrameIndex(
4483       MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
4484                             Depth, true));
4485     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4486 
4487     // If this function is vararg, store any remaining integer argument regs
4488     // to their spots on the stack so that they may be loaded by dereferencing
4489     // the result of va_next.
4490     for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
4491       unsigned VReg;
4492 
4493       if (isPPC64)
4494         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4495       else
4496         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4497 
4498       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4499       SDValue Store =
4500           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4501       MemOps.push_back(Store);
4502       // Increment the address by four for the next argument to store
4503       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
4504       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4505     }
4506   }
4507 
4508   if (!MemOps.empty())
4509     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4510 
4511   return Chain;
4512 }
4513 
4514 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
4515 /// adjusted to accommodate the arguments for the tailcall.
4516 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
4517                                    unsigned ParamSize) {
4518 
4519   if (!isTailCall) return 0;
4520 
4521   PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
4522   unsigned CallerMinReservedArea = FI->getMinReservedArea();
4523   int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
4524   // Remember only if the new adjustment is bigger.
4525   if (SPDiff < FI->getTailCallSPDelta())
4526     FI->setTailCallSPDelta(SPDiff);
4527 
4528   return SPDiff;
4529 }
4530 
4531 static bool isFunctionGlobalAddress(SDValue Callee);
4532 
4533 static bool callsShareTOCBase(const Function *Caller, SDValue Callee,
4534                               const TargetMachine &TM) {
4535   // It does not make sense to call callsShareTOCBase() with a caller that
4536   // is PC Relative since PC Relative callers do not have a TOC.
4537 #ifndef NDEBUG
4538   const PPCSubtarget *STICaller = &TM.getSubtarget<PPCSubtarget>(*Caller);
4539   assert(!STICaller->isUsingPCRelativeCalls() &&
4540          "PC Relative callers do not have a TOC and cannot share a TOC Base");
4541 #endif
4542 
4543   // Callee is either a GlobalAddress or an ExternalSymbol. ExternalSymbols
4544   // don't have enough information to determine if the caller and callee share
4545   // the same  TOC base, so we have to pessimistically assume they don't for
4546   // correctness.
4547   GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4548   if (!G)
4549     return false;
4550 
4551   const GlobalValue *GV = G->getGlobal();
4552 
4553   // If the callee is preemptable, then the static linker will use a plt-stub
4554   // which saves the toc to the stack, and needs a nop after the call
4555   // instruction to convert to a toc-restore.
4556   if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
4557     return false;
4558 
4559   // Functions with PC Relative enabled may clobber the TOC in the same DSO.
4560   // We may need a TOC restore in the situation where the caller requires a
4561   // valid TOC but the callee is PC Relative and does not.
4562   const Function *F = dyn_cast<Function>(GV);
4563   const GlobalAlias *Alias = dyn_cast<GlobalAlias>(GV);
4564 
4565   // If we have an Alias we can try to get the function from there.
4566   if (Alias) {
4567     const GlobalObject *GlobalObj = Alias->getBaseObject();
4568     F = dyn_cast<Function>(GlobalObj);
4569   }
4570 
4571   // If we still have no valid function pointer we do not have enough
4572   // information to determine if the callee uses PC Relative calls so we must
4573   // assume that it does.
4574   if (!F)
4575     return false;
4576 
4577   // If the callee uses PC Relative we cannot guarantee that the callee won't
4578   // clobber the TOC of the caller and so we must assume that the two
4579   // functions do not share a TOC base.
4580   const PPCSubtarget *STICallee = &TM.getSubtarget<PPCSubtarget>(*F);
4581   if (STICallee->isUsingPCRelativeCalls())
4582     return false;
4583 
4584   // The medium and large code models are expected to provide a sufficiently
4585   // large TOC to provide all data addressing needs of a module with a
4586   // single TOC.
4587   if (CodeModel::Medium == TM.getCodeModel() ||
4588       CodeModel::Large == TM.getCodeModel())
4589     return true;
4590 
4591   // Otherwise we need to ensure callee and caller are in the same section,
4592   // since the linker may allocate multiple TOCs, and we don't know which
4593   // sections will belong to the same TOC base.
4594   if (!GV->isStrongDefinitionForLinker())
4595     return false;
4596 
4597   // Any explicitly-specified sections and section prefixes must also match.
4598   // Also, if we're using -ffunction-sections, then each function is always in
4599   // a different section (the same is true for COMDAT functions).
4600   if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
4601       GV->getSection() != Caller->getSection())
4602     return false;
4603   if (const auto *F = dyn_cast<Function>(GV)) {
4604     if (F->getSectionPrefix() != Caller->getSectionPrefix())
4605       return false;
4606   }
4607 
4608   return true;
4609 }
4610 
4611 static bool
4612 needStackSlotPassParameters(const PPCSubtarget &Subtarget,
4613                             const SmallVectorImpl<ISD::OutputArg> &Outs) {
4614   assert(Subtarget.is64BitELFABI());
4615 
4616   const unsigned PtrByteSize = 8;
4617   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4618 
4619   static const MCPhysReg GPR[] = {
4620     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4621     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4622   };
4623   static const MCPhysReg VR[] = {
4624     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4625     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4626   };
4627 
4628   const unsigned NumGPRs = array_lengthof(GPR);
4629   const unsigned NumFPRs = 13;
4630   const unsigned NumVRs = array_lengthof(VR);
4631   const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
4632 
4633   unsigned NumBytes = LinkageSize;
4634   unsigned AvailableFPRs = NumFPRs;
4635   unsigned AvailableVRs = NumVRs;
4636 
4637   for (const ISD::OutputArg& Param : Outs) {
4638     if (Param.Flags.isNest()) continue;
4639 
4640     if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags, PtrByteSize,
4641                                LinkageSize, ParamAreaSize, NumBytes,
4642                                AvailableFPRs, AvailableVRs))
4643       return true;
4644   }
4645   return false;
4646 }
4647 
4648 static bool hasSameArgumentList(const Function *CallerFn, const CallBase &CB) {
4649   if (CB.arg_size() != CallerFn->arg_size())
4650     return false;
4651 
4652   auto CalleeArgIter = CB.arg_begin();
4653   auto CalleeArgEnd = CB.arg_end();
4654   Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();
4655 
4656   for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
4657     const Value* CalleeArg = *CalleeArgIter;
4658     const Value* CallerArg = &(*CallerArgIter);
4659     if (CalleeArg == CallerArg)
4660       continue;
4661 
4662     // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
4663     //        tail call @callee([4 x i64] undef, [4 x i64] %b)
4664     //      }
4665     // 1st argument of callee is undef and has the same type as caller.
4666     if (CalleeArg->getType() == CallerArg->getType() &&
4667         isa<UndefValue>(CalleeArg))
4668       continue;
4669 
4670     return false;
4671   }
4672 
4673   return true;
4674 }
4675 
4676 // Returns true if TCO is possible between the callers and callees
4677 // calling conventions.
4678 static bool
4679 areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
4680                                     CallingConv::ID CalleeCC) {
4681   // Tail calls are possible with fastcc and ccc.
4682   auto isTailCallableCC  = [] (CallingConv::ID CC){
4683       return  CC == CallingConv::C || CC == CallingConv::Fast;
4684   };
4685   if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
4686     return false;
4687 
4688   // We can safely tail call both fastcc and ccc callees from a c calling
4689   // convention caller. If the caller is fastcc, we may have less stack space
4690   // than a non-fastcc caller with the same signature so disable tail-calls in
4691   // that case.
4692   return CallerCC == CallingConv::C || CallerCC == CalleeCC;
4693 }
4694 
4695 bool PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
4696     SDValue Callee, CallingConv::ID CalleeCC, const CallBase *CB, bool isVarArg,
4697     const SmallVectorImpl<ISD::OutputArg> &Outs,
4698     const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
4699   bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
4700 
4701   if (DisableSCO && !TailCallOpt) return false;
4702 
4703   // Variadic argument functions are not supported.
4704   if (isVarArg) return false;
4705 
4706   auto &Caller = DAG.getMachineFunction().getFunction();
4707   // Check that the calling conventions are compatible for tco.
4708   if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
4709     return false;
4710 
4711   // Caller contains any byval parameter is not supported.
4712   if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
4713     return false;
4714 
4715   // Callee contains any byval parameter is not supported, too.
4716   // Note: This is a quick work around, because in some cases, e.g.
4717   // caller's stack size > callee's stack size, we are still able to apply
4718   // sibling call optimization. For example, gcc is able to do SCO for caller1
4719   // in the following example, but not for caller2.
4720   //   struct test {
4721   //     long int a;
4722   //     char ary[56];
4723   //   } gTest;
4724   //   __attribute__((noinline)) int callee(struct test v, struct test *b) {
4725   //     b->a = v.a;
4726   //     return 0;
4727   //   }
4728   //   void caller1(struct test a, struct test c, struct test *b) {
4729   //     callee(gTest, b); }
4730   //   void caller2(struct test *b) { callee(gTest, b); }
4731   if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
4732     return false;
4733 
4734   // If callee and caller use different calling conventions, we cannot pass
4735   // parameters on stack since offsets for the parameter area may be different.
4736   if (Caller.getCallingConv() != CalleeCC &&
4737       needStackSlotPassParameters(Subtarget, Outs))
4738     return false;
4739 
4740   // All variants of 64-bit ELF ABIs without PC-Relative addressing require that
4741   // the caller and callee share the same TOC for TCO/SCO. If the caller and
4742   // callee potentially have different TOC bases then we cannot tail call since
4743   // we need to restore the TOC pointer after the call.
4744   // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
4745   // We cannot guarantee this for indirect calls or calls to external functions.
4746   // When PC-Relative addressing is used, the concept of the TOC is no longer
4747   // applicable so this check is not required.
4748   // Check first for indirect calls.
4749   if (!Subtarget.isUsingPCRelativeCalls() &&
4750       !isFunctionGlobalAddress(Callee) && !isa<ExternalSymbolSDNode>(Callee))
4751     return false;
4752 
4753   // Check if we share the TOC base.
4754   if (!Subtarget.isUsingPCRelativeCalls() &&
4755       !callsShareTOCBase(&Caller, Callee, getTargetMachine()))
4756     return false;
4757 
4758   // TCO allows altering callee ABI, so we don't have to check further.
4759   if (CalleeCC == CallingConv::Fast && TailCallOpt)
4760     return true;
4761 
4762   if (DisableSCO) return false;
4763 
4764   // If callee use the same argument list that caller is using, then we can
4765   // apply SCO on this case. If it is not, then we need to check if callee needs
4766   // stack for passing arguments.
4767   // PC Relative tail calls may not have a CallBase.
4768   // If there is no CallBase we cannot verify if we have the same argument
4769   // list so assume that we don't have the same argument list.
4770   if (CB && !hasSameArgumentList(&Caller, *CB) &&
4771       needStackSlotPassParameters(Subtarget, Outs))
4772     return false;
4773   else if (!CB && needStackSlotPassParameters(Subtarget, Outs))
4774     return false;
4775 
4776   return true;
4777 }
4778 
4779 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
4780 /// for tail call optimization. Targets which want to do tail call
4781 /// optimization should implement this function.
4782 bool
4783 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
4784                                                      CallingConv::ID CalleeCC,
4785                                                      bool isVarArg,
4786                                       const SmallVectorImpl<ISD::InputArg> &Ins,
4787                                                      SelectionDAG& DAG) const {
4788   if (!getTargetMachine().Options.GuaranteedTailCallOpt)
4789     return false;
4790 
4791   // Variable argument functions are not supported.
4792   if (isVarArg)
4793     return false;
4794 
4795   MachineFunction &MF = DAG.getMachineFunction();
4796   CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
4797   if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
4798     // Functions containing by val parameters are not supported.
4799     for (unsigned i = 0; i != Ins.size(); i++) {
4800        ISD::ArgFlagsTy Flags = Ins[i].Flags;
4801        if (Flags.isByVal()) return false;
4802     }
4803 
4804     // Non-PIC/GOT tail calls are supported.
4805     if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
4806       return true;
4807 
4808     // At the moment we can only do local tail calls (in same module, hidden
4809     // or protected) if we are generating PIC.
4810     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
4811       return G->getGlobal()->hasHiddenVisibility()
4812           || G->getGlobal()->hasProtectedVisibility();
4813   }
4814 
4815   return false;
4816 }
4817 
4818 /// isCallCompatibleAddress - Return the immediate to use if the specified
4819 /// 32-bit value is representable in the immediate field of a BxA instruction.
4820 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
4821   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
4822   if (!C) return nullptr;
4823 
4824   int Addr = C->getZExtValue();
4825   if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
4826       SignExtend32<26>(Addr) != Addr)
4827     return nullptr;  // Top 6 bits have to be sext of immediate.
4828 
4829   return DAG
4830       .getConstant(
4831           (int)C->getZExtValue() >> 2, SDLoc(Op),
4832           DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
4833       .getNode();
4834 }
4835 
4836 namespace {
4837 
4838 struct TailCallArgumentInfo {
4839   SDValue Arg;
4840   SDValue FrameIdxOp;
4841   int FrameIdx = 0;
4842 
4843   TailCallArgumentInfo() = default;
4844 };
4845 
4846 } // end anonymous namespace
4847 
4848 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
4849 static void StoreTailCallArgumentsToStackSlot(
4850     SelectionDAG &DAG, SDValue Chain,
4851     const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
4852     SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
4853   for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
4854     SDValue Arg = TailCallArgs[i].Arg;
4855     SDValue FIN = TailCallArgs[i].FrameIdxOp;
4856     int FI = TailCallArgs[i].FrameIdx;
4857     // Store relative to framepointer.
4858     MemOpChains.push_back(DAG.getStore(
4859         Chain, dl, Arg, FIN,
4860         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
4861   }
4862 }
4863 
4864 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
4865 /// the appropriate stack slot for the tail call optimized function call.
4866 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
4867                                              SDValue OldRetAddr, SDValue OldFP,
4868                                              int SPDiff, const SDLoc &dl) {
4869   if (SPDiff) {
4870     // Calculate the new stack slot for the return address.
4871     MachineFunction &MF = DAG.getMachineFunction();
4872     const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
4873     const PPCFrameLowering *FL = Subtarget.getFrameLowering();
4874     bool isPPC64 = Subtarget.isPPC64();
4875     int SlotSize = isPPC64 ? 8 : 4;
4876     int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
4877     int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize,
4878                                                          NewRetAddrLoc, true);
4879     EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4880     SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
4881     Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
4882                          MachinePointerInfo::getFixedStack(MF, NewRetAddr));
4883   }
4884   return Chain;
4885 }
4886 
4887 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate
4888 /// the position of the argument.
4889 static void
4890 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
4891                          SDValue Arg, int SPDiff, unsigned ArgOffset,
4892                      SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
4893   int Offset = ArgOffset + SPDiff;
4894   uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
4895   int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
4896   EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4897   SDValue FIN = DAG.getFrameIndex(FI, VT);
4898   TailCallArgumentInfo Info;
4899   Info.Arg = Arg;
4900   Info.FrameIdxOp = FIN;
4901   Info.FrameIdx = FI;
4902   TailCallArguments.push_back(Info);
4903 }
4904 
4905 /// EmitTCFPAndRetAddrLoad - Emit load from frame pointer and return address
4906 /// stack slot. Returns the chain as result and the loaded frame pointers in
4907 /// LROpOut/FPOpout. Used when tail calling.
4908 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
4909     SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
4910     SDValue &FPOpOut, const SDLoc &dl) const {
4911   if (SPDiff) {
4912     // Load the LR and FP stack slot for later adjusting.
4913     EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
4914     LROpOut = getReturnAddrFrameIndex(DAG);
4915     LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
4916     Chain = SDValue(LROpOut.getNode(), 1);
4917   }
4918   return Chain;
4919 }
4920 
4921 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
4922 /// by "Src" to address "Dst" of size "Size".  Alignment information is
4923 /// specified by the specific parameter attribute. The copy will be passed as
4924 /// a byval function parameter.
4925 /// Sometimes what we are copying is the end of a larger object, the part that
4926 /// does not fit in registers.
4927 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
4928                                          SDValue Chain, ISD::ArgFlagsTy Flags,
4929                                          SelectionDAG &DAG, const SDLoc &dl) {
4930   SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
4931   return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode,
4932                        Flags.getNonZeroByValAlign(), false, false, false,
4933                        MachinePointerInfo(), MachinePointerInfo());
4934 }
4935 
4936 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
4937 /// tail calls.
4938 static void LowerMemOpCallTo(
4939     SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
4940     SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
4941     bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
4942     SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
4943   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4944   if (!isTailCall) {
4945     if (isVector) {
4946       SDValue StackPtr;
4947       if (isPPC64)
4948         StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
4949       else
4950         StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
4951       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
4952                            DAG.getConstant(ArgOffset, dl, PtrVT));
4953     }
4954     MemOpChains.push_back(
4955         DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
4956     // Calculate and remember argument location.
4957   } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
4958                                   TailCallArguments);
4959 }
4960 
4961 static void
4962 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
4963                 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp,
4964                 SDValue FPOp,
4965                 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
4966   // Emit a sequence of copyto/copyfrom virtual registers for arguments that
4967   // might overwrite each other in case of tail call optimization.
4968   SmallVector<SDValue, 8> MemOpChains2;
4969   // Do not flag preceding copytoreg stuff together with the following stuff.
4970   InFlag = SDValue();
4971   StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
4972                                     MemOpChains2, dl);
4973   if (!MemOpChains2.empty())
4974     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
4975 
4976   // Store the return address to the appropriate stack slot.
4977   Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl);
4978 
4979   // Emit callseq_end just before tailcall node.
4980   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
4981                              DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
4982   InFlag = Chain.getValue(1);
4983 }
4984 
4985 // Is this global address that of a function that can be called by name? (as
4986 // opposed to something that must hold a descriptor for an indirect call).
4987 static bool isFunctionGlobalAddress(SDValue Callee) {
4988   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
4989     if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
4990         Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
4991       return false;
4992 
4993     return G->getGlobal()->getValueType()->isFunctionTy();
4994   }
4995 
4996   return false;
4997 }
4998 
4999 SDValue PPCTargetLowering::LowerCallResult(
5000     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
5001     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5002     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
5003   SmallVector<CCValAssign, 16> RVLocs;
5004   CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
5005                     *DAG.getContext());
5006 
5007   CCRetInfo.AnalyzeCallResult(
5008       Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
5009                ? RetCC_PPC_Cold
5010                : RetCC_PPC);
5011 
5012   // Copy all of the result registers out of their specified physreg.
5013   for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
5014     CCValAssign &VA = RVLocs[i];
5015     assert(VA.isRegLoc() && "Can only return in registers!");
5016 
5017     SDValue Val;
5018 
5019     if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
5020       SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
5021                                       InFlag);
5022       Chain = Lo.getValue(1);
5023       InFlag = Lo.getValue(2);
5024       VA = RVLocs[++i]; // skip ahead to next loc
5025       SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
5026                                       InFlag);
5027       Chain = Hi.getValue(1);
5028       InFlag = Hi.getValue(2);
5029       if (!Subtarget.isLittleEndian())
5030         std::swap (Lo, Hi);
5031       Val = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, Lo, Hi);
5032     } else {
5033       Val = DAG.getCopyFromReg(Chain, dl,
5034                                VA.getLocReg(), VA.getLocVT(), InFlag);
5035       Chain = Val.getValue(1);
5036       InFlag = Val.getValue(2);
5037     }
5038 
5039     switch (VA.getLocInfo()) {
5040     default: llvm_unreachable("Unknown loc info!");
5041     case CCValAssign::Full: break;
5042     case CCValAssign::AExt:
5043       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5044       break;
5045     case CCValAssign::ZExt:
5046       Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
5047                         DAG.getValueType(VA.getValVT()));
5048       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5049       break;
5050     case CCValAssign::SExt:
5051       Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
5052                         DAG.getValueType(VA.getValVT()));
5053       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5054       break;
5055     }
5056 
5057     InVals.push_back(Val);
5058   }
5059 
5060   return Chain;
5061 }
5062 
5063 static bool isIndirectCall(const SDValue &Callee, SelectionDAG &DAG,
5064                            const PPCSubtarget &Subtarget, bool isPatchPoint) {
5065   // PatchPoint calls are not indirect.
5066   if (isPatchPoint)
5067     return false;
5068 
5069   if (isFunctionGlobalAddress(Callee) || dyn_cast<ExternalSymbolSDNode>(Callee))
5070     return false;
5071 
5072   // Darwin, and 32-bit ELF can use a BLA. The descriptor based ABIs can not
5073   // becuase the immediate function pointer points to a descriptor instead of
5074   // a function entry point. The ELFv2 ABI cannot use a BLA because the function
5075   // pointer immediate points to the global entry point, while the BLA would
5076   // need to jump to the local entry point (see rL211174).
5077   if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI() &&
5078       isBLACompatibleAddress(Callee, DAG))
5079     return false;
5080 
5081   return true;
5082 }
5083 
5084 // AIX and 64-bit ELF ABIs w/o PCRel require a TOC save/restore around calls.
5085 static inline bool isTOCSaveRestoreRequired(const PPCSubtarget &Subtarget) {
5086   return Subtarget.isAIXABI() ||
5087          (Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls());
5088 }
5089 
5090 static unsigned getCallOpcode(PPCTargetLowering::CallFlags CFlags,
5091                               const Function &Caller,
5092                               const SDValue &Callee,
5093                               const PPCSubtarget &Subtarget,
5094                               const TargetMachine &TM) {
5095   if (CFlags.IsTailCall)
5096     return PPCISD::TC_RETURN;
5097 
5098   // This is a call through a function pointer.
5099   if (CFlags.IsIndirect) {
5100     // AIX and the 64-bit ELF ABIs need to maintain the TOC pointer accross
5101     // indirect calls. The save of the caller's TOC pointer to the stack will be
5102     // inserted into the DAG as part of call lowering. The restore of the TOC
5103     // pointer is modeled by using a pseudo instruction for the call opcode that
5104     // represents the 2 instruction sequence of an indirect branch and link,
5105     // immediately followed by a load of the TOC pointer from the the stack save
5106     // slot into gpr2. For 64-bit ELFv2 ABI with PCRel, do not restore the TOC
5107     // as it is not saved or used.
5108     return isTOCSaveRestoreRequired(Subtarget) ? PPCISD::BCTRL_LOAD_TOC
5109                                                : PPCISD::BCTRL;
5110   }
5111 
5112   if (Subtarget.isUsingPCRelativeCalls()) {
5113     assert(Subtarget.is64BitELFABI() && "PC Relative is only on ELF ABI.");
5114     return PPCISD::CALL_NOTOC;
5115   }
5116 
5117   // The ABIs that maintain a TOC pointer accross calls need to have a nop
5118   // immediately following the call instruction if the caller and callee may
5119   // have different TOC bases. At link time if the linker determines the calls
5120   // may not share a TOC base, the call is redirected to a trampoline inserted
5121   // by the linker. The trampoline will (among other things) save the callers
5122   // TOC pointer at an ABI designated offset in the linkage area and the linker
5123   // will rewrite the nop to be a load of the TOC pointer from the linkage area
5124   // into gpr2.
5125   if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI())
5126     return callsShareTOCBase(&Caller, Callee, TM) ? PPCISD::CALL
5127                                                   : PPCISD::CALL_NOP;
5128 
5129   return PPCISD::CALL;
5130 }
5131 
5132 static SDValue transformCallee(const SDValue &Callee, SelectionDAG &DAG,
5133                                const SDLoc &dl, const PPCSubtarget &Subtarget) {
5134   if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI())
5135     if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
5136       return SDValue(Dest, 0);
5137 
5138   // Returns true if the callee is local, and false otherwise.
5139   auto isLocalCallee = [&]() {
5140     const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
5141     const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
5142     const GlobalValue *GV = G ? G->getGlobal() : nullptr;
5143 
5144     return DAG.getTarget().shouldAssumeDSOLocal(*Mod, GV) &&
5145            !dyn_cast_or_null<GlobalIFunc>(GV);
5146   };
5147 
5148   // The PLT is only used in 32-bit ELF PIC mode.  Attempting to use the PLT in
5149   // a static relocation model causes some versions of GNU LD (2.17.50, at
5150   // least) to force BSS-PLT, instead of secure-PLT, even if all objects are
5151   // built with secure-PLT.
5152   bool UsePlt =
5153       Subtarget.is32BitELFABI() && !isLocalCallee() &&
5154       Subtarget.getTargetMachine().getRelocationModel() == Reloc::PIC_;
5155 
5156   const auto getAIXFuncEntryPointSymbolSDNode = [&](const GlobalValue *GV) {
5157     const TargetMachine &TM = Subtarget.getTargetMachine();
5158     const TargetLoweringObjectFile *TLOF = TM.getObjFileLowering();
5159     MCSymbolXCOFF *S =
5160         cast<MCSymbolXCOFF>(TLOF->getFunctionEntryPointSymbol(GV, TM));
5161 
5162     if (GV->isDeclaration() && !S->hasRepresentedCsectSet()) {
5163       // On AIX, an undefined symbol needs to be associated with a
5164       // MCSectionXCOFF to get the correct storage mapping class.
5165       // In this case, XCOFF::XMC_PR.
5166       const XCOFF::StorageClass SC =
5167           TargetLoweringObjectFileXCOFF::getStorageClassForGlobal(GV);
5168       auto &Context = DAG.getMachineFunction().getMMI().getContext();
5169       MCSectionXCOFF *Sec = Context.getXCOFFSection(
5170           S->getSymbolTableName(), XCOFF::XMC_PR, XCOFF::XTY_ER, SC,
5171           SectionKind::getMetadata());
5172       S->setRepresentedCsect(Sec);
5173     }
5174 
5175     MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
5176     return DAG.getMCSymbol(S, PtrVT);
5177   };
5178 
5179   if (isFunctionGlobalAddress(Callee)) {
5180     const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
5181 
5182     if (Subtarget.isAIXABI()) {
5183       assert(!isa<GlobalIFunc>(GV) && "IFunc is not supported on AIX.");
5184       return getAIXFuncEntryPointSymbolSDNode(GV);
5185     }
5186     return DAG.getTargetGlobalAddress(GV, dl, Callee.getValueType(), 0,
5187                                       UsePlt ? PPCII::MO_PLT : 0);
5188   }
5189 
5190   if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
5191     const char *SymName = S->getSymbol();
5192     if (Subtarget.isAIXABI()) {
5193       // If there exists a user-declared function whose name is the same as the
5194       // ExternalSymbol's, then we pick up the user-declared version.
5195       const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
5196       if (const Function *F =
5197               dyn_cast_or_null<Function>(Mod->getNamedValue(SymName)))
5198         return getAIXFuncEntryPointSymbolSDNode(F);
5199 
5200       // On AIX, direct function calls reference the symbol for the function's
5201       // entry point, which is named by prepending a "." before the function's
5202       // C-linkage name.
5203       const auto getFunctionEntryPointSymbol = [&](StringRef SymName) {
5204         auto &Context = DAG.getMachineFunction().getMMI().getContext();
5205         return cast<MCSymbolXCOFF>(
5206             Context.getOrCreateSymbol(Twine(".") + Twine(SymName)));
5207       };
5208 
5209       SymName = getFunctionEntryPointSymbol(SymName)->getName().data();
5210     }
5211     return DAG.getTargetExternalSymbol(SymName, Callee.getValueType(),
5212                                        UsePlt ? PPCII::MO_PLT : 0);
5213   }
5214 
5215   // No transformation needed.
5216   assert(Callee.getNode() && "What no callee?");
5217   return Callee;
5218 }
5219 
5220 static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart) {
5221   assert(CallSeqStart.getOpcode() == ISD::CALLSEQ_START &&
5222          "Expected a CALLSEQ_STARTSDNode.");
5223 
5224   // The last operand is the chain, except when the node has glue. If the node
5225   // has glue, then the last operand is the glue, and the chain is the second
5226   // last operand.
5227   SDValue LastValue = CallSeqStart.getValue(CallSeqStart->getNumValues() - 1);
5228   if (LastValue.getValueType() != MVT::Glue)
5229     return LastValue;
5230 
5231   return CallSeqStart.getValue(CallSeqStart->getNumValues() - 2);
5232 }
5233 
5234 // Creates the node that moves a functions address into the count register
5235 // to prepare for an indirect call instruction.
5236 static void prepareIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5237                                 SDValue &Glue, SDValue &Chain,
5238                                 const SDLoc &dl) {
5239   SDValue MTCTROps[] = {Chain, Callee, Glue};
5240   EVT ReturnTypes[] = {MVT::Other, MVT::Glue};
5241   Chain = DAG.getNode(PPCISD::MTCTR, dl, makeArrayRef(ReturnTypes, 2),
5242                       makeArrayRef(MTCTROps, Glue.getNode() ? 3 : 2));
5243   // The glue is the second value produced.
5244   Glue = Chain.getValue(1);
5245 }
5246 
5247 static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5248                                           SDValue &Glue, SDValue &Chain,
5249                                           SDValue CallSeqStart,
5250                                           const CallBase *CB, const SDLoc &dl,
5251                                           bool hasNest,
5252                                           const PPCSubtarget &Subtarget) {
5253   // Function pointers in the 64-bit SVR4 ABI do not point to the function
5254   // entry point, but to the function descriptor (the function entry point
5255   // address is part of the function descriptor though).
5256   // The function descriptor is a three doubleword structure with the
5257   // following fields: function entry point, TOC base address and
5258   // environment pointer.
5259   // Thus for a call through a function pointer, the following actions need
5260   // to be performed:
5261   //   1. Save the TOC of the caller in the TOC save area of its stack
5262   //      frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
5263   //   2. Load the address of the function entry point from the function
5264   //      descriptor.
5265   //   3. Load the TOC of the callee from the function descriptor into r2.
5266   //   4. Load the environment pointer from the function descriptor into
5267   //      r11.
5268   //   5. Branch to the function entry point address.
5269   //   6. On return of the callee, the TOC of the caller needs to be
5270   //      restored (this is done in FinishCall()).
5271   //
5272   // The loads are scheduled at the beginning of the call sequence, and the
5273   // register copies are flagged together to ensure that no other
5274   // operations can be scheduled in between. E.g. without flagging the
5275   // copies together, a TOC access in the caller could be scheduled between
5276   // the assignment of the callee TOC and the branch to the callee, which leads
5277   // to incorrect code.
5278 
5279   // Start by loading the function address from the descriptor.
5280   SDValue LDChain = getOutputChainFromCallSeq(CallSeqStart);
5281   auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
5282                       ? (MachineMemOperand::MODereferenceable |
5283                          MachineMemOperand::MOInvariant)
5284                       : MachineMemOperand::MONone;
5285 
5286   MachinePointerInfo MPI(CB ? CB->getCalledOperand() : nullptr);
5287 
5288   // Registers used in building the DAG.
5289   const MCRegister EnvPtrReg = Subtarget.getEnvironmentPointerRegister();
5290   const MCRegister TOCReg = Subtarget.getTOCPointerRegister();
5291 
5292   // Offsets of descriptor members.
5293   const unsigned TOCAnchorOffset = Subtarget.descriptorTOCAnchorOffset();
5294   const unsigned EnvPtrOffset = Subtarget.descriptorEnvironmentPointerOffset();
5295 
5296   const MVT RegVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
5297   const unsigned Alignment = Subtarget.isPPC64() ? 8 : 4;
5298 
5299   // One load for the functions entry point address.
5300   SDValue LoadFuncPtr = DAG.getLoad(RegVT, dl, LDChain, Callee, MPI,
5301                                     Alignment, MMOFlags);
5302 
5303   // One for loading the TOC anchor for the module that contains the called
5304   // function.
5305   SDValue TOCOff = DAG.getIntPtrConstant(TOCAnchorOffset, dl);
5306   SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, Callee, TOCOff);
5307   SDValue TOCPtr =
5308       DAG.getLoad(RegVT, dl, LDChain, AddTOC,
5309                   MPI.getWithOffset(TOCAnchorOffset), Alignment, MMOFlags);
5310 
5311   // One for loading the environment pointer.
5312   SDValue PtrOff = DAG.getIntPtrConstant(EnvPtrOffset, dl);
5313   SDValue AddPtr = DAG.getNode(ISD::ADD, dl, RegVT, Callee, PtrOff);
5314   SDValue LoadEnvPtr =
5315       DAG.getLoad(RegVT, dl, LDChain, AddPtr,
5316                   MPI.getWithOffset(EnvPtrOffset), Alignment, MMOFlags);
5317 
5318 
5319   // Then copy the newly loaded TOC anchor to the TOC pointer.
5320   SDValue TOCVal = DAG.getCopyToReg(Chain, dl, TOCReg, TOCPtr, Glue);
5321   Chain = TOCVal.getValue(0);
5322   Glue = TOCVal.getValue(1);
5323 
5324   // If the function call has an explicit 'nest' parameter, it takes the
5325   // place of the environment pointer.
5326   assert((!hasNest || !Subtarget.isAIXABI()) &&
5327          "Nest parameter is not supported on AIX.");
5328   if (!hasNest) {
5329     SDValue EnvVal = DAG.getCopyToReg(Chain, dl, EnvPtrReg, LoadEnvPtr, Glue);
5330     Chain = EnvVal.getValue(0);
5331     Glue = EnvVal.getValue(1);
5332   }
5333 
5334   // The rest of the indirect call sequence is the same as the non-descriptor
5335   // DAG.
5336   prepareIndirectCall(DAG, LoadFuncPtr, Glue, Chain, dl);
5337 }
5338 
5339 static void
5340 buildCallOperands(SmallVectorImpl<SDValue> &Ops,
5341                   PPCTargetLowering::CallFlags CFlags, const SDLoc &dl,
5342                   SelectionDAG &DAG,
5343                   SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
5344                   SDValue Glue, SDValue Chain, SDValue &Callee, int SPDiff,
5345                   const PPCSubtarget &Subtarget) {
5346   const bool IsPPC64 = Subtarget.isPPC64();
5347   // MVT for a general purpose register.
5348   const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
5349 
5350   // First operand is always the chain.
5351   Ops.push_back(Chain);
5352 
5353   // If it's a direct call pass the callee as the second operand.
5354   if (!CFlags.IsIndirect)
5355     Ops.push_back(Callee);
5356   else {
5357     assert(!CFlags.IsPatchPoint && "Patch point calls are not indirect.");
5358 
5359     // For the TOC based ABIs, we have saved the TOC pointer to the linkage area
5360     // on the stack (this would have been done in `LowerCall_64SVR4` or
5361     // `LowerCall_AIX`). The call instruction is a pseudo instruction that
5362     // represents both the indirect branch and a load that restores the TOC
5363     // pointer from the linkage area. The operand for the TOC restore is an add
5364     // of the TOC save offset to the stack pointer. This must be the second
5365     // operand: after the chain input but before any other variadic arguments.
5366     // For 64-bit ELFv2 ABI with PCRel, do not restore the TOC as it is not
5367     // saved or used.
5368     if (isTOCSaveRestoreRequired(Subtarget)) {
5369       const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
5370 
5371       SDValue StackPtr = DAG.getRegister(StackPtrReg, RegVT);
5372       unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
5373       SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
5374       SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, StackPtr, TOCOff);
5375       Ops.push_back(AddTOC);
5376     }
5377 
5378     // Add the register used for the environment pointer.
5379     if (Subtarget.usesFunctionDescriptors() && !CFlags.HasNest)
5380       Ops.push_back(DAG.getRegister(Subtarget.getEnvironmentPointerRegister(),
5381                                     RegVT));
5382 
5383 
5384     // Add CTR register as callee so a bctr can be emitted later.
5385     if (CFlags.IsTailCall)
5386       Ops.push_back(DAG.getRegister(IsPPC64 ? PPC::CTR8 : PPC::CTR, RegVT));
5387   }
5388 
5389   // If this is a tail call add stack pointer delta.
5390   if (CFlags.IsTailCall)
5391     Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32));
5392 
5393   // Add argument registers to the end of the list so that they are known live
5394   // into the call.
5395   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
5396     Ops.push_back(DAG.getRegister(RegsToPass[i].first,
5397                                   RegsToPass[i].second.getValueType()));
5398 
5399   // We cannot add R2/X2 as an operand here for PATCHPOINT, because there is
5400   // no way to mark dependencies as implicit here.
5401   // We will add the R2/X2 dependency in EmitInstrWithCustomInserter.
5402   if ((Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) &&
5403        !CFlags.IsPatchPoint && !Subtarget.isUsingPCRelativeCalls())
5404     Ops.push_back(DAG.getRegister(Subtarget.getTOCPointerRegister(), RegVT));
5405 
5406   // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls
5407   if (CFlags.IsVarArg && Subtarget.is32BitELFABI())
5408     Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));
5409 
5410   // Add a register mask operand representing the call-preserved registers.
5411   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
5412   const uint32_t *Mask =
5413       TRI->getCallPreservedMask(DAG.getMachineFunction(), CFlags.CallConv);
5414   assert(Mask && "Missing call preserved mask for calling convention");
5415   Ops.push_back(DAG.getRegisterMask(Mask));
5416 
5417   // If the glue is valid, it is the last operand.
5418   if (Glue.getNode())
5419     Ops.push_back(Glue);
5420 }
5421 
5422 SDValue PPCTargetLowering::FinishCall(
5423     CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG,
5424     SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue Glue,
5425     SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
5426     unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
5427     SmallVectorImpl<SDValue> &InVals, const CallBase *CB) const {
5428 
5429   if ((Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls()) ||
5430       Subtarget.isAIXABI())
5431     setUsesTOCBasePtr(DAG);
5432 
5433   unsigned CallOpc =
5434       getCallOpcode(CFlags, DAG.getMachineFunction().getFunction(), Callee,
5435                     Subtarget, DAG.getTarget());
5436 
5437   if (!CFlags.IsIndirect)
5438     Callee = transformCallee(Callee, DAG, dl, Subtarget);
5439   else if (Subtarget.usesFunctionDescriptors())
5440     prepareDescriptorIndirectCall(DAG, Callee, Glue, Chain, CallSeqStart, CB,
5441                                   dl, CFlags.HasNest, Subtarget);
5442   else
5443     prepareIndirectCall(DAG, Callee, Glue, Chain, dl);
5444 
5445   // Build the operand list for the call instruction.
5446   SmallVector<SDValue, 8> Ops;
5447   buildCallOperands(Ops, CFlags, dl, DAG, RegsToPass, Glue, Chain, Callee,
5448                     SPDiff, Subtarget);
5449 
5450   // Emit tail call.
5451   if (CFlags.IsTailCall) {
5452     // Indirect tail call when using PC Relative calls do not have the same
5453     // constraints.
5454     assert(((Callee.getOpcode() == ISD::Register &&
5455              cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
5456             Callee.getOpcode() == ISD::TargetExternalSymbol ||
5457             Callee.getOpcode() == ISD::TargetGlobalAddress ||
5458             isa<ConstantSDNode>(Callee) ||
5459             (CFlags.IsIndirect && Subtarget.isUsingPCRelativeCalls())) &&
5460            "Expecting a global address, external symbol, absolute value, "
5461            "register or an indirect tail call when PC Relative calls are "
5462            "used.");
5463     // PC Relative calls also use TC_RETURN as the way to mark tail calls.
5464     assert(CallOpc == PPCISD::TC_RETURN &&
5465            "Unexpected call opcode for a tail call.");
5466     DAG.getMachineFunction().getFrameInfo().setHasTailCall();
5467     return DAG.getNode(CallOpc, dl, MVT::Other, Ops);
5468   }
5469 
5470   std::array<EVT, 2> ReturnTypes = {{MVT::Other, MVT::Glue}};
5471   Chain = DAG.getNode(CallOpc, dl, ReturnTypes, Ops);
5472   DAG.addNoMergeSiteInfo(Chain.getNode(), CFlags.NoMerge);
5473   Glue = Chain.getValue(1);
5474 
5475   // When performing tail call optimization the callee pops its arguments off
5476   // the stack. Account for this here so these bytes can be pushed back on in
5477   // PPCFrameLowering::eliminateCallFramePseudoInstr.
5478   int BytesCalleePops = (CFlags.CallConv == CallingConv::Fast &&
5479                          getTargetMachine().Options.GuaranteedTailCallOpt)
5480                             ? NumBytes
5481                             : 0;
5482 
5483   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
5484                              DAG.getIntPtrConstant(BytesCalleePops, dl, true),
5485                              Glue, dl);
5486   Glue = Chain.getValue(1);
5487 
5488   return LowerCallResult(Chain, Glue, CFlags.CallConv, CFlags.IsVarArg, Ins, dl,
5489                          DAG, InVals);
5490 }
5491 
5492 SDValue
5493 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
5494                              SmallVectorImpl<SDValue> &InVals) const {
5495   SelectionDAG &DAG                     = CLI.DAG;
5496   SDLoc &dl                             = CLI.DL;
5497   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
5498   SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
5499   SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
5500   SDValue Chain                         = CLI.Chain;
5501   SDValue Callee                        = CLI.Callee;
5502   bool &isTailCall                      = CLI.IsTailCall;
5503   CallingConv::ID CallConv              = CLI.CallConv;
5504   bool isVarArg                         = CLI.IsVarArg;
5505   bool isPatchPoint                     = CLI.IsPatchPoint;
5506   const CallBase *CB                    = CLI.CB;
5507 
5508   if (isTailCall) {
5509     if (Subtarget.useLongCalls() && !(CB && CB->isMustTailCall()))
5510       isTailCall = false;
5511     else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
5512       isTailCall = IsEligibleForTailCallOptimization_64SVR4(
5513           Callee, CallConv, CB, isVarArg, Outs, Ins, DAG);
5514     else
5515       isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
5516                                                      Ins, DAG);
5517     if (isTailCall) {
5518       ++NumTailCalls;
5519       if (!getTargetMachine().Options.GuaranteedTailCallOpt)
5520         ++NumSiblingCalls;
5521 
5522       // PC Relative calls no longer guarantee that the callee is a Global
5523       // Address Node. The callee could be an indirect tail call in which
5524       // case the SDValue for the callee could be a load (to load the address
5525       // of a function pointer) or it may be a register copy (to move the
5526       // address of the callee from a function parameter into a virtual
5527       // register). It may also be an ExternalSymbolSDNode (ex memcopy).
5528       assert((Subtarget.isUsingPCRelativeCalls() ||
5529               isa<GlobalAddressSDNode>(Callee)) &&
5530              "Callee should be an llvm::Function object.");
5531 
5532       LLVM_DEBUG(dbgs() << "TCO caller: " << DAG.getMachineFunction().getName()
5533                         << "\nTCO callee: ");
5534       LLVM_DEBUG(Callee.dump());
5535     }
5536   }
5537 
5538   if (!isTailCall && CB && CB->isMustTailCall())
5539     report_fatal_error("failed to perform tail call elimination on a call "
5540                        "site marked musttail");
5541 
5542   // When long calls (i.e. indirect calls) are always used, calls are always
5543   // made via function pointer. If we have a function name, first translate it
5544   // into a pointer.
5545   if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
5546       !isTailCall)
5547     Callee = LowerGlobalAddress(Callee, DAG);
5548 
5549   CallFlags CFlags(
5550       CallConv, isTailCall, isVarArg, isPatchPoint,
5551       isIndirectCall(Callee, DAG, Subtarget, isPatchPoint),
5552       // hasNest
5553       Subtarget.is64BitELFABI() &&
5554           any_of(Outs, [](ISD::OutputArg Arg) { return Arg.Flags.isNest(); }),
5555       CLI.NoMerge);
5556 
5557   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
5558     return LowerCall_64SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5559                             InVals, CB);
5560 
5561   if (Subtarget.isSVR4ABI())
5562     return LowerCall_32SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5563                             InVals, CB);
5564 
5565   if (Subtarget.isAIXABI())
5566     return LowerCall_AIX(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5567                          InVals, CB);
5568 
5569   return LowerCall_Darwin(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5570                           InVals, CB);
5571 }
5572 
5573 SDValue PPCTargetLowering::LowerCall_32SVR4(
5574     SDValue Chain, SDValue Callee, CallFlags CFlags,
5575     const SmallVectorImpl<ISD::OutputArg> &Outs,
5576     const SmallVectorImpl<SDValue> &OutVals,
5577     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5578     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5579     const CallBase *CB) const {
5580   // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
5581   // of the 32-bit SVR4 ABI stack frame layout.
5582 
5583   const CallingConv::ID CallConv = CFlags.CallConv;
5584   const bool IsVarArg = CFlags.IsVarArg;
5585   const bool IsTailCall = CFlags.IsTailCall;
5586 
5587   assert((CallConv == CallingConv::C ||
5588           CallConv == CallingConv::Cold ||
5589           CallConv == CallingConv::Fast) && "Unknown calling convention!");
5590 
5591   const Align PtrAlign(4);
5592 
5593   MachineFunction &MF = DAG.getMachineFunction();
5594 
5595   // Mark this function as potentially containing a function that contains a
5596   // tail call. As a consequence the frame pointer will be used for dynamicalloc
5597   // and restoring the callers stack pointer in this functions epilog. This is
5598   // done because by tail calling the called function might overwrite the value
5599   // in this function's (MF) stack pointer stack slot 0(SP).
5600   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5601       CallConv == CallingConv::Fast)
5602     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5603 
5604   // Count how many bytes are to be pushed on the stack, including the linkage
5605   // area, parameter list area and the part of the local variable space which
5606   // contains copies of aggregates which are passed by value.
5607 
5608   // Assign locations to all of the outgoing arguments.
5609   SmallVector<CCValAssign, 16> ArgLocs;
5610   PPCCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
5611 
5612   // Reserve space for the linkage area on the stack.
5613   CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
5614                        PtrAlign);
5615   if (useSoftFloat())
5616     CCInfo.PreAnalyzeCallOperands(Outs);
5617 
5618   if (IsVarArg) {
5619     // Handle fixed and variable vector arguments differently.
5620     // Fixed vector arguments go into registers as long as registers are
5621     // available. Variable vector arguments always go into memory.
5622     unsigned NumArgs = Outs.size();
5623 
5624     for (unsigned i = 0; i != NumArgs; ++i) {
5625       MVT ArgVT = Outs[i].VT;
5626       ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
5627       bool Result;
5628 
5629       if (Outs[i].IsFixed) {
5630         Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
5631                                CCInfo);
5632       } else {
5633         Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
5634                                       ArgFlags, CCInfo);
5635       }
5636 
5637       if (Result) {
5638 #ifndef NDEBUG
5639         errs() << "Call operand #" << i << " has unhandled type "
5640              << EVT(ArgVT).getEVTString() << "\n";
5641 #endif
5642         llvm_unreachable(nullptr);
5643       }
5644     }
5645   } else {
5646     // All arguments are treated the same.
5647     CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
5648   }
5649   CCInfo.clearWasPPCF128();
5650 
5651   // Assign locations to all of the outgoing aggregate by value arguments.
5652   SmallVector<CCValAssign, 16> ByValArgLocs;
5653   CCState CCByValInfo(CallConv, IsVarArg, MF, ByValArgLocs, *DAG.getContext());
5654 
5655   // Reserve stack space for the allocations in CCInfo.
5656   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
5657 
5658   CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
5659 
5660   // Size of the linkage area, parameter list area and the part of the local
5661   // space variable where copies of aggregates which are passed by value are
5662   // stored.
5663   unsigned NumBytes = CCByValInfo.getNextStackOffset();
5664 
5665   // Calculate by how many bytes the stack has to be adjusted in case of tail
5666   // call optimization.
5667   int SPDiff = CalculateTailCallSPDiff(DAG, IsTailCall, NumBytes);
5668 
5669   // Adjust the stack pointer for the new arguments...
5670   // These operations are automatically eliminated by the prolog/epilog pass
5671   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5672   SDValue CallSeqStart = Chain;
5673 
5674   // Load the return address and frame pointer so it can be moved somewhere else
5675   // later.
5676   SDValue LROp, FPOp;
5677   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5678 
5679   // Set up a copy of the stack pointer for use loading and storing any
5680   // arguments that may not fit in the registers available for argument
5681   // passing.
5682   SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5683 
5684   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5685   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5686   SmallVector<SDValue, 8> MemOpChains;
5687 
5688   bool seenFloatArg = false;
5689   // Walk the register/memloc assignments, inserting copies/loads.
5690   // i - Tracks the index into the list of registers allocated for the call
5691   // RealArgIdx - Tracks the index into the list of actual function arguments
5692   // j - Tracks the index into the list of byval arguments
5693   for (unsigned i = 0, RealArgIdx = 0, j = 0, e = ArgLocs.size();
5694        i != e;
5695        ++i, ++RealArgIdx) {
5696     CCValAssign &VA = ArgLocs[i];
5697     SDValue Arg = OutVals[RealArgIdx];
5698     ISD::ArgFlagsTy Flags = Outs[RealArgIdx].Flags;
5699 
5700     if (Flags.isByVal()) {
5701       // Argument is an aggregate which is passed by value, thus we need to
5702       // create a copy of it in the local variable space of the current stack
5703       // frame (which is the stack frame of the caller) and pass the address of
5704       // this copy to the callee.
5705       assert((j < ByValArgLocs.size()) && "Index out of bounds!");
5706       CCValAssign &ByValVA = ByValArgLocs[j++];
5707       assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
5708 
5709       // Memory reserved in the local variable space of the callers stack frame.
5710       unsigned LocMemOffset = ByValVA.getLocMemOffset();
5711 
5712       SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5713       PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5714                            StackPtr, PtrOff);
5715 
5716       // Create a copy of the argument in the local area of the current
5717       // stack frame.
5718       SDValue MemcpyCall =
5719         CreateCopyOfByValArgument(Arg, PtrOff,
5720                                   CallSeqStart.getNode()->getOperand(0),
5721                                   Flags, DAG, dl);
5722 
5723       // This must go outside the CALLSEQ_START..END.
5724       SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0,
5725                                                      SDLoc(MemcpyCall));
5726       DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5727                              NewCallSeqStart.getNode());
5728       Chain = CallSeqStart = NewCallSeqStart;
5729 
5730       // Pass the address of the aggregate copy on the stack either in a
5731       // physical register or in the parameter list area of the current stack
5732       // frame to the callee.
5733       Arg = PtrOff;
5734     }
5735 
5736     // When useCRBits() is true, there can be i1 arguments.
5737     // It is because getRegisterType(MVT::i1) => MVT::i1,
5738     // and for other integer types getRegisterType() => MVT::i32.
5739     // Extend i1 and ensure callee will get i32.
5740     if (Arg.getValueType() == MVT::i1)
5741       Arg = DAG.getNode(Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
5742                         dl, MVT::i32, Arg);
5743 
5744     if (VA.isRegLoc()) {
5745       seenFloatArg |= VA.getLocVT().isFloatingPoint();
5746       // Put argument in a physical register.
5747       if (Subtarget.hasSPE() && Arg.getValueType() == MVT::f64) {
5748         bool IsLE = Subtarget.isLittleEndian();
5749         SDValue SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5750                         DAG.getIntPtrConstant(IsLE ? 0 : 1, dl));
5751         RegsToPass.push_back(std::make_pair(VA.getLocReg(), SVal.getValue(0)));
5752         SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5753                            DAG.getIntPtrConstant(IsLE ? 1 : 0, dl));
5754         RegsToPass.push_back(std::make_pair(ArgLocs[++i].getLocReg(),
5755                              SVal.getValue(0)));
5756       } else
5757         RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
5758     } else {
5759       // Put argument in the parameter list area of the current stack frame.
5760       assert(VA.isMemLoc());
5761       unsigned LocMemOffset = VA.getLocMemOffset();
5762 
5763       if (!IsTailCall) {
5764         SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5765         PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5766                              StackPtr, PtrOff);
5767 
5768         MemOpChains.push_back(
5769             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
5770       } else {
5771         // Calculate and remember argument location.
5772         CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
5773                                  TailCallArguments);
5774       }
5775     }
5776   }
5777 
5778   if (!MemOpChains.empty())
5779     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
5780 
5781   // Build a sequence of copy-to-reg nodes chained together with token chain
5782   // and flag operands which copy the outgoing args into the appropriate regs.
5783   SDValue InFlag;
5784   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
5785     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
5786                              RegsToPass[i].second, InFlag);
5787     InFlag = Chain.getValue(1);
5788   }
5789 
5790   // Set CR bit 6 to true if this is a vararg call with floating args passed in
5791   // registers.
5792   if (IsVarArg) {
5793     SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
5794     SDValue Ops[] = { Chain, InFlag };
5795 
5796     Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
5797                         dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));
5798 
5799     InFlag = Chain.getValue(1);
5800   }
5801 
5802   if (IsTailCall)
5803     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
5804                     TailCallArguments);
5805 
5806   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
5807                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
5808 }
5809 
5810 // Copy an argument into memory, being careful to do this outside the
5811 // call sequence for the call to which the argument belongs.
5812 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
5813     SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
5814     SelectionDAG &DAG, const SDLoc &dl) const {
5815   SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
5816                         CallSeqStart.getNode()->getOperand(0),
5817                         Flags, DAG, dl);
5818   // The MEMCPY must go outside the CALLSEQ_START..END.
5819   int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
5820   SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
5821                                                  SDLoc(MemcpyCall));
5822   DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5823                          NewCallSeqStart.getNode());
5824   return NewCallSeqStart;
5825 }
5826 
5827 SDValue PPCTargetLowering::LowerCall_64SVR4(
5828     SDValue Chain, SDValue Callee, CallFlags CFlags,
5829     const SmallVectorImpl<ISD::OutputArg> &Outs,
5830     const SmallVectorImpl<SDValue> &OutVals,
5831     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5832     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5833     const CallBase *CB) const {
5834   bool isELFv2ABI = Subtarget.isELFv2ABI();
5835   bool isLittleEndian = Subtarget.isLittleEndian();
5836   unsigned NumOps = Outs.size();
5837   bool IsSibCall = false;
5838   bool IsFastCall = CFlags.CallConv == CallingConv::Fast;
5839 
5840   EVT PtrVT = getPointerTy(DAG.getDataLayout());
5841   unsigned PtrByteSize = 8;
5842 
5843   MachineFunction &MF = DAG.getMachineFunction();
5844 
5845   if (CFlags.IsTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
5846     IsSibCall = true;
5847 
5848   // Mark this function as potentially containing a function that contains a
5849   // tail call. As a consequence the frame pointer will be used for dynamicalloc
5850   // and restoring the callers stack pointer in this functions epilog. This is
5851   // done because by tail calling the called function might overwrite the value
5852   // in this function's (MF) stack pointer stack slot 0(SP).
5853   if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
5854     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5855 
5856   assert(!(IsFastCall && CFlags.IsVarArg) &&
5857          "fastcc not supported on varargs functions");
5858 
5859   // Count how many bytes are to be pushed on the stack, including the linkage
5860   // area, and parameter passing area.  On ELFv1, the linkage area is 48 bytes
5861   // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
5862   // area is 32 bytes reserved space for [SP][CR][LR][TOC].
5863   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
5864   unsigned NumBytes = LinkageSize;
5865   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
5866 
5867   static const MCPhysReg GPR[] = {
5868     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
5869     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
5870   };
5871   static const MCPhysReg VR[] = {
5872     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
5873     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
5874   };
5875 
5876   const unsigned NumGPRs = array_lengthof(GPR);
5877   const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
5878   const unsigned NumVRs  = array_lengthof(VR);
5879 
5880   // On ELFv2, we can avoid allocating the parameter area if all the arguments
5881   // can be passed to the callee in registers.
5882   // For the fast calling convention, there is another check below.
5883   // Note: We should keep consistent with LowerFormalArguments_64SVR4()
5884   bool HasParameterArea = !isELFv2ABI || CFlags.IsVarArg || IsFastCall;
5885   if (!HasParameterArea) {
5886     unsigned ParamAreaSize = NumGPRs * PtrByteSize;
5887     unsigned AvailableFPRs = NumFPRs;
5888     unsigned AvailableVRs = NumVRs;
5889     unsigned NumBytesTmp = NumBytes;
5890     for (unsigned i = 0; i != NumOps; ++i) {
5891       if (Outs[i].Flags.isNest()) continue;
5892       if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags,
5893                                  PtrByteSize, LinkageSize, ParamAreaSize,
5894                                  NumBytesTmp, AvailableFPRs, AvailableVRs))
5895         HasParameterArea = true;
5896     }
5897   }
5898 
5899   // When using the fast calling convention, we don't provide backing for
5900   // arguments that will be in registers.
5901   unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
5902 
5903   // Avoid allocating parameter area for fastcc functions if all the arguments
5904   // can be passed in the registers.
5905   if (IsFastCall)
5906     HasParameterArea = false;
5907 
5908   // Add up all the space actually used.
5909   for (unsigned i = 0; i != NumOps; ++i) {
5910     ISD::ArgFlagsTy Flags = Outs[i].Flags;
5911     EVT ArgVT = Outs[i].VT;
5912     EVT OrigVT = Outs[i].ArgVT;
5913 
5914     if (Flags.isNest())
5915       continue;
5916 
5917     if (IsFastCall) {
5918       if (Flags.isByVal()) {
5919         NumGPRsUsed += (Flags.getByValSize()+7)/8;
5920         if (NumGPRsUsed > NumGPRs)
5921           HasParameterArea = true;
5922       } else {
5923         switch (ArgVT.getSimpleVT().SimpleTy) {
5924         default: llvm_unreachable("Unexpected ValueType for argument!");
5925         case MVT::i1:
5926         case MVT::i32:
5927         case MVT::i64:
5928           if (++NumGPRsUsed <= NumGPRs)
5929             continue;
5930           break;
5931         case MVT::v4i32:
5932         case MVT::v8i16:
5933         case MVT::v16i8:
5934         case MVT::v2f64:
5935         case MVT::v2i64:
5936         case MVT::v1i128:
5937         case MVT::f128:
5938           if (++NumVRsUsed <= NumVRs)
5939             continue;
5940           break;
5941         case MVT::v4f32:
5942           if (++NumVRsUsed <= NumVRs)
5943             continue;
5944           break;
5945         case MVT::f32:
5946         case MVT::f64:
5947           if (++NumFPRsUsed <= NumFPRs)
5948             continue;
5949           break;
5950         }
5951         HasParameterArea = true;
5952       }
5953     }
5954 
5955     /* Respect alignment of argument on the stack.  */
5956     auto Alignement =
5957         CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
5958     NumBytes = alignTo(NumBytes, Alignement);
5959 
5960     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
5961     if (Flags.isInConsecutiveRegsLast())
5962       NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
5963   }
5964 
5965   unsigned NumBytesActuallyUsed = NumBytes;
5966 
5967   // In the old ELFv1 ABI,
5968   // the prolog code of the callee may store up to 8 GPR argument registers to
5969   // the stack, allowing va_start to index over them in memory if its varargs.
5970   // Because we cannot tell if this is needed on the caller side, we have to
5971   // conservatively assume that it is needed.  As such, make sure we have at
5972   // least enough stack space for the caller to store the 8 GPRs.
5973   // In the ELFv2 ABI, we allocate the parameter area iff a callee
5974   // really requires memory operands, e.g. a vararg function.
5975   if (HasParameterArea)
5976     NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
5977   else
5978     NumBytes = LinkageSize;
5979 
5980   // Tail call needs the stack to be aligned.
5981   if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
5982     NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
5983 
5984   int SPDiff = 0;
5985 
5986   // Calculate by how many bytes the stack has to be adjusted in case of tail
5987   // call optimization.
5988   if (!IsSibCall)
5989     SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);
5990 
5991   // To protect arguments on the stack from being clobbered in a tail call,
5992   // force all the loads to happen before doing any other lowering.
5993   if (CFlags.IsTailCall)
5994     Chain = DAG.getStackArgumentTokenFactor(Chain);
5995 
5996   // Adjust the stack pointer for the new arguments...
5997   // These operations are automatically eliminated by the prolog/epilog pass
5998   if (!IsSibCall)
5999     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6000   SDValue CallSeqStart = Chain;
6001 
6002   // Load the return address and frame pointer so it can be move somewhere else
6003   // later.
6004   SDValue LROp, FPOp;
6005   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
6006 
6007   // Set up a copy of the stack pointer for use loading and storing any
6008   // arguments that may not fit in the registers available for argument
6009   // passing.
6010   SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
6011 
6012   // Figure out which arguments are going to go in registers, and which in
6013   // memory.  Also, if this is a vararg function, floating point operations
6014   // must be stored to our stack, and loaded into integer regs as well, if
6015   // any integer regs are available for argument passing.
6016   unsigned ArgOffset = LinkageSize;
6017 
6018   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6019   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
6020 
6021   SmallVector<SDValue, 8> MemOpChains;
6022   for (unsigned i = 0; i != NumOps; ++i) {
6023     SDValue Arg = OutVals[i];
6024     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6025     EVT ArgVT = Outs[i].VT;
6026     EVT OrigVT = Outs[i].ArgVT;
6027 
6028     // PtrOff will be used to store the current argument to the stack if a
6029     // register cannot be found for it.
6030     SDValue PtrOff;
6031 
6032     // We re-align the argument offset for each argument, except when using the
6033     // fast calling convention, when we need to make sure we do that only when
6034     // we'll actually use a stack slot.
6035     auto ComputePtrOff = [&]() {
6036       /* Respect alignment of argument on the stack.  */
6037       auto Alignment =
6038           CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
6039       ArgOffset = alignTo(ArgOffset, Alignment);
6040 
6041       PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
6042 
6043       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6044     };
6045 
6046     if (!IsFastCall) {
6047       ComputePtrOff();
6048 
6049       /* Compute GPR index associated with argument offset.  */
6050       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
6051       GPR_idx = std::min(GPR_idx, NumGPRs);
6052     }
6053 
6054     // Promote integers to 64-bit values.
6055     if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
6056       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
6057       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6058       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
6059     }
6060 
6061     // FIXME memcpy is used way more than necessary.  Correctness first.
6062     // Note: "by value" is code for passing a structure by value, not
6063     // basic types.
6064     if (Flags.isByVal()) {
6065       // Note: Size includes alignment padding, so
6066       //   struct x { short a; char b; }
6067       // will have Size = 4.  With #pragma pack(1), it will have Size = 3.
6068       // These are the proper values we need for right-justifying the
6069       // aggregate in a parameter register.
6070       unsigned Size = Flags.getByValSize();
6071 
6072       // An empty aggregate parameter takes up no storage and no
6073       // registers.
6074       if (Size == 0)
6075         continue;
6076 
6077       if (IsFastCall)
6078         ComputePtrOff();
6079 
6080       // All aggregates smaller than 8 bytes must be passed right-justified.
6081       if (Size==1 || Size==2 || Size==4) {
6082         EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
6083         if (GPR_idx != NumGPRs) {
6084           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
6085                                         MachinePointerInfo(), VT);
6086           MemOpChains.push_back(Load.getValue(1));
6087           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6088 
6089           ArgOffset += PtrByteSize;
6090           continue;
6091         }
6092       }
6093 
6094       if (GPR_idx == NumGPRs && Size < 8) {
6095         SDValue AddPtr = PtrOff;
6096         if (!isLittleEndian) {
6097           SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
6098                                           PtrOff.getValueType());
6099           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6100         }
6101         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6102                                                           CallSeqStart,
6103                                                           Flags, DAG, dl);
6104         ArgOffset += PtrByteSize;
6105         continue;
6106       }
6107       // Copy entire object into memory.  There are cases where gcc-generated
6108       // code assumes it is there, even if it could be put entirely into
6109       // registers.  (This is not what the doc says.)
6110 
6111       // FIXME: The above statement is likely due to a misunderstanding of the
6112       // documents.  All arguments must be copied into the parameter area BY
6113       // THE CALLEE in the event that the callee takes the address of any
6114       // formal argument.  That has not yet been implemented.  However, it is
6115       // reasonable to use the stack area as a staging area for the register
6116       // load.
6117 
6118       // Skip this for small aggregates, as we will use the same slot for a
6119       // right-justified copy, below.
6120       if (Size >= 8)
6121         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
6122                                                           CallSeqStart,
6123                                                           Flags, DAG, dl);
6124 
6125       // When a register is available, pass a small aggregate right-justified.
6126       if (Size < 8 && GPR_idx != NumGPRs) {
6127         // The easiest way to get this right-justified in a register
6128         // is to copy the structure into the rightmost portion of a
6129         // local variable slot, then load the whole slot into the
6130         // register.
6131         // FIXME: The memcpy seems to produce pretty awful code for
6132         // small aggregates, particularly for packed ones.
6133         // FIXME: It would be preferable to use the slot in the
6134         // parameter save area instead of a new local variable.
6135         SDValue AddPtr = PtrOff;
6136         if (!isLittleEndian) {
6137           SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType());
6138           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6139         }
6140         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6141                                                           CallSeqStart,
6142                                                           Flags, DAG, dl);
6143 
6144         // Load the slot into the register.
6145         SDValue Load =
6146             DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo());
6147         MemOpChains.push_back(Load.getValue(1));
6148         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6149 
6150         // Done with this argument.
6151         ArgOffset += PtrByteSize;
6152         continue;
6153       }
6154 
6155       // For aggregates larger than PtrByteSize, copy the pieces of the
6156       // object that fit into registers from the parameter save area.
6157       for (unsigned j=0; j<Size; j+=PtrByteSize) {
6158         SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6159         SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6160         if (GPR_idx != NumGPRs) {
6161           SDValue Load =
6162               DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
6163           MemOpChains.push_back(Load.getValue(1));
6164           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6165           ArgOffset += PtrByteSize;
6166         } else {
6167           ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
6168           break;
6169         }
6170       }
6171       continue;
6172     }
6173 
6174     switch (Arg.getSimpleValueType().SimpleTy) {
6175     default: llvm_unreachable("Unexpected ValueType for argument!");
6176     case MVT::i1:
6177     case MVT::i32:
6178     case MVT::i64:
6179       if (Flags.isNest()) {
6180         // The 'nest' parameter, if any, is passed in R11.
6181         RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
6182         break;
6183       }
6184 
6185       // These can be scalar arguments or elements of an integer array type
6186       // passed directly.  Clang may use those instead of "byval" aggregate
6187       // types to avoid forcing arguments to memory unnecessarily.
6188       if (GPR_idx != NumGPRs) {
6189         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
6190       } else {
6191         if (IsFastCall)
6192           ComputePtrOff();
6193 
6194         assert(HasParameterArea &&
6195                "Parameter area must exist to pass an argument in memory.");
6196         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6197                          true, CFlags.IsTailCall, false, MemOpChains,
6198                          TailCallArguments, dl);
6199         if (IsFastCall)
6200           ArgOffset += PtrByteSize;
6201       }
6202       if (!IsFastCall)
6203         ArgOffset += PtrByteSize;
6204       break;
6205     case MVT::f32:
6206     case MVT::f64: {
6207       // These can be scalar arguments or elements of a float array type
6208       // passed directly.  The latter are used to implement ELFv2 homogenous
6209       // float aggregates.
6210 
6211       // Named arguments go into FPRs first, and once they overflow, the
6212       // remaining arguments go into GPRs and then the parameter save area.
6213       // Unnamed arguments for vararg functions always go to GPRs and
6214       // then the parameter save area.  For now, put all arguments to vararg
6215       // routines always in both locations (FPR *and* GPR or stack slot).
6216       bool NeedGPROrStack = CFlags.IsVarArg || FPR_idx == NumFPRs;
6217       bool NeededLoad = false;
6218 
6219       // First load the argument into the next available FPR.
6220       if (FPR_idx != NumFPRs)
6221         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6222 
6223       // Next, load the argument into GPR or stack slot if needed.
6224       if (!NeedGPROrStack)
6225         ;
6226       else if (GPR_idx != NumGPRs && !IsFastCall) {
6227         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
6228         // once we support fp <-> gpr moves.
6229 
6230         // In the non-vararg case, this can only ever happen in the
6231         // presence of f32 array types, since otherwise we never run
6232         // out of FPRs before running out of GPRs.
6233         SDValue ArgVal;
6234 
6235         // Double values are always passed in a single GPR.
6236         if (Arg.getValueType() != MVT::f32) {
6237           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
6238 
6239         // Non-array float values are extended and passed in a GPR.
6240         } else if (!Flags.isInConsecutiveRegs()) {
6241           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6242           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6243 
6244         // If we have an array of floats, we collect every odd element
6245         // together with its predecessor into one GPR.
6246         } else if (ArgOffset % PtrByteSize != 0) {
6247           SDValue Lo, Hi;
6248           Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
6249           Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6250           if (!isLittleEndian)
6251             std::swap(Lo, Hi);
6252           ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
6253 
6254         // The final element, if even, goes into the first half of a GPR.
6255         } else if (Flags.isInConsecutiveRegsLast()) {
6256           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6257           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6258           if (!isLittleEndian)
6259             ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
6260                                  DAG.getConstant(32, dl, MVT::i32));
6261 
6262         // Non-final even elements are skipped; they will be handled
6263         // together the with subsequent argument on the next go-around.
6264         } else
6265           ArgVal = SDValue();
6266 
6267         if (ArgVal.getNode())
6268           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
6269       } else {
6270         if (IsFastCall)
6271           ComputePtrOff();
6272 
6273         // Single-precision floating-point values are mapped to the
6274         // second (rightmost) word of the stack doubleword.
6275         if (Arg.getValueType() == MVT::f32 &&
6276             !isLittleEndian && !Flags.isInConsecutiveRegs()) {
6277           SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6278           PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6279         }
6280 
6281         assert(HasParameterArea &&
6282                "Parameter area must exist to pass an argument in memory.");
6283         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6284                          true, CFlags.IsTailCall, false, MemOpChains,
6285                          TailCallArguments, dl);
6286 
6287         NeededLoad = true;
6288       }
6289       // When passing an array of floats, the array occupies consecutive
6290       // space in the argument area; only round up to the next doubleword
6291       // at the end of the array.  Otherwise, each float takes 8 bytes.
6292       if (!IsFastCall || NeededLoad) {
6293         ArgOffset += (Arg.getValueType() == MVT::f32 &&
6294                       Flags.isInConsecutiveRegs()) ? 4 : 8;
6295         if (Flags.isInConsecutiveRegsLast())
6296           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
6297       }
6298       break;
6299     }
6300     case MVT::v4f32:
6301     case MVT::v4i32:
6302     case MVT::v8i16:
6303     case MVT::v16i8:
6304     case MVT::v2f64:
6305     case MVT::v2i64:
6306     case MVT::v1i128:
6307     case MVT::f128:
6308       // These can be scalar arguments or elements of a vector array type
6309       // passed directly.  The latter are used to implement ELFv2 homogenous
6310       // vector aggregates.
6311 
6312       // For a varargs call, named arguments go into VRs or on the stack as
6313       // usual; unnamed arguments always go to the stack or the corresponding
6314       // GPRs when within range.  For now, we always put the value in both
6315       // locations (or even all three).
6316       if (CFlags.IsVarArg) {
6317         assert(HasParameterArea &&
6318                "Parameter area must exist if we have a varargs call.");
6319         // We could elide this store in the case where the object fits
6320         // entirely in R registers.  Maybe later.
6321         SDValue Store =
6322             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6323         MemOpChains.push_back(Store);
6324         if (VR_idx != NumVRs) {
6325           SDValue Load =
6326               DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6327           MemOpChains.push_back(Load.getValue(1));
6328           RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6329         }
6330         ArgOffset += 16;
6331         for (unsigned i=0; i<16; i+=PtrByteSize) {
6332           if (GPR_idx == NumGPRs)
6333             break;
6334           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6335                                    DAG.getConstant(i, dl, PtrVT));
6336           SDValue Load =
6337               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6338           MemOpChains.push_back(Load.getValue(1));
6339           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6340         }
6341         break;
6342       }
6343 
6344       // Non-varargs Altivec params go into VRs or on the stack.
6345       if (VR_idx != NumVRs) {
6346         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6347       } else {
6348         if (IsFastCall)
6349           ComputePtrOff();
6350 
6351         assert(HasParameterArea &&
6352                "Parameter area must exist to pass an argument in memory.");
6353         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6354                          true, CFlags.IsTailCall, true, MemOpChains,
6355                          TailCallArguments, dl);
6356         if (IsFastCall)
6357           ArgOffset += 16;
6358       }
6359 
6360       if (!IsFastCall)
6361         ArgOffset += 16;
6362       break;
6363     }
6364   }
6365 
6366   assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
6367          "mismatch in size of parameter area");
6368   (void)NumBytesActuallyUsed;
6369 
6370   if (!MemOpChains.empty())
6371     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6372 
6373   // Check if this is an indirect call (MTCTR/BCTRL).
6374   // See prepareDescriptorIndirectCall and buildCallOperands for more
6375   // information about calls through function pointers in the 64-bit SVR4 ABI.
6376   if (CFlags.IsIndirect) {
6377     // For 64-bit ELFv2 ABI with PCRel, do not save the TOC of the
6378     // caller in the TOC save area.
6379     if (isTOCSaveRestoreRequired(Subtarget)) {
6380       assert(!CFlags.IsTailCall && "Indirect tails calls not supported");
6381       // Load r2 into a virtual register and store it to the TOC save area.
6382       setUsesTOCBasePtr(DAG);
6383       SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
6384       // TOC save area offset.
6385       unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
6386       SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
6387       SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6388       Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr,
6389                            MachinePointerInfo::getStack(
6390                                DAG.getMachineFunction(), TOCSaveOffset));
6391     }
6392     // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
6393     // This does not mean the MTCTR instruction must use R12; it's easier
6394     // to model this as an extra parameter, so do that.
6395     if (isELFv2ABI && !CFlags.IsPatchPoint)
6396       RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
6397   }
6398 
6399   // Build a sequence of copy-to-reg nodes chained together with token chain
6400   // and flag operands which copy the outgoing args into the appropriate regs.
6401   SDValue InFlag;
6402   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6403     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6404                              RegsToPass[i].second, InFlag);
6405     InFlag = Chain.getValue(1);
6406   }
6407 
6408   if (CFlags.IsTailCall && !IsSibCall)
6409     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6410                     TailCallArguments);
6411 
6412   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
6413                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
6414 }
6415 
6416 SDValue PPCTargetLowering::LowerCall_Darwin(
6417     SDValue Chain, SDValue Callee, CallFlags CFlags,
6418     const SmallVectorImpl<ISD::OutputArg> &Outs,
6419     const SmallVectorImpl<SDValue> &OutVals,
6420     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6421     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
6422     const CallBase *CB) const {
6423   unsigned NumOps = Outs.size();
6424 
6425   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6426   bool isPPC64 = PtrVT == MVT::i64;
6427   unsigned PtrByteSize = isPPC64 ? 8 : 4;
6428 
6429   MachineFunction &MF = DAG.getMachineFunction();
6430 
6431   // Mark this function as potentially containing a function that contains a
6432   // tail call. As a consequence the frame pointer will be used for dynamicalloc
6433   // and restoring the callers stack pointer in this functions epilog. This is
6434   // done because by tail calling the called function might overwrite the value
6435   // in this function's (MF) stack pointer stack slot 0(SP).
6436   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6437       CFlags.CallConv == CallingConv::Fast)
6438     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
6439 
6440   // Count how many bytes are to be pushed on the stack, including the linkage
6441   // area, and parameter passing area.  We start with 24/48 bytes, which is
6442   // prereserved space for [SP][CR][LR][3 x unused].
6443   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6444   unsigned NumBytes = LinkageSize;
6445 
6446   // Add up all the space actually used.
6447   // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
6448   // they all go in registers, but we must reserve stack space for them for
6449   // possible use by the caller.  In varargs or 64-bit calls, parameters are
6450   // assigned stack space in order, with padding so Altivec parameters are
6451   // 16-byte aligned.
6452   unsigned nAltivecParamsAtEnd = 0;
6453   for (unsigned i = 0; i != NumOps; ++i) {
6454     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6455     EVT ArgVT = Outs[i].VT;
6456     // Varargs Altivec parameters are padded to a 16 byte boundary.
6457     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
6458         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
6459         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
6460       if (!CFlags.IsVarArg && !isPPC64) {
6461         // Non-varargs Altivec parameters go after all the non-Altivec
6462         // parameters; handle those later so we know how much padding we need.
6463         nAltivecParamsAtEnd++;
6464         continue;
6465       }
6466       // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary.
6467       NumBytes = ((NumBytes+15)/16)*16;
6468     }
6469     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
6470   }
6471 
6472   // Allow for Altivec parameters at the end, if needed.
6473   if (nAltivecParamsAtEnd) {
6474     NumBytes = ((NumBytes+15)/16)*16;
6475     NumBytes += 16*nAltivecParamsAtEnd;
6476   }
6477 
6478   // The prolog code of the callee may store up to 8 GPR argument registers to
6479   // the stack, allowing va_start to index over them in memory if its varargs.
6480   // Because we cannot tell if this is needed on the caller side, we have to
6481   // conservatively assume that it is needed.  As such, make sure we have at
6482   // least enough stack space for the caller to store the 8 GPRs.
6483   NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
6484 
6485   // Tail call needs the stack to be aligned.
6486   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6487       CFlags.CallConv == CallingConv::Fast)
6488     NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
6489 
6490   // Calculate by how many bytes the stack has to be adjusted in case of tail
6491   // call optimization.
6492   int SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);
6493 
6494   // To protect arguments on the stack from being clobbered in a tail call,
6495   // force all the loads to happen before doing any other lowering.
6496   if (CFlags.IsTailCall)
6497     Chain = DAG.getStackArgumentTokenFactor(Chain);
6498 
6499   // Adjust the stack pointer for the new arguments...
6500   // These operations are automatically eliminated by the prolog/epilog pass
6501   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6502   SDValue CallSeqStart = Chain;
6503 
6504   // Load the return address and frame pointer so it can be move somewhere else
6505   // later.
6506   SDValue LROp, FPOp;
6507   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
6508 
6509   // Set up a copy of the stack pointer for use loading and storing any
6510   // arguments that may not fit in the registers available for argument
6511   // passing.
6512   SDValue StackPtr;
6513   if (isPPC64)
6514     StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
6515   else
6516     StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
6517 
6518   // Figure out which arguments are going to go in registers, and which in
6519   // memory.  Also, if this is a vararg function, floating point operations
6520   // must be stored to our stack, and loaded into integer regs as well, if
6521   // any integer regs are available for argument passing.
6522   unsigned ArgOffset = LinkageSize;
6523   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
6524 
6525   static const MCPhysReg GPR_32[] = {           // 32-bit registers.
6526     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6527     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
6528   };
6529   static const MCPhysReg GPR_64[] = {           // 64-bit registers.
6530     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6531     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
6532   };
6533   static const MCPhysReg VR[] = {
6534     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
6535     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
6536   };
6537   const unsigned NumGPRs = array_lengthof(GPR_32);
6538   const unsigned NumFPRs = 13;
6539   const unsigned NumVRs  = array_lengthof(VR);
6540 
6541   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
6542 
6543   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6544   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
6545 
6546   SmallVector<SDValue, 8> MemOpChains;
6547   for (unsigned i = 0; i != NumOps; ++i) {
6548     SDValue Arg = OutVals[i];
6549     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6550 
6551     // PtrOff will be used to store the current argument to the stack if a
6552     // register cannot be found for it.
6553     SDValue PtrOff;
6554 
6555     PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
6556 
6557     PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6558 
6559     // On PPC64, promote integers to 64-bit values.
6560     if (isPPC64 && Arg.getValueType() == MVT::i32) {
6561       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
6562       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6563       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
6564     }
6565 
6566     // FIXME memcpy is used way more than necessary.  Correctness first.
6567     // Note: "by value" is code for passing a structure by value, not
6568     // basic types.
6569     if (Flags.isByVal()) {
6570       unsigned Size = Flags.getByValSize();
6571       // Very small objects are passed right-justified.  Everything else is
6572       // passed left-justified.
6573       if (Size==1 || Size==2) {
6574         EVT VT = (Size==1) ? MVT::i8 : MVT::i16;
6575         if (GPR_idx != NumGPRs) {
6576           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
6577                                         MachinePointerInfo(), VT);
6578           MemOpChains.push_back(Load.getValue(1));
6579           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6580 
6581           ArgOffset += PtrByteSize;
6582         } else {
6583           SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
6584                                           PtrOff.getValueType());
6585           SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6586           Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6587                                                             CallSeqStart,
6588                                                             Flags, DAG, dl);
6589           ArgOffset += PtrByteSize;
6590         }
6591         continue;
6592       }
6593       // Copy entire object into memory.  There are cases where gcc-generated
6594       // code assumes it is there, even if it could be put entirely into
6595       // registers.  (This is not what the doc says.)
6596       Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
6597                                                         CallSeqStart,
6598                                                         Flags, DAG, dl);
6599 
6600       // For small aggregates (Darwin only) and aggregates >= PtrByteSize,
6601       // copy the pieces of the object that fit into registers from the
6602       // parameter save area.
6603       for (unsigned j=0; j<Size; j+=PtrByteSize) {
6604         SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6605         SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6606         if (GPR_idx != NumGPRs) {
6607           SDValue Load =
6608               DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
6609           MemOpChains.push_back(Load.getValue(1));
6610           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6611           ArgOffset += PtrByteSize;
6612         } else {
6613           ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
6614           break;
6615         }
6616       }
6617       continue;
6618     }
6619 
6620     switch (Arg.getSimpleValueType().SimpleTy) {
6621     default: llvm_unreachable("Unexpected ValueType for argument!");
6622     case MVT::i1:
6623     case MVT::i32:
6624     case MVT::i64:
6625       if (GPR_idx != NumGPRs) {
6626         if (Arg.getValueType() == MVT::i1)
6627           Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg);
6628 
6629         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
6630       } else {
6631         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6632                          isPPC64, CFlags.IsTailCall, false, MemOpChains,
6633                          TailCallArguments, dl);
6634       }
6635       ArgOffset += PtrByteSize;
6636       break;
6637     case MVT::f32:
6638     case MVT::f64:
6639       if (FPR_idx != NumFPRs) {
6640         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6641 
6642         if (CFlags.IsVarArg) {
6643           SDValue Store =
6644               DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6645           MemOpChains.push_back(Store);
6646 
6647           // Float varargs are always shadowed in available integer registers
6648           if (GPR_idx != NumGPRs) {
6649             SDValue Load =
6650                 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
6651             MemOpChains.push_back(Load.getValue(1));
6652             RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6653           }
6654           if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
6655             SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6656             PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6657             SDValue Load =
6658                 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
6659             MemOpChains.push_back(Load.getValue(1));
6660             RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6661           }
6662         } else {
6663           // If we have any FPRs remaining, we may also have GPRs remaining.
6664           // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
6665           // GPRs.
6666           if (GPR_idx != NumGPRs)
6667             ++GPR_idx;
6668           if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
6669               !isPPC64)  // PPC64 has 64-bit GPR's obviously :)
6670             ++GPR_idx;
6671         }
6672       } else
6673         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6674                          isPPC64, CFlags.IsTailCall, false, MemOpChains,
6675                          TailCallArguments, dl);
6676       if (isPPC64)
6677         ArgOffset += 8;
6678       else
6679         ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
6680       break;
6681     case MVT::v4f32:
6682     case MVT::v4i32:
6683     case MVT::v8i16:
6684     case MVT::v16i8:
6685       if (CFlags.IsVarArg) {
6686         // These go aligned on the stack, or in the corresponding R registers
6687         // when within range.  The Darwin PPC ABI doc claims they also go in
6688         // V registers; in fact gcc does this only for arguments that are
6689         // prototyped, not for those that match the ...  We do it for all
6690         // arguments, seems to work.
6691         while (ArgOffset % 16 !=0) {
6692           ArgOffset += PtrByteSize;
6693           if (GPR_idx != NumGPRs)
6694             GPR_idx++;
6695         }
6696         // We could elide this store in the case where the object fits
6697         // entirely in R registers.  Maybe later.
6698         PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
6699                              DAG.getConstant(ArgOffset, dl, PtrVT));
6700         SDValue Store =
6701             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6702         MemOpChains.push_back(Store);
6703         if (VR_idx != NumVRs) {
6704           SDValue Load =
6705               DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6706           MemOpChains.push_back(Load.getValue(1));
6707           RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6708         }
6709         ArgOffset += 16;
6710         for (unsigned i=0; i<16; i+=PtrByteSize) {
6711           if (GPR_idx == NumGPRs)
6712             break;
6713           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6714                                    DAG.getConstant(i, dl, PtrVT));
6715           SDValue Load =
6716               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6717           MemOpChains.push_back(Load.getValue(1));
6718           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6719         }
6720         break;
6721       }
6722 
6723       // Non-varargs Altivec params generally go in registers, but have
6724       // stack space allocated at the end.
6725       if (VR_idx != NumVRs) {
6726         // Doesn't have GPR space allocated.
6727         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6728       } else if (nAltivecParamsAtEnd==0) {
6729         // We are emitting Altivec params in order.
6730         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6731                          isPPC64, CFlags.IsTailCall, true, MemOpChains,
6732                          TailCallArguments, dl);
6733         ArgOffset += 16;
6734       }
6735       break;
6736     }
6737   }
6738   // If all Altivec parameters fit in registers, as they usually do,
6739   // they get stack space following the non-Altivec parameters.  We
6740   // don't track this here because nobody below needs it.
6741   // If there are more Altivec parameters than fit in registers emit
6742   // the stores here.
6743   if (!CFlags.IsVarArg && nAltivecParamsAtEnd > NumVRs) {
6744     unsigned j = 0;
6745     // Offset is aligned; skip 1st 12 params which go in V registers.
6746     ArgOffset = ((ArgOffset+15)/16)*16;
6747     ArgOffset += 12*16;
6748     for (unsigned i = 0; i != NumOps; ++i) {
6749       SDValue Arg = OutVals[i];
6750       EVT ArgType = Outs[i].VT;
6751       if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
6752           ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
6753         if (++j > NumVRs) {
6754           SDValue PtrOff;
6755           // We are emitting Altivec params in order.
6756           LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6757                            isPPC64, CFlags.IsTailCall, true, MemOpChains,
6758                            TailCallArguments, dl);
6759           ArgOffset += 16;
6760         }
6761       }
6762     }
6763   }
6764 
6765   if (!MemOpChains.empty())
6766     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6767 
6768   // On Darwin, R12 must contain the address of an indirect callee.  This does
6769   // not mean the MTCTR instruction must use R12; it's easier to model this as
6770   // an extra parameter, so do that.
6771   if (CFlags.IsIndirect) {
6772     assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
6773     RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 :
6774                                                    PPC::R12), Callee));
6775   }
6776 
6777   // Build a sequence of copy-to-reg nodes chained together with token chain
6778   // and flag operands which copy the outgoing args into the appropriate regs.
6779   SDValue InFlag;
6780   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6781     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6782                              RegsToPass[i].second, InFlag);
6783     InFlag = Chain.getValue(1);
6784   }
6785 
6786   if (CFlags.IsTailCall)
6787     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6788                     TailCallArguments);
6789 
6790   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
6791                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
6792 }
6793 
6794 static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
6795                    CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
6796                    CCState &State) {
6797 
6798   const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>(
6799       State.getMachineFunction().getSubtarget());
6800   const bool IsPPC64 = Subtarget.isPPC64();
6801   const Align PtrAlign = IsPPC64 ? Align(8) : Align(4);
6802   const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
6803 
6804   assert((!ValVT.isInteger() ||
6805           (ValVT.getSizeInBits() <= RegVT.getSizeInBits())) &&
6806          "Integer argument exceeds register size: should have been legalized");
6807 
6808   if (ValVT == MVT::f128)
6809     report_fatal_error("f128 is unimplemented on AIX.");
6810 
6811   if (ArgFlags.isNest())
6812     report_fatal_error("Nest arguments are unimplemented.");
6813 
6814   if (ValVT.isVector() || LocVT.isVector())
6815     report_fatal_error("Vector arguments are unimplemented on AIX.");
6816 
6817   static const MCPhysReg GPR_32[] = {// 32-bit registers.
6818                                      PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6819                                      PPC::R7, PPC::R8, PPC::R9, PPC::R10};
6820   static const MCPhysReg GPR_64[] = {// 64-bit registers.
6821                                      PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6822                                      PPC::X7, PPC::X8, PPC::X9, PPC::X10};
6823 
6824   if (ArgFlags.isByVal()) {
6825     if (ArgFlags.getNonZeroByValAlign() > PtrAlign)
6826       report_fatal_error("Pass-by-value arguments with alignment greater than "
6827                          "register width are not supported.");
6828 
6829     const unsigned ByValSize = ArgFlags.getByValSize();
6830 
6831     // An empty aggregate parameter takes up no storage and no registers,
6832     // but needs a MemLoc for a stack slot for the formal arguments side.
6833     if (ByValSize == 0) {
6834       State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
6835                                        State.getNextStackOffset(), RegVT,
6836                                        LocInfo));
6837       return false;
6838     }
6839 
6840     const unsigned StackSize = alignTo(ByValSize, PtrAlign);
6841     unsigned Offset = State.AllocateStack(StackSize, PtrAlign);
6842     for (const unsigned E = Offset + StackSize; Offset < E;
6843          Offset += PtrAlign.value()) {
6844       if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
6845         State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6846       else {
6847         State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
6848                                          Offset, MVT::INVALID_SIMPLE_VALUE_TYPE,
6849                                          LocInfo));
6850         break;
6851       }
6852     }
6853     return false;
6854   }
6855 
6856   // Arguments always reserve parameter save area.
6857   switch (ValVT.SimpleTy) {
6858   default:
6859     report_fatal_error("Unhandled value type for argument.");
6860   case MVT::i64:
6861     // i64 arguments should have been split to i32 for PPC32.
6862     assert(IsPPC64 && "PPC32 should have split i64 values.");
6863     LLVM_FALLTHROUGH;
6864   case MVT::i1:
6865   case MVT::i32: {
6866     const unsigned Offset = State.AllocateStack(PtrAlign.value(), PtrAlign);
6867     // AIX integer arguments are always passed in register width.
6868     if (ValVT.getSizeInBits() < RegVT.getSizeInBits())
6869       LocInfo = ArgFlags.isSExt() ? CCValAssign::LocInfo::SExt
6870                                   : CCValAssign::LocInfo::ZExt;
6871     if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
6872       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6873     else
6874       State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, RegVT, LocInfo));
6875 
6876     return false;
6877   }
6878   case MVT::f32:
6879   case MVT::f64: {
6880     // Parameter save area (PSA) is reserved even if the float passes in fpr.
6881     const unsigned StoreSize = LocVT.getStoreSize();
6882     // Floats are always 4-byte aligned in the PSA on AIX.
6883     // This includes f64 in 64-bit mode for ABI compatibility.
6884     const unsigned Offset =
6885         State.AllocateStack(IsPPC64 ? 8 : StoreSize, Align(4));
6886     unsigned FReg = State.AllocateReg(FPR);
6887     if (FReg)
6888       State.addLoc(CCValAssign::getReg(ValNo, ValVT, FReg, LocVT, LocInfo));
6889 
6890     // Reserve and initialize GPRs or initialize the PSA as required.
6891     for (unsigned I = 0; I < StoreSize; I += PtrAlign.value()) {
6892       if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) {
6893         assert(FReg && "An FPR should be available when a GPR is reserved.");
6894         if (State.isVarArg()) {
6895           // Successfully reserved GPRs are only initialized for vararg calls.
6896           // Custom handling is required for:
6897           //   f64 in PPC32 needs to be split into 2 GPRs.
6898           //   f32 in PPC64 needs to occupy only lower 32 bits of 64-bit GPR.
6899           State.addLoc(
6900               CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6901         }
6902       } else {
6903         // If there are insufficient GPRs, the PSA needs to be initialized.
6904         // Initialization occurs even if an FPR was initialized for
6905         // compatibility with the AIX XL compiler. The full memory for the
6906         // argument will be initialized even if a prior word is saved in GPR.
6907         // A custom memLoc is used when the argument also passes in FPR so
6908         // that the callee handling can skip over it easily.
6909         State.addLoc(
6910             FReg ? CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT,
6911                                              LocInfo)
6912                  : CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6913         break;
6914       }
6915     }
6916 
6917     return false;
6918   }
6919   }
6920   return true;
6921 }
6922 
6923 static const TargetRegisterClass *getRegClassForSVT(MVT::SimpleValueType SVT,
6924                                                     bool IsPPC64) {
6925   assert((IsPPC64 || SVT != MVT::i64) &&
6926          "i64 should have been split for 32-bit codegen.");
6927 
6928   switch (SVT) {
6929   default:
6930     report_fatal_error("Unexpected value type for formal argument");
6931   case MVT::i1:
6932   case MVT::i32:
6933   case MVT::i64:
6934     return IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
6935   case MVT::f32:
6936     return &PPC::F4RCRegClass;
6937   case MVT::f64:
6938     return &PPC::F8RCRegClass;
6939   }
6940 }
6941 
6942 static SDValue truncateScalarIntegerArg(ISD::ArgFlagsTy Flags, EVT ValVT,
6943                                         SelectionDAG &DAG, SDValue ArgValue,
6944                                         MVT LocVT, const SDLoc &dl) {
6945   assert(ValVT.isScalarInteger() && LocVT.isScalarInteger());
6946   assert(ValVT.getSizeInBits() < LocVT.getSizeInBits());
6947 
6948   if (Flags.isSExt())
6949     ArgValue = DAG.getNode(ISD::AssertSext, dl, LocVT, ArgValue,
6950                            DAG.getValueType(ValVT));
6951   else if (Flags.isZExt())
6952     ArgValue = DAG.getNode(ISD::AssertZext, dl, LocVT, ArgValue,
6953                            DAG.getValueType(ValVT));
6954 
6955   return DAG.getNode(ISD::TRUNCATE, dl, ValVT, ArgValue);
6956 }
6957 
6958 static unsigned mapArgRegToOffsetAIX(unsigned Reg, const PPCFrameLowering *FL) {
6959   const unsigned LASize = FL->getLinkageSize();
6960 
6961   if (PPC::GPRCRegClass.contains(Reg)) {
6962     assert(Reg >= PPC::R3 && Reg <= PPC::R10 &&
6963            "Reg must be a valid argument register!");
6964     return LASize + 4 * (Reg - PPC::R3);
6965   }
6966 
6967   if (PPC::G8RCRegClass.contains(Reg)) {
6968     assert(Reg >= PPC::X3 && Reg <= PPC::X10 &&
6969            "Reg must be a valid argument register!");
6970     return LASize + 8 * (Reg - PPC::X3);
6971   }
6972 
6973   llvm_unreachable("Only general purpose registers expected.");
6974 }
6975 
6976 //   AIX ABI Stack Frame Layout:
6977 //
6978 //   Low Memory +--------------------------------------------+
6979 //   SP   +---> | Back chain                                 | ---+
6980 //        |     +--------------------------------------------+    |
6981 //        |     | Saved Condition Register                   |    |
6982 //        |     +--------------------------------------------+    |
6983 //        |     | Saved Linkage Register                     |    |
6984 //        |     +--------------------------------------------+    | Linkage Area
6985 //        |     | Reserved for compilers                     |    |
6986 //        |     +--------------------------------------------+    |
6987 //        |     | Reserved for binders                       |    |
6988 //        |     +--------------------------------------------+    |
6989 //        |     | Saved TOC pointer                          | ---+
6990 //        |     +--------------------------------------------+
6991 //        |     | Parameter save area                        |
6992 //        |     +--------------------------------------------+
6993 //        |     | Alloca space                               |
6994 //        |     +--------------------------------------------+
6995 //        |     | Local variable space                       |
6996 //        |     +--------------------------------------------+
6997 //        |     | Float/int conversion temporary             |
6998 //        |     +--------------------------------------------+
6999 //        |     | Save area for AltiVec registers            |
7000 //        |     +--------------------------------------------+
7001 //        |     | AltiVec alignment padding                  |
7002 //        |     +--------------------------------------------+
7003 //        |     | Save area for VRSAVE register              |
7004 //        |     +--------------------------------------------+
7005 //        |     | Save area for General Purpose registers    |
7006 //        |     +--------------------------------------------+
7007 //        |     | Save area for Floating Point registers     |
7008 //        |     +--------------------------------------------+
7009 //        +---- | Back chain                                 |
7010 // High Memory  +--------------------------------------------+
7011 //
7012 //  Specifications:
7013 //  AIX 7.2 Assembler Language Reference
7014 //  Subroutine linkage convention
7015 
7016 SDValue PPCTargetLowering::LowerFormalArguments_AIX(
7017     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
7018     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
7019     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
7020 
7021   assert((CallConv == CallingConv::C || CallConv == CallingConv::Cold ||
7022           CallConv == CallingConv::Fast) &&
7023          "Unexpected calling convention!");
7024 
7025   if (getTargetMachine().Options.GuaranteedTailCallOpt)
7026     report_fatal_error("Tail call support is unimplemented on AIX.");
7027 
7028   if (useSoftFloat())
7029     report_fatal_error("Soft float support is unimplemented on AIX.");
7030 
7031   const PPCSubtarget &Subtarget =
7032       static_cast<const PPCSubtarget &>(DAG.getSubtarget());
7033 
7034   const bool IsPPC64 = Subtarget.isPPC64();
7035   const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
7036 
7037   // Assign locations to all of the incoming arguments.
7038   SmallVector<CCValAssign, 16> ArgLocs;
7039   MachineFunction &MF = DAG.getMachineFunction();
7040   MachineFrameInfo &MFI = MF.getFrameInfo();
7041   CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
7042 
7043   const EVT PtrVT = getPointerTy(MF.getDataLayout());
7044   // Reserve space for the linkage area on the stack.
7045   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
7046   CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
7047   CCInfo.AnalyzeFormalArguments(Ins, CC_AIX);
7048 
7049   SmallVector<SDValue, 8> MemOps;
7050 
7051   for (size_t I = 0, End = ArgLocs.size(); I != End; /* No increment here */) {
7052     CCValAssign &VA = ArgLocs[I++];
7053     MVT LocVT = VA.getLocVT();
7054     ISD::ArgFlagsTy Flags = Ins[VA.getValNo()].Flags;
7055 
7056     // For compatibility with the AIX XL compiler, the float args in the
7057     // parameter save area are initialized even if the argument is available
7058     // in register.  The caller is required to initialize both the register
7059     // and memory, however, the callee can choose to expect it in either.
7060     // The memloc is dismissed here because the argument is retrieved from
7061     // the register.
7062     if (VA.isMemLoc() && VA.needsCustom())
7063       continue;
7064 
7065     if (Flags.isByVal() && VA.isMemLoc()) {
7066       const unsigned Size =
7067           alignTo(Flags.getByValSize() ? Flags.getByValSize() : PtrByteSize,
7068                   PtrByteSize);
7069       const int FI = MF.getFrameInfo().CreateFixedObject(
7070           Size, VA.getLocMemOffset(), /* IsImmutable */ false,
7071           /* IsAliased */ true);
7072       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7073       InVals.push_back(FIN);
7074 
7075       continue;
7076     }
7077 
7078     if (Flags.isByVal()) {
7079       assert(VA.isRegLoc() && "MemLocs should already be handled.");
7080 
7081       const MCPhysReg ArgReg = VA.getLocReg();
7082       const PPCFrameLowering *FL = Subtarget.getFrameLowering();
7083 
7084       if (Flags.getNonZeroByValAlign() > PtrByteSize)
7085         report_fatal_error("Over aligned byvals not supported yet.");
7086 
7087       const unsigned StackSize = alignTo(Flags.getByValSize(), PtrByteSize);
7088       const int FI = MF.getFrameInfo().CreateFixedObject(
7089           StackSize, mapArgRegToOffsetAIX(ArgReg, FL), /* IsImmutable */ false,
7090           /* IsAliased */ true);
7091       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7092       InVals.push_back(FIN);
7093 
7094       // Add live ins for all the RegLocs for the same ByVal.
7095       const TargetRegisterClass *RegClass =
7096           IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
7097 
7098       auto HandleRegLoc = [&, RegClass, LocVT](const MCPhysReg PhysReg,
7099                                                unsigned Offset) {
7100         const unsigned VReg = MF.addLiveIn(PhysReg, RegClass);
7101         // Since the callers side has left justified the aggregate in the
7102         // register, we can simply store the entire register into the stack
7103         // slot.
7104         SDValue CopyFrom = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
7105         // The store to the fixedstack object is needed becuase accessing a
7106         // field of the ByVal will use a gep and load. Ideally we will optimize
7107         // to extracting the value from the register directly, and elide the
7108         // stores when the arguments address is not taken, but that will need to
7109         // be future work.
7110         SDValue Store =
7111             DAG.getStore(CopyFrom.getValue(1), dl, CopyFrom,
7112                          DAG.getObjectPtrOffset(dl, FIN, Offset),
7113                          MachinePointerInfo::getFixedStack(MF, FI, Offset));
7114 
7115         MemOps.push_back(Store);
7116       };
7117 
7118       unsigned Offset = 0;
7119       HandleRegLoc(VA.getLocReg(), Offset);
7120       Offset += PtrByteSize;
7121       for (; Offset != StackSize && ArgLocs[I].isRegLoc();
7122            Offset += PtrByteSize) {
7123         assert(ArgLocs[I].getValNo() == VA.getValNo() &&
7124                "RegLocs should be for ByVal argument.");
7125 
7126         const CCValAssign RL = ArgLocs[I++];
7127         HandleRegLoc(RL.getLocReg(), Offset);
7128       }
7129 
7130       if (Offset != StackSize) {
7131         assert(ArgLocs[I].getValNo() == VA.getValNo() &&
7132                "Expected MemLoc for remaining bytes.");
7133         assert(ArgLocs[I].isMemLoc() && "Expected MemLoc for remaining bytes.");
7134         // Consume the MemLoc.The InVal has already been emitted, so nothing
7135         // more needs to be done.
7136         ++I;
7137       }
7138 
7139       continue;
7140     }
7141 
7142     EVT ValVT = VA.getValVT();
7143     if (VA.isRegLoc() && !VA.needsCustom()) {
7144       MVT::SimpleValueType SVT = ValVT.getSimpleVT().SimpleTy;
7145       unsigned VReg =
7146           MF.addLiveIn(VA.getLocReg(), getRegClassForSVT(SVT, IsPPC64));
7147       SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
7148       if (ValVT.isScalarInteger() &&
7149           (ValVT.getSizeInBits() < LocVT.getSizeInBits())) {
7150         ArgValue =
7151             truncateScalarIntegerArg(Flags, ValVT, DAG, ArgValue, LocVT, dl);
7152       }
7153       InVals.push_back(ArgValue);
7154       continue;
7155     }
7156     if (VA.isMemLoc()) {
7157       const unsigned LocSize = LocVT.getStoreSize();
7158       const unsigned ValSize = ValVT.getStoreSize();
7159       assert((ValSize <= LocSize) &&
7160              "Object size is larger than size of MemLoc");
7161       int CurArgOffset = VA.getLocMemOffset();
7162       // Objects are right-justified because AIX is big-endian.
7163       if (LocSize > ValSize)
7164         CurArgOffset += LocSize - ValSize;
7165       // Potential tail calls could cause overwriting of argument stack slots.
7166       const bool IsImmutable =
7167           !(getTargetMachine().Options.GuaranteedTailCallOpt &&
7168             (CallConv == CallingConv::Fast));
7169       int FI = MFI.CreateFixedObject(ValSize, CurArgOffset, IsImmutable);
7170       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7171       SDValue ArgValue =
7172           DAG.getLoad(ValVT, dl, Chain, FIN, MachinePointerInfo());
7173       InVals.push_back(ArgValue);
7174       continue;
7175     }
7176   }
7177 
7178   // On AIX a minimum of 8 words is saved to the parameter save area.
7179   const unsigned MinParameterSaveArea = 8 * PtrByteSize;
7180   // Area that is at least reserved in the caller of this function.
7181   unsigned CallerReservedArea =
7182       std::max(CCInfo.getNextStackOffset(), LinkageSize + MinParameterSaveArea);
7183 
7184   // Set the size that is at least reserved in caller of this function. Tail
7185   // call optimized function's reserved stack space needs to be aligned so
7186   // that taking the difference between two stack areas will result in an
7187   // aligned stack.
7188   CallerReservedArea =
7189       EnsureStackAlignment(Subtarget.getFrameLowering(), CallerReservedArea);
7190   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
7191   FuncInfo->setMinReservedArea(CallerReservedArea);
7192 
7193   if (isVarArg) {
7194     FuncInfo->setVarArgsFrameIndex(
7195         MFI.CreateFixedObject(PtrByteSize, CCInfo.getNextStackOffset(), true));
7196     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
7197 
7198     static const MCPhysReg GPR_32[] = {PPC::R3, PPC::R4, PPC::R5, PPC::R6,
7199                                        PPC::R7, PPC::R8, PPC::R9, PPC::R10};
7200 
7201     static const MCPhysReg GPR_64[] = {PPC::X3, PPC::X4, PPC::X5, PPC::X6,
7202                                        PPC::X7, PPC::X8, PPC::X9, PPC::X10};
7203     const unsigned NumGPArgRegs = array_lengthof(IsPPC64 ? GPR_64 : GPR_32);
7204 
7205     // The fixed integer arguments of a variadic function are stored to the
7206     // VarArgsFrameIndex on the stack so that they may be loaded by
7207     // dereferencing the result of va_next.
7208     for (unsigned GPRIndex =
7209              (CCInfo.getNextStackOffset() - LinkageSize) / PtrByteSize;
7210          GPRIndex < NumGPArgRegs; ++GPRIndex) {
7211 
7212       const unsigned VReg =
7213           IsPPC64 ? MF.addLiveIn(GPR_64[GPRIndex], &PPC::G8RCRegClass)
7214                   : MF.addLiveIn(GPR_32[GPRIndex], &PPC::GPRCRegClass);
7215 
7216       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
7217       SDValue Store =
7218           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
7219       MemOps.push_back(Store);
7220       // Increment the address for the next argument to store.
7221       SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
7222       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
7223     }
7224   }
7225 
7226   if (!MemOps.empty())
7227     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
7228 
7229   return Chain;
7230 }
7231 
7232 SDValue PPCTargetLowering::LowerCall_AIX(
7233     SDValue Chain, SDValue Callee, CallFlags CFlags,
7234     const SmallVectorImpl<ISD::OutputArg> &Outs,
7235     const SmallVectorImpl<SDValue> &OutVals,
7236     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
7237     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
7238     const CallBase *CB) const {
7239   // See PPCTargetLowering::LowerFormalArguments_AIX() for a description of the
7240   // AIX ABI stack frame layout.
7241 
7242   assert((CFlags.CallConv == CallingConv::C ||
7243           CFlags.CallConv == CallingConv::Cold ||
7244           CFlags.CallConv == CallingConv::Fast) &&
7245          "Unexpected calling convention!");
7246 
7247   if (CFlags.IsPatchPoint)
7248     report_fatal_error("This call type is unimplemented on AIX.");
7249 
7250   const PPCSubtarget& Subtarget =
7251       static_cast<const PPCSubtarget&>(DAG.getSubtarget());
7252   if (Subtarget.hasAltivec())
7253     report_fatal_error("Altivec support is unimplemented on AIX.");
7254 
7255   MachineFunction &MF = DAG.getMachineFunction();
7256   SmallVector<CCValAssign, 16> ArgLocs;
7257   CCState CCInfo(CFlags.CallConv, CFlags.IsVarArg, MF, ArgLocs,
7258                  *DAG.getContext());
7259 
7260   // Reserve space for the linkage save area (LSA) on the stack.
7261   // In both PPC32 and PPC64 there are 6 reserved slots in the LSA:
7262   //   [SP][CR][LR][2 x reserved][TOC].
7263   // The LSA is 24 bytes (6x4) in PPC32 and 48 bytes (6x8) in PPC64.
7264   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
7265   const bool IsPPC64 = Subtarget.isPPC64();
7266   const EVT PtrVT = getPointerTy(DAG.getDataLayout());
7267   const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
7268   CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
7269   CCInfo.AnalyzeCallOperands(Outs, CC_AIX);
7270 
7271   // The prolog code of the callee may store up to 8 GPR argument registers to
7272   // the stack, allowing va_start to index over them in memory if the callee
7273   // is variadic.
7274   // Because we cannot tell if this is needed on the caller side, we have to
7275   // conservatively assume that it is needed.  As such, make sure we have at
7276   // least enough stack space for the caller to store the 8 GPRs.
7277   const unsigned MinParameterSaveAreaSize = 8 * PtrByteSize;
7278   const unsigned NumBytes = std::max(LinkageSize + MinParameterSaveAreaSize,
7279                                      CCInfo.getNextStackOffset());
7280 
7281   // Adjust the stack pointer for the new arguments...
7282   // These operations are automatically eliminated by the prolog/epilog pass.
7283   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
7284   SDValue CallSeqStart = Chain;
7285 
7286   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
7287   SmallVector<SDValue, 8> MemOpChains;
7288 
7289   // Set up a copy of the stack pointer for loading and storing any
7290   // arguments that may not fit in the registers available for argument
7291   // passing.
7292   const SDValue StackPtr = IsPPC64 ? DAG.getRegister(PPC::X1, MVT::i64)
7293                                    : DAG.getRegister(PPC::R1, MVT::i32);
7294 
7295   for (unsigned I = 0, E = ArgLocs.size(); I != E;) {
7296     const unsigned ValNo = ArgLocs[I].getValNo();
7297     SDValue Arg = OutVals[ValNo];
7298     ISD::ArgFlagsTy Flags = Outs[ValNo].Flags;
7299 
7300     if (Flags.isByVal()) {
7301       const unsigned ByValSize = Flags.getByValSize();
7302 
7303       // Nothing to do for zero-sized ByVals on the caller side.
7304       if (!ByValSize) {
7305         ++I;
7306         continue;
7307       }
7308 
7309       auto GetLoad = [&](EVT VT, unsigned LoadOffset) {
7310         return DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain,
7311                               (LoadOffset != 0)
7312                                   ? DAG.getObjectPtrOffset(dl, Arg, LoadOffset)
7313                                   : Arg,
7314                               MachinePointerInfo(), VT);
7315       };
7316 
7317       unsigned LoadOffset = 0;
7318 
7319       // Initialize registers, which are fully occupied by the by-val argument.
7320       while (LoadOffset + PtrByteSize <= ByValSize && ArgLocs[I].isRegLoc()) {
7321         SDValue Load = GetLoad(PtrVT, LoadOffset);
7322         MemOpChains.push_back(Load.getValue(1));
7323         LoadOffset += PtrByteSize;
7324         const CCValAssign &ByValVA = ArgLocs[I++];
7325         assert(ByValVA.getValNo() == ValNo &&
7326                "Unexpected location for pass-by-value argument.");
7327         RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), Load));
7328       }
7329 
7330       if (LoadOffset == ByValSize)
7331         continue;
7332 
7333       // There must be one more loc to handle the remainder.
7334       assert(ArgLocs[I].getValNo() == ValNo &&
7335              "Expected additional location for by-value argument.");
7336 
7337       if (ArgLocs[I].isMemLoc()) {
7338         assert(LoadOffset < ByValSize && "Unexpected memloc for by-val arg.");
7339         const CCValAssign &ByValVA = ArgLocs[I++];
7340         ISD::ArgFlagsTy MemcpyFlags = Flags;
7341         // Only memcpy the bytes that don't pass in register.
7342         MemcpyFlags.setByValSize(ByValSize - LoadOffset);
7343         Chain = CallSeqStart = createMemcpyOutsideCallSeq(
7344             (LoadOffset != 0) ? DAG.getObjectPtrOffset(dl, Arg, LoadOffset)
7345                               : Arg,
7346             DAG.getObjectPtrOffset(dl, StackPtr, ByValVA.getLocMemOffset()),
7347             CallSeqStart, MemcpyFlags, DAG, dl);
7348         continue;
7349       }
7350 
7351       // Initialize the final register residue.
7352       // Any residue that occupies the final by-val arg register must be
7353       // left-justified on AIX. Loads must be a power-of-2 size and cannot be
7354       // larger than the ByValSize. For example: a 7 byte by-val arg requires 4,
7355       // 2 and 1 byte loads.
7356       const unsigned ResidueBytes = ByValSize % PtrByteSize;
7357       assert(ResidueBytes != 0 && LoadOffset + PtrByteSize > ByValSize &&
7358              "Unexpected register residue for by-value argument.");
7359       SDValue ResidueVal;
7360       for (unsigned Bytes = 0; Bytes != ResidueBytes;) {
7361         const unsigned N = PowerOf2Floor(ResidueBytes - Bytes);
7362         const MVT VT =
7363             N == 1 ? MVT::i8
7364                    : ((N == 2) ? MVT::i16 : (N == 4 ? MVT::i32 : MVT::i64));
7365         SDValue Load = GetLoad(VT, LoadOffset);
7366         MemOpChains.push_back(Load.getValue(1));
7367         LoadOffset += N;
7368         Bytes += N;
7369 
7370         // By-val arguments are passed left-justfied in register.
7371         // Every load here needs to be shifted, otherwise a full register load
7372         // should have been used.
7373         assert(PtrVT.getSimpleVT().getSizeInBits() > (Bytes * 8) &&
7374                "Unexpected load emitted during handling of pass-by-value "
7375                "argument.");
7376         unsigned NumSHLBits = PtrVT.getSimpleVT().getSizeInBits() - (Bytes * 8);
7377         EVT ShiftAmountTy =
7378             getShiftAmountTy(Load->getValueType(0), DAG.getDataLayout());
7379         SDValue SHLAmt = DAG.getConstant(NumSHLBits, dl, ShiftAmountTy);
7380         SDValue ShiftedLoad =
7381             DAG.getNode(ISD::SHL, dl, Load.getValueType(), Load, SHLAmt);
7382         ResidueVal = ResidueVal ? DAG.getNode(ISD::OR, dl, PtrVT, ResidueVal,
7383                                               ShiftedLoad)
7384                                 : ShiftedLoad;
7385       }
7386 
7387       const CCValAssign &ByValVA = ArgLocs[I++];
7388       RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), ResidueVal));
7389       continue;
7390     }
7391 
7392     CCValAssign &VA = ArgLocs[I++];
7393     const MVT LocVT = VA.getLocVT();
7394     const MVT ValVT = VA.getValVT();
7395 
7396     switch (VA.getLocInfo()) {
7397     default:
7398       report_fatal_error("Unexpected argument extension type.");
7399     case CCValAssign::Full:
7400       break;
7401     case CCValAssign::ZExt:
7402       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
7403       break;
7404     case CCValAssign::SExt:
7405       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
7406       break;
7407     }
7408 
7409     if (VA.isRegLoc() && !VA.needsCustom()) {
7410       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
7411       continue;
7412     }
7413 
7414     if (VA.isMemLoc()) {
7415       SDValue PtrOff =
7416           DAG.getConstant(VA.getLocMemOffset(), dl, StackPtr.getValueType());
7417       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7418       MemOpChains.push_back(
7419           DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
7420 
7421       continue;
7422     }
7423 
7424     // Custom handling is used for GPR initializations for vararg float
7425     // arguments.
7426     assert(VA.isRegLoc() && VA.needsCustom() && CFlags.IsVarArg &&
7427            ValVT.isFloatingPoint() && LocVT.isInteger() &&
7428            "Unexpected register handling for calling convention.");
7429 
7430     SDValue ArgAsInt =
7431         DAG.getBitcast(MVT::getIntegerVT(ValVT.getSizeInBits()), Arg);
7432 
7433     if (Arg.getValueType().getStoreSize() == LocVT.getStoreSize())
7434       // f32 in 32-bit GPR
7435       // f64 in 64-bit GPR
7436       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgAsInt));
7437     else if (Arg.getValueType().getSizeInBits() < LocVT.getSizeInBits())
7438       // f32 in 64-bit GPR.
7439       RegsToPass.push_back(std::make_pair(
7440           VA.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, LocVT)));
7441     else {
7442       // f64 in two 32-bit GPRs
7443       // The 2 GPRs are marked custom and expected to be adjacent in ArgLocs.
7444       assert(Arg.getValueType() == MVT::f64 && CFlags.IsVarArg && !IsPPC64 &&
7445              "Unexpected custom register for argument!");
7446       CCValAssign &GPR1 = VA;
7447       SDValue MSWAsI64 = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgAsInt,
7448                                      DAG.getConstant(32, dl, MVT::i8));
7449       RegsToPass.push_back(std::make_pair(
7450           GPR1.getLocReg(), DAG.getZExtOrTrunc(MSWAsI64, dl, MVT::i32)));
7451 
7452       if (I != E) {
7453         // If only 1 GPR was available, there will only be one custom GPR and
7454         // the argument will also pass in memory.
7455         CCValAssign &PeekArg = ArgLocs[I];
7456         if (PeekArg.isRegLoc() && PeekArg.getValNo() == PeekArg.getValNo()) {
7457           assert(PeekArg.needsCustom() && "A second custom GPR is expected.");
7458           CCValAssign &GPR2 = ArgLocs[I++];
7459           RegsToPass.push_back(std::make_pair(
7460               GPR2.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, MVT::i32)));
7461         }
7462       }
7463     }
7464   }
7465 
7466   if (!MemOpChains.empty())
7467     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
7468 
7469   // For indirect calls, we need to save the TOC base to the stack for
7470   // restoration after the call.
7471   if (CFlags.IsIndirect) {
7472     assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
7473     const MCRegister TOCBaseReg = Subtarget.getTOCPointerRegister();
7474     const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
7475     const MVT PtrVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
7476     const unsigned TOCSaveOffset =
7477         Subtarget.getFrameLowering()->getTOCSaveOffset();
7478 
7479     setUsesTOCBasePtr(DAG);
7480     SDValue Val = DAG.getCopyFromReg(Chain, dl, TOCBaseReg, PtrVT);
7481     SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
7482     SDValue StackPtr = DAG.getRegister(StackPtrReg, PtrVT);
7483     SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7484     Chain = DAG.getStore(
7485         Val.getValue(1), dl, Val, AddPtr,
7486         MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
7487   }
7488 
7489   // Build a sequence of copy-to-reg nodes chained together with token chain
7490   // and flag operands which copy the outgoing args into the appropriate regs.
7491   SDValue InFlag;
7492   for (auto Reg : RegsToPass) {
7493     Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, InFlag);
7494     InFlag = Chain.getValue(1);
7495   }
7496 
7497   const int SPDiff = 0;
7498   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
7499                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
7500 }
7501 
7502 bool
7503 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
7504                                   MachineFunction &MF, bool isVarArg,
7505                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
7506                                   LLVMContext &Context) const {
7507   SmallVector<CCValAssign, 16> RVLocs;
7508   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
7509   return CCInfo.CheckReturn(
7510       Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7511                 ? RetCC_PPC_Cold
7512                 : RetCC_PPC);
7513 }
7514 
7515 SDValue
7516 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
7517                                bool isVarArg,
7518                                const SmallVectorImpl<ISD::OutputArg> &Outs,
7519                                const SmallVectorImpl<SDValue> &OutVals,
7520                                const SDLoc &dl, SelectionDAG &DAG) const {
7521   SmallVector<CCValAssign, 16> RVLocs;
7522   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
7523                  *DAG.getContext());
7524   CCInfo.AnalyzeReturn(Outs,
7525                        (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7526                            ? RetCC_PPC_Cold
7527                            : RetCC_PPC);
7528 
7529   SDValue Flag;
7530   SmallVector<SDValue, 4> RetOps(1, Chain);
7531 
7532   // Copy the result values into the output registers.
7533   for (unsigned i = 0, RealResIdx = 0; i != RVLocs.size(); ++i, ++RealResIdx) {
7534     CCValAssign &VA = RVLocs[i];
7535     assert(VA.isRegLoc() && "Can only return in registers!");
7536 
7537     SDValue Arg = OutVals[RealResIdx];
7538 
7539     switch (VA.getLocInfo()) {
7540     default: llvm_unreachable("Unknown loc info!");
7541     case CCValAssign::Full: break;
7542     case CCValAssign::AExt:
7543       Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
7544       break;
7545     case CCValAssign::ZExt:
7546       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
7547       break;
7548     case CCValAssign::SExt:
7549       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
7550       break;
7551     }
7552     if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
7553       bool isLittleEndian = Subtarget.isLittleEndian();
7554       // Legalize ret f64 -> ret 2 x i32.
7555       SDValue SVal =
7556           DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7557                       DAG.getIntPtrConstant(isLittleEndian ? 0 : 1, dl));
7558       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7559       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7560       SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7561                          DAG.getIntPtrConstant(isLittleEndian ? 1 : 0, dl));
7562       Flag = Chain.getValue(1);
7563       VA = RVLocs[++i]; // skip ahead to next loc
7564       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7565     } else
7566       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
7567     Flag = Chain.getValue(1);
7568     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7569   }
7570 
7571   RetOps[0] = Chain;  // Update chain.
7572 
7573   // Add the flag if we have it.
7574   if (Flag.getNode())
7575     RetOps.push_back(Flag);
7576 
7577   return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
7578 }
7579 
7580 SDValue
7581 PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
7582                                                 SelectionDAG &DAG) const {
7583   SDLoc dl(Op);
7584 
7585   // Get the correct type for integers.
7586   EVT IntVT = Op.getValueType();
7587 
7588   // Get the inputs.
7589   SDValue Chain = Op.getOperand(0);
7590   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7591   // Build a DYNAREAOFFSET node.
7592   SDValue Ops[2] = {Chain, FPSIdx};
7593   SDVTList VTs = DAG.getVTList(IntVT);
7594   return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
7595 }
7596 
7597 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
7598                                              SelectionDAG &DAG) const {
7599   // When we pop the dynamic allocation we need to restore the SP link.
7600   SDLoc dl(Op);
7601 
7602   // Get the correct type for pointers.
7603   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7604 
7605   // Construct the stack pointer operand.
7606   bool isPPC64 = Subtarget.isPPC64();
7607   unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
7608   SDValue StackPtr = DAG.getRegister(SP, PtrVT);
7609 
7610   // Get the operands for the STACKRESTORE.
7611   SDValue Chain = Op.getOperand(0);
7612   SDValue SaveSP = Op.getOperand(1);
7613 
7614   // Load the old link SP.
7615   SDValue LoadLinkSP =
7616       DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());
7617 
7618   // Restore the stack pointer.
7619   Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
7620 
7621   // Store the old link SP.
7622   return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
7623 }
7624 
7625 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
7626   MachineFunction &MF = DAG.getMachineFunction();
7627   bool isPPC64 = Subtarget.isPPC64();
7628   EVT PtrVT = getPointerTy(MF.getDataLayout());
7629 
7630   // Get current frame pointer save index.  The users of this index will be
7631   // primarily DYNALLOC instructions.
7632   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7633   int RASI = FI->getReturnAddrSaveIndex();
7634 
7635   // If the frame pointer save index hasn't been defined yet.
7636   if (!RASI) {
7637     // Find out what the fix offset of the frame pointer save area.
7638     int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
7639     // Allocate the frame index for frame pointer save area.
7640     RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
7641     // Save the result.
7642     FI->setReturnAddrSaveIndex(RASI);
7643   }
7644   return DAG.getFrameIndex(RASI, PtrVT);
7645 }
7646 
7647 SDValue
7648 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
7649   MachineFunction &MF = DAG.getMachineFunction();
7650   bool isPPC64 = Subtarget.isPPC64();
7651   EVT PtrVT = getPointerTy(MF.getDataLayout());
7652 
7653   // Get current frame pointer save index.  The users of this index will be
7654   // primarily DYNALLOC instructions.
7655   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7656   int FPSI = FI->getFramePointerSaveIndex();
7657 
7658   // If the frame pointer save index hasn't been defined yet.
7659   if (!FPSI) {
7660     // Find out what the fix offset of the frame pointer save area.
7661     int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
7662     // Allocate the frame index for frame pointer save area.
7663     FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
7664     // Save the result.
7665     FI->setFramePointerSaveIndex(FPSI);
7666   }
7667   return DAG.getFrameIndex(FPSI, PtrVT);
7668 }
7669 
7670 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
7671                                                    SelectionDAG &DAG) const {
7672   MachineFunction &MF = DAG.getMachineFunction();
7673   // Get the inputs.
7674   SDValue Chain = Op.getOperand(0);
7675   SDValue Size  = Op.getOperand(1);
7676   SDLoc dl(Op);
7677 
7678   // Get the correct type for pointers.
7679   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7680   // Negate the size.
7681   SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
7682                                 DAG.getConstant(0, dl, PtrVT), Size);
7683   // Construct a node for the frame pointer save index.
7684   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7685   SDValue Ops[3] = { Chain, NegSize, FPSIdx };
7686   SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
7687   if (hasInlineStackProbe(MF))
7688     return DAG.getNode(PPCISD::PROBED_ALLOCA, dl, VTs, Ops);
7689   return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
7690 }
7691 
7692 SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
7693                                                      SelectionDAG &DAG) const {
7694   MachineFunction &MF = DAG.getMachineFunction();
7695 
7696   bool isPPC64 = Subtarget.isPPC64();
7697   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7698 
7699   int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false);
7700   return DAG.getFrameIndex(FI, PtrVT);
7701 }
7702 
7703 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
7704                                                SelectionDAG &DAG) const {
7705   SDLoc DL(Op);
7706   return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
7707                      DAG.getVTList(MVT::i32, MVT::Other),
7708                      Op.getOperand(0), Op.getOperand(1));
7709 }
7710 
7711 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
7712                                                 SelectionDAG &DAG) const {
7713   SDLoc DL(Op);
7714   return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
7715                      Op.getOperand(0), Op.getOperand(1));
7716 }
7717 
7718 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
7719 
7720   assert(Op.getValueType() == MVT::i1 &&
7721          "Custom lowering only for i1 loads");
7722 
7723   // First, load 8 bits into 32 bits, then truncate to 1 bit.
7724 
7725   SDLoc dl(Op);
7726   LoadSDNode *LD = cast<LoadSDNode>(Op);
7727 
7728   SDValue Chain = LD->getChain();
7729   SDValue BasePtr = LD->getBasePtr();
7730   MachineMemOperand *MMO = LD->getMemOperand();
7731 
7732   SDValue NewLD =
7733       DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain,
7734                      BasePtr, MVT::i8, MMO);
7735   SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);
7736 
7737   SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
7738   return DAG.getMergeValues(Ops, dl);
7739 }
7740 
7741 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
7742   assert(Op.getOperand(1).getValueType() == MVT::i1 &&
7743          "Custom lowering only for i1 stores");
7744 
7745   // First, zero extend to 32 bits, then use a truncating store to 8 bits.
7746 
7747   SDLoc dl(Op);
7748   StoreSDNode *ST = cast<StoreSDNode>(Op);
7749 
7750   SDValue Chain = ST->getChain();
7751   SDValue BasePtr = ST->getBasePtr();
7752   SDValue Value = ST->getValue();
7753   MachineMemOperand *MMO = ST->getMemOperand();
7754 
7755   Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()),
7756                       Value);
7757   return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
7758 }
7759 
7760 // FIXME: Remove this once the ANDI glue bug is fixed:
7761 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
7762   assert(Op.getValueType() == MVT::i1 &&
7763          "Custom lowering only for i1 results");
7764 
7765   SDLoc DL(Op);
7766   return DAG.getNode(PPCISD::ANDI_rec_1_GT_BIT, DL, MVT::i1, Op.getOperand(0));
7767 }
7768 
7769 SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op,
7770                                                SelectionDAG &DAG) const {
7771 
7772   // Implements a vector truncate that fits in a vector register as a shuffle.
7773   // We want to legalize vector truncates down to where the source fits in
7774   // a vector register (and target is therefore smaller than vector register
7775   // size).  At that point legalization will try to custom lower the sub-legal
7776   // result and get here - where we can contain the truncate as a single target
7777   // operation.
7778 
7779   // For example a trunc <2 x i16> to <2 x i8> could be visualized as follows:
7780   //   <MSB1|LSB1, MSB2|LSB2> to <LSB1, LSB2>
7781   //
7782   // We will implement it for big-endian ordering as this (where x denotes
7783   // undefined):
7784   //   < MSB1|LSB1, MSB2|LSB2, uu, uu, uu, uu, uu, uu> to
7785   //   < LSB1, LSB2, u, u, u, u, u, u, u, u, u, u, u, u, u, u>
7786   //
7787   // The same operation in little-endian ordering will be:
7788   //   <uu, uu, uu, uu, uu, uu, LSB2|MSB2, LSB1|MSB1> to
7789   //   <u, u, u, u, u, u, u, u, u, u, u, u, u, u, LSB2, LSB1>
7790 
7791   assert(Op.getValueType().isVector() && "Vector type expected.");
7792 
7793   SDLoc DL(Op);
7794   SDValue N1 = Op.getOperand(0);
7795   unsigned SrcSize = N1.getValueType().getSizeInBits();
7796   assert(SrcSize <= 128 && "Source must fit in an Altivec/VSX vector");
7797   SDValue WideSrc = SrcSize == 128 ? N1 : widenVec(DAG, N1, DL);
7798 
7799   EVT TrgVT = Op.getValueType();
7800   unsigned TrgNumElts = TrgVT.getVectorNumElements();
7801   EVT EltVT = TrgVT.getVectorElementType();
7802   unsigned WideNumElts = 128 / EltVT.getSizeInBits();
7803   EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
7804 
7805   // First list the elements we want to keep.
7806   unsigned SizeMult = SrcSize / TrgVT.getSizeInBits();
7807   SmallVector<int, 16> ShuffV;
7808   if (Subtarget.isLittleEndian())
7809     for (unsigned i = 0; i < TrgNumElts; ++i)
7810       ShuffV.push_back(i * SizeMult);
7811   else
7812     for (unsigned i = 1; i <= TrgNumElts; ++i)
7813       ShuffV.push_back(i * SizeMult - 1);
7814 
7815   // Populate the remaining elements with undefs.
7816   for (unsigned i = TrgNumElts; i < WideNumElts; ++i)
7817     // ShuffV.push_back(i + WideNumElts);
7818     ShuffV.push_back(WideNumElts + 1);
7819 
7820   SDValue Conv = DAG.getNode(ISD::BITCAST, DL, WideVT, WideSrc);
7821   return DAG.getVectorShuffle(WideVT, DL, Conv, DAG.getUNDEF(WideVT), ShuffV);
7822 }
7823 
7824 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when
7825 /// possible.
7826 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
7827   // Not FP, or using SPE? Not a fsel.
7828   if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
7829       !Op.getOperand(2).getValueType().isFloatingPoint() || Subtarget.hasSPE())
7830     return Op;
7831 
7832   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
7833 
7834   EVT ResVT = Op.getValueType();
7835   EVT CmpVT = Op.getOperand(0).getValueType();
7836   SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
7837   SDValue TV  = Op.getOperand(2), FV  = Op.getOperand(3);
7838   SDLoc dl(Op);
7839   SDNodeFlags Flags = Op.getNode()->getFlags();
7840 
7841   // We have xsmaxcdp/xsmincdp which are OK to emit even in the
7842   // presence of infinities.
7843   if (Subtarget.hasP9Vector() && LHS == TV && RHS == FV) {
7844     switch (CC) {
7845     default:
7846       break;
7847     case ISD::SETOGT:
7848     case ISD::SETGT:
7849       return DAG.getNode(PPCISD::XSMAXCDP, dl, Op.getValueType(), LHS, RHS);
7850     case ISD::SETOLT:
7851     case ISD::SETLT:
7852       return DAG.getNode(PPCISD::XSMINCDP, dl, Op.getValueType(), LHS, RHS);
7853     }
7854   }
7855 
7856   // We might be able to do better than this under some circumstances, but in
7857   // general, fsel-based lowering of select is a finite-math-only optimization.
7858   // For more information, see section F.3 of the 2.06 ISA specification.
7859   // With ISA 3.0
7860   if ((!DAG.getTarget().Options.NoInfsFPMath && !Flags.hasNoInfs()) ||
7861       (!DAG.getTarget().Options.NoNaNsFPMath && !Flags.hasNoNaNs()))
7862     return Op;
7863 
7864   // If the RHS of the comparison is a 0.0, we don't need to do the
7865   // subtraction at all.
7866   SDValue Sel1;
7867   if (isFloatingPointZero(RHS))
7868     switch (CC) {
7869     default: break;       // SETUO etc aren't handled by fsel.
7870     case ISD::SETNE:
7871       std::swap(TV, FV);
7872       LLVM_FALLTHROUGH;
7873     case ISD::SETEQ:
7874       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
7875         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7876       Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
7877       if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
7878         Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
7879       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7880                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
7881     case ISD::SETULT:
7882     case ISD::SETLT:
7883       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
7884       LLVM_FALLTHROUGH;
7885     case ISD::SETOGE:
7886     case ISD::SETGE:
7887       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
7888         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7889       return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
7890     case ISD::SETUGT:
7891     case ISD::SETGT:
7892       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
7893       LLVM_FALLTHROUGH;
7894     case ISD::SETOLE:
7895     case ISD::SETLE:
7896       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
7897         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7898       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7899                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
7900     }
7901 
7902   SDValue Cmp;
7903   switch (CC) {
7904   default: break;       // SETUO etc aren't handled by fsel.
7905   case ISD::SETNE:
7906     std::swap(TV, FV);
7907     LLVM_FALLTHROUGH;
7908   case ISD::SETEQ:
7909     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
7910     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7911       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7912     Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
7913     if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
7914       Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
7915     return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7916                        DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
7917   case ISD::SETULT:
7918   case ISD::SETLT:
7919     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
7920     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7921       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7922     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
7923   case ISD::SETOGE:
7924   case ISD::SETGE:
7925     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
7926     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7927       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7928     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
7929   case ISD::SETUGT:
7930   case ISD::SETGT:
7931     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
7932     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7933       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7934     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
7935   case ISD::SETOLE:
7936   case ISD::SETLE:
7937     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
7938     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7939       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7940     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
7941   }
7942   return Op;
7943 }
7944 
7945 static SDValue convertFPToInt(SDValue Op, SelectionDAG &DAG,
7946                               const PPCSubtarget &Subtarget) {
7947   SDLoc dl(Op);
7948   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT;
7949   SDValue Src = Op.getOperand(0);
7950   assert(Src.getValueType().isFloatingPoint());
7951   if (Src.getValueType() == MVT::f32)
7952     Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
7953   SDValue Conv;
7954   switch (Op.getSimpleValueType().SimpleTy) {
7955   default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
7956   case MVT::i32:
7957     Conv = DAG.getNode(
7958         IsSigned ? PPCISD::FCTIWZ
7959                  : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ),
7960         dl, MVT::f64, Src);
7961     break;
7962   case MVT::i64:
7963     assert((IsSigned || Subtarget.hasFPCVT()) &&
7964            "i64 FP_TO_UINT is supported only with FPCVT");
7965     Conv = DAG.getNode(IsSigned ? PPCISD::FCTIDZ : PPCISD::FCTIDUZ, dl,
7966                        MVT::f64, Src);
7967   }
7968   return Conv;
7969 }
7970 
7971 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
7972                                                SelectionDAG &DAG,
7973                                                const SDLoc &dl) const {
7974   SDValue Tmp = convertFPToInt(Op, DAG, Subtarget);
7975   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT;
7976 
7977   // Convert the FP value to an int value through memory.
7978   bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() &&
7979                   (IsSigned || Subtarget.hasFPCVT());
7980   SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64);
7981   int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
7982   MachinePointerInfo MPI =
7983       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
7984 
7985   // Emit a store to the stack slot.
7986   SDValue Chain;
7987   Align Alignment(DAG.getEVTAlign(Tmp.getValueType()));
7988   if (i32Stack) {
7989     MachineFunction &MF = DAG.getMachineFunction();
7990     Alignment = Align(4);
7991     MachineMemOperand *MMO =
7992         MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Alignment);
7993     SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr };
7994     Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
7995               DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
7996   } else
7997     Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI, Alignment);
7998 
7999   // Result is a load from the stack slot.  If loading 4 bytes, make sure to
8000   // add in a bias on big endian.
8001   if (Op.getValueType() == MVT::i32 && !i32Stack) {
8002     FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
8003                         DAG.getConstant(4, dl, FIPtr.getValueType()));
8004     MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4);
8005   }
8006 
8007   RLI.Chain = Chain;
8008   RLI.Ptr = FIPtr;
8009   RLI.MPI = MPI;
8010   RLI.Alignment = Alignment;
8011 }
8012 
8013 /// Custom lowers floating point to integer conversions to use
8014 /// the direct move instructions available in ISA 2.07 to avoid the
8015 /// need for load/store combinations.
8016 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op,
8017                                                     SelectionDAG &DAG,
8018                                                     const SDLoc &dl) const {
8019   assert(Op.getOperand(0).getValueType().isFloatingPoint());
8020   return DAG.getNode(PPCISD::MFVSR, dl, Op.getSimpleValueType().SimpleTy,
8021                      convertFPToInt(Op, DAG, Subtarget));
8022 }
8023 
8024 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
8025                                           const SDLoc &dl) const {
8026   SDValue Src = Op.getOperand(0);
8027   // FP to INT conversions are legal for f128.
8028   if (Src.getValueType() == MVT::f128)
8029     return Op;
8030 
8031   // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
8032   // PPC (the libcall is not available).
8033   if (Src.getValueType() == MVT::ppcf128) {
8034     if (Op.getValueType() == MVT::i32) {
8035       if (Op.getOpcode() == ISD::FP_TO_SINT) {
8036         SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Src,
8037                                  DAG.getIntPtrConstant(0, dl));
8038         SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Src,
8039                                  DAG.getIntPtrConstant(1, dl));
8040 
8041         // Add the two halves of the long double in round-to-zero mode.
8042         SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi);
8043 
8044         // Now use a smaller FP_TO_SINT.
8045         return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res);
8046       }
8047       if (Op.getOpcode() == ISD::FP_TO_UINT) {
8048         const uint64_t TwoE31[] = {0x41e0000000000000LL, 0};
8049         APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31));
8050         SDValue Tmp = DAG.getConstantFP(APF, dl, MVT::ppcf128);
8051         //  X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X
8052         // FIXME: generated code sucks.
8053         // TODO: Are there fast-math-flags to propagate to this FSUB?
8054         SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128, Src, Tmp);
8055         True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True);
8056         True = DAG.getNode(ISD::ADD, dl, MVT::i32, True,
8057                            DAG.getConstant(0x80000000, dl, MVT::i32));
8058         SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
8059         return DAG.getSelectCC(dl, Src, Tmp, True, False, ISD::SETGE);
8060       }
8061     }
8062 
8063     return SDValue();
8064   }
8065 
8066   if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
8067     return LowerFP_TO_INTDirectMove(Op, DAG, dl);
8068 
8069   ReuseLoadInfo RLI;
8070   LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
8071 
8072   return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI,
8073                      RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
8074 }
8075 
8076 // We're trying to insert a regular store, S, and then a load, L. If the
8077 // incoming value, O, is a load, we might just be able to have our load use the
8078 // address used by O. However, we don't know if anything else will store to
8079 // that address before we can load from it. To prevent this situation, we need
8080 // to insert our load, L, into the chain as a peer of O. To do this, we give L
8081 // the same chain operand as O, we create a token factor from the chain results
8082 // of O and L, and we replace all uses of O's chain result with that token
8083 // factor (see spliceIntoChain below for this last part).
8084 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
8085                                             ReuseLoadInfo &RLI,
8086                                             SelectionDAG &DAG,
8087                                             ISD::LoadExtType ET) const {
8088   SDLoc dl(Op);
8089   bool ValidFPToUint = Op.getOpcode() == ISD::FP_TO_UINT &&
8090                        (Subtarget.hasFPCVT() || Op.getValueType() == MVT::i32);
8091   if (ET == ISD::NON_EXTLOAD &&
8092       (ValidFPToUint || Op.getOpcode() == ISD::FP_TO_SINT) &&
8093       isOperationLegalOrCustom(Op.getOpcode(),
8094                                Op.getOperand(0).getValueType())) {
8095 
8096     LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
8097     return true;
8098   }
8099 
8100   LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
8101   if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
8102       LD->isNonTemporal())
8103     return false;
8104   if (LD->getMemoryVT() != MemVT)
8105     return false;
8106 
8107   RLI.Ptr = LD->getBasePtr();
8108   if (LD->isIndexed() && !LD->getOffset().isUndef()) {
8109     assert(LD->getAddressingMode() == ISD::PRE_INC &&
8110            "Non-pre-inc AM on PPC?");
8111     RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
8112                           LD->getOffset());
8113   }
8114 
8115   RLI.Chain = LD->getChain();
8116   RLI.MPI = LD->getPointerInfo();
8117   RLI.IsDereferenceable = LD->isDereferenceable();
8118   RLI.IsInvariant = LD->isInvariant();
8119   RLI.Alignment = LD->getAlign();
8120   RLI.AAInfo = LD->getAAInfo();
8121   RLI.Ranges = LD->getRanges();
8122 
8123   RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
8124   return true;
8125 }
8126 
8127 // Given the head of the old chain, ResChain, insert a token factor containing
8128 // it and NewResChain, and make users of ResChain now be users of that token
8129 // factor.
8130 // TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead.
8131 void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
8132                                         SDValue NewResChain,
8133                                         SelectionDAG &DAG) const {
8134   if (!ResChain)
8135     return;
8136 
8137   SDLoc dl(NewResChain);
8138 
8139   SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
8140                            NewResChain, DAG.getUNDEF(MVT::Other));
8141   assert(TF.getNode() != NewResChain.getNode() &&
8142          "A new TF really is required here");
8143 
8144   DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
8145   DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
8146 }
8147 
8148 /// Analyze profitability of direct move
8149 /// prefer float load to int load plus direct move
8150 /// when there is no integer use of int load
8151 bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
8152   SDNode *Origin = Op.getOperand(0).getNode();
8153   if (Origin->getOpcode() != ISD::LOAD)
8154     return true;
8155 
8156   // If there is no LXSIBZX/LXSIHZX, like Power8,
8157   // prefer direct move if the memory size is 1 or 2 bytes.
8158   MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand();
8159   if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2)
8160     return true;
8161 
8162   for (SDNode::use_iterator UI = Origin->use_begin(),
8163                             UE = Origin->use_end();
8164        UI != UE; ++UI) {
8165 
8166     // Only look at the users of the loaded value.
8167     if (UI.getUse().get().getResNo() != 0)
8168       continue;
8169 
8170     if (UI->getOpcode() != ISD::SINT_TO_FP &&
8171         UI->getOpcode() != ISD::UINT_TO_FP)
8172       return true;
8173   }
8174 
8175   return false;
8176 }
8177 
8178 static SDValue convertIntToFP(SDValue Op, SDValue Src, SelectionDAG &DAG,
8179                               const PPCSubtarget &Subtarget) {
8180   bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP;
8181   SDLoc dl(Op);
8182   // If we have FCFIDS, then use it when converting to single-precision.
8183   // Otherwise, convert to double-precision and then round.
8184   bool IsSingle = Op.getValueType() == MVT::f32 && Subtarget.hasFPCVT();
8185   unsigned ConvOpc = IsSingle ? (IsSigned ? PPCISD::FCFIDS : PPCISD::FCFIDUS)
8186                               : (IsSigned ? PPCISD::FCFID : PPCISD::FCFIDU);
8187   EVT ConvTy = IsSingle ? MVT::f32 : MVT::f64;
8188   return DAG.getNode(ConvOpc, dl, ConvTy, Src);
8189 }
8190 
8191 /// Custom lowers integer to floating point conversions to use
8192 /// the direct move instructions available in ISA 2.07 to avoid the
8193 /// need for load/store combinations.
8194 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
8195                                                     SelectionDAG &DAG,
8196                                                     const SDLoc &dl) const {
8197   assert((Op.getValueType() == MVT::f32 ||
8198           Op.getValueType() == MVT::f64) &&
8199          "Invalid floating point type as target of conversion");
8200   assert(Subtarget.hasFPCVT() &&
8201          "Int to FP conversions with direct moves require FPCVT");
8202   SDValue Src = Op.getOperand(0);
8203   bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
8204   bool Signed = Op.getOpcode() == ISD::SINT_TO_FP;
8205   unsigned MovOpc = (WordInt && !Signed) ? PPCISD::MTVSRZ : PPCISD::MTVSRA;
8206   SDValue Mov = DAG.getNode(MovOpc, dl, MVT::f64, Src);
8207   return convertIntToFP(Op, Mov, DAG, Subtarget);
8208 }
8209 
8210 static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl) {
8211 
8212   EVT VecVT = Vec.getValueType();
8213   assert(VecVT.isVector() && "Expected a vector type.");
8214   assert(VecVT.getSizeInBits() < 128 && "Vector is already full width.");
8215 
8216   EVT EltVT = VecVT.getVectorElementType();
8217   unsigned WideNumElts = 128 / EltVT.getSizeInBits();
8218   EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
8219 
8220   unsigned NumConcat = WideNumElts / VecVT.getVectorNumElements();
8221   SmallVector<SDValue, 16> Ops(NumConcat);
8222   Ops[0] = Vec;
8223   SDValue UndefVec = DAG.getUNDEF(VecVT);
8224   for (unsigned i = 1; i < NumConcat; ++i)
8225     Ops[i] = UndefVec;
8226 
8227   return DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Ops);
8228 }
8229 
8230 SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
8231                                                 const SDLoc &dl) const {
8232 
8233   unsigned Opc = Op.getOpcode();
8234   assert((Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP) &&
8235          "Unexpected conversion type");
8236   assert((Op.getValueType() == MVT::v2f64 || Op.getValueType() == MVT::v4f32) &&
8237          "Supports conversions to v2f64/v4f32 only.");
8238 
8239   bool SignedConv = Opc == ISD::SINT_TO_FP;
8240   bool FourEltRes = Op.getValueType() == MVT::v4f32;
8241 
8242   SDValue Wide = widenVec(DAG, Op.getOperand(0), dl);
8243   EVT WideVT = Wide.getValueType();
8244   unsigned WideNumElts = WideVT.getVectorNumElements();
8245   MVT IntermediateVT = FourEltRes ? MVT::v4i32 : MVT::v2i64;
8246 
8247   SmallVector<int, 16> ShuffV;
8248   for (unsigned i = 0; i < WideNumElts; ++i)
8249     ShuffV.push_back(i + WideNumElts);
8250 
8251   int Stride = FourEltRes ? WideNumElts / 4 : WideNumElts / 2;
8252   int SaveElts = FourEltRes ? 4 : 2;
8253   if (Subtarget.isLittleEndian())
8254     for (int i = 0; i < SaveElts; i++)
8255       ShuffV[i * Stride] = i;
8256   else
8257     for (int i = 1; i <= SaveElts; i++)
8258       ShuffV[i * Stride - 1] = i - 1;
8259 
8260   SDValue ShuffleSrc2 =
8261       SignedConv ? DAG.getUNDEF(WideVT) : DAG.getConstant(0, dl, WideVT);
8262   SDValue Arrange = DAG.getVectorShuffle(WideVT, dl, Wide, ShuffleSrc2, ShuffV);
8263 
8264   SDValue Extend;
8265   if (SignedConv) {
8266     Arrange = DAG.getBitcast(IntermediateVT, Arrange);
8267     EVT ExtVT = Op.getOperand(0).getValueType();
8268     if (Subtarget.hasP9Altivec())
8269       ExtVT = EVT::getVectorVT(*DAG.getContext(), WideVT.getVectorElementType(),
8270                                IntermediateVT.getVectorNumElements());
8271 
8272     Extend = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, IntermediateVT, Arrange,
8273                          DAG.getValueType(ExtVT));
8274   } else
8275     Extend = DAG.getNode(ISD::BITCAST, dl, IntermediateVT, Arrange);
8276 
8277   return DAG.getNode(Opc, dl, Op.getValueType(), Extend);
8278 }
8279 
8280 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
8281                                           SelectionDAG &DAG) const {
8282   SDLoc dl(Op);
8283   SDValue Src = Op.getOperand(0);
8284   bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP;
8285 
8286   EVT InVT = Src.getValueType();
8287   EVT OutVT = Op.getValueType();
8288   if (OutVT.isVector() && OutVT.isFloatingPoint() &&
8289       isOperationCustom(Op.getOpcode(), InVT))
8290     return LowerINT_TO_FPVector(Op, DAG, dl);
8291 
8292   // Conversions to f128 are legal.
8293   if (Op.getValueType() == MVT::f128)
8294     return Op;
8295 
8296   // Don't handle ppc_fp128 here; let it be lowered to a libcall.
8297   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
8298     return SDValue();
8299 
8300   if (Src.getValueType() == MVT::i1)
8301     return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Src,
8302                        DAG.getConstantFP(1.0, dl, Op.getValueType()),
8303                        DAG.getConstantFP(0.0, dl, Op.getValueType()));
8304 
8305   // If we have direct moves, we can do all the conversion, skip the store/load
8306   // however, without FPCVT we can't do most conversions.
8307   if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
8308       Subtarget.isPPC64() && Subtarget.hasFPCVT())
8309     return LowerINT_TO_FPDirectMove(Op, DAG, dl);
8310 
8311   assert((IsSigned || Subtarget.hasFPCVT()) &&
8312          "UINT_TO_FP is supported only with FPCVT");
8313 
8314   if (Src.getValueType() == MVT::i64) {
8315     SDValue SINT = Src;
8316     // When converting to single-precision, we actually need to convert
8317     // to double-precision first and then round to single-precision.
8318     // To avoid double-rounding effects during that operation, we have
8319     // to prepare the input operand.  Bits that might be truncated when
8320     // converting to double-precision are replaced by a bit that won't
8321     // be lost at this stage, but is below the single-precision rounding
8322     // position.
8323     //
8324     // However, if -enable-unsafe-fp-math is in effect, accept double
8325     // rounding to avoid the extra overhead.
8326     if (Op.getValueType() == MVT::f32 &&
8327         !Subtarget.hasFPCVT() &&
8328         !DAG.getTarget().Options.UnsafeFPMath) {
8329 
8330       // Twiddle input to make sure the low 11 bits are zero.  (If this
8331       // is the case, we are guaranteed the value will fit into the 53 bit
8332       // mantissa of an IEEE double-precision value without rounding.)
8333       // If any of those low 11 bits were not zero originally, make sure
8334       // bit 12 (value 2048) is set instead, so that the final rounding
8335       // to single-precision gets the correct result.
8336       SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64,
8337                                   SINT, DAG.getConstant(2047, dl, MVT::i64));
8338       Round = DAG.getNode(ISD::ADD, dl, MVT::i64,
8339                           Round, DAG.getConstant(2047, dl, MVT::i64));
8340       Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT);
8341       Round = DAG.getNode(ISD::AND, dl, MVT::i64,
8342                           Round, DAG.getConstant(-2048, dl, MVT::i64));
8343 
8344       // However, we cannot use that value unconditionally: if the magnitude
8345       // of the input value is small, the bit-twiddling we did above might
8346       // end up visibly changing the output.  Fortunately, in that case, we
8347       // don't need to twiddle bits since the original input will convert
8348       // exactly to double-precision floating-point already.  Therefore,
8349       // construct a conditional to use the original value if the top 11
8350       // bits are all sign-bit copies, and use the rounded value computed
8351       // above otherwise.
8352       SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64,
8353                                  SINT, DAG.getConstant(53, dl, MVT::i32));
8354       Cond = DAG.getNode(ISD::ADD, dl, MVT::i64,
8355                          Cond, DAG.getConstant(1, dl, MVT::i64));
8356       Cond = DAG.getSetCC(
8357           dl,
8358           getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
8359           Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT);
8360 
8361       SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT);
8362     }
8363 
8364     ReuseLoadInfo RLI;
8365     SDValue Bits;
8366 
8367     MachineFunction &MF = DAG.getMachineFunction();
8368     if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
8369       Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI,
8370                          RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
8371       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8372     } else if (Subtarget.hasLFIWAX() &&
8373                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) {
8374       MachineMemOperand *MMO =
8375         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8376                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8377       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8378       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl,
8379                                      DAG.getVTList(MVT::f64, MVT::Other),
8380                                      Ops, MVT::i32, MMO);
8381       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8382     } else if (Subtarget.hasFPCVT() &&
8383                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) {
8384       MachineMemOperand *MMO =
8385         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8386                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8387       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8388       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl,
8389                                      DAG.getVTList(MVT::f64, MVT::Other),
8390                                      Ops, MVT::i32, MMO);
8391       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8392     } else if (((Subtarget.hasLFIWAX() &&
8393                  SINT.getOpcode() == ISD::SIGN_EXTEND) ||
8394                 (Subtarget.hasFPCVT() &&
8395                  SINT.getOpcode() == ISD::ZERO_EXTEND)) &&
8396                SINT.getOperand(0).getValueType() == MVT::i32) {
8397       MachineFrameInfo &MFI = MF.getFrameInfo();
8398       EVT PtrVT = getPointerTy(DAG.getDataLayout());
8399 
8400       int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
8401       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8402 
8403       SDValue Store =
8404           DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx,
8405                        MachinePointerInfo::getFixedStack(
8406                            DAG.getMachineFunction(), FrameIdx));
8407 
8408       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8409              "Expected an i32 store");
8410 
8411       RLI.Ptr = FIdx;
8412       RLI.Chain = Store;
8413       RLI.MPI =
8414           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8415       RLI.Alignment = Align(4);
8416 
8417       MachineMemOperand *MMO =
8418         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8419                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8420       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8421       Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ?
8422                                      PPCISD::LFIWZX : PPCISD::LFIWAX,
8423                                      dl, DAG.getVTList(MVT::f64, MVT::Other),
8424                                      Ops, MVT::i32, MMO);
8425     } else
8426       Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);
8427 
8428     SDValue FP = convertIntToFP(Op, Bits, DAG, Subtarget);
8429 
8430     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
8431       FP = DAG.getNode(ISD::FP_ROUND, dl,
8432                        MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
8433     return FP;
8434   }
8435 
8436   assert(Src.getValueType() == MVT::i32 &&
8437          "Unhandled INT_TO_FP type in custom expander!");
8438   // Since we only generate this in 64-bit mode, we can take advantage of
8439   // 64-bit registers.  In particular, sign extend the input value into the
8440   // 64-bit register with extsw, store the WHOLE 64-bit value into the stack
8441   // then lfd it and fcfid it.
8442   MachineFunction &MF = DAG.getMachineFunction();
8443   MachineFrameInfo &MFI = MF.getFrameInfo();
8444   EVT PtrVT = getPointerTy(MF.getDataLayout());
8445 
8446   SDValue Ld;
8447   if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
8448     ReuseLoadInfo RLI;
8449     bool ReusingLoad;
8450     if (!(ReusingLoad = canReuseLoadAddress(Src, MVT::i32, RLI, DAG))) {
8451       int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
8452       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8453 
8454       SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Src, FIdx,
8455                                    MachinePointerInfo::getFixedStack(
8456                                        DAG.getMachineFunction(), FrameIdx));
8457 
8458       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8459              "Expected an i32 store");
8460 
8461       RLI.Ptr = FIdx;
8462       RLI.Chain = Store;
8463       RLI.MPI =
8464           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8465       RLI.Alignment = Align(4);
8466     }
8467 
8468     MachineMemOperand *MMO =
8469       MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8470                               RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8471     SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8472     Ld = DAG.getMemIntrinsicNode(IsSigned ? PPCISD::LFIWAX : PPCISD::LFIWZX, dl,
8473                                  DAG.getVTList(MVT::f64, MVT::Other), Ops,
8474                                  MVT::i32, MMO);
8475     if (ReusingLoad)
8476       spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
8477   } else {
8478     assert(Subtarget.isPPC64() &&
8479            "i32->FP without LFIWAX supported only on PPC64");
8480 
8481     int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
8482     SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8483 
8484     SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, Src);
8485 
8486     // STD the extended value into the stack slot.
8487     SDValue Store = DAG.getStore(
8488         DAG.getEntryNode(), dl, Ext64, FIdx,
8489         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8490 
8491     // Load the value as a double.
8492     Ld = DAG.getLoad(
8493         MVT::f64, dl, Store, FIdx,
8494         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8495   }
8496 
8497   // FCFID it and return it.
8498   SDValue FP = convertIntToFP(Op, Ld, DAG, Subtarget);
8499   if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
8500     FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
8501                      DAG.getIntPtrConstant(0, dl));
8502   return FP;
8503 }
8504 
8505 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
8506                                             SelectionDAG &DAG) const {
8507   SDLoc dl(Op);
8508   /*
8509    The rounding mode is in bits 30:31 of FPSR, and has the following
8510    settings:
8511      00 Round to nearest
8512      01 Round to 0
8513      10 Round to +inf
8514      11 Round to -inf
8515 
8516   FLT_ROUNDS, on the other hand, expects the following:
8517     -1 Undefined
8518      0 Round to 0
8519      1 Round to nearest
8520      2 Round to +inf
8521      3 Round to -inf
8522 
8523   To perform the conversion, we do:
8524     ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
8525   */
8526 
8527   MachineFunction &MF = DAG.getMachineFunction();
8528   EVT VT = Op.getValueType();
8529   EVT PtrVT = getPointerTy(MF.getDataLayout());
8530 
8531   // Save FP Control Word to register
8532   SDValue Chain = Op.getOperand(0);
8533   SDValue MFFS = DAG.getNode(PPCISD::MFFS, dl, {MVT::f64, MVT::Other}, Chain);
8534   Chain = MFFS.getValue(1);
8535 
8536   // Save FP register to stack slot
8537   int SSFI = MF.getFrameInfo().CreateStackObject(8, Align(8), false);
8538   SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
8539   Chain = DAG.getStore(Chain, dl, MFFS, StackSlot, MachinePointerInfo());
8540 
8541   // Load FP Control Word from low 32 bits of stack slot.
8542   SDValue Four = DAG.getConstant(4, dl, PtrVT);
8543   SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
8544   SDValue CWD = DAG.getLoad(MVT::i32, dl, Chain, Addr, MachinePointerInfo());
8545   Chain = CWD.getValue(1);
8546 
8547   // Transform as necessary
8548   SDValue CWD1 =
8549     DAG.getNode(ISD::AND, dl, MVT::i32,
8550                 CWD, DAG.getConstant(3, dl, MVT::i32));
8551   SDValue CWD2 =
8552     DAG.getNode(ISD::SRL, dl, MVT::i32,
8553                 DAG.getNode(ISD::AND, dl, MVT::i32,
8554                             DAG.getNode(ISD::XOR, dl, MVT::i32,
8555                                         CWD, DAG.getConstant(3, dl, MVT::i32)),
8556                             DAG.getConstant(3, dl, MVT::i32)),
8557                 DAG.getConstant(1, dl, MVT::i32));
8558 
8559   SDValue RetVal =
8560     DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
8561 
8562   RetVal =
8563       DAG.getNode((VT.getSizeInBits() < 16 ? ISD::TRUNCATE : ISD::ZERO_EXTEND),
8564                   dl, VT, RetVal);
8565 
8566   return DAG.getMergeValues({RetVal, Chain}, dl);
8567 }
8568 
8569 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
8570   EVT VT = Op.getValueType();
8571   unsigned BitWidth = VT.getSizeInBits();
8572   SDLoc dl(Op);
8573   assert(Op.getNumOperands() == 3 &&
8574          VT == Op.getOperand(1).getValueType() &&
8575          "Unexpected SHL!");
8576 
8577   // Expand into a bunch of logical ops.  Note that these ops
8578   // depend on the PPC behavior for oversized shift amounts.
8579   SDValue Lo = Op.getOperand(0);
8580   SDValue Hi = Op.getOperand(1);
8581   SDValue Amt = Op.getOperand(2);
8582   EVT AmtVT = Amt.getValueType();
8583 
8584   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8585                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8586   SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
8587   SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
8588   SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3);
8589   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8590                              DAG.getConstant(-BitWidth, dl, AmtVT));
8591   SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
8592   SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
8593   SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
8594   SDValue OutOps[] = { OutLo, OutHi };
8595   return DAG.getMergeValues(OutOps, dl);
8596 }
8597 
8598 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
8599   EVT VT = Op.getValueType();
8600   SDLoc dl(Op);
8601   unsigned BitWidth = VT.getSizeInBits();
8602   assert(Op.getNumOperands() == 3 &&
8603          VT == Op.getOperand(1).getValueType() &&
8604          "Unexpected SRL!");
8605 
8606   // Expand into a bunch of logical ops.  Note that these ops
8607   // depend on the PPC behavior for oversized shift amounts.
8608   SDValue Lo = Op.getOperand(0);
8609   SDValue Hi = Op.getOperand(1);
8610   SDValue Amt = Op.getOperand(2);
8611   EVT AmtVT = Amt.getValueType();
8612 
8613   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8614                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8615   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
8616   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
8617   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
8618   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8619                              DAG.getConstant(-BitWidth, dl, AmtVT));
8620   SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
8621   SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
8622   SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
8623   SDValue OutOps[] = { OutLo, OutHi };
8624   return DAG.getMergeValues(OutOps, dl);
8625 }
8626 
8627 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
8628   SDLoc dl(Op);
8629   EVT VT = Op.getValueType();
8630   unsigned BitWidth = VT.getSizeInBits();
8631   assert(Op.getNumOperands() == 3 &&
8632          VT == Op.getOperand(1).getValueType() &&
8633          "Unexpected SRA!");
8634 
8635   // Expand into a bunch of logical ops, followed by a select_cc.
8636   SDValue Lo = Op.getOperand(0);
8637   SDValue Hi = Op.getOperand(1);
8638   SDValue Amt = Op.getOperand(2);
8639   EVT AmtVT = Amt.getValueType();
8640 
8641   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8642                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8643   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
8644   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
8645   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
8646   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8647                              DAG.getConstant(-BitWidth, dl, AmtVT));
8648   SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
8649   SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
8650   SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT),
8651                                   Tmp4, Tmp6, ISD::SETLE);
8652   SDValue OutOps[] = { OutLo, OutHi };
8653   return DAG.getMergeValues(OutOps, dl);
8654 }
8655 
8656 SDValue PPCTargetLowering::LowerFunnelShift(SDValue Op,
8657                                             SelectionDAG &DAG) const {
8658   SDLoc dl(Op);
8659   EVT VT = Op.getValueType();
8660   unsigned BitWidth = VT.getSizeInBits();
8661 
8662   bool IsFSHL = Op.getOpcode() == ISD::FSHL;
8663   SDValue X = Op.getOperand(0);
8664   SDValue Y = Op.getOperand(1);
8665   SDValue Z = Op.getOperand(2);
8666   EVT AmtVT = Z.getValueType();
8667 
8668   // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
8669   // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
8670   // This is simpler than TargetLowering::expandFunnelShift because we can rely
8671   // on PowerPC shift by BW being well defined.
8672   Z = DAG.getNode(ISD::AND, dl, AmtVT, Z,
8673                   DAG.getConstant(BitWidth - 1, dl, AmtVT));
8674   SDValue SubZ =
8675       DAG.getNode(ISD::SUB, dl, AmtVT, DAG.getConstant(BitWidth, dl, AmtVT), Z);
8676   X = DAG.getNode(PPCISD::SHL, dl, VT, X, IsFSHL ? Z : SubZ);
8677   Y = DAG.getNode(PPCISD::SRL, dl, VT, Y, IsFSHL ? SubZ : Z);
8678   return DAG.getNode(ISD::OR, dl, VT, X, Y);
8679 }
8680 
8681 //===----------------------------------------------------------------------===//
8682 // Vector related lowering.
8683 //
8684 
8685 /// getCanonicalConstSplat - Build a canonical splat immediate of Val with an
8686 /// element size of SplatSize. Cast the result to VT.
8687 static SDValue getCanonicalConstSplat(uint64_t Val, unsigned SplatSize, EVT VT,
8688                                       SelectionDAG &DAG, const SDLoc &dl) {
8689   static const MVT VTys[] = { // canonical VT to use for each size.
8690     MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
8691   };
8692 
8693   EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
8694 
8695   // For a splat with all ones, turn it to vspltisb 0xFF to canonicalize.
8696   if (Val == ((1LU << (SplatSize * 8)) - 1)) {
8697     SplatSize = 1;
8698     Val = 0xFF;
8699   }
8700 
8701   EVT CanonicalVT = VTys[SplatSize-1];
8702 
8703   // Build a canonical splat for this value.
8704   return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT));
8705 }
8706 
8707 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the
8708 /// specified intrinsic ID.
8709 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG,
8710                                 const SDLoc &dl, EVT DestVT = MVT::Other) {
8711   if (DestVT == MVT::Other) DestVT = Op.getValueType();
8712   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8713                      DAG.getConstant(IID, dl, MVT::i32), Op);
8714 }
8715 
8716 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the
8717 /// specified intrinsic ID.
8718 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
8719                                 SelectionDAG &DAG, const SDLoc &dl,
8720                                 EVT DestVT = MVT::Other) {
8721   if (DestVT == MVT::Other) DestVT = LHS.getValueType();
8722   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8723                      DAG.getConstant(IID, dl, MVT::i32), LHS, RHS);
8724 }
8725 
8726 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
8727 /// specified intrinsic ID.
8728 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
8729                                 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl,
8730                                 EVT DestVT = MVT::Other) {
8731   if (DestVT == MVT::Other) DestVT = Op0.getValueType();
8732   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8733                      DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
8734 }
8735 
8736 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
8737 /// amount.  The result has the specified value type.
8738 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT,
8739                            SelectionDAG &DAG, const SDLoc &dl) {
8740   // Force LHS/RHS to be the right type.
8741   LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
8742   RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);
8743 
8744   int Ops[16];
8745   for (unsigned i = 0; i != 16; ++i)
8746     Ops[i] = i + Amt;
8747   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
8748   return DAG.getNode(ISD::BITCAST, dl, VT, T);
8749 }
8750 
8751 /// Do we have an efficient pattern in a .td file for this node?
8752 ///
8753 /// \param V - pointer to the BuildVectorSDNode being matched
8754 /// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves?
8755 ///
8756 /// There are some patterns where it is beneficial to keep a BUILD_VECTOR
8757 /// node as a BUILD_VECTOR node rather than expanding it. The patterns where
8758 /// the opposite is true (expansion is beneficial) are:
8759 /// - The node builds a vector out of integers that are not 32 or 64-bits
8760 /// - The node builds a vector out of constants
8761 /// - The node is a "load-and-splat"
8762 /// In all other cases, we will choose to keep the BUILD_VECTOR.
8763 static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V,
8764                                             bool HasDirectMove,
8765                                             bool HasP8Vector) {
8766   EVT VecVT = V->getValueType(0);
8767   bool RightType = VecVT == MVT::v2f64 ||
8768     (HasP8Vector && VecVT == MVT::v4f32) ||
8769     (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32));
8770   if (!RightType)
8771     return false;
8772 
8773   bool IsSplat = true;
8774   bool IsLoad = false;
8775   SDValue Op0 = V->getOperand(0);
8776 
8777   // This function is called in a block that confirms the node is not a constant
8778   // splat. So a constant BUILD_VECTOR here means the vector is built out of
8779   // different constants.
8780   if (V->isConstant())
8781     return false;
8782   for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
8783     if (V->getOperand(i).isUndef())
8784       return false;
8785     // We want to expand nodes that represent load-and-splat even if the
8786     // loaded value is a floating point truncation or conversion to int.
8787     if (V->getOperand(i).getOpcode() == ISD::LOAD ||
8788         (V->getOperand(i).getOpcode() == ISD::FP_ROUND &&
8789          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
8790         (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT &&
8791          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
8792         (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT &&
8793          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD))
8794       IsLoad = true;
8795     // If the operands are different or the input is not a load and has more
8796     // uses than just this BV node, then it isn't a splat.
8797     if (V->getOperand(i) != Op0 ||
8798         (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode())))
8799       IsSplat = false;
8800   }
8801   return !(IsSplat && IsLoad);
8802 }
8803 
8804 // Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128.
8805 SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
8806 
8807   SDLoc dl(Op);
8808   SDValue Op0 = Op->getOperand(0);
8809 
8810   if ((Op.getValueType() != MVT::f128) ||
8811       (Op0.getOpcode() != ISD::BUILD_PAIR) ||
8812       (Op0.getOperand(0).getValueType() != MVT::i64) ||
8813       (Op0.getOperand(1).getValueType() != MVT::i64))
8814     return SDValue();
8815 
8816   return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0),
8817                      Op0.getOperand(1));
8818 }
8819 
8820 static const SDValue *getNormalLoadInput(const SDValue &Op, bool &IsPermuted) {
8821   const SDValue *InputLoad = &Op;
8822   if (InputLoad->getOpcode() == ISD::BITCAST)
8823     InputLoad = &InputLoad->getOperand(0);
8824   if (InputLoad->getOpcode() == ISD::SCALAR_TO_VECTOR ||
8825       InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED) {
8826     IsPermuted = InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED;
8827     InputLoad = &InputLoad->getOperand(0);
8828   }
8829   if (InputLoad->getOpcode() != ISD::LOAD)
8830     return nullptr;
8831   LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
8832   return ISD::isNormalLoad(LD) ? InputLoad : nullptr;
8833 }
8834 
8835 // Convert the argument APFloat to a single precision APFloat if there is no
8836 // loss in information during the conversion to single precision APFloat and the
8837 // resulting number is not a denormal number. Return true if successful.
8838 bool llvm::convertToNonDenormSingle(APFloat &ArgAPFloat) {
8839   APFloat APFloatToConvert = ArgAPFloat;
8840   bool LosesInfo = true;
8841   APFloatToConvert.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
8842                            &LosesInfo);
8843   bool Success = (!LosesInfo && !APFloatToConvert.isDenormal());
8844   if (Success)
8845     ArgAPFloat = APFloatToConvert;
8846   return Success;
8847 }
8848 
8849 // Bitcast the argument APInt to a double and convert it to a single precision
8850 // APFloat, bitcast the APFloat to an APInt and assign it to the original
8851 // argument if there is no loss in information during the conversion from
8852 // double to single precision APFloat and the resulting number is not a denormal
8853 // number. Return true if successful.
8854 bool llvm::convertToNonDenormSingle(APInt &ArgAPInt) {
8855   double DpValue = ArgAPInt.bitsToDouble();
8856   APFloat APFloatDp(DpValue);
8857   bool Success = convertToNonDenormSingle(APFloatDp);
8858   if (Success)
8859     ArgAPInt = APFloatDp.bitcastToAPInt();
8860   return Success;
8861 }
8862 
8863 // If this is a case we can't handle, return null and let the default
8864 // expansion code take care of it.  If we CAN select this case, and if it
8865 // selects to a single instruction, return Op.  Otherwise, if we can codegen
8866 // this case more efficiently than a constant pool load, lower it to the
8867 // sequence of ops that should be used.
8868 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
8869                                              SelectionDAG &DAG) const {
8870   SDLoc dl(Op);
8871   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
8872   assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
8873 
8874   // Check if this is a splat of a constant value.
8875   APInt APSplatBits, APSplatUndef;
8876   unsigned SplatBitSize;
8877   bool HasAnyUndefs;
8878   bool BVNIsConstantSplat =
8879       BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
8880                            HasAnyUndefs, 0, !Subtarget.isLittleEndian());
8881 
8882   // If it is a splat of a double, check if we can shrink it to a 32 bit
8883   // non-denormal float which when converted back to double gives us the same
8884   // double. This is to exploit the XXSPLTIDP instruction.
8885   if (BVNIsConstantSplat && Subtarget.hasPrefixInstrs() &&
8886       (SplatBitSize == 64) && (Op->getValueType(0) == MVT::v2f64) &&
8887       convertToNonDenormSingle(APSplatBits)) {
8888     SDValue SplatNode = DAG.getNode(
8889         PPCISD::XXSPLTI_SP_TO_DP, dl, MVT::v2f64,
8890         DAG.getTargetConstant(APSplatBits.getZExtValue(), dl, MVT::i32));
8891     return DAG.getBitcast(Op.getValueType(), SplatNode);
8892   }
8893 
8894   if (!BVNIsConstantSplat || SplatBitSize > 32) {
8895 
8896     bool IsPermutedLoad = false;
8897     const SDValue *InputLoad =
8898         getNormalLoadInput(Op.getOperand(0), IsPermutedLoad);
8899     // Handle load-and-splat patterns as we have instructions that will do this
8900     // in one go.
8901     if (InputLoad && DAG.isSplatValue(Op, true)) {
8902       LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
8903 
8904       // We have handling for 4 and 8 byte elements.
8905       unsigned ElementSize = LD->getMemoryVT().getScalarSizeInBits();
8906 
8907       // Checking for a single use of this load, we have to check for vector
8908       // width (128 bits) / ElementSize uses (since each operand of the
8909       // BUILD_VECTOR is a separate use of the value.
8910       if (InputLoad->getNode()->hasNUsesOfValue(128 / ElementSize, 0) &&
8911           ((Subtarget.hasVSX() && ElementSize == 64) ||
8912            (Subtarget.hasP9Vector() && ElementSize == 32))) {
8913         SDValue Ops[] = {
8914           LD->getChain(),    // Chain
8915           LD->getBasePtr(),  // Ptr
8916           DAG.getValueType(Op.getValueType()) // VT
8917         };
8918         return
8919           DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl,
8920                                   DAG.getVTList(Op.getValueType(), MVT::Other),
8921                                   Ops, LD->getMemoryVT(), LD->getMemOperand());
8922       }
8923     }
8924 
8925     // BUILD_VECTOR nodes that are not constant splats of up to 32-bits can be
8926     // lowered to VSX instructions under certain conditions.
8927     // Without VSX, there is no pattern more efficient than expanding the node.
8928     if (Subtarget.hasVSX() &&
8929         haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
8930                                         Subtarget.hasP8Vector()))
8931       return Op;
8932     return SDValue();
8933   }
8934 
8935   uint64_t SplatBits = APSplatBits.getZExtValue();
8936   uint64_t SplatUndef = APSplatUndef.getZExtValue();
8937   unsigned SplatSize = SplatBitSize / 8;
8938 
8939   // First, handle single instruction cases.
8940 
8941   // All zeros?
8942   if (SplatBits == 0) {
8943     // Canonicalize all zero vectors to be v4i32.
8944     if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
8945       SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
8946       Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
8947     }
8948     return Op;
8949   }
8950 
8951   // We have XXSPLTIW for constant splats four bytes wide.
8952   // Given vector length is a multiple of 4, 2-byte splats can be replaced
8953   // with 4-byte splats. We replicate the SplatBits in case of 2-byte splat to
8954   // make a 4-byte splat element. For example: 2-byte splat of 0xABAB can be
8955   // turned into a 4-byte splat of 0xABABABAB.
8956   if (Subtarget.hasPrefixInstrs() && SplatSize == 2)
8957     return getCanonicalConstSplat((SplatBits |= SplatBits << 16), SplatSize * 2,
8958                                   Op.getValueType(), DAG, dl);
8959 
8960   if (Subtarget.hasPrefixInstrs() && SplatSize == 4)
8961     return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
8962                                   dl);
8963 
8964   // We have XXSPLTIB for constant splats one byte wide.
8965   if (Subtarget.hasP9Vector() && SplatSize == 1)
8966     return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
8967                                   dl);
8968 
8969   // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
8970   int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >>
8971                     (32-SplatBitSize));
8972   if (SextVal >= -16 && SextVal <= 15)
8973     return getCanonicalConstSplat(SextVal, SplatSize, Op.getValueType(), DAG,
8974                                   dl);
8975 
8976   // Two instruction sequences.
8977 
8978   // If this value is in the range [-32,30] and is even, use:
8979   //     VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
8980   // If this value is in the range [17,31] and is odd, use:
8981   //     VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
8982   // If this value is in the range [-31,-17] and is odd, use:
8983   //     VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
8984   // Note the last two are three-instruction sequences.
8985   if (SextVal >= -32 && SextVal <= 31) {
8986     // To avoid having these optimizations undone by constant folding,
8987     // we convert to a pseudo that will be expanded later into one of
8988     // the above forms.
8989     SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
8990     EVT VT = (SplatSize == 1 ? MVT::v16i8 :
8991               (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
8992     SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
8993     SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
8994     if (VT == Op.getValueType())
8995       return RetVal;
8996     else
8997       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
8998   }
8999 
9000   // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
9001   // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
9002   // for fneg/fabs.
9003   if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
9004     // Make -1 and vspltisw -1:
9005     SDValue OnesV = getCanonicalConstSplat(-1, 4, MVT::v4i32, DAG, dl);
9006 
9007     // Make the VSLW intrinsic, computing 0x8000_0000.
9008     SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
9009                                    OnesV, DAG, dl);
9010 
9011     // xor by OnesV to invert it.
9012     Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
9013     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9014   }
9015 
9016   // Check to see if this is a wide variety of vsplti*, binop self cases.
9017   static const signed char SplatCsts[] = {
9018     -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
9019     -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
9020   };
9021 
9022   for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
9023     // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
9024     // cases which are ambiguous (e.g. formation of 0x8000_0000).  'vsplti -1'
9025     int i = SplatCsts[idx];
9026 
9027     // Figure out what shift amount will be used by altivec if shifted by i in
9028     // this splat size.
9029     unsigned TypeShiftAmt = i & (SplatBitSize-1);
9030 
9031     // vsplti + shl self.
9032     if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
9033       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9034       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9035         Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
9036         Intrinsic::ppc_altivec_vslw
9037       };
9038       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9039       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9040     }
9041 
9042     // vsplti + srl self.
9043     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
9044       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9045       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9046         Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
9047         Intrinsic::ppc_altivec_vsrw
9048       };
9049       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9050       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9051     }
9052 
9053     // vsplti + sra self.
9054     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
9055       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9056       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9057         Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
9058         Intrinsic::ppc_altivec_vsraw
9059       };
9060       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9061       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9062     }
9063 
9064     // vsplti + rol self.
9065     if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
9066                          ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
9067       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9068       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9069         Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
9070         Intrinsic::ppc_altivec_vrlw
9071       };
9072       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9073       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9074     }
9075 
9076     // t = vsplti c, result = vsldoi t, t, 1
9077     if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
9078       SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9079       unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
9080       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9081     }
9082     // t = vsplti c, result = vsldoi t, t, 2
9083     if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
9084       SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9085       unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
9086       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9087     }
9088     // t = vsplti c, result = vsldoi t, t, 3
9089     if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
9090       SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9091       unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
9092       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9093     }
9094   }
9095 
9096   return SDValue();
9097 }
9098 
9099 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
9100 /// the specified operations to build the shuffle.
9101 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
9102                                       SDValue RHS, SelectionDAG &DAG,
9103                                       const SDLoc &dl) {
9104   unsigned OpNum = (PFEntry >> 26) & 0x0F;
9105   unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
9106   unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
9107 
9108   enum {
9109     OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
9110     OP_VMRGHW,
9111     OP_VMRGLW,
9112     OP_VSPLTISW0,
9113     OP_VSPLTISW1,
9114     OP_VSPLTISW2,
9115     OP_VSPLTISW3,
9116     OP_VSLDOI4,
9117     OP_VSLDOI8,
9118     OP_VSLDOI12
9119   };
9120 
9121   if (OpNum == OP_COPY) {
9122     if (LHSID == (1*9+2)*9+3) return LHS;
9123     assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
9124     return RHS;
9125   }
9126 
9127   SDValue OpLHS, OpRHS;
9128   OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
9129   OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
9130 
9131   int ShufIdxs[16];
9132   switch (OpNum) {
9133   default: llvm_unreachable("Unknown i32 permute!");
9134   case OP_VMRGHW:
9135     ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
9136     ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
9137     ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
9138     ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
9139     break;
9140   case OP_VMRGLW:
9141     ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
9142     ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
9143     ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
9144     ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
9145     break;
9146   case OP_VSPLTISW0:
9147     for (unsigned i = 0; i != 16; ++i)
9148       ShufIdxs[i] = (i&3)+0;
9149     break;
9150   case OP_VSPLTISW1:
9151     for (unsigned i = 0; i != 16; ++i)
9152       ShufIdxs[i] = (i&3)+4;
9153     break;
9154   case OP_VSPLTISW2:
9155     for (unsigned i = 0; i != 16; ++i)
9156       ShufIdxs[i] = (i&3)+8;
9157     break;
9158   case OP_VSPLTISW3:
9159     for (unsigned i = 0; i != 16; ++i)
9160       ShufIdxs[i] = (i&3)+12;
9161     break;
9162   case OP_VSLDOI4:
9163     return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
9164   case OP_VSLDOI8:
9165     return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
9166   case OP_VSLDOI12:
9167     return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
9168   }
9169   EVT VT = OpLHS.getValueType();
9170   OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
9171   OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
9172   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
9173   return DAG.getNode(ISD::BITCAST, dl, VT, T);
9174 }
9175 
9176 /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
9177 /// by the VINSERTB instruction introduced in ISA 3.0, else just return default
9178 /// SDValue.
9179 SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
9180                                            SelectionDAG &DAG) const {
9181   const unsigned BytesInVector = 16;
9182   bool IsLE = Subtarget.isLittleEndian();
9183   SDLoc dl(N);
9184   SDValue V1 = N->getOperand(0);
9185   SDValue V2 = N->getOperand(1);
9186   unsigned ShiftElts = 0, InsertAtByte = 0;
9187   bool Swap = false;
9188 
9189   // Shifts required to get the byte we want at element 7.
9190   unsigned LittleEndianShifts[] = {8, 7,  6,  5,  4,  3,  2,  1,
9191                                    0, 15, 14, 13, 12, 11, 10, 9};
9192   unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
9193                                 1, 2,  3,  4,  5,  6,  7,  8};
9194 
9195   ArrayRef<int> Mask = N->getMask();
9196   int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
9197 
9198   // For each mask element, find out if we're just inserting something
9199   // from V2 into V1 or vice versa.
9200   // Possible permutations inserting an element from V2 into V1:
9201   //   X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9202   //   0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9203   //   ...
9204   //   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
9205   // Inserting from V1 into V2 will be similar, except mask range will be
9206   // [16,31].
9207 
9208   bool FoundCandidate = false;
9209   // If both vector operands for the shuffle are the same vector, the mask
9210   // will contain only elements from the first one and the second one will be
9211   // undef.
9212   unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
9213   // Go through the mask of half-words to find an element that's being moved
9214   // from one vector to the other.
9215   for (unsigned i = 0; i < BytesInVector; ++i) {
9216     unsigned CurrentElement = Mask[i];
9217     // If 2nd operand is undefined, we should only look for element 7 in the
9218     // Mask.
9219     if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
9220       continue;
9221 
9222     bool OtherElementsInOrder = true;
9223     // Examine the other elements in the Mask to see if they're in original
9224     // order.
9225     for (unsigned j = 0; j < BytesInVector; ++j) {
9226       if (j == i)
9227         continue;
9228       // If CurrentElement is from V1 [0,15], then we the rest of the Mask to be
9229       // from V2 [16,31] and vice versa.  Unless the 2nd operand is undefined,
9230       // in which we always assume we're always picking from the 1st operand.
9231       int MaskOffset =
9232           (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
9233       if (Mask[j] != OriginalOrder[j] + MaskOffset) {
9234         OtherElementsInOrder = false;
9235         break;
9236       }
9237     }
9238     // If other elements are in original order, we record the number of shifts
9239     // we need to get the element we want into element 7. Also record which byte
9240     // in the vector we should insert into.
9241     if (OtherElementsInOrder) {
9242       // If 2nd operand is undefined, we assume no shifts and no swapping.
9243       if (V2.isUndef()) {
9244         ShiftElts = 0;
9245         Swap = false;
9246       } else {
9247         // Only need the last 4-bits for shifts because operands will be swapped if CurrentElement is >= 2^4.
9248         ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
9249                          : BigEndianShifts[CurrentElement & 0xF];
9250         Swap = CurrentElement < BytesInVector;
9251       }
9252       InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
9253       FoundCandidate = true;
9254       break;
9255     }
9256   }
9257 
9258   if (!FoundCandidate)
9259     return SDValue();
9260 
9261   // Candidate found, construct the proper SDAG sequence with VINSERTB,
9262   // optionally with VECSHL if shift is required.
9263   if (Swap)
9264     std::swap(V1, V2);
9265   if (V2.isUndef())
9266     V2 = V1;
9267   if (ShiftElts) {
9268     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9269                               DAG.getConstant(ShiftElts, dl, MVT::i32));
9270     return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl,
9271                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
9272   }
9273   return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2,
9274                      DAG.getConstant(InsertAtByte, dl, MVT::i32));
9275 }
9276 
9277 /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled
9278 /// by the VINSERTH instruction introduced in ISA 3.0, else just return default
9279 /// SDValue.
9280 SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
9281                                            SelectionDAG &DAG) const {
9282   const unsigned NumHalfWords = 8;
9283   const unsigned BytesInVector = NumHalfWords * 2;
9284   // Check that the shuffle is on half-words.
9285   if (!isNByteElemShuffleMask(N, 2, 1))
9286     return SDValue();
9287 
9288   bool IsLE = Subtarget.isLittleEndian();
9289   SDLoc dl(N);
9290   SDValue V1 = N->getOperand(0);
9291   SDValue V2 = N->getOperand(1);
9292   unsigned ShiftElts = 0, InsertAtByte = 0;
9293   bool Swap = false;
9294 
9295   // Shifts required to get the half-word we want at element 3.
9296   unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
9297   unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};
9298 
9299   uint32_t Mask = 0;
9300   uint32_t OriginalOrderLow = 0x1234567;
9301   uint32_t OriginalOrderHigh = 0x89ABCDEF;
9302   // Now we look at mask elements 0,2,4,6,8,10,12,14.  Pack the mask into a
9303   // 32-bit space, only need 4-bit nibbles per element.
9304   for (unsigned i = 0; i < NumHalfWords; ++i) {
9305     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9306     Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift);
9307   }
9308 
9309   // For each mask element, find out if we're just inserting something
9310   // from V2 into V1 or vice versa.  Possible permutations inserting an element
9311   // from V2 into V1:
9312   //   X, 1, 2, 3, 4, 5, 6, 7
9313   //   0, X, 2, 3, 4, 5, 6, 7
9314   //   0, 1, X, 3, 4, 5, 6, 7
9315   //   0, 1, 2, X, 4, 5, 6, 7
9316   //   0, 1, 2, 3, X, 5, 6, 7
9317   //   0, 1, 2, 3, 4, X, 6, 7
9318   //   0, 1, 2, 3, 4, 5, X, 7
9319   //   0, 1, 2, 3, 4, 5, 6, X
9320   // Inserting from V1 into V2 will be similar, except mask range will be [8,15].
9321 
9322   bool FoundCandidate = false;
9323   // Go through the mask of half-words to find an element that's being moved
9324   // from one vector to the other.
9325   for (unsigned i = 0; i < NumHalfWords; ++i) {
9326     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9327     uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF;
9328     uint32_t MaskOtherElts = ~(0xF << MaskShift);
9329     uint32_t TargetOrder = 0x0;
9330 
9331     // If both vector operands for the shuffle are the same vector, the mask
9332     // will contain only elements from the first one and the second one will be
9333     // undef.
9334     if (V2.isUndef()) {
9335       ShiftElts = 0;
9336       unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
9337       TargetOrder = OriginalOrderLow;
9338       Swap = false;
9339       // Skip if not the correct element or mask of other elements don't equal
9340       // to our expected order.
9341       if (MaskOneElt == VINSERTHSrcElem &&
9342           (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9343         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9344         FoundCandidate = true;
9345         break;
9346       }
9347     } else { // If both operands are defined.
9348       // Target order is [8,15] if the current mask is between [0,7].
9349       TargetOrder =
9350           (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
9351       // Skip if mask of other elements don't equal our expected order.
9352       if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9353         // We only need the last 3 bits for the number of shifts.
9354         ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
9355                          : BigEndianShifts[MaskOneElt & 0x7];
9356         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9357         Swap = MaskOneElt < NumHalfWords;
9358         FoundCandidate = true;
9359         break;
9360       }
9361     }
9362   }
9363 
9364   if (!FoundCandidate)
9365     return SDValue();
9366 
9367   // Candidate found, construct the proper SDAG sequence with VINSERTH,
9368   // optionally with VECSHL if shift is required.
9369   if (Swap)
9370     std::swap(V1, V2);
9371   if (V2.isUndef())
9372     V2 = V1;
9373   SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
9374   if (ShiftElts) {
9375     // Double ShiftElts because we're left shifting on v16i8 type.
9376     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9377                               DAG.getConstant(2 * ShiftElts, dl, MVT::i32));
9378     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl);
9379     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9380                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
9381     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9382   }
9383   SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
9384   SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9385                             DAG.getConstant(InsertAtByte, dl, MVT::i32));
9386   return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9387 }
9388 
9389 /// lowerToXXSPLTI32DX - Return the SDValue if this VECTOR_SHUFFLE can be
9390 /// handled by the XXSPLTI32DX instruction introduced in ISA 3.1, otherwise
9391 /// return the default SDValue.
9392 SDValue PPCTargetLowering::lowerToXXSPLTI32DX(ShuffleVectorSDNode *SVN,
9393                                               SelectionDAG &DAG) const {
9394   // The LHS and RHS may be bitcasts to v16i8 as we canonicalize shuffles
9395   // to v16i8. Peek through the bitcasts to get the actual operands.
9396   SDValue LHS = peekThroughBitcasts(SVN->getOperand(0));
9397   SDValue RHS = peekThroughBitcasts(SVN->getOperand(1));
9398 
9399   auto ShuffleMask = SVN->getMask();
9400   SDValue VecShuffle(SVN, 0);
9401   SDLoc DL(SVN);
9402 
9403   // Check that we have a four byte shuffle.
9404   if (!isNByteElemShuffleMask(SVN, 4, 1))
9405     return SDValue();
9406 
9407   // Canonicalize the RHS being a BUILD_VECTOR when lowering to xxsplti32dx.
9408   if (RHS->getOpcode() != ISD::BUILD_VECTOR) {
9409     std::swap(LHS, RHS);
9410     VecShuffle = DAG.getCommutedVectorShuffle(*SVN);
9411     ShuffleMask = cast<ShuffleVectorSDNode>(VecShuffle)->getMask();
9412   }
9413 
9414   // Ensure that the RHS is a vector of constants.
9415   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
9416   if (!BVN)
9417     return SDValue();
9418 
9419   // Check if RHS is a splat of 4-bytes (or smaller).
9420   APInt APSplatValue, APSplatUndef;
9421   unsigned SplatBitSize;
9422   bool HasAnyUndefs;
9423   if (!BVN->isConstantSplat(APSplatValue, APSplatUndef, SplatBitSize,
9424                             HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
9425       SplatBitSize > 32)
9426     return SDValue();
9427 
9428   // Check that the shuffle mask matches the semantics of XXSPLTI32DX.
9429   // The instruction splats a constant C into two words of the source vector
9430   // producing { C, Unchanged, C, Unchanged } or { Unchanged, C, Unchanged, C }.
9431   // Thus we check that the shuffle mask is the equivalent  of
9432   // <0, [4-7], 2, [4-7]> or <[4-7], 1, [4-7], 3> respectively.
9433   // Note: the check above of isNByteElemShuffleMask() ensures that the bytes
9434   // within each word are consecutive, so we only need to check the first byte.
9435   SDValue Index;
9436   bool IsLE = Subtarget.isLittleEndian();
9437   if ((ShuffleMask[0] == 0 && ShuffleMask[8] == 8) &&
9438       (ShuffleMask[4] % 4 == 0 && ShuffleMask[12] % 4 == 0 &&
9439        ShuffleMask[4] > 15 && ShuffleMask[12] > 15))
9440     Index = DAG.getTargetConstant(IsLE ? 0 : 1, DL, MVT::i32);
9441   else if ((ShuffleMask[4] == 4 && ShuffleMask[12] == 12) &&
9442            (ShuffleMask[0] % 4 == 0 && ShuffleMask[8] % 4 == 0 &&
9443             ShuffleMask[0] > 15 && ShuffleMask[8] > 15))
9444     Index = DAG.getTargetConstant(IsLE ? 1 : 0, DL, MVT::i32);
9445   else
9446     return SDValue();
9447 
9448   // If the splat is narrower than 32-bits, we need to get the 32-bit value
9449   // for XXSPLTI32DX.
9450   unsigned SplatVal = APSplatValue.getZExtValue();
9451   for (; SplatBitSize < 32; SplatBitSize <<= 1)
9452     SplatVal |= (SplatVal << SplatBitSize);
9453 
9454   SDValue SplatNode = DAG.getNode(
9455       PPCISD::XXSPLTI32DX, DL, MVT::v2i64, DAG.getBitcast(MVT::v2i64, LHS),
9456       Index, DAG.getTargetConstant(SplatVal, DL, MVT::i32));
9457   return DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, SplatNode);
9458 }
9459 
9460 /// LowerROTL - Custom lowering for ROTL(v1i128) to vector_shuffle(v16i8).
9461 /// We lower ROTL(v1i128) to vector_shuffle(v16i8) only if shift amount is
9462 /// a multiple of 8. Otherwise convert it to a scalar rotation(i128)
9463 /// i.e (or (shl x, C1), (srl x, 128-C1)).
9464 SDValue PPCTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const {
9465   assert(Op.getOpcode() == ISD::ROTL && "Should only be called for ISD::ROTL");
9466   assert(Op.getValueType() == MVT::v1i128 &&
9467          "Only set v1i128 as custom, other type shouldn't reach here!");
9468   SDLoc dl(Op);
9469   SDValue N0 = peekThroughBitcasts(Op.getOperand(0));
9470   SDValue N1 = peekThroughBitcasts(Op.getOperand(1));
9471   unsigned SHLAmt = N1.getConstantOperandVal(0);
9472   if (SHLAmt % 8 == 0) {
9473     SmallVector<int, 16> Mask(16, 0);
9474     std::iota(Mask.begin(), Mask.end(), 0);
9475     std::rotate(Mask.begin(), Mask.begin() + SHLAmt / 8, Mask.end());
9476     if (SDValue Shuffle =
9477             DAG.getVectorShuffle(MVT::v16i8, dl, DAG.getBitcast(MVT::v16i8, N0),
9478                                  DAG.getUNDEF(MVT::v16i8), Mask))
9479       return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, Shuffle);
9480   }
9481   SDValue ArgVal = DAG.getBitcast(MVT::i128, N0);
9482   SDValue SHLOp = DAG.getNode(ISD::SHL, dl, MVT::i128, ArgVal,
9483                               DAG.getConstant(SHLAmt, dl, MVT::i32));
9484   SDValue SRLOp = DAG.getNode(ISD::SRL, dl, MVT::i128, ArgVal,
9485                               DAG.getConstant(128 - SHLAmt, dl, MVT::i32));
9486   SDValue OROp = DAG.getNode(ISD::OR, dl, MVT::i128, SHLOp, SRLOp);
9487   return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, OROp);
9488 }
9489 
9490 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
9491 /// is a shuffle we can handle in a single instruction, return it.  Otherwise,
9492 /// return the code it can be lowered into.  Worst case, it can always be
9493 /// lowered into a vperm.
9494 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
9495                                                SelectionDAG &DAG) const {
9496   SDLoc dl(Op);
9497   SDValue V1 = Op.getOperand(0);
9498   SDValue V2 = Op.getOperand(1);
9499   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9500 
9501   // Any nodes that were combined in the target-independent combiner prior
9502   // to vector legalization will not be sent to the target combine. Try to
9503   // combine it here.
9504   if (SDValue NewShuffle = combineVectorShuffle(SVOp, DAG)) {
9505     if (!isa<ShuffleVectorSDNode>(NewShuffle))
9506       return NewShuffle;
9507     Op = NewShuffle;
9508     SVOp = cast<ShuffleVectorSDNode>(Op);
9509     V1 = Op.getOperand(0);
9510     V2 = Op.getOperand(1);
9511   }
9512   EVT VT = Op.getValueType();
9513   bool isLittleEndian = Subtarget.isLittleEndian();
9514 
9515   unsigned ShiftElts, InsertAtByte;
9516   bool Swap = false;
9517 
9518   // If this is a load-and-splat, we can do that with a single instruction
9519   // in some cases. However if the load has multiple uses, we don't want to
9520   // combine it because that will just produce multiple loads.
9521   bool IsPermutedLoad = false;
9522   const SDValue *InputLoad = getNormalLoadInput(V1, IsPermutedLoad);
9523   if (InputLoad && Subtarget.hasVSX() && V2.isUndef() &&
9524       (PPC::isSplatShuffleMask(SVOp, 4) || PPC::isSplatShuffleMask(SVOp, 8)) &&
9525       InputLoad->hasOneUse()) {
9526     bool IsFourByte = PPC::isSplatShuffleMask(SVOp, 4);
9527     int SplatIdx =
9528       PPC::getSplatIdxForPPCMnemonics(SVOp, IsFourByte ? 4 : 8, DAG);
9529 
9530     // The splat index for permuted loads will be in the left half of the vector
9531     // which is strictly wider than the loaded value by 8 bytes. So we need to
9532     // adjust the splat index to point to the correct address in memory.
9533     if (IsPermutedLoad) {
9534       assert(isLittleEndian && "Unexpected permuted load on big endian target");
9535       SplatIdx += IsFourByte ? 2 : 1;
9536       assert((SplatIdx < (IsFourByte ? 4 : 2)) &&
9537              "Splat of a value outside of the loaded memory");
9538     }
9539 
9540     LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9541     // For 4-byte load-and-splat, we need Power9.
9542     if ((IsFourByte && Subtarget.hasP9Vector()) || !IsFourByte) {
9543       uint64_t Offset = 0;
9544       if (IsFourByte)
9545         Offset = isLittleEndian ? (3 - SplatIdx) * 4 : SplatIdx * 4;
9546       else
9547         Offset = isLittleEndian ? (1 - SplatIdx) * 8 : SplatIdx * 8;
9548 
9549       SDValue BasePtr = LD->getBasePtr();
9550       if (Offset != 0)
9551         BasePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
9552                               BasePtr, DAG.getIntPtrConstant(Offset, dl));
9553       SDValue Ops[] = {
9554         LD->getChain(),    // Chain
9555         BasePtr,           // BasePtr
9556         DAG.getValueType(Op.getValueType()) // VT
9557       };
9558       SDVTList VTL =
9559         DAG.getVTList(IsFourByte ? MVT::v4i32 : MVT::v2i64, MVT::Other);
9560       SDValue LdSplt =
9561         DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl, VTL,
9562                                 Ops, LD->getMemoryVT(), LD->getMemOperand());
9563       if (LdSplt.getValueType() != SVOp->getValueType(0))
9564         LdSplt = DAG.getBitcast(SVOp->getValueType(0), LdSplt);
9565       return LdSplt;
9566     }
9567   }
9568   if (Subtarget.hasP9Vector() &&
9569       PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
9570                            isLittleEndian)) {
9571     if (Swap)
9572       std::swap(V1, V2);
9573     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9574     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2);
9575     if (ShiftElts) {
9576       SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2,
9577                                 DAG.getConstant(ShiftElts, dl, MVT::i32));
9578       SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl,
9579                                 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9580       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9581     }
9582     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2,
9583                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
9584     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9585   }
9586 
9587   if (Subtarget.hasPrefixInstrs()) {
9588     SDValue SplatInsertNode;
9589     if ((SplatInsertNode = lowerToXXSPLTI32DX(SVOp, DAG)))
9590       return SplatInsertNode;
9591   }
9592 
9593   if (Subtarget.hasP9Altivec()) {
9594     SDValue NewISDNode;
9595     if ((NewISDNode = lowerToVINSERTH(SVOp, DAG)))
9596       return NewISDNode;
9597 
9598     if ((NewISDNode = lowerToVINSERTB(SVOp, DAG)))
9599       return NewISDNode;
9600   }
9601 
9602   if (Subtarget.hasVSX() &&
9603       PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
9604     if (Swap)
9605       std::swap(V1, V2);
9606     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9607     SDValue Conv2 =
9608         DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2);
9609 
9610     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2,
9611                               DAG.getConstant(ShiftElts, dl, MVT::i32));
9612     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl);
9613   }
9614 
9615   if (Subtarget.hasVSX() &&
9616     PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
9617     if (Swap)
9618       std::swap(V1, V2);
9619     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
9620     SDValue Conv2 =
9621         DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2);
9622 
9623     SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2,
9624                               DAG.getConstant(ShiftElts, dl, MVT::i32));
9625     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI);
9626   }
9627 
9628   if (Subtarget.hasP9Vector()) {
9629      if (PPC::isXXBRHShuffleMask(SVOp)) {
9630       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
9631       SDValue ReveHWord = DAG.getNode(ISD::BSWAP, dl, MVT::v8i16, Conv);
9632       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord);
9633     } else if (PPC::isXXBRWShuffleMask(SVOp)) {
9634       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9635       SDValue ReveWord = DAG.getNode(ISD::BSWAP, dl, MVT::v4i32, Conv);
9636       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord);
9637     } else if (PPC::isXXBRDShuffleMask(SVOp)) {
9638       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
9639       SDValue ReveDWord = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Conv);
9640       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord);
9641     } else if (PPC::isXXBRQShuffleMask(SVOp)) {
9642       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1);
9643       SDValue ReveQWord = DAG.getNode(ISD::BSWAP, dl, MVT::v1i128, Conv);
9644       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord);
9645     }
9646   }
9647 
9648   if (Subtarget.hasVSX()) {
9649     if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
9650       int SplatIdx = PPC::getSplatIdxForPPCMnemonics(SVOp, 4, DAG);
9651 
9652       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9653       SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
9654                                   DAG.getConstant(SplatIdx, dl, MVT::i32));
9655       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
9656     }
9657 
9658     // Left shifts of 8 bytes are actually swaps. Convert accordingly.
9659     if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
9660       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
9661       SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
9662       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
9663     }
9664   }
9665 
9666   // Cases that are handled by instructions that take permute immediates
9667   // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
9668   // selected by the instruction selector.
9669   if (V2.isUndef()) {
9670     if (PPC::isSplatShuffleMask(SVOp, 1) ||
9671         PPC::isSplatShuffleMask(SVOp, 2) ||
9672         PPC::isSplatShuffleMask(SVOp, 4) ||
9673         PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
9674         PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
9675         PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
9676         PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
9677         PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
9678         PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
9679         PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
9680         PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
9681         PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) ||
9682         (Subtarget.hasP8Altivec() && (
9683          PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) ||
9684          PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) ||
9685          PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) {
9686       return Op;
9687     }
9688   }
9689 
9690   // Altivec has a variety of "shuffle immediates" that take two vector inputs
9691   // and produce a fixed permutation.  If any of these match, do not lower to
9692   // VPERM.
9693   unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
9694   if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9695       PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9696       PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
9697       PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
9698       PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
9699       PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
9700       PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
9701       PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
9702       PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
9703       (Subtarget.hasP8Altivec() && (
9704        PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9705        PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) ||
9706        PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG))))
9707     return Op;
9708 
9709   // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
9710   // perfect shuffle table to emit an optimal matching sequence.
9711   ArrayRef<int> PermMask = SVOp->getMask();
9712 
9713   unsigned PFIndexes[4];
9714   bool isFourElementShuffle = true;
9715   for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
9716     unsigned EltNo = 8;   // Start out undef.
9717     for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
9718       if (PermMask[i*4+j] < 0)
9719         continue;   // Undef, ignore it.
9720 
9721       unsigned ByteSource = PermMask[i*4+j];
9722       if ((ByteSource & 3) != j) {
9723         isFourElementShuffle = false;
9724         break;
9725       }
9726 
9727       if (EltNo == 8) {
9728         EltNo = ByteSource/4;
9729       } else if (EltNo != ByteSource/4) {
9730         isFourElementShuffle = false;
9731         break;
9732       }
9733     }
9734     PFIndexes[i] = EltNo;
9735   }
9736 
9737   // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
9738   // perfect shuffle vector to determine if it is cost effective to do this as
9739   // discrete instructions, or whether we should use a vperm.
9740   // For now, we skip this for little endian until such time as we have a
9741   // little-endian perfect shuffle table.
9742   if (isFourElementShuffle && !isLittleEndian) {
9743     // Compute the index in the perfect shuffle table.
9744     unsigned PFTableIndex =
9745       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
9746 
9747     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
9748     unsigned Cost  = (PFEntry >> 30);
9749 
9750     // Determining when to avoid vperm is tricky.  Many things affect the cost
9751     // of vperm, particularly how many times the perm mask needs to be computed.
9752     // For example, if the perm mask can be hoisted out of a loop or is already
9753     // used (perhaps because there are multiple permutes with the same shuffle
9754     // mask?) the vperm has a cost of 1.  OTOH, hoisting the permute mask out of
9755     // the loop requires an extra register.
9756     //
9757     // As a compromise, we only emit discrete instructions if the shuffle can be
9758     // generated in 3 or fewer operations.  When we have loop information
9759     // available, if this block is within a loop, we should avoid using vperm
9760     // for 3-operation perms and use a constant pool load instead.
9761     if (Cost < 3)
9762       return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
9763   }
9764 
9765   // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
9766   // vector that will get spilled to the constant pool.
9767   if (V2.isUndef()) V2 = V1;
9768 
9769   // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
9770   // that it is in input element units, not in bytes.  Convert now.
9771 
9772   // For little endian, the order of the input vectors is reversed, and
9773   // the permutation mask is complemented with respect to 31.  This is
9774   // necessary to produce proper semantics with the big-endian-biased vperm
9775   // instruction.
9776   EVT EltVT = V1.getValueType().getVectorElementType();
9777   unsigned BytesPerElement = EltVT.getSizeInBits()/8;
9778 
9779   SmallVector<SDValue, 16> ResultMask;
9780   for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
9781     unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
9782 
9783     for (unsigned j = 0; j != BytesPerElement; ++j)
9784       if (isLittleEndian)
9785         ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
9786                                              dl, MVT::i32));
9787       else
9788         ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
9789                                              MVT::i32));
9790   }
9791 
9792   ShufflesHandledWithVPERM++;
9793   SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
9794   LLVM_DEBUG(dbgs() << "Emitting a VPERM for the following shuffle:\n");
9795   LLVM_DEBUG(SVOp->dump());
9796   LLVM_DEBUG(dbgs() << "With the following permute control vector:\n");
9797   LLVM_DEBUG(VPermMask.dump());
9798 
9799   if (isLittleEndian)
9800     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
9801                        V2, V1, VPermMask);
9802   else
9803     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
9804                        V1, V2, VPermMask);
9805 }
9806 
9807 /// getVectorCompareInfo - Given an intrinsic, return false if it is not a
9808 /// vector comparison.  If it is, return true and fill in Opc/isDot with
9809 /// information about the intrinsic.
9810 static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
9811                                  bool &isDot, const PPCSubtarget &Subtarget) {
9812   unsigned IntrinsicID =
9813       cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
9814   CompareOpc = -1;
9815   isDot = false;
9816   switch (IntrinsicID) {
9817   default:
9818     return false;
9819   // Comparison predicates.
9820   case Intrinsic::ppc_altivec_vcmpbfp_p:
9821     CompareOpc = 966;
9822     isDot = true;
9823     break;
9824   case Intrinsic::ppc_altivec_vcmpeqfp_p:
9825     CompareOpc = 198;
9826     isDot = true;
9827     break;
9828   case Intrinsic::ppc_altivec_vcmpequb_p:
9829     CompareOpc = 6;
9830     isDot = true;
9831     break;
9832   case Intrinsic::ppc_altivec_vcmpequh_p:
9833     CompareOpc = 70;
9834     isDot = true;
9835     break;
9836   case Intrinsic::ppc_altivec_vcmpequw_p:
9837     CompareOpc = 134;
9838     isDot = true;
9839     break;
9840   case Intrinsic::ppc_altivec_vcmpequd_p:
9841     if (Subtarget.hasP8Altivec()) {
9842       CompareOpc = 199;
9843       isDot = true;
9844     } else
9845       return false;
9846     break;
9847   case Intrinsic::ppc_altivec_vcmpneb_p:
9848   case Intrinsic::ppc_altivec_vcmpneh_p:
9849   case Intrinsic::ppc_altivec_vcmpnew_p:
9850   case Intrinsic::ppc_altivec_vcmpnezb_p:
9851   case Intrinsic::ppc_altivec_vcmpnezh_p:
9852   case Intrinsic::ppc_altivec_vcmpnezw_p:
9853     if (Subtarget.hasP9Altivec()) {
9854       switch (IntrinsicID) {
9855       default:
9856         llvm_unreachable("Unknown comparison intrinsic.");
9857       case Intrinsic::ppc_altivec_vcmpneb_p:
9858         CompareOpc = 7;
9859         break;
9860       case Intrinsic::ppc_altivec_vcmpneh_p:
9861         CompareOpc = 71;
9862         break;
9863       case Intrinsic::ppc_altivec_vcmpnew_p:
9864         CompareOpc = 135;
9865         break;
9866       case Intrinsic::ppc_altivec_vcmpnezb_p:
9867         CompareOpc = 263;
9868         break;
9869       case Intrinsic::ppc_altivec_vcmpnezh_p:
9870         CompareOpc = 327;
9871         break;
9872       case Intrinsic::ppc_altivec_vcmpnezw_p:
9873         CompareOpc = 391;
9874         break;
9875       }
9876       isDot = true;
9877     } else
9878       return false;
9879     break;
9880   case Intrinsic::ppc_altivec_vcmpgefp_p:
9881     CompareOpc = 454;
9882     isDot = true;
9883     break;
9884   case Intrinsic::ppc_altivec_vcmpgtfp_p:
9885     CompareOpc = 710;
9886     isDot = true;
9887     break;
9888   case Intrinsic::ppc_altivec_vcmpgtsb_p:
9889     CompareOpc = 774;
9890     isDot = true;
9891     break;
9892   case Intrinsic::ppc_altivec_vcmpgtsh_p:
9893     CompareOpc = 838;
9894     isDot = true;
9895     break;
9896   case Intrinsic::ppc_altivec_vcmpgtsw_p:
9897     CompareOpc = 902;
9898     isDot = true;
9899     break;
9900   case Intrinsic::ppc_altivec_vcmpgtsd_p:
9901     if (Subtarget.hasP8Altivec()) {
9902       CompareOpc = 967;
9903       isDot = true;
9904     } else
9905       return false;
9906     break;
9907   case Intrinsic::ppc_altivec_vcmpgtub_p:
9908     CompareOpc = 518;
9909     isDot = true;
9910     break;
9911   case Intrinsic::ppc_altivec_vcmpgtuh_p:
9912     CompareOpc = 582;
9913     isDot = true;
9914     break;
9915   case Intrinsic::ppc_altivec_vcmpgtuw_p:
9916     CompareOpc = 646;
9917     isDot = true;
9918     break;
9919   case Intrinsic::ppc_altivec_vcmpgtud_p:
9920     if (Subtarget.hasP8Altivec()) {
9921       CompareOpc = 711;
9922       isDot = true;
9923     } else
9924       return false;
9925     break;
9926 
9927   // VSX predicate comparisons use the same infrastructure
9928   case Intrinsic::ppc_vsx_xvcmpeqdp_p:
9929   case Intrinsic::ppc_vsx_xvcmpgedp_p:
9930   case Intrinsic::ppc_vsx_xvcmpgtdp_p:
9931   case Intrinsic::ppc_vsx_xvcmpeqsp_p:
9932   case Intrinsic::ppc_vsx_xvcmpgesp_p:
9933   case Intrinsic::ppc_vsx_xvcmpgtsp_p:
9934     if (Subtarget.hasVSX()) {
9935       switch (IntrinsicID) {
9936       case Intrinsic::ppc_vsx_xvcmpeqdp_p:
9937         CompareOpc = 99;
9938         break;
9939       case Intrinsic::ppc_vsx_xvcmpgedp_p:
9940         CompareOpc = 115;
9941         break;
9942       case Intrinsic::ppc_vsx_xvcmpgtdp_p:
9943         CompareOpc = 107;
9944         break;
9945       case Intrinsic::ppc_vsx_xvcmpeqsp_p:
9946         CompareOpc = 67;
9947         break;
9948       case Intrinsic::ppc_vsx_xvcmpgesp_p:
9949         CompareOpc = 83;
9950         break;
9951       case Intrinsic::ppc_vsx_xvcmpgtsp_p:
9952         CompareOpc = 75;
9953         break;
9954       }
9955       isDot = true;
9956     } else
9957       return false;
9958     break;
9959 
9960   // Normal Comparisons.
9961   case Intrinsic::ppc_altivec_vcmpbfp:
9962     CompareOpc = 966;
9963     break;
9964   case Intrinsic::ppc_altivec_vcmpeqfp:
9965     CompareOpc = 198;
9966     break;
9967   case Intrinsic::ppc_altivec_vcmpequb:
9968     CompareOpc = 6;
9969     break;
9970   case Intrinsic::ppc_altivec_vcmpequh:
9971     CompareOpc = 70;
9972     break;
9973   case Intrinsic::ppc_altivec_vcmpequw:
9974     CompareOpc = 134;
9975     break;
9976   case Intrinsic::ppc_altivec_vcmpequd:
9977     if (Subtarget.hasP8Altivec())
9978       CompareOpc = 199;
9979     else
9980       return false;
9981     break;
9982   case Intrinsic::ppc_altivec_vcmpneb:
9983   case Intrinsic::ppc_altivec_vcmpneh:
9984   case Intrinsic::ppc_altivec_vcmpnew:
9985   case Intrinsic::ppc_altivec_vcmpnezb:
9986   case Intrinsic::ppc_altivec_vcmpnezh:
9987   case Intrinsic::ppc_altivec_vcmpnezw:
9988     if (Subtarget.hasP9Altivec())
9989       switch (IntrinsicID) {
9990       default:
9991         llvm_unreachable("Unknown comparison intrinsic.");
9992       case Intrinsic::ppc_altivec_vcmpneb:
9993         CompareOpc = 7;
9994         break;
9995       case Intrinsic::ppc_altivec_vcmpneh:
9996         CompareOpc = 71;
9997         break;
9998       case Intrinsic::ppc_altivec_vcmpnew:
9999         CompareOpc = 135;
10000         break;
10001       case Intrinsic::ppc_altivec_vcmpnezb:
10002         CompareOpc = 263;
10003         break;
10004       case Intrinsic::ppc_altivec_vcmpnezh:
10005         CompareOpc = 327;
10006         break;
10007       case Intrinsic::ppc_altivec_vcmpnezw:
10008         CompareOpc = 391;
10009         break;
10010       }
10011     else
10012       return false;
10013     break;
10014   case Intrinsic::ppc_altivec_vcmpgefp:
10015     CompareOpc = 454;
10016     break;
10017   case Intrinsic::ppc_altivec_vcmpgtfp:
10018     CompareOpc = 710;
10019     break;
10020   case Intrinsic::ppc_altivec_vcmpgtsb:
10021     CompareOpc = 774;
10022     break;
10023   case Intrinsic::ppc_altivec_vcmpgtsh:
10024     CompareOpc = 838;
10025     break;
10026   case Intrinsic::ppc_altivec_vcmpgtsw:
10027     CompareOpc = 902;
10028     break;
10029   case Intrinsic::ppc_altivec_vcmpgtsd:
10030     if (Subtarget.hasP8Altivec())
10031       CompareOpc = 967;
10032     else
10033       return false;
10034     break;
10035   case Intrinsic::ppc_altivec_vcmpgtub:
10036     CompareOpc = 518;
10037     break;
10038   case Intrinsic::ppc_altivec_vcmpgtuh:
10039     CompareOpc = 582;
10040     break;
10041   case Intrinsic::ppc_altivec_vcmpgtuw:
10042     CompareOpc = 646;
10043     break;
10044   case Intrinsic::ppc_altivec_vcmpgtud:
10045     if (Subtarget.hasP8Altivec())
10046       CompareOpc = 711;
10047     else
10048       return false;
10049     break;
10050   }
10051   return true;
10052 }
10053 
10054 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
10055 /// lower, do it, otherwise return null.
10056 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
10057                                                    SelectionDAG &DAG) const {
10058   unsigned IntrinsicID =
10059     cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
10060 
10061   SDLoc dl(Op);
10062 
10063   if (IntrinsicID == Intrinsic::thread_pointer) {
10064     // Reads the thread pointer register, used for __builtin_thread_pointer.
10065     if (Subtarget.isPPC64())
10066       return DAG.getRegister(PPC::X13, MVT::i64);
10067     return DAG.getRegister(PPC::R2, MVT::i32);
10068   }
10069 
10070   // If this is a lowered altivec predicate compare, CompareOpc is set to the
10071   // opcode number of the comparison.
10072   int CompareOpc;
10073   bool isDot;
10074   if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget))
10075     return SDValue();    // Don't custom lower most intrinsics.
10076 
10077   // If this is a non-dot comparison, make the VCMP node and we are done.
10078   if (!isDot) {
10079     SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
10080                               Op.getOperand(1), Op.getOperand(2),
10081                               DAG.getConstant(CompareOpc, dl, MVT::i32));
10082     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
10083   }
10084 
10085   // Create the PPCISD altivec 'dot' comparison node.
10086   SDValue Ops[] = {
10087     Op.getOperand(2),  // LHS
10088     Op.getOperand(3),  // RHS
10089     DAG.getConstant(CompareOpc, dl, MVT::i32)
10090   };
10091   EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
10092   SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);
10093 
10094   // Now that we have the comparison, emit a copy from the CR to a GPR.
10095   // This is flagged to the above dot comparison.
10096   SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
10097                                 DAG.getRegister(PPC::CR6, MVT::i32),
10098                                 CompNode.getValue(1));
10099 
10100   // Unpack the result based on how the target uses it.
10101   unsigned BitNo;   // Bit # of CR6.
10102   bool InvertBit;   // Invert result?
10103   switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
10104   default:  // Can't happen, don't crash on invalid number though.
10105   case 0:   // Return the value of the EQ bit of CR6.
10106     BitNo = 0; InvertBit = false;
10107     break;
10108   case 1:   // Return the inverted value of the EQ bit of CR6.
10109     BitNo = 0; InvertBit = true;
10110     break;
10111   case 2:   // Return the value of the LT bit of CR6.
10112     BitNo = 2; InvertBit = false;
10113     break;
10114   case 3:   // Return the inverted value of the LT bit of CR6.
10115     BitNo = 2; InvertBit = true;
10116     break;
10117   }
10118 
10119   // Shift the bit into the low position.
10120   Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
10121                       DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
10122   // Isolate the bit.
10123   Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
10124                       DAG.getConstant(1, dl, MVT::i32));
10125 
10126   // If we are supposed to, toggle the bit.
10127   if (InvertBit)
10128     Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
10129                         DAG.getConstant(1, dl, MVT::i32));
10130   return Flags;
10131 }
10132 
10133 SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
10134                                                SelectionDAG &DAG) const {
10135   // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to
10136   // the beginning of the argument list.
10137   int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
10138   SDLoc DL(Op);
10139   switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) {
10140   case Intrinsic::ppc_cfence: {
10141     assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
10142     assert(Subtarget.isPPC64() && "Only 64-bit is supported for now.");
10143     return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other,
10144                                       DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
10145                                                   Op.getOperand(ArgStart + 1)),
10146                                       Op.getOperand(0)),
10147                    0);
10148   }
10149   default:
10150     break;
10151   }
10152   return SDValue();
10153 }
10154 
10155 // Lower scalar BSWAP64 to xxbrd.
10156 SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const {
10157   SDLoc dl(Op);
10158   // MTVSRDD
10159   Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0),
10160                    Op.getOperand(0));
10161   // XXBRD
10162   Op = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Op);
10163   // MFVSRD
10164   int VectorIndex = 0;
10165   if (Subtarget.isLittleEndian())
10166     VectorIndex = 1;
10167   Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op,
10168                    DAG.getTargetConstant(VectorIndex, dl, MVT::i32));
10169   return Op;
10170 }
10171 
10172 // ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be
10173 // compared to a value that is atomically loaded (atomic loads zero-extend).
10174 SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op,
10175                                                 SelectionDAG &DAG) const {
10176   assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP &&
10177          "Expecting an atomic compare-and-swap here.");
10178   SDLoc dl(Op);
10179   auto *AtomicNode = cast<AtomicSDNode>(Op.getNode());
10180   EVT MemVT = AtomicNode->getMemoryVT();
10181   if (MemVT.getSizeInBits() >= 32)
10182     return Op;
10183 
10184   SDValue CmpOp = Op.getOperand(2);
10185   // If this is already correctly zero-extended, leave it alone.
10186   auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits());
10187   if (DAG.MaskedValueIsZero(CmpOp, HighBits))
10188     return Op;
10189 
10190   // Clear the high bits of the compare operand.
10191   unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1;
10192   SDValue NewCmpOp =
10193     DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp,
10194                 DAG.getConstant(MaskVal, dl, MVT::i32));
10195 
10196   // Replace the existing compare operand with the properly zero-extended one.
10197   SmallVector<SDValue, 4> Ops;
10198   for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
10199     Ops.push_back(AtomicNode->getOperand(i));
10200   Ops[2] = NewCmpOp;
10201   MachineMemOperand *MMO = AtomicNode->getMemOperand();
10202   SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other);
10203   auto NodeTy =
10204     (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16;
10205   return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO);
10206 }
10207 
10208 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
10209                                                  SelectionDAG &DAG) const {
10210   SDLoc dl(Op);
10211   // Create a stack slot that is 16-byte aligned.
10212   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
10213   int FrameIdx = MFI.CreateStackObject(16, Align(16), false);
10214   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10215   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
10216 
10217   // Store the input value into Value#0 of the stack slot.
10218   SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
10219                                MachinePointerInfo());
10220   // Load it out.
10221   return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
10222 }
10223 
10224 SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
10225                                                   SelectionDAG &DAG) const {
10226   assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
10227          "Should only be called for ISD::INSERT_VECTOR_ELT");
10228 
10229   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));
10230   // We have legal lowering for constant indices but not for variable ones.
10231   if (!C)
10232     return SDValue();
10233 
10234   EVT VT = Op.getValueType();
10235   SDLoc dl(Op);
10236   SDValue V1 = Op.getOperand(0);
10237   SDValue V2 = Op.getOperand(1);
10238   // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types.
10239   if (VT == MVT::v8i16 || VT == MVT::v16i8) {
10240     SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2);
10241     unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8;
10242     unsigned InsertAtElement = C->getZExtValue();
10243     unsigned InsertAtByte = InsertAtElement * BytesInEachElement;
10244     if (Subtarget.isLittleEndian()) {
10245       InsertAtByte = (16 - BytesInEachElement) - InsertAtByte;
10246     }
10247     return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz,
10248                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
10249   }
10250   return Op;
10251 }
10252 
10253 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
10254   SDLoc dl(Op);
10255   if (Op.getValueType() == MVT::v4i32) {
10256     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
10257 
10258     SDValue Zero = getCanonicalConstSplat(0, 1, MVT::v4i32, DAG, dl);
10259     // +16 as shift amt.
10260     SDValue Neg16 = getCanonicalConstSplat(-16, 4, MVT::v4i32, DAG, dl);
10261     SDValue RHSSwap =   // = vrlw RHS, 16
10262       BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
10263 
10264     // Shrinkify inputs to v8i16.
10265     LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
10266     RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
10267     RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);
10268 
10269     // Low parts multiplied together, generating 32-bit results (we ignore the
10270     // top parts).
10271     SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
10272                                         LHS, RHS, DAG, dl, MVT::v4i32);
10273 
10274     SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
10275                                       LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
10276     // Shift the high parts up 16 bits.
10277     HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
10278                               Neg16, DAG, dl);
10279     return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
10280   } else if (Op.getValueType() == MVT::v16i8) {
10281     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
10282     bool isLittleEndian = Subtarget.isLittleEndian();
10283 
10284     // Multiply the even 8-bit parts, producing 16-bit sums.
10285     SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
10286                                            LHS, RHS, DAG, dl, MVT::v8i16);
10287     EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);
10288 
10289     // Multiply the odd 8-bit parts, producing 16-bit sums.
10290     SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
10291                                           LHS, RHS, DAG, dl, MVT::v8i16);
10292     OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);
10293 
10294     // Merge the results together.  Because vmuleub and vmuloub are
10295     // instructions with a big-endian bias, we must reverse the
10296     // element numbering and reverse the meaning of "odd" and "even"
10297     // when generating little endian code.
10298     int Ops[16];
10299     for (unsigned i = 0; i != 8; ++i) {
10300       if (isLittleEndian) {
10301         Ops[i*2  ] = 2*i;
10302         Ops[i*2+1] = 2*i+16;
10303       } else {
10304         Ops[i*2  ] = 2*i+1;
10305         Ops[i*2+1] = 2*i+1+16;
10306       }
10307     }
10308     if (isLittleEndian)
10309       return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
10310     else
10311       return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
10312   } else {
10313     llvm_unreachable("Unknown mul to lower!");
10314   }
10315 }
10316 
10317 SDValue PPCTargetLowering::LowerABS(SDValue Op, SelectionDAG &DAG) const {
10318 
10319   assert(Op.getOpcode() == ISD::ABS && "Should only be called for ISD::ABS");
10320 
10321   EVT VT = Op.getValueType();
10322   assert(VT.isVector() &&
10323          "Only set vector abs as custom, scalar abs shouldn't reach here!");
10324   assert((VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
10325           VT == MVT::v16i8) &&
10326          "Unexpected vector element type!");
10327   assert((VT != MVT::v2i64 || Subtarget.hasP8Altivec()) &&
10328          "Current subtarget doesn't support smax v2i64!");
10329 
10330   // For vector abs, it can be lowered to:
10331   // abs x
10332   // ==>
10333   // y = -x
10334   // smax(x, y)
10335 
10336   SDLoc dl(Op);
10337   SDValue X = Op.getOperand(0);
10338   SDValue Zero = DAG.getConstant(0, dl, VT);
10339   SDValue Y = DAG.getNode(ISD::SUB, dl, VT, Zero, X);
10340 
10341   // SMAX patch https://reviews.llvm.org/D47332
10342   // hasn't landed yet, so use intrinsic first here.
10343   // TODO: Should use SMAX directly once SMAX patch landed
10344   Intrinsic::ID BifID = Intrinsic::ppc_altivec_vmaxsw;
10345   if (VT == MVT::v2i64)
10346     BifID = Intrinsic::ppc_altivec_vmaxsd;
10347   else if (VT == MVT::v8i16)
10348     BifID = Intrinsic::ppc_altivec_vmaxsh;
10349   else if (VT == MVT::v16i8)
10350     BifID = Intrinsic::ppc_altivec_vmaxsb;
10351 
10352   return BuildIntrinsicOp(BifID, X, Y, DAG, dl, VT);
10353 }
10354 
10355 // Custom lowering for fpext vf32 to v2f64
10356 SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
10357 
10358   assert(Op.getOpcode() == ISD::FP_EXTEND &&
10359          "Should only be called for ISD::FP_EXTEND");
10360 
10361   // FIXME: handle extends from half precision float vectors on P9.
10362   // We only want to custom lower an extend from v2f32 to v2f64.
10363   if (Op.getValueType() != MVT::v2f64 ||
10364       Op.getOperand(0).getValueType() != MVT::v2f32)
10365     return SDValue();
10366 
10367   SDLoc dl(Op);
10368   SDValue Op0 = Op.getOperand(0);
10369 
10370   switch (Op0.getOpcode()) {
10371   default:
10372     return SDValue();
10373   case ISD::EXTRACT_SUBVECTOR: {
10374     assert(Op0.getNumOperands() == 2 &&
10375            isa<ConstantSDNode>(Op0->getOperand(1)) &&
10376            "Node should have 2 operands with second one being a constant!");
10377 
10378     if (Op0.getOperand(0).getValueType() != MVT::v4f32)
10379       return SDValue();
10380 
10381     // Custom lower is only done for high or low doubleword.
10382     int Idx = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
10383     if (Idx % 2 != 0)
10384       return SDValue();
10385 
10386     // Since input is v4f32, at this point Idx is either 0 or 2.
10387     // Shift to get the doubleword position we want.
10388     int DWord = Idx >> 1;
10389 
10390     // High and low word positions are different on little endian.
10391     if (Subtarget.isLittleEndian())
10392       DWord ^= 0x1;
10393 
10394     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64,
10395                        Op0.getOperand(0), DAG.getConstant(DWord, dl, MVT::i32));
10396   }
10397   case ISD::FADD:
10398   case ISD::FMUL:
10399   case ISD::FSUB: {
10400     SDValue NewLoad[2];
10401     for (unsigned i = 0, ie = Op0.getNumOperands(); i != ie; ++i) {
10402       // Ensure both input are loads.
10403       SDValue LdOp = Op0.getOperand(i);
10404       if (LdOp.getOpcode() != ISD::LOAD)
10405         return SDValue();
10406       // Generate new load node.
10407       LoadSDNode *LD = cast<LoadSDNode>(LdOp);
10408       SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
10409       NewLoad[i] = DAG.getMemIntrinsicNode(
10410           PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
10411           LD->getMemoryVT(), LD->getMemOperand());
10412     }
10413     SDValue NewOp =
10414         DAG.getNode(Op0.getOpcode(), SDLoc(Op0), MVT::v4f32, NewLoad[0],
10415                     NewLoad[1], Op0.getNode()->getFlags());
10416     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewOp,
10417                        DAG.getConstant(0, dl, MVT::i32));
10418   }
10419   case ISD::LOAD: {
10420     LoadSDNode *LD = cast<LoadSDNode>(Op0);
10421     SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
10422     SDValue NewLd = DAG.getMemIntrinsicNode(
10423         PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
10424         LD->getMemoryVT(), LD->getMemOperand());
10425     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewLd,
10426                        DAG.getConstant(0, dl, MVT::i32));
10427   }
10428   }
10429   llvm_unreachable("ERROR:Should return for all cases within swtich.");
10430 }
10431 
10432 /// LowerOperation - Provide custom lowering hooks for some operations.
10433 ///
10434 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
10435   switch (Op.getOpcode()) {
10436   default: llvm_unreachable("Wasn't expecting to be able to lower this!");
10437   case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
10438   case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
10439   case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
10440   case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
10441   case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
10442   case ISD::SETCC:              return LowerSETCC(Op, DAG);
10443   case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
10444   case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
10445 
10446   // Variable argument lowering.
10447   case ISD::VASTART:            return LowerVASTART(Op, DAG);
10448   case ISD::VAARG:              return LowerVAARG(Op, DAG);
10449   case ISD::VACOPY:             return LowerVACOPY(Op, DAG);
10450 
10451   case ISD::STACKRESTORE:       return LowerSTACKRESTORE(Op, DAG);
10452   case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
10453   case ISD::GET_DYNAMIC_AREA_OFFSET:
10454     return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
10455 
10456   // Exception handling lowering.
10457   case ISD::EH_DWARF_CFA:       return LowerEH_DWARF_CFA(Op, DAG);
10458   case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
10459   case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);
10460 
10461   case ISD::LOAD:               return LowerLOAD(Op, DAG);
10462   case ISD::STORE:              return LowerSTORE(Op, DAG);
10463   case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
10464   case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
10465   case ISD::FP_TO_UINT:
10466   case ISD::FP_TO_SINT:         return LowerFP_TO_INT(Op, DAG, SDLoc(Op));
10467   case ISD::UINT_TO_FP:
10468   case ISD::SINT_TO_FP:         return LowerINT_TO_FP(Op, DAG);
10469   case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);
10470 
10471   // Lower 64-bit shifts.
10472   case ISD::SHL_PARTS:          return LowerSHL_PARTS(Op, DAG);
10473   case ISD::SRL_PARTS:          return LowerSRL_PARTS(Op, DAG);
10474   case ISD::SRA_PARTS:          return LowerSRA_PARTS(Op, DAG);
10475 
10476   case ISD::FSHL:               return LowerFunnelShift(Op, DAG);
10477   case ISD::FSHR:               return LowerFunnelShift(Op, DAG);
10478 
10479   // Vector-related lowering.
10480   case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
10481   case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
10482   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
10483   case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
10484   case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
10485   case ISD::MUL:                return LowerMUL(Op, DAG);
10486   case ISD::ABS:                return LowerABS(Op, DAG);
10487   case ISD::FP_EXTEND:          return LowerFP_EXTEND(Op, DAG);
10488   case ISD::ROTL:               return LowerROTL(Op, DAG);
10489 
10490   // For counter-based loop handling.
10491   case ISD::INTRINSIC_W_CHAIN:  return SDValue();
10492 
10493   case ISD::BITCAST:            return LowerBITCAST(Op, DAG);
10494 
10495   // Frame & Return address.
10496   case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
10497   case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
10498 
10499   case ISD::INTRINSIC_VOID:
10500     return LowerINTRINSIC_VOID(Op, DAG);
10501   case ISD::BSWAP:
10502     return LowerBSWAP(Op, DAG);
10503   case ISD::ATOMIC_CMP_SWAP:
10504     return LowerATOMIC_CMP_SWAP(Op, DAG);
10505   }
10506 }
10507 
10508 void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
10509                                            SmallVectorImpl<SDValue>&Results,
10510                                            SelectionDAG &DAG) const {
10511   SDLoc dl(N);
10512   switch (N->getOpcode()) {
10513   default:
10514     llvm_unreachable("Do not know how to custom type legalize this operation!");
10515   case ISD::READCYCLECOUNTER: {
10516     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
10517     SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0));
10518 
10519     Results.push_back(
10520         DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, RTB, RTB.getValue(1)));
10521     Results.push_back(RTB.getValue(2));
10522     break;
10523   }
10524   case ISD::INTRINSIC_W_CHAIN: {
10525     if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
10526         Intrinsic::loop_decrement)
10527       break;
10528 
10529     assert(N->getValueType(0) == MVT::i1 &&
10530            "Unexpected result type for CTR decrement intrinsic");
10531     EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
10532                                  N->getValueType(0));
10533     SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
10534     SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
10535                                  N->getOperand(1));
10536 
10537     Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt));
10538     Results.push_back(NewInt.getValue(1));
10539     break;
10540   }
10541   case ISD::VAARG: {
10542     if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
10543       return;
10544 
10545     EVT VT = N->getValueType(0);
10546 
10547     if (VT == MVT::i64) {
10548       SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);
10549 
10550       Results.push_back(NewNode);
10551       Results.push_back(NewNode.getValue(1));
10552     }
10553     return;
10554   }
10555   case ISD::FP_TO_SINT:
10556   case ISD::FP_TO_UINT:
10557     // LowerFP_TO_INT() can only handle f32 and f64.
10558     if (N->getOperand(0).getValueType() == MVT::ppcf128)
10559       return;
10560     Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
10561     return;
10562   case ISD::TRUNCATE: {
10563     EVT TrgVT = N->getValueType(0);
10564     EVT OpVT = N->getOperand(0).getValueType();
10565     if (TrgVT.isVector() &&
10566         isOperationCustom(N->getOpcode(), TrgVT) &&
10567         OpVT.getSizeInBits() <= 128 &&
10568         isPowerOf2_32(OpVT.getVectorElementType().getSizeInBits()))
10569       Results.push_back(LowerTRUNCATEVector(SDValue(N, 0), DAG));
10570     return;
10571   }
10572   case ISD::BITCAST:
10573     // Don't handle bitcast here.
10574     return;
10575   case ISD::FP_EXTEND:
10576     SDValue Lowered = LowerFP_EXTEND(SDValue(N, 0), DAG);
10577     if (Lowered)
10578       Results.push_back(Lowered);
10579     return;
10580   }
10581 }
10582 
10583 //===----------------------------------------------------------------------===//
10584 //  Other Lowering Code
10585 //===----------------------------------------------------------------------===//
10586 
10587 static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
10588   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
10589   Function *Func = Intrinsic::getDeclaration(M, Id);
10590   return Builder.CreateCall(Func, {});
10591 }
10592 
10593 // The mappings for emitLeading/TrailingFence is taken from
10594 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
10595 Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
10596                                                  Instruction *Inst,
10597                                                  AtomicOrdering Ord) const {
10598   if (Ord == AtomicOrdering::SequentiallyConsistent)
10599     return callIntrinsic(Builder, Intrinsic::ppc_sync);
10600   if (isReleaseOrStronger(Ord))
10601     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
10602   return nullptr;
10603 }
10604 
10605 Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
10606                                                   Instruction *Inst,
10607                                                   AtomicOrdering Ord) const {
10608   if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
10609     // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
10610     // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
10611     // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
10612     if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
10613       return Builder.CreateCall(
10614           Intrinsic::getDeclaration(
10615               Builder.GetInsertBlock()->getParent()->getParent(),
10616               Intrinsic::ppc_cfence, {Inst->getType()}),
10617           {Inst});
10618     // FIXME: Can use isync for rmw operation.
10619     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
10620   }
10621   return nullptr;
10622 }
10623 
10624 MachineBasicBlock *
10625 PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
10626                                     unsigned AtomicSize,
10627                                     unsigned BinOpcode,
10628                                     unsigned CmpOpcode,
10629                                     unsigned CmpPred) const {
10630   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
10631   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10632 
10633   auto LoadMnemonic = PPC::LDARX;
10634   auto StoreMnemonic = PPC::STDCX;
10635   switch (AtomicSize) {
10636   default:
10637     llvm_unreachable("Unexpected size of atomic entity");
10638   case 1:
10639     LoadMnemonic = PPC::LBARX;
10640     StoreMnemonic = PPC::STBCX;
10641     assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
10642     break;
10643   case 2:
10644     LoadMnemonic = PPC::LHARX;
10645     StoreMnemonic = PPC::STHCX;
10646     assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
10647     break;
10648   case 4:
10649     LoadMnemonic = PPC::LWARX;
10650     StoreMnemonic = PPC::STWCX;
10651     break;
10652   case 8:
10653     LoadMnemonic = PPC::LDARX;
10654     StoreMnemonic = PPC::STDCX;
10655     break;
10656   }
10657 
10658   const BasicBlock *LLVM_BB = BB->getBasicBlock();
10659   MachineFunction *F = BB->getParent();
10660   MachineFunction::iterator It = ++BB->getIterator();
10661 
10662   Register dest = MI.getOperand(0).getReg();
10663   Register ptrA = MI.getOperand(1).getReg();
10664   Register ptrB = MI.getOperand(2).getReg();
10665   Register incr = MI.getOperand(3).getReg();
10666   DebugLoc dl = MI.getDebugLoc();
10667 
10668   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
10669   MachineBasicBlock *loop2MBB =
10670     CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
10671   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
10672   F->insert(It, loopMBB);
10673   if (CmpOpcode)
10674     F->insert(It, loop2MBB);
10675   F->insert(It, exitMBB);
10676   exitMBB->splice(exitMBB->begin(), BB,
10677                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
10678   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
10679 
10680   MachineRegisterInfo &RegInfo = F->getRegInfo();
10681   Register TmpReg = (!BinOpcode) ? incr :
10682     RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass
10683                                            : &PPC::GPRCRegClass);
10684 
10685   //  thisMBB:
10686   //   ...
10687   //   fallthrough --> loopMBB
10688   BB->addSuccessor(loopMBB);
10689 
10690   //  loopMBB:
10691   //   l[wd]arx dest, ptr
10692   //   add r0, dest, incr
10693   //   st[wd]cx. r0, ptr
10694   //   bne- loopMBB
10695   //   fallthrough --> exitMBB
10696 
10697   // For max/min...
10698   //  loopMBB:
10699   //   l[wd]arx dest, ptr
10700   //   cmpl?[wd] incr, dest
10701   //   bgt exitMBB
10702   //  loop2MBB:
10703   //   st[wd]cx. dest, ptr
10704   //   bne- loopMBB
10705   //   fallthrough --> exitMBB
10706 
10707   BB = loopMBB;
10708   BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
10709     .addReg(ptrA).addReg(ptrB);
10710   if (BinOpcode)
10711     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
10712   if (CmpOpcode) {
10713     // Signed comparisons of byte or halfword values must be sign-extended.
10714     if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
10715       Register ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
10716       BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
10717               ExtReg).addReg(dest);
10718       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
10719         .addReg(incr).addReg(ExtReg);
10720     } else
10721       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
10722         .addReg(incr).addReg(dest);
10723 
10724     BuildMI(BB, dl, TII->get(PPC::BCC))
10725       .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
10726     BB->addSuccessor(loop2MBB);
10727     BB->addSuccessor(exitMBB);
10728     BB = loop2MBB;
10729   }
10730   BuildMI(BB, dl, TII->get(StoreMnemonic))
10731     .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
10732   BuildMI(BB, dl, TII->get(PPC::BCC))
10733     .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
10734   BB->addSuccessor(loopMBB);
10735   BB->addSuccessor(exitMBB);
10736 
10737   //  exitMBB:
10738   //   ...
10739   BB = exitMBB;
10740   return BB;
10741 }
10742 
10743 MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary(
10744     MachineInstr &MI, MachineBasicBlock *BB,
10745     bool is8bit, // operation
10746     unsigned BinOpcode, unsigned CmpOpcode, unsigned CmpPred) const {
10747   // If we support part-word atomic mnemonics, just use them
10748   if (Subtarget.hasPartwordAtomics())
10749     return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, CmpOpcode,
10750                             CmpPred);
10751 
10752   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
10753   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10754   // In 64 bit mode we have to use 64 bits for addresses, even though the
10755   // lwarx/stwcx are 32 bits.  With the 32-bit atomics we can use address
10756   // registers without caring whether they're 32 or 64, but here we're
10757   // doing actual arithmetic on the addresses.
10758   bool is64bit = Subtarget.isPPC64();
10759   bool isLittleEndian = Subtarget.isLittleEndian();
10760   unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
10761 
10762   const BasicBlock *LLVM_BB = BB->getBasicBlock();
10763   MachineFunction *F = BB->getParent();
10764   MachineFunction::iterator It = ++BB->getIterator();
10765 
10766   Register dest = MI.getOperand(0).getReg();
10767   Register ptrA = MI.getOperand(1).getReg();
10768   Register ptrB = MI.getOperand(2).getReg();
10769   Register incr = MI.getOperand(3).getReg();
10770   DebugLoc dl = MI.getDebugLoc();
10771 
10772   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
10773   MachineBasicBlock *loop2MBB =
10774       CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
10775   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
10776   F->insert(It, loopMBB);
10777   if (CmpOpcode)
10778     F->insert(It, loop2MBB);
10779   F->insert(It, exitMBB);
10780   exitMBB->splice(exitMBB->begin(), BB,
10781                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
10782   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
10783 
10784   MachineRegisterInfo &RegInfo = F->getRegInfo();
10785   const TargetRegisterClass *RC =
10786       is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
10787   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
10788 
10789   Register PtrReg = RegInfo.createVirtualRegister(RC);
10790   Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
10791   Register ShiftReg =
10792       isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
10793   Register Incr2Reg = RegInfo.createVirtualRegister(GPRC);
10794   Register MaskReg = RegInfo.createVirtualRegister(GPRC);
10795   Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
10796   Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
10797   Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
10798   Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
10799   Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
10800   Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
10801   Register Ptr1Reg;
10802   Register TmpReg =
10803       (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC);
10804 
10805   //  thisMBB:
10806   //   ...
10807   //   fallthrough --> loopMBB
10808   BB->addSuccessor(loopMBB);
10809 
10810   // The 4-byte load must be aligned, while a char or short may be
10811   // anywhere in the word.  Hence all this nasty bookkeeping code.
10812   //   add ptr1, ptrA, ptrB [copy if ptrA==0]
10813   //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
10814   //   xori shift, shift1, 24 [16]
10815   //   rlwinm ptr, ptr1, 0, 0, 29
10816   //   slw incr2, incr, shift
10817   //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
10818   //   slw mask, mask2, shift
10819   //  loopMBB:
10820   //   lwarx tmpDest, ptr
10821   //   add tmp, tmpDest, incr2
10822   //   andc tmp2, tmpDest, mask
10823   //   and tmp3, tmp, mask
10824   //   or tmp4, tmp3, tmp2
10825   //   stwcx. tmp4, ptr
10826   //   bne- loopMBB
10827   //   fallthrough --> exitMBB
10828   //   srw dest, tmpDest, shift
10829   if (ptrA != ZeroReg) {
10830     Ptr1Reg = RegInfo.createVirtualRegister(RC);
10831     BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
10832         .addReg(ptrA)
10833         .addReg(ptrB);
10834   } else {
10835     Ptr1Reg = ptrB;
10836   }
10837   // We need use 32-bit subregister to avoid mismatch register class in 64-bit
10838   // mode.
10839   BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
10840       .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
10841       .addImm(3)
10842       .addImm(27)
10843       .addImm(is8bit ? 28 : 27);
10844   if (!isLittleEndian)
10845     BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
10846         .addReg(Shift1Reg)
10847         .addImm(is8bit ? 24 : 16);
10848   if (is64bit)
10849     BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
10850         .addReg(Ptr1Reg)
10851         .addImm(0)
10852         .addImm(61);
10853   else
10854     BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
10855         .addReg(Ptr1Reg)
10856         .addImm(0)
10857         .addImm(0)
10858         .addImm(29);
10859   BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg).addReg(incr).addReg(ShiftReg);
10860   if (is8bit)
10861     BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
10862   else {
10863     BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
10864     BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
10865         .addReg(Mask3Reg)
10866         .addImm(65535);
10867   }
10868   BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
10869       .addReg(Mask2Reg)
10870       .addReg(ShiftReg);
10871 
10872   BB = loopMBB;
10873   BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
10874       .addReg(ZeroReg)
10875       .addReg(PtrReg);
10876   if (BinOpcode)
10877     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
10878         .addReg(Incr2Reg)
10879         .addReg(TmpDestReg);
10880   BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
10881       .addReg(TmpDestReg)
10882       .addReg(MaskReg);
10883   BuildMI(BB, dl, TII->get(PPC::AND), Tmp3Reg).addReg(TmpReg).addReg(MaskReg);
10884   if (CmpOpcode) {
10885     // For unsigned comparisons, we can directly compare the shifted values.
10886     // For signed comparisons we shift and sign extend.
10887     Register SReg = RegInfo.createVirtualRegister(GPRC);
10888     BuildMI(BB, dl, TII->get(PPC::AND), SReg)
10889         .addReg(TmpDestReg)
10890         .addReg(MaskReg);
10891     unsigned ValueReg = SReg;
10892     unsigned CmpReg = Incr2Reg;
10893     if (CmpOpcode == PPC::CMPW) {
10894       ValueReg = RegInfo.createVirtualRegister(GPRC);
10895       BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
10896           .addReg(SReg)
10897           .addReg(ShiftReg);
10898       Register ValueSReg = RegInfo.createVirtualRegister(GPRC);
10899       BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
10900           .addReg(ValueReg);
10901       ValueReg = ValueSReg;
10902       CmpReg = incr;
10903     }
10904     BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
10905         .addReg(CmpReg)
10906         .addReg(ValueReg);
10907     BuildMI(BB, dl, TII->get(PPC::BCC))
10908         .addImm(CmpPred)
10909         .addReg(PPC::CR0)
10910         .addMBB(exitMBB);
10911     BB->addSuccessor(loop2MBB);
10912     BB->addSuccessor(exitMBB);
10913     BB = loop2MBB;
10914   }
10915   BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg).addReg(Tmp3Reg).addReg(Tmp2Reg);
10916   BuildMI(BB, dl, TII->get(PPC::STWCX))
10917       .addReg(Tmp4Reg)
10918       .addReg(ZeroReg)
10919       .addReg(PtrReg);
10920   BuildMI(BB, dl, TII->get(PPC::BCC))
10921       .addImm(PPC::PRED_NE)
10922       .addReg(PPC::CR0)
10923       .addMBB(loopMBB);
10924   BB->addSuccessor(loopMBB);
10925   BB->addSuccessor(exitMBB);
10926 
10927   //  exitMBB:
10928   //   ...
10929   BB = exitMBB;
10930   BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
10931       .addReg(TmpDestReg)
10932       .addReg(ShiftReg);
10933   return BB;
10934 }
10935 
10936 llvm::MachineBasicBlock *
10937 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
10938                                     MachineBasicBlock *MBB) const {
10939   DebugLoc DL = MI.getDebugLoc();
10940   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10941   const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
10942 
10943   MachineFunction *MF = MBB->getParent();
10944   MachineRegisterInfo &MRI = MF->getRegInfo();
10945 
10946   const BasicBlock *BB = MBB->getBasicBlock();
10947   MachineFunction::iterator I = ++MBB->getIterator();
10948 
10949   Register DstReg = MI.getOperand(0).getReg();
10950   const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
10951   assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
10952   Register mainDstReg = MRI.createVirtualRegister(RC);
10953   Register restoreDstReg = MRI.createVirtualRegister(RC);
10954 
10955   MVT PVT = getPointerTy(MF->getDataLayout());
10956   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
10957          "Invalid Pointer Size!");
10958   // For v = setjmp(buf), we generate
10959   //
10960   // thisMBB:
10961   //  SjLjSetup mainMBB
10962   //  bl mainMBB
10963   //  v_restore = 1
10964   //  b sinkMBB
10965   //
10966   // mainMBB:
10967   //  buf[LabelOffset] = LR
10968   //  v_main = 0
10969   //
10970   // sinkMBB:
10971   //  v = phi(main, restore)
10972   //
10973 
10974   MachineBasicBlock *thisMBB = MBB;
10975   MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
10976   MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
10977   MF->insert(I, mainMBB);
10978   MF->insert(I, sinkMBB);
10979 
10980   MachineInstrBuilder MIB;
10981 
10982   // Transfer the remainder of BB and its successor edges to sinkMBB.
10983   sinkMBB->splice(sinkMBB->begin(), MBB,
10984                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
10985   sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
10986 
10987   // Note that the structure of the jmp_buf used here is not compatible
10988   // with that used by libc, and is not designed to be. Specifically, it
10989   // stores only those 'reserved' registers that LLVM does not otherwise
10990   // understand how to spill. Also, by convention, by the time this
10991   // intrinsic is called, Clang has already stored the frame address in the
10992   // first slot of the buffer and stack address in the third. Following the
10993   // X86 target code, we'll store the jump address in the second slot. We also
10994   // need to save the TOC pointer (R2) to handle jumps between shared
10995   // libraries, and that will be stored in the fourth slot. The thread
10996   // identifier (R13) is not affected.
10997 
10998   // thisMBB:
10999   const int64_t LabelOffset = 1 * PVT.getStoreSize();
11000   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
11001   const int64_t BPOffset    = 4 * PVT.getStoreSize();
11002 
11003   // Prepare IP either in reg.
11004   const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
11005   Register LabelReg = MRI.createVirtualRegister(PtrRC);
11006   Register BufReg = MI.getOperand(1).getReg();
11007 
11008   if (Subtarget.is64BitELFABI()) {
11009     setUsesTOCBasePtr(*MBB->getParent());
11010     MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
11011               .addReg(PPC::X2)
11012               .addImm(TOCOffset)
11013               .addReg(BufReg)
11014               .cloneMemRefs(MI);
11015   }
11016 
11017   // Naked functions never have a base pointer, and so we use r1. For all
11018   // other functions, this decision must be delayed until during PEI.
11019   unsigned BaseReg;
11020   if (MF->getFunction().hasFnAttribute(Attribute::Naked))
11021     BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
11022   else
11023     BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;
11024 
11025   MIB = BuildMI(*thisMBB, MI, DL,
11026                 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
11027             .addReg(BaseReg)
11028             .addImm(BPOffset)
11029             .addReg(BufReg)
11030             .cloneMemRefs(MI);
11031 
11032   // Setup
11033   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
11034   MIB.addRegMask(TRI->getNoPreservedMask());
11035 
11036   BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);
11037 
11038   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
11039           .addMBB(mainMBB);
11040   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);
11041 
11042   thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
11043   thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());
11044 
11045   // mainMBB:
11046   //  mainDstReg = 0
11047   MIB =
11048       BuildMI(mainMBB, DL,
11049               TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
11050 
11051   // Store IP
11052   if (Subtarget.isPPC64()) {
11053     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
11054             .addReg(LabelReg)
11055             .addImm(LabelOffset)
11056             .addReg(BufReg);
11057   } else {
11058     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
11059             .addReg(LabelReg)
11060             .addImm(LabelOffset)
11061             .addReg(BufReg);
11062   }
11063   MIB.cloneMemRefs(MI);
11064 
11065   BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
11066   mainMBB->addSuccessor(sinkMBB);
11067 
11068   // sinkMBB:
11069   BuildMI(*sinkMBB, sinkMBB->begin(), DL,
11070           TII->get(PPC::PHI), DstReg)
11071     .addReg(mainDstReg).addMBB(mainMBB)
11072     .addReg(restoreDstReg).addMBB(thisMBB);
11073 
11074   MI.eraseFromParent();
11075   return sinkMBB;
11076 }
11077 
11078 MachineBasicBlock *
11079 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
11080                                      MachineBasicBlock *MBB) const {
11081   DebugLoc DL = MI.getDebugLoc();
11082   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11083 
11084   MachineFunction *MF = MBB->getParent();
11085   MachineRegisterInfo &MRI = MF->getRegInfo();
11086 
11087   MVT PVT = getPointerTy(MF->getDataLayout());
11088   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
11089          "Invalid Pointer Size!");
11090 
11091   const TargetRegisterClass *RC =
11092     (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11093   Register Tmp = MRI.createVirtualRegister(RC);
11094   // Since FP is only updated here but NOT referenced, it's treated as GPR.
11095   unsigned FP  = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
11096   unsigned SP  = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
11097   unsigned BP =
11098       (PVT == MVT::i64)
11099           ? PPC::X30
11100           : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
11101                                                               : PPC::R30);
11102 
11103   MachineInstrBuilder MIB;
11104 
11105   const int64_t LabelOffset = 1 * PVT.getStoreSize();
11106   const int64_t SPOffset    = 2 * PVT.getStoreSize();
11107   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
11108   const int64_t BPOffset    = 4 * PVT.getStoreSize();
11109 
11110   Register BufReg = MI.getOperand(0).getReg();
11111 
11112   // Reload FP (the jumped-to function may not have had a
11113   // frame pointer, and if so, then its r31 will be restored
11114   // as necessary).
11115   if (PVT == MVT::i64) {
11116     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
11117             .addImm(0)
11118             .addReg(BufReg);
11119   } else {
11120     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
11121             .addImm(0)
11122             .addReg(BufReg);
11123   }
11124   MIB.cloneMemRefs(MI);
11125 
11126   // Reload IP
11127   if (PVT == MVT::i64) {
11128     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
11129             .addImm(LabelOffset)
11130             .addReg(BufReg);
11131   } else {
11132     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
11133             .addImm(LabelOffset)
11134             .addReg(BufReg);
11135   }
11136   MIB.cloneMemRefs(MI);
11137 
11138   // Reload SP
11139   if (PVT == MVT::i64) {
11140     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
11141             .addImm(SPOffset)
11142             .addReg(BufReg);
11143   } else {
11144     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
11145             .addImm(SPOffset)
11146             .addReg(BufReg);
11147   }
11148   MIB.cloneMemRefs(MI);
11149 
11150   // Reload BP
11151   if (PVT == MVT::i64) {
11152     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
11153             .addImm(BPOffset)
11154             .addReg(BufReg);
11155   } else {
11156     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
11157             .addImm(BPOffset)
11158             .addReg(BufReg);
11159   }
11160   MIB.cloneMemRefs(MI);
11161 
11162   // Reload TOC
11163   if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
11164     setUsesTOCBasePtr(*MBB->getParent());
11165     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
11166               .addImm(TOCOffset)
11167               .addReg(BufReg)
11168               .cloneMemRefs(MI);
11169   }
11170 
11171   // Jump
11172   BuildMI(*MBB, MI, DL,
11173           TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
11174   BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));
11175 
11176   MI.eraseFromParent();
11177   return MBB;
11178 }
11179 
11180 bool PPCTargetLowering::hasInlineStackProbe(MachineFunction &MF) const {
11181   // If the function specifically requests inline stack probes, emit them.
11182   if (MF.getFunction().hasFnAttribute("probe-stack"))
11183     return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
11184            "inline-asm";
11185   return false;
11186 }
11187 
11188 unsigned PPCTargetLowering::getStackProbeSize(MachineFunction &MF) const {
11189   const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
11190   unsigned StackAlign = TFI->getStackAlignment();
11191   assert(StackAlign >= 1 && isPowerOf2_32(StackAlign) &&
11192          "Unexpected stack alignment");
11193   // The default stack probe size is 4096 if the function has no
11194   // stack-probe-size attribute.
11195   unsigned StackProbeSize = 4096;
11196   const Function &Fn = MF.getFunction();
11197   if (Fn.hasFnAttribute("stack-probe-size"))
11198     Fn.getFnAttribute("stack-probe-size")
11199         .getValueAsString()
11200         .getAsInteger(0, StackProbeSize);
11201   // Round down to the stack alignment.
11202   StackProbeSize &= ~(StackAlign - 1);
11203   return StackProbeSize ? StackProbeSize : StackAlign;
11204 }
11205 
11206 // Lower dynamic stack allocation with probing. `emitProbedAlloca` is splitted
11207 // into three phases. In the first phase, it uses pseudo instruction
11208 // PREPARE_PROBED_ALLOCA to get the future result of actual FramePointer and
11209 // FinalStackPtr. In the second phase, it generates a loop for probing blocks.
11210 // At last, it uses pseudo instruction DYNAREAOFFSET to get the future result of
11211 // MaxCallFrameSize so that it can calculate correct data area pointer.
11212 MachineBasicBlock *
11213 PPCTargetLowering::emitProbedAlloca(MachineInstr &MI,
11214                                     MachineBasicBlock *MBB) const {
11215   const bool isPPC64 = Subtarget.isPPC64();
11216   MachineFunction *MF = MBB->getParent();
11217   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11218   DebugLoc DL = MI.getDebugLoc();
11219   const unsigned ProbeSize = getStackProbeSize(*MF);
11220   const BasicBlock *ProbedBB = MBB->getBasicBlock();
11221   MachineRegisterInfo &MRI = MF->getRegInfo();
11222   // The CFG of probing stack looks as
11223   //         +-----+
11224   //         | MBB |
11225   //         +--+--+
11226   //            |
11227   //       +----v----+
11228   //  +--->+ TestMBB +---+
11229   //  |    +----+----+   |
11230   //  |         |        |
11231   //  |   +-----v----+   |
11232   //  +---+ BlockMBB |   |
11233   //      +----------+   |
11234   //                     |
11235   //       +---------+   |
11236   //       | TailMBB +<--+
11237   //       +---------+
11238   // In MBB, calculate previous frame pointer and final stack pointer.
11239   // In TestMBB, test if sp is equal to final stack pointer, if so, jump to
11240   // TailMBB. In BlockMBB, update the sp atomically and jump back to TestMBB.
11241   // TailMBB is spliced via \p MI.
11242   MachineBasicBlock *TestMBB = MF->CreateMachineBasicBlock(ProbedBB);
11243   MachineBasicBlock *TailMBB = MF->CreateMachineBasicBlock(ProbedBB);
11244   MachineBasicBlock *BlockMBB = MF->CreateMachineBasicBlock(ProbedBB);
11245 
11246   MachineFunction::iterator MBBIter = ++MBB->getIterator();
11247   MF->insert(MBBIter, TestMBB);
11248   MF->insert(MBBIter, BlockMBB);
11249   MF->insert(MBBIter, TailMBB);
11250 
11251   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
11252   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
11253 
11254   Register DstReg = MI.getOperand(0).getReg();
11255   Register NegSizeReg = MI.getOperand(1).getReg();
11256   Register SPReg = isPPC64 ? PPC::X1 : PPC::R1;
11257   Register FinalStackPtr = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11258   Register FramePointer = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11259   Register ActualNegSizeReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11260 
11261   // Since value of NegSizeReg might be realigned in prologepilog, insert a
11262   // PREPARE_PROBED_ALLOCA pseudo instruction to get actual FramePointer and
11263   // NegSize.
11264   unsigned ProbeOpc;
11265   if (!MRI.hasOneNonDBGUse(NegSizeReg))
11266     ProbeOpc =
11267         isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_64 : PPC::PREPARE_PROBED_ALLOCA_32;
11268   else
11269     // By introducing PREPARE_PROBED_ALLOCA_NEGSIZE_OPT, ActualNegSizeReg
11270     // and NegSizeReg will be allocated in the same phyreg to avoid
11271     // redundant copy when NegSizeReg has only one use which is current MI and
11272     // will be replaced by PREPARE_PROBED_ALLOCA then.
11273     ProbeOpc = isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_64
11274                        : PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_32;
11275   BuildMI(*MBB, {MI}, DL, TII->get(ProbeOpc), FramePointer)
11276       .addDef(ActualNegSizeReg)
11277       .addReg(NegSizeReg)
11278       .add(MI.getOperand(2))
11279       .add(MI.getOperand(3));
11280 
11281   // Calculate final stack pointer, which equals to SP + ActualNegSize.
11282   BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4),
11283           FinalStackPtr)
11284       .addReg(SPReg)
11285       .addReg(ActualNegSizeReg);
11286 
11287   // Materialize a scratch register for update.
11288   int64_t NegProbeSize = -(int64_t)ProbeSize;
11289   assert(isInt<32>(NegProbeSize) && "Unhandled probe size!");
11290   Register ScratchReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11291   if (!isInt<16>(NegProbeSize)) {
11292     Register TempReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11293     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LIS8 : PPC::LIS), TempReg)
11294         .addImm(NegProbeSize >> 16);
11295     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ORI8 : PPC::ORI),
11296             ScratchReg)
11297         .addReg(TempReg)
11298         .addImm(NegProbeSize & 0xFFFF);
11299   } else
11300     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LI8 : PPC::LI), ScratchReg)
11301         .addImm(NegProbeSize);
11302 
11303   {
11304     // Probing leading residual part.
11305     Register Div = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11306     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::DIVD : PPC::DIVW), Div)
11307         .addReg(ActualNegSizeReg)
11308         .addReg(ScratchReg);
11309     Register Mul = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11310     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::MULLD : PPC::MULLW), Mul)
11311         .addReg(Div)
11312         .addReg(ScratchReg);
11313     Register NegMod = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11314     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::SUBF8 : PPC::SUBF), NegMod)
11315         .addReg(Mul)
11316         .addReg(ActualNegSizeReg);
11317     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
11318         .addReg(FramePointer)
11319         .addReg(SPReg)
11320         .addReg(NegMod);
11321   }
11322 
11323   {
11324     // Remaining part should be multiple of ProbeSize.
11325     Register CmpResult = MRI.createVirtualRegister(&PPC::CRRCRegClass);
11326     BuildMI(TestMBB, DL, TII->get(isPPC64 ? PPC::CMPD : PPC::CMPW), CmpResult)
11327         .addReg(SPReg)
11328         .addReg(FinalStackPtr);
11329     BuildMI(TestMBB, DL, TII->get(PPC::BCC))
11330         .addImm(PPC::PRED_EQ)
11331         .addReg(CmpResult)
11332         .addMBB(TailMBB);
11333     TestMBB->addSuccessor(BlockMBB);
11334     TestMBB->addSuccessor(TailMBB);
11335   }
11336 
11337   {
11338     // Touch the block.
11339     // |P...|P...|P...
11340     BuildMI(BlockMBB, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
11341         .addReg(FramePointer)
11342         .addReg(SPReg)
11343         .addReg(ScratchReg);
11344     BuildMI(BlockMBB, DL, TII->get(PPC::B)).addMBB(TestMBB);
11345     BlockMBB->addSuccessor(TestMBB);
11346   }
11347 
11348   // Calculation of MaxCallFrameSize is deferred to prologepilog, use
11349   // DYNAREAOFFSET pseudo instruction to get the future result.
11350   Register MaxCallFrameSizeReg =
11351       MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11352   BuildMI(TailMBB, DL,
11353           TII->get(isPPC64 ? PPC::DYNAREAOFFSET8 : PPC::DYNAREAOFFSET),
11354           MaxCallFrameSizeReg)
11355       .add(MI.getOperand(2))
11356       .add(MI.getOperand(3));
11357   BuildMI(TailMBB, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4), DstReg)
11358       .addReg(SPReg)
11359       .addReg(MaxCallFrameSizeReg);
11360 
11361   // Splice instructions after MI to TailMBB.
11362   TailMBB->splice(TailMBB->end(), MBB,
11363                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
11364   TailMBB->transferSuccessorsAndUpdatePHIs(MBB);
11365   MBB->addSuccessor(TestMBB);
11366 
11367   // Delete the pseudo instruction.
11368   MI.eraseFromParent();
11369 
11370   ++NumDynamicAllocaProbed;
11371   return TailMBB;
11372 }
11373 
11374 MachineBasicBlock *
11375 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
11376                                                MachineBasicBlock *BB) const {
11377   if (MI.getOpcode() == TargetOpcode::STACKMAP ||
11378       MI.getOpcode() == TargetOpcode::PATCHPOINT) {
11379     if (Subtarget.is64BitELFABI() &&
11380         MI.getOpcode() == TargetOpcode::PATCHPOINT &&
11381         !Subtarget.isUsingPCRelativeCalls()) {
11382       // Call lowering should have added an r2 operand to indicate a dependence
11383       // on the TOC base pointer value. It can't however, because there is no
11384       // way to mark the dependence as implicit there, and so the stackmap code
11385       // will confuse it with a regular operand. Instead, add the dependence
11386       // here.
11387       MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
11388     }
11389 
11390     return emitPatchPoint(MI, BB);
11391   }
11392 
11393   if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
11394       MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
11395     return emitEHSjLjSetJmp(MI, BB);
11396   } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
11397              MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
11398     return emitEHSjLjLongJmp(MI, BB);
11399   }
11400 
11401   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11402 
11403   // To "insert" these instructions we actually have to insert their
11404   // control-flow patterns.
11405   const BasicBlock *LLVM_BB = BB->getBasicBlock();
11406   MachineFunction::iterator It = ++BB->getIterator();
11407 
11408   MachineFunction *F = BB->getParent();
11409 
11410   if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
11411       MI.getOpcode() == PPC::SELECT_CC_I8 || MI.getOpcode() == PPC::SELECT_I4 ||
11412       MI.getOpcode() == PPC::SELECT_I8) {
11413     SmallVector<MachineOperand, 2> Cond;
11414     if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
11415         MI.getOpcode() == PPC::SELECT_CC_I8)
11416       Cond.push_back(MI.getOperand(4));
11417     else
11418       Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
11419     Cond.push_back(MI.getOperand(1));
11420 
11421     DebugLoc dl = MI.getDebugLoc();
11422     TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
11423                       MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
11424   } else if (MI.getOpcode() == PPC::SELECT_CC_F4 ||
11425              MI.getOpcode() == PPC::SELECT_CC_F8 ||
11426              MI.getOpcode() == PPC::SELECT_CC_F16 ||
11427              MI.getOpcode() == PPC::SELECT_CC_VRRC ||
11428              MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
11429              MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
11430              MI.getOpcode() == PPC::SELECT_CC_VSRC ||
11431              MI.getOpcode() == PPC::SELECT_CC_SPE4 ||
11432              MI.getOpcode() == PPC::SELECT_CC_SPE ||
11433              MI.getOpcode() == PPC::SELECT_F4 ||
11434              MI.getOpcode() == PPC::SELECT_F8 ||
11435              MI.getOpcode() == PPC::SELECT_F16 ||
11436              MI.getOpcode() == PPC::SELECT_SPE ||
11437              MI.getOpcode() == PPC::SELECT_SPE4 ||
11438              MI.getOpcode() == PPC::SELECT_VRRC ||
11439              MI.getOpcode() == PPC::SELECT_VSFRC ||
11440              MI.getOpcode() == PPC::SELECT_VSSRC ||
11441              MI.getOpcode() == PPC::SELECT_VSRC) {
11442     // The incoming instruction knows the destination vreg to set, the
11443     // condition code register to branch on, the true/false values to
11444     // select between, and a branch opcode to use.
11445 
11446     //  thisMBB:
11447     //  ...
11448     //   TrueVal = ...
11449     //   cmpTY ccX, r1, r2
11450     //   bCC copy1MBB
11451     //   fallthrough --> copy0MBB
11452     MachineBasicBlock *thisMBB = BB;
11453     MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
11454     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
11455     DebugLoc dl = MI.getDebugLoc();
11456     F->insert(It, copy0MBB);
11457     F->insert(It, sinkMBB);
11458 
11459     // Transfer the remainder of BB and its successor edges to sinkMBB.
11460     sinkMBB->splice(sinkMBB->begin(), BB,
11461                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11462     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
11463 
11464     // Next, add the true and fallthrough blocks as its successors.
11465     BB->addSuccessor(copy0MBB);
11466     BB->addSuccessor(sinkMBB);
11467 
11468     if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 ||
11469         MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 ||
11470         MI.getOpcode() == PPC::SELECT_F16 ||
11471         MI.getOpcode() == PPC::SELECT_SPE4 ||
11472         MI.getOpcode() == PPC::SELECT_SPE ||
11473         MI.getOpcode() == PPC::SELECT_VRRC ||
11474         MI.getOpcode() == PPC::SELECT_VSFRC ||
11475         MI.getOpcode() == PPC::SELECT_VSSRC ||
11476         MI.getOpcode() == PPC::SELECT_VSRC) {
11477       BuildMI(BB, dl, TII->get(PPC::BC))
11478           .addReg(MI.getOperand(1).getReg())
11479           .addMBB(sinkMBB);
11480     } else {
11481       unsigned SelectPred = MI.getOperand(4).getImm();
11482       BuildMI(BB, dl, TII->get(PPC::BCC))
11483           .addImm(SelectPred)
11484           .addReg(MI.getOperand(1).getReg())
11485           .addMBB(sinkMBB);
11486     }
11487 
11488     //  copy0MBB:
11489     //   %FalseValue = ...
11490     //   # fallthrough to sinkMBB
11491     BB = copy0MBB;
11492 
11493     // Update machine-CFG edges
11494     BB->addSuccessor(sinkMBB);
11495 
11496     //  sinkMBB:
11497     //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
11498     //  ...
11499     BB = sinkMBB;
11500     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg())
11501         .addReg(MI.getOperand(3).getReg())
11502         .addMBB(copy0MBB)
11503         .addReg(MI.getOperand(2).getReg())
11504         .addMBB(thisMBB);
11505   } else if (MI.getOpcode() == PPC::ReadTB) {
11506     // To read the 64-bit time-base register on a 32-bit target, we read the
11507     // two halves. Should the counter have wrapped while it was being read, we
11508     // need to try again.
11509     // ...
11510     // readLoop:
11511     // mfspr Rx,TBU # load from TBU
11512     // mfspr Ry,TB  # load from TB
11513     // mfspr Rz,TBU # load from TBU
11514     // cmpw crX,Rx,Rz # check if 'old'='new'
11515     // bne readLoop   # branch if they're not equal
11516     // ...
11517 
11518     MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
11519     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
11520     DebugLoc dl = MI.getDebugLoc();
11521     F->insert(It, readMBB);
11522     F->insert(It, sinkMBB);
11523 
11524     // Transfer the remainder of BB and its successor edges to sinkMBB.
11525     sinkMBB->splice(sinkMBB->begin(), BB,
11526                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11527     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
11528 
11529     BB->addSuccessor(readMBB);
11530     BB = readMBB;
11531 
11532     MachineRegisterInfo &RegInfo = F->getRegInfo();
11533     Register ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
11534     Register LoReg = MI.getOperand(0).getReg();
11535     Register HiReg = MI.getOperand(1).getReg();
11536 
11537     BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
11538     BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
11539     BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269);
11540 
11541     Register CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
11542 
11543     BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
11544         .addReg(HiReg)
11545         .addReg(ReadAgainReg);
11546     BuildMI(BB, dl, TII->get(PPC::BCC))
11547         .addImm(PPC::PRED_NE)
11548         .addReg(CmpReg)
11549         .addMBB(readMBB);
11550 
11551     BB->addSuccessor(readMBB);
11552     BB->addSuccessor(sinkMBB);
11553   } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
11554     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
11555   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
11556     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
11557   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
11558     BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4);
11559   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
11560     BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8);
11561 
11562   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
11563     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
11564   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
11565     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
11566   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
11567     BB = EmitAtomicBinary(MI, BB, 4, PPC::AND);
11568   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
11569     BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8);
11570 
11571   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
11572     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
11573   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
11574     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
11575   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
11576     BB = EmitAtomicBinary(MI, BB, 4, PPC::OR);
11577   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
11578     BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8);
11579 
11580   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
11581     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
11582   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
11583     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
11584   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
11585     BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR);
11586   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
11587     BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8);
11588 
11589   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
11590     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
11591   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
11592     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
11593   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
11594     BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND);
11595   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
11596     BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8);
11597 
11598   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
11599     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
11600   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
11601     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
11602   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
11603     BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
11604   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
11605     BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);
11606 
11607   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
11608     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE);
11609   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
11610     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE);
11611   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
11612     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE);
11613   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
11614     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE);
11615 
11616   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
11617     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE);
11618   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
11619     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE);
11620   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
11621     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE);
11622   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
11623     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE);
11624 
11625   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
11626     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE);
11627   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
11628     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE);
11629   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
11630     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE);
11631   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
11632     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE);
11633 
11634   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
11635     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE);
11636   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
11637     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE);
11638   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
11639     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE);
11640   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
11641     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE);
11642 
11643   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
11644     BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
11645   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
11646     BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
11647   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
11648     BB = EmitAtomicBinary(MI, BB, 4, 0);
11649   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
11650     BB = EmitAtomicBinary(MI, BB, 8, 0);
11651   else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
11652            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
11653            (Subtarget.hasPartwordAtomics() &&
11654             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
11655            (Subtarget.hasPartwordAtomics() &&
11656             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
11657     bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
11658 
11659     auto LoadMnemonic = PPC::LDARX;
11660     auto StoreMnemonic = PPC::STDCX;
11661     switch (MI.getOpcode()) {
11662     default:
11663       llvm_unreachable("Compare and swap of unknown size");
11664     case PPC::ATOMIC_CMP_SWAP_I8:
11665       LoadMnemonic = PPC::LBARX;
11666       StoreMnemonic = PPC::STBCX;
11667       assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
11668       break;
11669     case PPC::ATOMIC_CMP_SWAP_I16:
11670       LoadMnemonic = PPC::LHARX;
11671       StoreMnemonic = PPC::STHCX;
11672       assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
11673       break;
11674     case PPC::ATOMIC_CMP_SWAP_I32:
11675       LoadMnemonic = PPC::LWARX;
11676       StoreMnemonic = PPC::STWCX;
11677       break;
11678     case PPC::ATOMIC_CMP_SWAP_I64:
11679       LoadMnemonic = PPC::LDARX;
11680       StoreMnemonic = PPC::STDCX;
11681       break;
11682     }
11683     Register dest = MI.getOperand(0).getReg();
11684     Register ptrA = MI.getOperand(1).getReg();
11685     Register ptrB = MI.getOperand(2).getReg();
11686     Register oldval = MI.getOperand(3).getReg();
11687     Register newval = MI.getOperand(4).getReg();
11688     DebugLoc dl = MI.getDebugLoc();
11689 
11690     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
11691     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
11692     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
11693     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11694     F->insert(It, loop1MBB);
11695     F->insert(It, loop2MBB);
11696     F->insert(It, midMBB);
11697     F->insert(It, exitMBB);
11698     exitMBB->splice(exitMBB->begin(), BB,
11699                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11700     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11701 
11702     //  thisMBB:
11703     //   ...
11704     //   fallthrough --> loopMBB
11705     BB->addSuccessor(loop1MBB);
11706 
11707     // loop1MBB:
11708     //   l[bhwd]arx dest, ptr
11709     //   cmp[wd] dest, oldval
11710     //   bne- midMBB
11711     // loop2MBB:
11712     //   st[bhwd]cx. newval, ptr
11713     //   bne- loopMBB
11714     //   b exitBB
11715     // midMBB:
11716     //   st[bhwd]cx. dest, ptr
11717     // exitBB:
11718     BB = loop1MBB;
11719     BuildMI(BB, dl, TII->get(LoadMnemonic), dest).addReg(ptrA).addReg(ptrB);
11720     BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
11721         .addReg(oldval)
11722         .addReg(dest);
11723     BuildMI(BB, dl, TII->get(PPC::BCC))
11724         .addImm(PPC::PRED_NE)
11725         .addReg(PPC::CR0)
11726         .addMBB(midMBB);
11727     BB->addSuccessor(loop2MBB);
11728     BB->addSuccessor(midMBB);
11729 
11730     BB = loop2MBB;
11731     BuildMI(BB, dl, TII->get(StoreMnemonic))
11732         .addReg(newval)
11733         .addReg(ptrA)
11734         .addReg(ptrB);
11735     BuildMI(BB, dl, TII->get(PPC::BCC))
11736         .addImm(PPC::PRED_NE)
11737         .addReg(PPC::CR0)
11738         .addMBB(loop1MBB);
11739     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
11740     BB->addSuccessor(loop1MBB);
11741     BB->addSuccessor(exitMBB);
11742 
11743     BB = midMBB;
11744     BuildMI(BB, dl, TII->get(StoreMnemonic))
11745         .addReg(dest)
11746         .addReg(ptrA)
11747         .addReg(ptrB);
11748     BB->addSuccessor(exitMBB);
11749 
11750     //  exitMBB:
11751     //   ...
11752     BB = exitMBB;
11753   } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
11754              MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
11755     // We must use 64-bit registers for addresses when targeting 64-bit,
11756     // since we're actually doing arithmetic on them.  Other registers
11757     // can be 32-bit.
11758     bool is64bit = Subtarget.isPPC64();
11759     bool isLittleEndian = Subtarget.isLittleEndian();
11760     bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
11761 
11762     Register dest = MI.getOperand(0).getReg();
11763     Register ptrA = MI.getOperand(1).getReg();
11764     Register ptrB = MI.getOperand(2).getReg();
11765     Register oldval = MI.getOperand(3).getReg();
11766     Register newval = MI.getOperand(4).getReg();
11767     DebugLoc dl = MI.getDebugLoc();
11768 
11769     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
11770     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
11771     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
11772     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11773     F->insert(It, loop1MBB);
11774     F->insert(It, loop2MBB);
11775     F->insert(It, midMBB);
11776     F->insert(It, exitMBB);
11777     exitMBB->splice(exitMBB->begin(), BB,
11778                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11779     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11780 
11781     MachineRegisterInfo &RegInfo = F->getRegInfo();
11782     const TargetRegisterClass *RC =
11783         is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11784     const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
11785 
11786     Register PtrReg = RegInfo.createVirtualRegister(RC);
11787     Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
11788     Register ShiftReg =
11789         isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
11790     Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
11791     Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
11792     Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
11793     Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
11794     Register MaskReg = RegInfo.createVirtualRegister(GPRC);
11795     Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
11796     Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
11797     Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
11798     Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
11799     Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
11800     Register Ptr1Reg;
11801     Register TmpReg = RegInfo.createVirtualRegister(GPRC);
11802     Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
11803     //  thisMBB:
11804     //   ...
11805     //   fallthrough --> loopMBB
11806     BB->addSuccessor(loop1MBB);
11807 
11808     // The 4-byte load must be aligned, while a char or short may be
11809     // anywhere in the word.  Hence all this nasty bookkeeping code.
11810     //   add ptr1, ptrA, ptrB [copy if ptrA==0]
11811     //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
11812     //   xori shift, shift1, 24 [16]
11813     //   rlwinm ptr, ptr1, 0, 0, 29
11814     //   slw newval2, newval, shift
11815     //   slw oldval2, oldval,shift
11816     //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
11817     //   slw mask, mask2, shift
11818     //   and newval3, newval2, mask
11819     //   and oldval3, oldval2, mask
11820     // loop1MBB:
11821     //   lwarx tmpDest, ptr
11822     //   and tmp, tmpDest, mask
11823     //   cmpw tmp, oldval3
11824     //   bne- midMBB
11825     // loop2MBB:
11826     //   andc tmp2, tmpDest, mask
11827     //   or tmp4, tmp2, newval3
11828     //   stwcx. tmp4, ptr
11829     //   bne- loop1MBB
11830     //   b exitBB
11831     // midMBB:
11832     //   stwcx. tmpDest, ptr
11833     // exitBB:
11834     //   srw dest, tmpDest, shift
11835     if (ptrA != ZeroReg) {
11836       Ptr1Reg = RegInfo.createVirtualRegister(RC);
11837       BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
11838           .addReg(ptrA)
11839           .addReg(ptrB);
11840     } else {
11841       Ptr1Reg = ptrB;
11842     }
11843 
11844     // We need use 32-bit subregister to avoid mismatch register class in 64-bit
11845     // mode.
11846     BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
11847         .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
11848         .addImm(3)
11849         .addImm(27)
11850         .addImm(is8bit ? 28 : 27);
11851     if (!isLittleEndian)
11852       BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
11853           .addReg(Shift1Reg)
11854           .addImm(is8bit ? 24 : 16);
11855     if (is64bit)
11856       BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
11857           .addReg(Ptr1Reg)
11858           .addImm(0)
11859           .addImm(61);
11860     else
11861       BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
11862           .addReg(Ptr1Reg)
11863           .addImm(0)
11864           .addImm(0)
11865           .addImm(29);
11866     BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
11867         .addReg(newval)
11868         .addReg(ShiftReg);
11869     BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
11870         .addReg(oldval)
11871         .addReg(ShiftReg);
11872     if (is8bit)
11873       BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
11874     else {
11875       BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
11876       BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
11877           .addReg(Mask3Reg)
11878           .addImm(65535);
11879     }
11880     BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
11881         .addReg(Mask2Reg)
11882         .addReg(ShiftReg);
11883     BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
11884         .addReg(NewVal2Reg)
11885         .addReg(MaskReg);
11886     BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
11887         .addReg(OldVal2Reg)
11888         .addReg(MaskReg);
11889 
11890     BB = loop1MBB;
11891     BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
11892         .addReg(ZeroReg)
11893         .addReg(PtrReg);
11894     BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
11895         .addReg(TmpDestReg)
11896         .addReg(MaskReg);
11897     BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
11898         .addReg(TmpReg)
11899         .addReg(OldVal3Reg);
11900     BuildMI(BB, dl, TII->get(PPC::BCC))
11901         .addImm(PPC::PRED_NE)
11902         .addReg(PPC::CR0)
11903         .addMBB(midMBB);
11904     BB->addSuccessor(loop2MBB);
11905     BB->addSuccessor(midMBB);
11906 
11907     BB = loop2MBB;
11908     BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
11909         .addReg(TmpDestReg)
11910         .addReg(MaskReg);
11911     BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
11912         .addReg(Tmp2Reg)
11913         .addReg(NewVal3Reg);
11914     BuildMI(BB, dl, TII->get(PPC::STWCX))
11915         .addReg(Tmp4Reg)
11916         .addReg(ZeroReg)
11917         .addReg(PtrReg);
11918     BuildMI(BB, dl, TII->get(PPC::BCC))
11919         .addImm(PPC::PRED_NE)
11920         .addReg(PPC::CR0)
11921         .addMBB(loop1MBB);
11922     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
11923     BB->addSuccessor(loop1MBB);
11924     BB->addSuccessor(exitMBB);
11925 
11926     BB = midMBB;
11927     BuildMI(BB, dl, TII->get(PPC::STWCX))
11928         .addReg(TmpDestReg)
11929         .addReg(ZeroReg)
11930         .addReg(PtrReg);
11931     BB->addSuccessor(exitMBB);
11932 
11933     //  exitMBB:
11934     //   ...
11935     BB = exitMBB;
11936     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
11937         .addReg(TmpReg)
11938         .addReg(ShiftReg);
11939   } else if (MI.getOpcode() == PPC::FADDrtz) {
11940     // This pseudo performs an FADD with rounding mode temporarily forced
11941     // to round-to-zero.  We emit this via custom inserter since the FPSCR
11942     // is not modeled at the SelectionDAG level.
11943     Register Dest = MI.getOperand(0).getReg();
11944     Register Src1 = MI.getOperand(1).getReg();
11945     Register Src2 = MI.getOperand(2).getReg();
11946     DebugLoc dl = MI.getDebugLoc();
11947 
11948     MachineRegisterInfo &RegInfo = F->getRegInfo();
11949     Register MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
11950 
11951     // Save FPSCR value.
11952     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);
11953 
11954     // Set rounding mode to round-to-zero.
11955     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1))
11956         .addImm(31)
11957         .addReg(PPC::RM, RegState::ImplicitDefine);
11958 
11959     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0))
11960         .addImm(30)
11961         .addReg(PPC::RM, RegState::ImplicitDefine);
11962 
11963     // Perform addition.
11964     BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2);
11965 
11966     // Restore FPSCR value.
11967     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
11968   } else if (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
11969              MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT ||
11970              MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
11971              MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) {
11972     unsigned Opcode = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
11973                        MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8)
11974                           ? PPC::ANDI8_rec
11975                           : PPC::ANDI_rec;
11976     bool IsEQ = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
11977                  MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8);
11978 
11979     MachineRegisterInfo &RegInfo = F->getRegInfo();
11980     Register Dest = RegInfo.createVirtualRegister(
11981         Opcode == PPC::ANDI_rec ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);
11982 
11983     DebugLoc Dl = MI.getDebugLoc();
11984     BuildMI(*BB, MI, Dl, TII->get(Opcode), Dest)
11985         .addReg(MI.getOperand(1).getReg())
11986         .addImm(1);
11987     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
11988             MI.getOperand(0).getReg())
11989         .addReg(IsEQ ? PPC::CR0EQ : PPC::CR0GT);
11990   } else if (MI.getOpcode() == PPC::TCHECK_RET) {
11991     DebugLoc Dl = MI.getDebugLoc();
11992     MachineRegisterInfo &RegInfo = F->getRegInfo();
11993     Register CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
11994     BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
11995     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
11996             MI.getOperand(0).getReg())
11997         .addReg(CRReg);
11998   } else if (MI.getOpcode() == PPC::TBEGIN_RET) {
11999     DebugLoc Dl = MI.getDebugLoc();
12000     unsigned Imm = MI.getOperand(1).getImm();
12001     BuildMI(*BB, MI, Dl, TII->get(PPC::TBEGIN)).addImm(Imm);
12002     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12003             MI.getOperand(0).getReg())
12004         .addReg(PPC::CR0EQ);
12005   } else if (MI.getOpcode() == PPC::SETRNDi) {
12006     DebugLoc dl = MI.getDebugLoc();
12007     Register OldFPSCRReg = MI.getOperand(0).getReg();
12008 
12009     // Save FPSCR value.
12010     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
12011 
12012     // The floating point rounding mode is in the bits 62:63 of FPCSR, and has
12013     // the following settings:
12014     //   00 Round to nearest
12015     //   01 Round to 0
12016     //   10 Round to +inf
12017     //   11 Round to -inf
12018 
12019     // When the operand is immediate, using the two least significant bits of
12020     // the immediate to set the bits 62:63 of FPSCR.
12021     unsigned Mode = MI.getOperand(1).getImm();
12022     BuildMI(*BB, MI, dl, TII->get((Mode & 1) ? PPC::MTFSB1 : PPC::MTFSB0))
12023         .addImm(31)
12024         .addReg(PPC::RM, RegState::ImplicitDefine);
12025 
12026     BuildMI(*BB, MI, dl, TII->get((Mode & 2) ? PPC::MTFSB1 : PPC::MTFSB0))
12027         .addImm(30)
12028         .addReg(PPC::RM, RegState::ImplicitDefine);
12029   } else if (MI.getOpcode() == PPC::SETRND) {
12030     DebugLoc dl = MI.getDebugLoc();
12031 
12032     // Copy register from F8RCRegClass::SrcReg to G8RCRegClass::DestReg
12033     // or copy register from G8RCRegClass::SrcReg to F8RCRegClass::DestReg.
12034     // If the target doesn't have DirectMove, we should use stack to do the
12035     // conversion, because the target doesn't have the instructions like mtvsrd
12036     // or mfvsrd to do this conversion directly.
12037     auto copyRegFromG8RCOrF8RC = [&] (unsigned DestReg, unsigned SrcReg) {
12038       if (Subtarget.hasDirectMove()) {
12039         BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), DestReg)
12040           .addReg(SrcReg);
12041       } else {
12042         // Use stack to do the register copy.
12043         unsigned StoreOp = PPC::STD, LoadOp = PPC::LFD;
12044         MachineRegisterInfo &RegInfo = F->getRegInfo();
12045         const TargetRegisterClass *RC = RegInfo.getRegClass(SrcReg);
12046         if (RC == &PPC::F8RCRegClass) {
12047           // Copy register from F8RCRegClass to G8RCRegclass.
12048           assert((RegInfo.getRegClass(DestReg) == &PPC::G8RCRegClass) &&
12049                  "Unsupported RegClass.");
12050 
12051           StoreOp = PPC::STFD;
12052           LoadOp = PPC::LD;
12053         } else {
12054           // Copy register from G8RCRegClass to F8RCRegclass.
12055           assert((RegInfo.getRegClass(SrcReg) == &PPC::G8RCRegClass) &&
12056                  (RegInfo.getRegClass(DestReg) == &PPC::F8RCRegClass) &&
12057                  "Unsupported RegClass.");
12058         }
12059 
12060         MachineFrameInfo &MFI = F->getFrameInfo();
12061         int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
12062 
12063         MachineMemOperand *MMOStore = F->getMachineMemOperand(
12064             MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
12065             MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx),
12066             MFI.getObjectAlign(FrameIdx));
12067 
12068         // Store the SrcReg into the stack.
12069         BuildMI(*BB, MI, dl, TII->get(StoreOp))
12070           .addReg(SrcReg)
12071           .addImm(0)
12072           .addFrameIndex(FrameIdx)
12073           .addMemOperand(MMOStore);
12074 
12075         MachineMemOperand *MMOLoad = F->getMachineMemOperand(
12076             MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
12077             MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
12078             MFI.getObjectAlign(FrameIdx));
12079 
12080         // Load from the stack where SrcReg is stored, and save to DestReg,
12081         // so we have done the RegClass conversion from RegClass::SrcReg to
12082         // RegClass::DestReg.
12083         BuildMI(*BB, MI, dl, TII->get(LoadOp), DestReg)
12084           .addImm(0)
12085           .addFrameIndex(FrameIdx)
12086           .addMemOperand(MMOLoad);
12087       }
12088     };
12089 
12090     Register OldFPSCRReg = MI.getOperand(0).getReg();
12091 
12092     // Save FPSCR value.
12093     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
12094 
12095     // When the operand is gprc register, use two least significant bits of the
12096     // register and mtfsf instruction to set the bits 62:63 of FPSCR.
12097     //
12098     // copy OldFPSCRTmpReg, OldFPSCRReg
12099     // (INSERT_SUBREG ExtSrcReg, (IMPLICIT_DEF ImDefReg), SrcOp, 1)
12100     // rldimi NewFPSCRTmpReg, ExtSrcReg, OldFPSCRReg, 0, 62
12101     // copy NewFPSCRReg, NewFPSCRTmpReg
12102     // mtfsf 255, NewFPSCRReg
12103     MachineOperand SrcOp = MI.getOperand(1);
12104     MachineRegisterInfo &RegInfo = F->getRegInfo();
12105     Register OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12106 
12107     copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg);
12108 
12109     Register ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12110     Register ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12111 
12112     // The first operand of INSERT_SUBREG should be a register which has
12113     // subregisters, we only care about its RegClass, so we should use an
12114     // IMPLICIT_DEF register.
12115     BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), ImDefReg);
12116     BuildMI(*BB, MI, dl, TII->get(PPC::INSERT_SUBREG), ExtSrcReg)
12117       .addReg(ImDefReg)
12118       .add(SrcOp)
12119       .addImm(1);
12120 
12121     Register NewFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12122     BuildMI(*BB, MI, dl, TII->get(PPC::RLDIMI), NewFPSCRTmpReg)
12123       .addReg(OldFPSCRTmpReg)
12124       .addReg(ExtSrcReg)
12125       .addImm(0)
12126       .addImm(62);
12127 
12128     Register NewFPSCRReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
12129     copyRegFromG8RCOrF8RC(NewFPSCRReg, NewFPSCRTmpReg);
12130 
12131     // The mask 255 means that put the 32:63 bits of NewFPSCRReg to the 32:63
12132     // bits of FPSCR.
12133     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF))
12134       .addImm(255)
12135       .addReg(NewFPSCRReg)
12136       .addImm(0)
12137       .addImm(0);
12138   } else if (MI.getOpcode() == PPC::PROBED_ALLOCA_32 ||
12139              MI.getOpcode() == PPC::PROBED_ALLOCA_64) {
12140     return emitProbedAlloca(MI, BB);
12141   } else {
12142     llvm_unreachable("Unexpected instr type to insert");
12143   }
12144 
12145   MI.eraseFromParent(); // The pseudo instruction is gone now.
12146   return BB;
12147 }
12148 
12149 //===----------------------------------------------------------------------===//
12150 // Target Optimization Hooks
12151 //===----------------------------------------------------------------------===//
12152 
12153 static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {
12154   // For the estimates, convergence is quadratic, so we essentially double the
12155   // number of digits correct after every iteration. For both FRE and FRSQRTE,
12156   // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(),
12157   // this is 2^-14. IEEE float has 23 digits and double has 52 digits.
12158   int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
12159   if (VT.getScalarType() == MVT::f64)
12160     RefinementSteps++;
12161   return RefinementSteps;
12162 }
12163 
12164 SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
12165                                            int Enabled, int &RefinementSteps,
12166                                            bool &UseOneConstNR,
12167                                            bool Reciprocal) const {
12168   EVT VT = Operand.getValueType();
12169   if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
12170       (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
12171       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
12172       (VT == MVT::v2f64 && Subtarget.hasVSX())) {
12173     if (RefinementSteps == ReciprocalEstimate::Unspecified)
12174       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
12175 
12176     // The Newton-Raphson computation with a single constant does not provide
12177     // enough accuracy on some CPUs.
12178     UseOneConstNR = !Subtarget.needsTwoConstNR();
12179     return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
12180   }
12181   return SDValue();
12182 }
12183 
12184 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
12185                                             int Enabled,
12186                                             int &RefinementSteps) const {
12187   EVT VT = Operand.getValueType();
12188   if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
12189       (VT == MVT::f64 && Subtarget.hasFRE()) ||
12190       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
12191       (VT == MVT::v2f64 && Subtarget.hasVSX())) {
12192     if (RefinementSteps == ReciprocalEstimate::Unspecified)
12193       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
12194     return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
12195   }
12196   return SDValue();
12197 }
12198 
12199 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
12200   // Note: This functionality is used only when unsafe-fp-math is enabled, and
12201   // on cores with reciprocal estimates (which are used when unsafe-fp-math is
12202   // enabled for division), this functionality is redundant with the default
12203   // combiner logic (once the division -> reciprocal/multiply transformation
12204   // has taken place). As a result, this matters more for older cores than for
12205   // newer ones.
12206 
12207   // Combine multiple FDIVs with the same divisor into multiple FMULs by the
12208   // reciprocal if there are two or more FDIVs (for embedded cores with only
12209   // one FP pipeline) for three or more FDIVs (for generic OOO cores).
12210   switch (Subtarget.getCPUDirective()) {
12211   default:
12212     return 3;
12213   case PPC::DIR_440:
12214   case PPC::DIR_A2:
12215   case PPC::DIR_E500:
12216   case PPC::DIR_E500mc:
12217   case PPC::DIR_E5500:
12218     return 2;
12219   }
12220 }
12221 
12222 // isConsecutiveLSLoc needs to work even if all adds have not yet been
12223 // collapsed, and so we need to look through chains of them.
12224 static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
12225                                      int64_t& Offset, SelectionDAG &DAG) {
12226   if (DAG.isBaseWithConstantOffset(Loc)) {
12227     Base = Loc.getOperand(0);
12228     Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
12229 
12230     // The base might itself be a base plus an offset, and if so, accumulate
12231     // that as well.
12232     getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG);
12233   }
12234 }
12235 
12236 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
12237                             unsigned Bytes, int Dist,
12238                             SelectionDAG &DAG) {
12239   if (VT.getSizeInBits() / 8 != Bytes)
12240     return false;
12241 
12242   SDValue BaseLoc = Base->getBasePtr();
12243   if (Loc.getOpcode() == ISD::FrameIndex) {
12244     if (BaseLoc.getOpcode() != ISD::FrameIndex)
12245       return false;
12246     const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
12247     int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
12248     int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
12249     int FS  = MFI.getObjectSize(FI);
12250     int BFS = MFI.getObjectSize(BFI);
12251     if (FS != BFS || FS != (int)Bytes) return false;
12252     return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
12253   }
12254 
12255   SDValue Base1 = Loc, Base2 = BaseLoc;
12256   int64_t Offset1 = 0, Offset2 = 0;
12257   getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);
12258   getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);
12259   if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
12260     return true;
12261 
12262   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12263   const GlobalValue *GV1 = nullptr;
12264   const GlobalValue *GV2 = nullptr;
12265   Offset1 = 0;
12266   Offset2 = 0;
12267   bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
12268   bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
12269   if (isGA1 && isGA2 && GV1 == GV2)
12270     return Offset1 == (Offset2 + Dist*Bytes);
12271   return false;
12272 }
12273 
12274 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
12275 // not enforce equality of the chain operands.
12276 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
12277                             unsigned Bytes, int Dist,
12278                             SelectionDAG &DAG) {
12279   if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
12280     EVT VT = LS->getMemoryVT();
12281     SDValue Loc = LS->getBasePtr();
12282     return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
12283   }
12284 
12285   if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
12286     EVT VT;
12287     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12288     default: return false;
12289     case Intrinsic::ppc_altivec_lvx:
12290     case Intrinsic::ppc_altivec_lvxl:
12291     case Intrinsic::ppc_vsx_lxvw4x:
12292     case Intrinsic::ppc_vsx_lxvw4x_be:
12293       VT = MVT::v4i32;
12294       break;
12295     case Intrinsic::ppc_vsx_lxvd2x:
12296     case Intrinsic::ppc_vsx_lxvd2x_be:
12297       VT = MVT::v2f64;
12298       break;
12299     case Intrinsic::ppc_altivec_lvebx:
12300       VT = MVT::i8;
12301       break;
12302     case Intrinsic::ppc_altivec_lvehx:
12303       VT = MVT::i16;
12304       break;
12305     case Intrinsic::ppc_altivec_lvewx:
12306       VT = MVT::i32;
12307       break;
12308     }
12309 
12310     return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
12311   }
12312 
12313   if (N->getOpcode() == ISD::INTRINSIC_VOID) {
12314     EVT VT;
12315     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12316     default: return false;
12317     case Intrinsic::ppc_altivec_stvx:
12318     case Intrinsic::ppc_altivec_stvxl:
12319     case Intrinsic::ppc_vsx_stxvw4x:
12320       VT = MVT::v4i32;
12321       break;
12322     case Intrinsic::ppc_vsx_stxvd2x:
12323       VT = MVT::v2f64;
12324       break;
12325     case Intrinsic::ppc_vsx_stxvw4x_be:
12326       VT = MVT::v4i32;
12327       break;
12328     case Intrinsic::ppc_vsx_stxvd2x_be:
12329       VT = MVT::v2f64;
12330       break;
12331     case Intrinsic::ppc_altivec_stvebx:
12332       VT = MVT::i8;
12333       break;
12334     case Intrinsic::ppc_altivec_stvehx:
12335       VT = MVT::i16;
12336       break;
12337     case Intrinsic::ppc_altivec_stvewx:
12338       VT = MVT::i32;
12339       break;
12340     }
12341 
12342     return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
12343   }
12344 
12345   return false;
12346 }
12347 
12348 // Return true is there is a nearyby consecutive load to the one provided
12349 // (regardless of alignment). We search up and down the chain, looking though
12350 // token factors and other loads (but nothing else). As a result, a true result
12351 // indicates that it is safe to create a new consecutive load adjacent to the
12352 // load provided.
12353 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
12354   SDValue Chain = LD->getChain();
12355   EVT VT = LD->getMemoryVT();
12356 
12357   SmallSet<SDNode *, 16> LoadRoots;
12358   SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
12359   SmallSet<SDNode *, 16> Visited;
12360 
12361   // First, search up the chain, branching to follow all token-factor operands.
12362   // If we find a consecutive load, then we're done, otherwise, record all
12363   // nodes just above the top-level loads and token factors.
12364   while (!Queue.empty()) {
12365     SDNode *ChainNext = Queue.pop_back_val();
12366     if (!Visited.insert(ChainNext).second)
12367       continue;
12368 
12369     if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
12370       if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
12371         return true;
12372 
12373       if (!Visited.count(ChainLD->getChain().getNode()))
12374         Queue.push_back(ChainLD->getChain().getNode());
12375     } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
12376       for (const SDUse &O : ChainNext->ops())
12377         if (!Visited.count(O.getNode()))
12378           Queue.push_back(O.getNode());
12379     } else
12380       LoadRoots.insert(ChainNext);
12381   }
12382 
12383   // Second, search down the chain, starting from the top-level nodes recorded
12384   // in the first phase. These top-level nodes are the nodes just above all
12385   // loads and token factors. Starting with their uses, recursively look though
12386   // all loads (just the chain uses) and token factors to find a consecutive
12387   // load.
12388   Visited.clear();
12389   Queue.clear();
12390 
12391   for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
12392        IE = LoadRoots.end(); I != IE; ++I) {
12393     Queue.push_back(*I);
12394 
12395     while (!Queue.empty()) {
12396       SDNode *LoadRoot = Queue.pop_back_val();
12397       if (!Visited.insert(LoadRoot).second)
12398         continue;
12399 
12400       if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
12401         if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
12402           return true;
12403 
12404       for (SDNode::use_iterator UI = LoadRoot->use_begin(),
12405            UE = LoadRoot->use_end(); UI != UE; ++UI)
12406         if (((isa<MemSDNode>(*UI) &&
12407             cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
12408             UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
12409           Queue.push_back(*UI);
12410     }
12411   }
12412 
12413   return false;
12414 }
12415 
12416 /// This function is called when we have proved that a SETCC node can be replaced
12417 /// by subtraction (and other supporting instructions) so that the result of
12418 /// comparison is kept in a GPR instead of CR. This function is purely for
12419 /// codegen purposes and has some flags to guide the codegen process.
12420 static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement,
12421                                      bool Swap, SDLoc &DL, SelectionDAG &DAG) {
12422   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
12423 
12424   // Zero extend the operands to the largest legal integer. Originally, they
12425   // must be of a strictly smaller size.
12426   auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0),
12427                          DAG.getConstant(Size, DL, MVT::i32));
12428   auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1),
12429                          DAG.getConstant(Size, DL, MVT::i32));
12430 
12431   // Swap if needed. Depends on the condition code.
12432   if (Swap)
12433     std::swap(Op0, Op1);
12434 
12435   // Subtract extended integers.
12436   auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1);
12437 
12438   // Move the sign bit to the least significant position and zero out the rest.
12439   // Now the least significant bit carries the result of original comparison.
12440   auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode,
12441                              DAG.getConstant(Size - 1, DL, MVT::i32));
12442   auto Final = Shifted;
12443 
12444   // Complement the result if needed. Based on the condition code.
12445   if (Complement)
12446     Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted,
12447                         DAG.getConstant(1, DL, MVT::i64));
12448 
12449   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final);
12450 }
12451 
12452 SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
12453                                                   DAGCombinerInfo &DCI) const {
12454   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
12455 
12456   SelectionDAG &DAG = DCI.DAG;
12457   SDLoc DL(N);
12458 
12459   // Size of integers being compared has a critical role in the following
12460   // analysis, so we prefer to do this when all types are legal.
12461   if (!DCI.isAfterLegalizeDAG())
12462     return SDValue();
12463 
12464   // If all users of SETCC extend its value to a legal integer type
12465   // then we replace SETCC with a subtraction
12466   for (SDNode::use_iterator UI = N->use_begin(),
12467        UE = N->use_end(); UI != UE; ++UI) {
12468     if (UI->getOpcode() != ISD::ZERO_EXTEND)
12469       return SDValue();
12470   }
12471 
12472   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
12473   auto OpSize = N->getOperand(0).getValueSizeInBits();
12474 
12475   unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();
12476 
12477   if (OpSize < Size) {
12478     switch (CC) {
12479     default: break;
12480     case ISD::SETULT:
12481       return generateEquivalentSub(N, Size, false, false, DL, DAG);
12482     case ISD::SETULE:
12483       return generateEquivalentSub(N, Size, true, true, DL, DAG);
12484     case ISD::SETUGT:
12485       return generateEquivalentSub(N, Size, false, true, DL, DAG);
12486     case ISD::SETUGE:
12487       return generateEquivalentSub(N, Size, true, false, DL, DAG);
12488     }
12489   }
12490 
12491   return SDValue();
12492 }
12493 
12494 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
12495                                                   DAGCombinerInfo &DCI) const {
12496   SelectionDAG &DAG = DCI.DAG;
12497   SDLoc dl(N);
12498 
12499   assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
12500   // If we're tracking CR bits, we need to be careful that we don't have:
12501   //   trunc(binary-ops(zext(x), zext(y)))
12502   // or
12503   //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
12504   // such that we're unnecessarily moving things into GPRs when it would be
12505   // better to keep them in CR bits.
12506 
12507   // Note that trunc here can be an actual i1 trunc, or can be the effective
12508   // truncation that comes from a setcc or select_cc.
12509   if (N->getOpcode() == ISD::TRUNCATE &&
12510       N->getValueType(0) != MVT::i1)
12511     return SDValue();
12512 
12513   if (N->getOperand(0).getValueType() != MVT::i32 &&
12514       N->getOperand(0).getValueType() != MVT::i64)
12515     return SDValue();
12516 
12517   if (N->getOpcode() == ISD::SETCC ||
12518       N->getOpcode() == ISD::SELECT_CC) {
12519     // If we're looking at a comparison, then we need to make sure that the
12520     // high bits (all except for the first) don't matter the result.
12521     ISD::CondCode CC =
12522       cast<CondCodeSDNode>(N->getOperand(
12523         N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
12524     unsigned OpBits = N->getOperand(0).getValueSizeInBits();
12525 
12526     if (ISD::isSignedIntSetCC(CC)) {
12527       if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
12528           DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
12529         return SDValue();
12530     } else if (ISD::isUnsignedIntSetCC(CC)) {
12531       if (!DAG.MaskedValueIsZero(N->getOperand(0),
12532                                  APInt::getHighBitsSet(OpBits, OpBits-1)) ||
12533           !DAG.MaskedValueIsZero(N->getOperand(1),
12534                                  APInt::getHighBitsSet(OpBits, OpBits-1)))
12535         return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
12536                                              : SDValue());
12537     } else {
12538       // This is neither a signed nor an unsigned comparison, just make sure
12539       // that the high bits are equal.
12540       KnownBits Op1Known = DAG.computeKnownBits(N->getOperand(0));
12541       KnownBits Op2Known = DAG.computeKnownBits(N->getOperand(1));
12542 
12543       // We don't really care about what is known about the first bit (if
12544       // anything), so clear it in all masks prior to comparing them.
12545       Op1Known.Zero.clearBit(0); Op1Known.One.clearBit(0);
12546       Op2Known.Zero.clearBit(0); Op2Known.One.clearBit(0);
12547 
12548       if (Op1Known.Zero != Op2Known.Zero || Op1Known.One != Op2Known.One)
12549         return SDValue();
12550     }
12551   }
12552 
12553   // We now know that the higher-order bits are irrelevant, we just need to
12554   // make sure that all of the intermediate operations are bit operations, and
12555   // all inputs are extensions.
12556   if (N->getOperand(0).getOpcode() != ISD::AND &&
12557       N->getOperand(0).getOpcode() != ISD::OR  &&
12558       N->getOperand(0).getOpcode() != ISD::XOR &&
12559       N->getOperand(0).getOpcode() != ISD::SELECT &&
12560       N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
12561       N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
12562       N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
12563       N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
12564       N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
12565     return SDValue();
12566 
12567   if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
12568       N->getOperand(1).getOpcode() != ISD::AND &&
12569       N->getOperand(1).getOpcode() != ISD::OR  &&
12570       N->getOperand(1).getOpcode() != ISD::XOR &&
12571       N->getOperand(1).getOpcode() != ISD::SELECT &&
12572       N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
12573       N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
12574       N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
12575       N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
12576       N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
12577     return SDValue();
12578 
12579   SmallVector<SDValue, 4> Inputs;
12580   SmallVector<SDValue, 8> BinOps, PromOps;
12581   SmallPtrSet<SDNode *, 16> Visited;
12582 
12583   for (unsigned i = 0; i < 2; ++i) {
12584     if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
12585           N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
12586           N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
12587           N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
12588         isa<ConstantSDNode>(N->getOperand(i)))
12589       Inputs.push_back(N->getOperand(i));
12590     else
12591       BinOps.push_back(N->getOperand(i));
12592 
12593     if (N->getOpcode() == ISD::TRUNCATE)
12594       break;
12595   }
12596 
12597   // Visit all inputs, collect all binary operations (and, or, xor and
12598   // select) that are all fed by extensions.
12599   while (!BinOps.empty()) {
12600     SDValue BinOp = BinOps.back();
12601     BinOps.pop_back();
12602 
12603     if (!Visited.insert(BinOp.getNode()).second)
12604       continue;
12605 
12606     PromOps.push_back(BinOp);
12607 
12608     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
12609       // The condition of the select is not promoted.
12610       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
12611         continue;
12612       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
12613         continue;
12614 
12615       if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
12616             BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
12617             BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
12618            BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
12619           isa<ConstantSDNode>(BinOp.getOperand(i))) {
12620         Inputs.push_back(BinOp.getOperand(i));
12621       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
12622                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
12623                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
12624                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
12625                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
12626                  BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
12627                  BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
12628                  BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
12629                  BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
12630         BinOps.push_back(BinOp.getOperand(i));
12631       } else {
12632         // We have an input that is not an extension or another binary
12633         // operation; we'll abort this transformation.
12634         return SDValue();
12635       }
12636     }
12637   }
12638 
12639   // Make sure that this is a self-contained cluster of operations (which
12640   // is not quite the same thing as saying that everything has only one
12641   // use).
12642   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12643     if (isa<ConstantSDNode>(Inputs[i]))
12644       continue;
12645 
12646     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
12647                               UE = Inputs[i].getNode()->use_end();
12648          UI != UE; ++UI) {
12649       SDNode *User = *UI;
12650       if (User != N && !Visited.count(User))
12651         return SDValue();
12652 
12653       // Make sure that we're not going to promote the non-output-value
12654       // operand(s) or SELECT or SELECT_CC.
12655       // FIXME: Although we could sometimes handle this, and it does occur in
12656       // practice that one of the condition inputs to the select is also one of
12657       // the outputs, we currently can't deal with this.
12658       if (User->getOpcode() == ISD::SELECT) {
12659         if (User->getOperand(0) == Inputs[i])
12660           return SDValue();
12661       } else if (User->getOpcode() == ISD::SELECT_CC) {
12662         if (User->getOperand(0) == Inputs[i] ||
12663             User->getOperand(1) == Inputs[i])
12664           return SDValue();
12665       }
12666     }
12667   }
12668 
12669   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
12670     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
12671                               UE = PromOps[i].getNode()->use_end();
12672          UI != UE; ++UI) {
12673       SDNode *User = *UI;
12674       if (User != N && !Visited.count(User))
12675         return SDValue();
12676 
12677       // Make sure that we're not going to promote the non-output-value
12678       // operand(s) or SELECT or SELECT_CC.
12679       // FIXME: Although we could sometimes handle this, and it does occur in
12680       // practice that one of the condition inputs to the select is also one of
12681       // the outputs, we currently can't deal with this.
12682       if (User->getOpcode() == ISD::SELECT) {
12683         if (User->getOperand(0) == PromOps[i])
12684           return SDValue();
12685       } else if (User->getOpcode() == ISD::SELECT_CC) {
12686         if (User->getOperand(0) == PromOps[i] ||
12687             User->getOperand(1) == PromOps[i])
12688           return SDValue();
12689       }
12690     }
12691   }
12692 
12693   // Replace all inputs with the extension operand.
12694   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12695     // Constants may have users outside the cluster of to-be-promoted nodes,
12696     // and so we need to replace those as we do the promotions.
12697     if (isa<ConstantSDNode>(Inputs[i]))
12698       continue;
12699     else
12700       DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
12701   }
12702 
12703   std::list<HandleSDNode> PromOpHandles;
12704   for (auto &PromOp : PromOps)
12705     PromOpHandles.emplace_back(PromOp);
12706 
12707   // Replace all operations (these are all the same, but have a different
12708   // (i1) return type). DAG.getNode will validate that the types of
12709   // a binary operator match, so go through the list in reverse so that
12710   // we've likely promoted both operands first. Any intermediate truncations or
12711   // extensions disappear.
12712   while (!PromOpHandles.empty()) {
12713     SDValue PromOp = PromOpHandles.back().getValue();
12714     PromOpHandles.pop_back();
12715 
12716     if (PromOp.getOpcode() == ISD::TRUNCATE ||
12717         PromOp.getOpcode() == ISD::SIGN_EXTEND ||
12718         PromOp.getOpcode() == ISD::ZERO_EXTEND ||
12719         PromOp.getOpcode() == ISD::ANY_EXTEND) {
12720       if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
12721           PromOp.getOperand(0).getValueType() != MVT::i1) {
12722         // The operand is not yet ready (see comment below).
12723         PromOpHandles.emplace_front(PromOp);
12724         continue;
12725       }
12726 
12727       SDValue RepValue = PromOp.getOperand(0);
12728       if (isa<ConstantSDNode>(RepValue))
12729         RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);
12730 
12731       DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
12732       continue;
12733     }
12734 
12735     unsigned C;
12736     switch (PromOp.getOpcode()) {
12737     default:             C = 0; break;
12738     case ISD::SELECT:    C = 1; break;
12739     case ISD::SELECT_CC: C = 2; break;
12740     }
12741 
12742     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
12743          PromOp.getOperand(C).getValueType() != MVT::i1) ||
12744         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
12745          PromOp.getOperand(C+1).getValueType() != MVT::i1)) {
12746       // The to-be-promoted operands of this node have not yet been
12747       // promoted (this should be rare because we're going through the
12748       // list backward, but if one of the operands has several users in
12749       // this cluster of to-be-promoted nodes, it is possible).
12750       PromOpHandles.emplace_front(PromOp);
12751       continue;
12752     }
12753 
12754     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
12755                                 PromOp.getNode()->op_end());
12756 
12757     // If there are any constant inputs, make sure they're replaced now.
12758     for (unsigned i = 0; i < 2; ++i)
12759       if (isa<ConstantSDNode>(Ops[C+i]))
12760         Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]);
12761 
12762     DAG.ReplaceAllUsesOfValueWith(PromOp,
12763       DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
12764   }
12765 
12766   // Now we're left with the initial truncation itself.
12767   if (N->getOpcode() == ISD::TRUNCATE)
12768     return N->getOperand(0);
12769 
12770   // Otherwise, this is a comparison. The operands to be compared have just
12771   // changed type (to i1), but everything else is the same.
12772   return SDValue(N, 0);
12773 }
12774 
12775 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
12776                                                   DAGCombinerInfo &DCI) const {
12777   SelectionDAG &DAG = DCI.DAG;
12778   SDLoc dl(N);
12779 
12780   // If we're tracking CR bits, we need to be careful that we don't have:
12781   //   zext(binary-ops(trunc(x), trunc(y)))
12782   // or
12783   //   zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
12784   // such that we're unnecessarily moving things into CR bits that can more
12785   // efficiently stay in GPRs. Note that if we're not certain that the high
12786   // bits are set as required by the final extension, we still may need to do
12787   // some masking to get the proper behavior.
12788 
12789   // This same functionality is important on PPC64 when dealing with
12790   // 32-to-64-bit extensions; these occur often when 32-bit values are used as
12791   // the return values of functions. Because it is so similar, it is handled
12792   // here as well.
12793 
12794   if (N->getValueType(0) != MVT::i32 &&
12795       N->getValueType(0) != MVT::i64)
12796     return SDValue();
12797 
12798   if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
12799         (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
12800     return SDValue();
12801 
12802   if (N->getOperand(0).getOpcode() != ISD::AND &&
12803       N->getOperand(0).getOpcode() != ISD::OR  &&
12804       N->getOperand(0).getOpcode() != ISD::XOR &&
12805       N->getOperand(0).getOpcode() != ISD::SELECT &&
12806       N->getOperand(0).getOpcode() != ISD::SELECT_CC)
12807     return SDValue();
12808 
12809   SmallVector<SDValue, 4> Inputs;
12810   SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
12811   SmallPtrSet<SDNode *, 16> Visited;
12812 
12813   // Visit all inputs, collect all binary operations (and, or, xor and
12814   // select) that are all fed by truncations.
12815   while (!BinOps.empty()) {
12816     SDValue BinOp = BinOps.back();
12817     BinOps.pop_back();
12818 
12819     if (!Visited.insert(BinOp.getNode()).second)
12820       continue;
12821 
12822     PromOps.push_back(BinOp);
12823 
12824     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
12825       // The condition of the select is not promoted.
12826       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
12827         continue;
12828       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
12829         continue;
12830 
12831       if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
12832           isa<ConstantSDNode>(BinOp.getOperand(i))) {
12833         Inputs.push_back(BinOp.getOperand(i));
12834       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
12835                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
12836                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
12837                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
12838                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
12839         BinOps.push_back(BinOp.getOperand(i));
12840       } else {
12841         // We have an input that is not a truncation or another binary
12842         // operation; we'll abort this transformation.
12843         return SDValue();
12844       }
12845     }
12846   }
12847 
12848   // The operands of a select that must be truncated when the select is
12849   // promoted because the operand is actually part of the to-be-promoted set.
12850   DenseMap<SDNode *, EVT> SelectTruncOp[2];
12851 
12852   // Make sure that this is a self-contained cluster of operations (which
12853   // is not quite the same thing as saying that everything has only one
12854   // use).
12855   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12856     if (isa<ConstantSDNode>(Inputs[i]))
12857       continue;
12858 
12859     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
12860                               UE = Inputs[i].getNode()->use_end();
12861          UI != UE; ++UI) {
12862       SDNode *User = *UI;
12863       if (User != N && !Visited.count(User))
12864         return SDValue();
12865 
12866       // If we're going to promote the non-output-value operand(s) or SELECT or
12867       // SELECT_CC, record them for truncation.
12868       if (User->getOpcode() == ISD::SELECT) {
12869         if (User->getOperand(0) == Inputs[i])
12870           SelectTruncOp[0].insert(std::make_pair(User,
12871                                     User->getOperand(0).getValueType()));
12872       } else if (User->getOpcode() == ISD::SELECT_CC) {
12873         if (User->getOperand(0) == Inputs[i])
12874           SelectTruncOp[0].insert(std::make_pair(User,
12875                                     User->getOperand(0).getValueType()));
12876         if (User->getOperand(1) == Inputs[i])
12877           SelectTruncOp[1].insert(std::make_pair(User,
12878                                     User->getOperand(1).getValueType()));
12879       }
12880     }
12881   }
12882 
12883   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
12884     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
12885                               UE = PromOps[i].getNode()->use_end();
12886          UI != UE; ++UI) {
12887       SDNode *User = *UI;
12888       if (User != N && !Visited.count(User))
12889         return SDValue();
12890 
12891       // If we're going to promote the non-output-value operand(s) or SELECT or
12892       // SELECT_CC, record them for truncation.
12893       if (User->getOpcode() == ISD::SELECT) {
12894         if (User->getOperand(0) == PromOps[i])
12895           SelectTruncOp[0].insert(std::make_pair(User,
12896                                     User->getOperand(0).getValueType()));
12897       } else if (User->getOpcode() == ISD::SELECT_CC) {
12898         if (User->getOperand(0) == PromOps[i])
12899           SelectTruncOp[0].insert(std::make_pair(User,
12900                                     User->getOperand(0).getValueType()));
12901         if (User->getOperand(1) == PromOps[i])
12902           SelectTruncOp[1].insert(std::make_pair(User,
12903                                     User->getOperand(1).getValueType()));
12904       }
12905     }
12906   }
12907 
12908   unsigned PromBits = N->getOperand(0).getValueSizeInBits();
12909   bool ReallyNeedsExt = false;
12910   if (N->getOpcode() != ISD::ANY_EXTEND) {
12911     // If all of the inputs are not already sign/zero extended, then
12912     // we'll still need to do that at the end.
12913     for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12914       if (isa<ConstantSDNode>(Inputs[i]))
12915         continue;
12916 
12917       unsigned OpBits =
12918         Inputs[i].getOperand(0).getValueSizeInBits();
12919       assert(PromBits < OpBits && "Truncation not to a smaller bit count?");
12920 
12921       if ((N->getOpcode() == ISD::ZERO_EXTEND &&
12922            !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
12923                                   APInt::getHighBitsSet(OpBits,
12924                                                         OpBits-PromBits))) ||
12925           (N->getOpcode() == ISD::SIGN_EXTEND &&
12926            DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
12927              (OpBits-(PromBits-1)))) {
12928         ReallyNeedsExt = true;
12929         break;
12930       }
12931     }
12932   }
12933 
12934   // Replace all inputs, either with the truncation operand, or a
12935   // truncation or extension to the final output type.
12936   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12937     // Constant inputs need to be replaced with the to-be-promoted nodes that
12938     // use them because they might have users outside of the cluster of
12939     // promoted nodes.
12940     if (isa<ConstantSDNode>(Inputs[i]))
12941       continue;
12942 
12943     SDValue InSrc = Inputs[i].getOperand(0);
12944     if (Inputs[i].getValueType() == N->getValueType(0))
12945       DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
12946     else if (N->getOpcode() == ISD::SIGN_EXTEND)
12947       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
12948         DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
12949     else if (N->getOpcode() == ISD::ZERO_EXTEND)
12950       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
12951         DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
12952     else
12953       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
12954         DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
12955   }
12956 
12957   std::list<HandleSDNode> PromOpHandles;
12958   for (auto &PromOp : PromOps)
12959     PromOpHandles.emplace_back(PromOp);
12960 
12961   // Replace all operations (these are all the same, but have a different
12962   // (promoted) return type). DAG.getNode will validate that the types of
12963   // a binary operator match, so go through the list in reverse so that
12964   // we've likely promoted both operands first.
12965   while (!PromOpHandles.empty()) {
12966     SDValue PromOp = PromOpHandles.back().getValue();
12967     PromOpHandles.pop_back();
12968 
12969     unsigned C;
12970     switch (PromOp.getOpcode()) {
12971     default:             C = 0; break;
12972     case ISD::SELECT:    C = 1; break;
12973     case ISD::SELECT_CC: C = 2; break;
12974     }
12975 
12976     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
12977          PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
12978         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
12979          PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) {
12980       // The to-be-promoted operands of this node have not yet been
12981       // promoted (this should be rare because we're going through the
12982       // list backward, but if one of the operands has several users in
12983       // this cluster of to-be-promoted nodes, it is possible).
12984       PromOpHandles.emplace_front(PromOp);
12985       continue;
12986     }
12987 
12988     // For SELECT and SELECT_CC nodes, we do a similar check for any
12989     // to-be-promoted comparison inputs.
12990     if (PromOp.getOpcode() == ISD::SELECT ||
12991         PromOp.getOpcode() == ISD::SELECT_CC) {
12992       if ((SelectTruncOp[0].count(PromOp.getNode()) &&
12993            PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
12994           (SelectTruncOp[1].count(PromOp.getNode()) &&
12995            PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
12996         PromOpHandles.emplace_front(PromOp);
12997         continue;
12998       }
12999     }
13000 
13001     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
13002                                 PromOp.getNode()->op_end());
13003 
13004     // If this node has constant inputs, then they'll need to be promoted here.
13005     for (unsigned i = 0; i < 2; ++i) {
13006       if (!isa<ConstantSDNode>(Ops[C+i]))
13007         continue;
13008       if (Ops[C+i].getValueType() == N->getValueType(0))
13009         continue;
13010 
13011       if (N->getOpcode() == ISD::SIGN_EXTEND)
13012         Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13013       else if (N->getOpcode() == ISD::ZERO_EXTEND)
13014         Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13015       else
13016         Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13017     }
13018 
13019     // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
13020     // truncate them again to the original value type.
13021     if (PromOp.getOpcode() == ISD::SELECT ||
13022         PromOp.getOpcode() == ISD::SELECT_CC) {
13023       auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
13024       if (SI0 != SelectTruncOp[0].end())
13025         Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
13026       auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
13027       if (SI1 != SelectTruncOp[1].end())
13028         Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
13029     }
13030 
13031     DAG.ReplaceAllUsesOfValueWith(PromOp,
13032       DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
13033   }
13034 
13035   // Now we're left with the initial extension itself.
13036   if (!ReallyNeedsExt)
13037     return N->getOperand(0);
13038 
13039   // To zero extend, just mask off everything except for the first bit (in the
13040   // i1 case).
13041   if (N->getOpcode() == ISD::ZERO_EXTEND)
13042     return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
13043                        DAG.getConstant(APInt::getLowBitsSet(
13044                                          N->getValueSizeInBits(0), PromBits),
13045                                        dl, N->getValueType(0)));
13046 
13047   assert(N->getOpcode() == ISD::SIGN_EXTEND &&
13048          "Invalid extension type");
13049   EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
13050   SDValue ShiftCst =
13051       DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
13052   return DAG.getNode(
13053       ISD::SRA, dl, N->getValueType(0),
13054       DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
13055       ShiftCst);
13056 }
13057 
13058 SDValue PPCTargetLowering::combineSetCC(SDNode *N,
13059                                         DAGCombinerInfo &DCI) const {
13060   assert(N->getOpcode() == ISD::SETCC &&
13061          "Should be called with a SETCC node");
13062 
13063   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
13064   if (CC == ISD::SETNE || CC == ISD::SETEQ) {
13065     SDValue LHS = N->getOperand(0);
13066     SDValue RHS = N->getOperand(1);
13067 
13068     // If there is a '0 - y' pattern, canonicalize the pattern to the RHS.
13069     if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
13070         LHS.hasOneUse())
13071       std::swap(LHS, RHS);
13072 
13073     // x == 0-y --> x+y == 0
13074     // x != 0-y --> x+y != 0
13075     if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
13076         RHS.hasOneUse()) {
13077       SDLoc DL(N);
13078       SelectionDAG &DAG = DCI.DAG;
13079       EVT VT = N->getValueType(0);
13080       EVT OpVT = LHS.getValueType();
13081       SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
13082       return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
13083     }
13084   }
13085 
13086   return DAGCombineTruncBoolExt(N, DCI);
13087 }
13088 
13089 // Is this an extending load from an f32 to an f64?
13090 static bool isFPExtLoad(SDValue Op) {
13091   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()))
13092     return LD->getExtensionType() == ISD::EXTLOAD &&
13093       Op.getValueType() == MVT::f64;
13094   return false;
13095 }
13096 
13097 /// Reduces the number of fp-to-int conversion when building a vector.
13098 ///
13099 /// If this vector is built out of floating to integer conversions,
13100 /// transform it to a vector built out of floating point values followed by a
13101 /// single floating to integer conversion of the vector.
13102 /// Namely  (build_vector (fptosi $A), (fptosi $B), ...)
13103 /// becomes (fptosi (build_vector ($A, $B, ...)))
13104 SDValue PPCTargetLowering::
13105 combineElementTruncationToVectorTruncation(SDNode *N,
13106                                            DAGCombinerInfo &DCI) const {
13107   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13108          "Should be called with a BUILD_VECTOR node");
13109 
13110   SelectionDAG &DAG = DCI.DAG;
13111   SDLoc dl(N);
13112 
13113   SDValue FirstInput = N->getOperand(0);
13114   assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
13115          "The input operand must be an fp-to-int conversion.");
13116 
13117   // This combine happens after legalization so the fp_to_[su]i nodes are
13118   // already converted to PPCSISD nodes.
13119   unsigned FirstConversion = FirstInput.getOperand(0).getOpcode();
13120   if (FirstConversion == PPCISD::FCTIDZ ||
13121       FirstConversion == PPCISD::FCTIDUZ ||
13122       FirstConversion == PPCISD::FCTIWZ ||
13123       FirstConversion == PPCISD::FCTIWUZ) {
13124     bool IsSplat = true;
13125     bool Is32Bit = FirstConversion == PPCISD::FCTIWZ ||
13126       FirstConversion == PPCISD::FCTIWUZ;
13127     EVT SrcVT = FirstInput.getOperand(0).getValueType();
13128     SmallVector<SDValue, 4> Ops;
13129     EVT TargetVT = N->getValueType(0);
13130     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
13131       SDValue NextOp = N->getOperand(i);
13132       if (NextOp.getOpcode() != PPCISD::MFVSR)
13133         return SDValue();
13134       unsigned NextConversion = NextOp.getOperand(0).getOpcode();
13135       if (NextConversion != FirstConversion)
13136         return SDValue();
13137       // If we are converting to 32-bit integers, we need to add an FP_ROUND.
13138       // This is not valid if the input was originally double precision. It is
13139       // also not profitable to do unless this is an extending load in which
13140       // case doing this combine will allow us to combine consecutive loads.
13141       if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0)))
13142         return SDValue();
13143       if (N->getOperand(i) != FirstInput)
13144         IsSplat = false;
13145     }
13146 
13147     // If this is a splat, we leave it as-is since there will be only a single
13148     // fp-to-int conversion followed by a splat of the integer. This is better
13149     // for 32-bit and smaller ints and neutral for 64-bit ints.
13150     if (IsSplat)
13151       return SDValue();
13152 
13153     // Now that we know we have the right type of node, get its operands
13154     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
13155       SDValue In = N->getOperand(i).getOperand(0);
13156       if (Is32Bit) {
13157         // For 32-bit values, we need to add an FP_ROUND node (if we made it
13158         // here, we know that all inputs are extending loads so this is safe).
13159         if (In.isUndef())
13160           Ops.push_back(DAG.getUNDEF(SrcVT));
13161         else {
13162           SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl,
13163                                       MVT::f32, In.getOperand(0),
13164                                       DAG.getIntPtrConstant(1, dl));
13165           Ops.push_back(Trunc);
13166         }
13167       } else
13168         Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
13169     }
13170 
13171     unsigned Opcode;
13172     if (FirstConversion == PPCISD::FCTIDZ ||
13173         FirstConversion == PPCISD::FCTIWZ)
13174       Opcode = ISD::FP_TO_SINT;
13175     else
13176       Opcode = ISD::FP_TO_UINT;
13177 
13178     EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;
13179     SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
13180     return DAG.getNode(Opcode, dl, TargetVT, BV);
13181   }
13182   return SDValue();
13183 }
13184 
13185 /// Reduce the number of loads when building a vector.
13186 ///
13187 /// Building a vector out of multiple loads can be converted to a load
13188 /// of the vector type if the loads are consecutive. If the loads are
13189 /// consecutive but in descending order, a shuffle is added at the end
13190 /// to reorder the vector.
13191 static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) {
13192   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13193          "Should be called with a BUILD_VECTOR node");
13194 
13195   SDLoc dl(N);
13196 
13197   // Return early for non byte-sized type, as they can't be consecutive.
13198   if (!N->getValueType(0).getVectorElementType().isByteSized())
13199     return SDValue();
13200 
13201   bool InputsAreConsecutiveLoads = true;
13202   bool InputsAreReverseConsecutive = true;
13203   unsigned ElemSize = N->getValueType(0).getScalarType().getStoreSize();
13204   SDValue FirstInput = N->getOperand(0);
13205   bool IsRoundOfExtLoad = false;
13206 
13207   if (FirstInput.getOpcode() == ISD::FP_ROUND &&
13208       FirstInput.getOperand(0).getOpcode() == ISD::LOAD) {
13209     LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0));
13210     IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD;
13211   }
13212   // Not a build vector of (possibly fp_rounded) loads.
13213   if ((!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) ||
13214       N->getNumOperands() == 1)
13215     return SDValue();
13216 
13217   for (int i = 1, e = N->getNumOperands(); i < e; ++i) {
13218     // If any inputs are fp_round(extload), they all must be.
13219     if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND)
13220       return SDValue();
13221 
13222     SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) :
13223       N->getOperand(i);
13224     if (NextInput.getOpcode() != ISD::LOAD)
13225       return SDValue();
13226 
13227     SDValue PreviousInput =
13228       IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1);
13229     LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput);
13230     LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput);
13231 
13232     // If any inputs are fp_round(extload), they all must be.
13233     if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD)
13234       return SDValue();
13235 
13236     if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG))
13237       InputsAreConsecutiveLoads = false;
13238     if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG))
13239       InputsAreReverseConsecutive = false;
13240 
13241     // Exit early if the loads are neither consecutive nor reverse consecutive.
13242     if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
13243       return SDValue();
13244   }
13245 
13246   assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
13247          "The loads cannot be both consecutive and reverse consecutive.");
13248 
13249   SDValue FirstLoadOp =
13250     IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput;
13251   SDValue LastLoadOp =
13252     IsRoundOfExtLoad ? N->getOperand(N->getNumOperands()-1).getOperand(0) :
13253                        N->getOperand(N->getNumOperands()-1);
13254 
13255   LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
13256   LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
13257   if (InputsAreConsecutiveLoads) {
13258     assert(LD1 && "Input needs to be a LoadSDNode.");
13259     return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
13260                        LD1->getBasePtr(), LD1->getPointerInfo(),
13261                        LD1->getAlignment());
13262   }
13263   if (InputsAreReverseConsecutive) {
13264     assert(LDL && "Input needs to be a LoadSDNode.");
13265     SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
13266                                LDL->getBasePtr(), LDL->getPointerInfo(),
13267                                LDL->getAlignment());
13268     SmallVector<int, 16> Ops;
13269     for (int i = N->getNumOperands() - 1; i >= 0; i--)
13270       Ops.push_back(i);
13271 
13272     return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
13273                                 DAG.getUNDEF(N->getValueType(0)), Ops);
13274   }
13275   return SDValue();
13276 }
13277 
13278 // This function adds the required vector_shuffle needed to get
13279 // the elements of the vector extract in the correct position
13280 // as specified by the CorrectElems encoding.
13281 static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
13282                                       SDValue Input, uint64_t Elems,
13283                                       uint64_t CorrectElems) {
13284   SDLoc dl(N);
13285 
13286   unsigned NumElems = Input.getValueType().getVectorNumElements();
13287   SmallVector<int, 16> ShuffleMask(NumElems, -1);
13288 
13289   // Knowing the element indices being extracted from the original
13290   // vector and the order in which they're being inserted, just put
13291   // them at element indices required for the instruction.
13292   for (unsigned i = 0; i < N->getNumOperands(); i++) {
13293     if (DAG.getDataLayout().isLittleEndian())
13294       ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
13295     else
13296       ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
13297     CorrectElems = CorrectElems >> 8;
13298     Elems = Elems >> 8;
13299   }
13300 
13301   SDValue Shuffle =
13302       DAG.getVectorShuffle(Input.getValueType(), dl, Input,
13303                            DAG.getUNDEF(Input.getValueType()), ShuffleMask);
13304 
13305   EVT VT = N->getValueType(0);
13306   SDValue Conv = DAG.getBitcast(VT, Shuffle);
13307 
13308   EVT ExtVT = EVT::getVectorVT(*DAG.getContext(),
13309                                Input.getValueType().getVectorElementType(),
13310                                VT.getVectorNumElements());
13311   return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Conv,
13312                      DAG.getValueType(ExtVT));
13313 }
13314 
13315 // Look for build vector patterns where input operands come from sign
13316 // extended vector_extract elements of specific indices. If the correct indices
13317 // aren't used, add a vector shuffle to fix up the indices and create
13318 // SIGN_EXTEND_INREG node which selects the vector sign extend instructions
13319 // during instruction selection.
13320 static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
13321   // This array encodes the indices that the vector sign extend instructions
13322   // extract from when extending from one type to another for both BE and LE.
13323   // The right nibble of each byte corresponds to the LE incides.
13324   // and the left nibble of each byte corresponds to the BE incides.
13325   // For example: 0x3074B8FC  byte->word
13326   // For LE: the allowed indices are: 0x0,0x4,0x8,0xC
13327   // For BE: the allowed indices are: 0x3,0x7,0xB,0xF
13328   // For example: 0x000070F8  byte->double word
13329   // For LE: the allowed indices are: 0x0,0x8
13330   // For BE: the allowed indices are: 0x7,0xF
13331   uint64_t TargetElems[] = {
13332       0x3074B8FC, // b->w
13333       0x000070F8, // b->d
13334       0x10325476, // h->w
13335       0x00003074, // h->d
13336       0x00001032, // w->d
13337   };
13338 
13339   uint64_t Elems = 0;
13340   int Index;
13341   SDValue Input;
13342 
13343   auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
13344     if (!Op)
13345       return false;
13346     if (Op.getOpcode() != ISD::SIGN_EXTEND &&
13347         Op.getOpcode() != ISD::SIGN_EXTEND_INREG)
13348       return false;
13349 
13350     // A SIGN_EXTEND_INREG might be fed by an ANY_EXTEND to produce a value
13351     // of the right width.
13352     SDValue Extract = Op.getOperand(0);
13353     if (Extract.getOpcode() == ISD::ANY_EXTEND)
13354       Extract = Extract.getOperand(0);
13355     if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
13356       return false;
13357 
13358     ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
13359     if (!ExtOp)
13360       return false;
13361 
13362     Index = ExtOp->getZExtValue();
13363     if (Input && Input != Extract.getOperand(0))
13364       return false;
13365 
13366     if (!Input)
13367       Input = Extract.getOperand(0);
13368 
13369     Elems = Elems << 8;
13370     Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
13371     Elems |= Index;
13372 
13373     return true;
13374   };
13375 
13376   // If the build vector operands aren't sign extended vector extracts,
13377   // of the same input vector, then return.
13378   for (unsigned i = 0; i < N->getNumOperands(); i++) {
13379     if (!isSExtOfVecExtract(N->getOperand(i))) {
13380       return SDValue();
13381     }
13382   }
13383 
13384   // If the vector extract indicies are not correct, add the appropriate
13385   // vector_shuffle.
13386   int TgtElemArrayIdx;
13387   int InputSize = Input.getValueType().getScalarSizeInBits();
13388   int OutputSize = N->getValueType(0).getScalarSizeInBits();
13389   if (InputSize + OutputSize == 40)
13390     TgtElemArrayIdx = 0;
13391   else if (InputSize + OutputSize == 72)
13392     TgtElemArrayIdx = 1;
13393   else if (InputSize + OutputSize == 48)
13394     TgtElemArrayIdx = 2;
13395   else if (InputSize + OutputSize == 80)
13396     TgtElemArrayIdx = 3;
13397   else if (InputSize + OutputSize == 96)
13398     TgtElemArrayIdx = 4;
13399   else
13400     return SDValue();
13401 
13402   uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
13403   CorrectElems = DAG.getDataLayout().isLittleEndian()
13404                      ? CorrectElems & 0x0F0F0F0F0F0F0F0F
13405                      : CorrectElems & 0xF0F0F0F0F0F0F0F0;
13406   if (Elems != CorrectElems) {
13407     return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
13408   }
13409 
13410   // Regular lowering will catch cases where a shuffle is not needed.
13411   return SDValue();
13412 }
13413 
13414 SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
13415                                                  DAGCombinerInfo &DCI) const {
13416   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13417          "Should be called with a BUILD_VECTOR node");
13418 
13419   SelectionDAG &DAG = DCI.DAG;
13420   SDLoc dl(N);
13421 
13422   if (!Subtarget.hasVSX())
13423     return SDValue();
13424 
13425   // The target independent DAG combiner will leave a build_vector of
13426   // float-to-int conversions intact. We can generate MUCH better code for
13427   // a float-to-int conversion of a vector of floats.
13428   SDValue FirstInput = N->getOperand(0);
13429   if (FirstInput.getOpcode() == PPCISD::MFVSR) {
13430     SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
13431     if (Reduced)
13432       return Reduced;
13433   }
13434 
13435   // If we're building a vector out of consecutive loads, just load that
13436   // vector type.
13437   SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG);
13438   if (Reduced)
13439     return Reduced;
13440 
13441   // If we're building a vector out of extended elements from another vector
13442   // we have P9 vector integer extend instructions. The code assumes legal
13443   // input types (i.e. it can't handle things like v4i16) so do not run before
13444   // legalization.
13445   if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) {
13446     Reduced = combineBVOfVecSExt(N, DAG);
13447     if (Reduced)
13448       return Reduced;
13449   }
13450 
13451 
13452   if (N->getValueType(0) != MVT::v2f64)
13453     return SDValue();
13454 
13455   // Looking for:
13456   // (build_vector ([su]int_to_fp (extractelt 0)), [su]int_to_fp (extractelt 1))
13457   if (FirstInput.getOpcode() != ISD::SINT_TO_FP &&
13458       FirstInput.getOpcode() != ISD::UINT_TO_FP)
13459     return SDValue();
13460   if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
13461       N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
13462     return SDValue();
13463   if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
13464     return SDValue();
13465 
13466   SDValue Ext1 = FirstInput.getOperand(0);
13467   SDValue Ext2 = N->getOperand(1).getOperand(0);
13468   if(Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
13469      Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
13470     return SDValue();
13471 
13472   ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
13473   ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
13474   if (!Ext1Op || !Ext2Op)
13475     return SDValue();
13476   if (Ext1.getOperand(0).getValueType() != MVT::v4i32 ||
13477       Ext1.getOperand(0) != Ext2.getOperand(0))
13478     return SDValue();
13479 
13480   int FirstElem = Ext1Op->getZExtValue();
13481   int SecondElem = Ext2Op->getZExtValue();
13482   int SubvecIdx;
13483   if (FirstElem == 0 && SecondElem == 1)
13484     SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
13485   else if (FirstElem == 2 && SecondElem == 3)
13486     SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
13487   else
13488     return SDValue();
13489 
13490   SDValue SrcVec = Ext1.getOperand(0);
13491   auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
13492     PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
13493   return DAG.getNode(NodeType, dl, MVT::v2f64,
13494                      SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
13495 }
13496 
13497 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
13498                                               DAGCombinerInfo &DCI) const {
13499   assert((N->getOpcode() == ISD::SINT_TO_FP ||
13500           N->getOpcode() == ISD::UINT_TO_FP) &&
13501          "Need an int -> FP conversion node here");
13502 
13503   if (useSoftFloat() || !Subtarget.has64BitSupport())
13504     return SDValue();
13505 
13506   SelectionDAG &DAG = DCI.DAG;
13507   SDLoc dl(N);
13508   SDValue Op(N, 0);
13509 
13510   // Don't handle ppc_fp128 here or conversions that are out-of-range capable
13511   // from the hardware.
13512   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
13513     return SDValue();
13514   if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
13515       Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
13516     return SDValue();
13517 
13518   SDValue FirstOperand(Op.getOperand(0));
13519   bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
13520     (FirstOperand.getValueType() == MVT::i8 ||
13521      FirstOperand.getValueType() == MVT::i16);
13522   if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
13523     bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
13524     bool DstDouble = Op.getValueType() == MVT::f64;
13525     unsigned ConvOp = Signed ?
13526       (DstDouble ? PPCISD::FCFID  : PPCISD::FCFIDS) :
13527       (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
13528     SDValue WidthConst =
13529       DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
13530                             dl, false);
13531     LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
13532     SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
13533     SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
13534                                          DAG.getVTList(MVT::f64, MVT::Other),
13535                                          Ops, MVT::i8, LDN->getMemOperand());
13536 
13537     // For signed conversion, we need to sign-extend the value in the VSR
13538     if (Signed) {
13539       SDValue ExtOps[] = { Ld, WidthConst };
13540       SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
13541       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
13542     } else
13543       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
13544   }
13545 
13546 
13547   // For i32 intermediate values, unfortunately, the conversion functions
13548   // leave the upper 32 bits of the value are undefined. Within the set of
13549   // scalar instructions, we have no method for zero- or sign-extending the
13550   // value. Thus, we cannot handle i32 intermediate values here.
13551   if (Op.getOperand(0).getValueType() == MVT::i32)
13552     return SDValue();
13553 
13554   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
13555          "UINT_TO_FP is supported only with FPCVT");
13556 
13557   // If we have FCFIDS, then use it when converting to single-precision.
13558   // Otherwise, convert to double-precision and then round.
13559   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
13560                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
13561                                                             : PPCISD::FCFIDS)
13562                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
13563                                                             : PPCISD::FCFID);
13564   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
13565                   ? MVT::f32
13566                   : MVT::f64;
13567 
13568   // If we're converting from a float, to an int, and back to a float again,
13569   // then we don't need the store/load pair at all.
13570   if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
13571        Subtarget.hasFPCVT()) ||
13572       (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
13573     SDValue Src = Op.getOperand(0).getOperand(0);
13574     if (Src.getValueType() == MVT::f32) {
13575       Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
13576       DCI.AddToWorklist(Src.getNode());
13577     } else if (Src.getValueType() != MVT::f64) {
13578       // Make sure that we don't pick up a ppc_fp128 source value.
13579       return SDValue();
13580     }
13581 
13582     unsigned FCTOp =
13583       Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
13584                                                         PPCISD::FCTIDUZ;
13585 
13586     SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
13587     SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);
13588 
13589     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
13590       FP = DAG.getNode(ISD::FP_ROUND, dl,
13591                        MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
13592       DCI.AddToWorklist(FP.getNode());
13593     }
13594 
13595     return FP;
13596   }
13597 
13598   return SDValue();
13599 }
13600 
13601 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
13602 // builtins) into loads with swaps.
13603 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
13604                                               DAGCombinerInfo &DCI) const {
13605   SelectionDAG &DAG = DCI.DAG;
13606   SDLoc dl(N);
13607   SDValue Chain;
13608   SDValue Base;
13609   MachineMemOperand *MMO;
13610 
13611   switch (N->getOpcode()) {
13612   default:
13613     llvm_unreachable("Unexpected opcode for little endian VSX load");
13614   case ISD::LOAD: {
13615     LoadSDNode *LD = cast<LoadSDNode>(N);
13616     Chain = LD->getChain();
13617     Base = LD->getBasePtr();
13618     MMO = LD->getMemOperand();
13619     // If the MMO suggests this isn't a load of a full vector, leave
13620     // things alone.  For a built-in, we have to make the change for
13621     // correctness, so if there is a size problem that will be a bug.
13622     if (MMO->getSize() < 16)
13623       return SDValue();
13624     break;
13625   }
13626   case ISD::INTRINSIC_W_CHAIN: {
13627     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
13628     Chain = Intrin->getChain();
13629     // Similarly to the store case below, Intrin->getBasePtr() doesn't get
13630     // us what we want. Get operand 2 instead.
13631     Base = Intrin->getOperand(2);
13632     MMO = Intrin->getMemOperand();
13633     break;
13634   }
13635   }
13636 
13637   MVT VecTy = N->getValueType(0).getSimpleVT();
13638 
13639   // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is
13640   // aligned and the type is a vector with elements up to 4 bytes
13641   if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
13642       VecTy.getScalarSizeInBits() <= 32) {
13643     return SDValue();
13644   }
13645 
13646   SDValue LoadOps[] = { Chain, Base };
13647   SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
13648                                          DAG.getVTList(MVT::v2f64, MVT::Other),
13649                                          LoadOps, MVT::v2f64, MMO);
13650 
13651   DCI.AddToWorklist(Load.getNode());
13652   Chain = Load.getValue(1);
13653   SDValue Swap = DAG.getNode(
13654       PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load);
13655   DCI.AddToWorklist(Swap.getNode());
13656 
13657   // Add a bitcast if the resulting load type doesn't match v2f64.
13658   if (VecTy != MVT::v2f64) {
13659     SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap);
13660     DCI.AddToWorklist(N.getNode());
13661     // Package {bitcast value, swap's chain} to match Load's shape.
13662     return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other),
13663                        N, Swap.getValue(1));
13664   }
13665 
13666   return Swap;
13667 }
13668 
13669 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
13670 // builtins) into stores with swaps.
13671 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
13672                                                DAGCombinerInfo &DCI) const {
13673   SelectionDAG &DAG = DCI.DAG;
13674   SDLoc dl(N);
13675   SDValue Chain;
13676   SDValue Base;
13677   unsigned SrcOpnd;
13678   MachineMemOperand *MMO;
13679 
13680   switch (N->getOpcode()) {
13681   default:
13682     llvm_unreachable("Unexpected opcode for little endian VSX store");
13683   case ISD::STORE: {
13684     StoreSDNode *ST = cast<StoreSDNode>(N);
13685     Chain = ST->getChain();
13686     Base = ST->getBasePtr();
13687     MMO = ST->getMemOperand();
13688     SrcOpnd = 1;
13689     // If the MMO suggests this isn't a store of a full vector, leave
13690     // things alone.  For a built-in, we have to make the change for
13691     // correctness, so if there is a size problem that will be a bug.
13692     if (MMO->getSize() < 16)
13693       return SDValue();
13694     break;
13695   }
13696   case ISD::INTRINSIC_VOID: {
13697     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
13698     Chain = Intrin->getChain();
13699     // Intrin->getBasePtr() oddly does not get what we want.
13700     Base = Intrin->getOperand(3);
13701     MMO = Intrin->getMemOperand();
13702     SrcOpnd = 2;
13703     break;
13704   }
13705   }
13706 
13707   SDValue Src = N->getOperand(SrcOpnd);
13708   MVT VecTy = Src.getValueType().getSimpleVT();
13709 
13710   // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the load is
13711   // aligned and the type is a vector with elements up to 4 bytes
13712   if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
13713       VecTy.getScalarSizeInBits() <= 32) {
13714     return SDValue();
13715   }
13716 
13717   // All stores are done as v2f64 and possible bit cast.
13718   if (VecTy != MVT::v2f64) {
13719     Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
13720     DCI.AddToWorklist(Src.getNode());
13721   }
13722 
13723   SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
13724                              DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
13725   DCI.AddToWorklist(Swap.getNode());
13726   Chain = Swap.getValue(1);
13727   SDValue StoreOps[] = { Chain, Swap, Base };
13728   SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
13729                                           DAG.getVTList(MVT::Other),
13730                                           StoreOps, VecTy, MMO);
13731   DCI.AddToWorklist(Store.getNode());
13732   return Store;
13733 }
13734 
13735 // Handle DAG combine for STORE (FP_TO_INT F).
13736 SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
13737                                                DAGCombinerInfo &DCI) const {
13738 
13739   SelectionDAG &DAG = DCI.DAG;
13740   SDLoc dl(N);
13741   unsigned Opcode = N->getOperand(1).getOpcode();
13742 
13743   assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT)
13744          && "Not a FP_TO_INT Instruction!");
13745 
13746   SDValue Val = N->getOperand(1).getOperand(0);
13747   EVT Op1VT = N->getOperand(1).getValueType();
13748   EVT ResVT = Val.getValueType();
13749 
13750   // Floating point types smaller than 32 bits are not legal on Power.
13751   if (ResVT.getScalarSizeInBits() < 32)
13752     return SDValue();
13753 
13754   // Only perform combine for conversion to i64/i32 or power9 i16/i8.
13755   bool ValidTypeForStoreFltAsInt =
13756         (Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
13757          (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));
13758 
13759   if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Vector() ||
13760       cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
13761     return SDValue();
13762 
13763   // Extend f32 values to f64
13764   if (ResVT.getScalarSizeInBits() == 32) {
13765     Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
13766     DCI.AddToWorklist(Val.getNode());
13767   }
13768 
13769   // Set signed or unsigned conversion opcode.
13770   unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT) ?
13771                           PPCISD::FP_TO_SINT_IN_VSR :
13772                           PPCISD::FP_TO_UINT_IN_VSR;
13773 
13774   Val = DAG.getNode(ConvOpcode,
13775                     dl, ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val);
13776   DCI.AddToWorklist(Val.getNode());
13777 
13778   // Set number of bytes being converted.
13779   unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8;
13780   SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2),
13781                     DAG.getIntPtrConstant(ByteSize, dl, false),
13782                     DAG.getValueType(Op1VT) };
13783 
13784   Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl,
13785           DAG.getVTList(MVT::Other), Ops,
13786           cast<StoreSDNode>(N)->getMemoryVT(),
13787           cast<StoreSDNode>(N)->getMemOperand());
13788 
13789   DCI.AddToWorklist(Val.getNode());
13790   return Val;
13791 }
13792 
13793 static bool isAlternatingShuffMask(const ArrayRef<int> &Mask, int NumElts) {
13794   // Check that the source of the element keeps flipping
13795   // (i.e. Mask[i] < NumElts -> Mask[i+i] >= NumElts).
13796   bool PrevElemFromFirstVec = Mask[0] < NumElts;
13797   for (int i = 1, e = Mask.size(); i < e; i++) {
13798     if (PrevElemFromFirstVec && Mask[i] < NumElts)
13799       return false;
13800     if (!PrevElemFromFirstVec && Mask[i] >= NumElts)
13801       return false;
13802     PrevElemFromFirstVec = !PrevElemFromFirstVec;
13803   }
13804   return true;
13805 }
13806 
13807 static bool isSplatBV(SDValue Op) {
13808   if (Op.getOpcode() != ISD::BUILD_VECTOR)
13809     return false;
13810   SDValue FirstOp;
13811 
13812   // Find first non-undef input.
13813   for (int i = 0, e = Op.getNumOperands(); i < e; i++) {
13814     FirstOp = Op.getOperand(i);
13815     if (!FirstOp.isUndef())
13816       break;
13817   }
13818 
13819   // All inputs are undef or the same as the first non-undef input.
13820   for (int i = 1, e = Op.getNumOperands(); i < e; i++)
13821     if (Op.getOperand(i) != FirstOp && !Op.getOperand(i).isUndef())
13822       return false;
13823   return true;
13824 }
13825 
13826 static SDValue isScalarToVec(SDValue Op) {
13827   if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
13828     return Op;
13829   if (Op.getOpcode() != ISD::BITCAST)
13830     return SDValue();
13831   Op = Op.getOperand(0);
13832   if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
13833     return Op;
13834   return SDValue();
13835 }
13836 
13837 static void fixupShuffleMaskForPermutedSToV(SmallVectorImpl<int> &ShuffV,
13838                                             int LHSMaxIdx, int RHSMinIdx,
13839                                             int RHSMaxIdx, int HalfVec) {
13840   for (int i = 0, e = ShuffV.size(); i < e; i++) {
13841     int Idx = ShuffV[i];
13842     if ((Idx >= 0 && Idx < LHSMaxIdx) || (Idx >= RHSMinIdx && Idx < RHSMaxIdx))
13843       ShuffV[i] += HalfVec;
13844   }
13845   return;
13846 }
13847 
13848 // Replace a SCALAR_TO_VECTOR with a SCALAR_TO_VECTOR_PERMUTED except if
13849 // the original is:
13850 // (<n x Ty> (scalar_to_vector (Ty (extract_elt <n x Ty> %a, C))))
13851 // In such a case, just change the shuffle mask to extract the element
13852 // from the permuted index.
13853 static SDValue getSToVPermuted(SDValue OrigSToV, SelectionDAG &DAG) {
13854   SDLoc dl(OrigSToV);
13855   EVT VT = OrigSToV.getValueType();
13856   assert(OrigSToV.getOpcode() == ISD::SCALAR_TO_VECTOR &&
13857          "Expecting a SCALAR_TO_VECTOR here");
13858   SDValue Input = OrigSToV.getOperand(0);
13859 
13860   if (Input.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
13861     ConstantSDNode *Idx = dyn_cast<ConstantSDNode>(Input.getOperand(1));
13862     SDValue OrigVector = Input.getOperand(0);
13863 
13864     // Can't handle non-const element indices or different vector types
13865     // for the input to the extract and the output of the scalar_to_vector.
13866     if (Idx && VT == OrigVector.getValueType()) {
13867       SmallVector<int, 16> NewMask(VT.getVectorNumElements(), -1);
13868       NewMask[VT.getVectorNumElements() / 2] = Idx->getZExtValue();
13869       return DAG.getVectorShuffle(VT, dl, OrigVector, OrigVector, NewMask);
13870     }
13871   }
13872   return DAG.getNode(PPCISD::SCALAR_TO_VECTOR_PERMUTED, dl, VT,
13873                      OrigSToV.getOperand(0));
13874 }
13875 
13876 // On little endian subtargets, combine shuffles such as:
13877 // vector_shuffle<16,1,17,3,18,5,19,7,20,9,21,11,22,13,23,15>, <zero>, %b
13878 // into:
13879 // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7>, <zero>, %b
13880 // because the latter can be matched to a single instruction merge.
13881 // Furthermore, SCALAR_TO_VECTOR on little endian always involves a permute
13882 // to put the value into element zero. Adjust the shuffle mask so that the
13883 // vector can remain in permuted form (to prevent a swap prior to a shuffle).
13884 SDValue PPCTargetLowering::combineVectorShuffle(ShuffleVectorSDNode *SVN,
13885                                                 SelectionDAG &DAG) const {
13886   SDValue LHS = SVN->getOperand(0);
13887   SDValue RHS = SVN->getOperand(1);
13888   auto Mask = SVN->getMask();
13889   int NumElts = LHS.getValueType().getVectorNumElements();
13890   SDValue Res(SVN, 0);
13891   SDLoc dl(SVN);
13892 
13893   // None of these combines are useful on big endian systems since the ISA
13894   // already has a big endian bias.
13895   if (!Subtarget.isLittleEndian() || !Subtarget.hasVSX())
13896     return Res;
13897 
13898   // If this is not a shuffle of a shuffle and the first element comes from
13899   // the second vector, canonicalize to the commuted form. This will make it
13900   // more likely to match one of the single instruction patterns.
13901   if (Mask[0] >= NumElts && LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
13902       RHS.getOpcode() != ISD::VECTOR_SHUFFLE) {
13903     std::swap(LHS, RHS);
13904     Res = DAG.getCommutedVectorShuffle(*SVN);
13905     Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
13906   }
13907 
13908   // Adjust the shuffle mask if either input vector comes from a
13909   // SCALAR_TO_VECTOR and keep the respective input vector in permuted
13910   // form (to prevent the need for a swap).
13911   SmallVector<int, 16> ShuffV(Mask.begin(), Mask.end());
13912   SDValue SToVLHS = isScalarToVec(LHS);
13913   SDValue SToVRHS = isScalarToVec(RHS);
13914   if (SToVLHS || SToVRHS) {
13915     int NumEltsIn = SToVLHS ? SToVLHS.getValueType().getVectorNumElements()
13916                             : SToVRHS.getValueType().getVectorNumElements();
13917     int NumEltsOut = ShuffV.size();
13918 
13919     // Initially assume that neither input is permuted. These will be adjusted
13920     // accordingly if either input is.
13921     int LHSMaxIdx = -1;
13922     int RHSMinIdx = -1;
13923     int RHSMaxIdx = -1;
13924     int HalfVec = LHS.getValueType().getVectorNumElements() / 2;
13925 
13926     // Get the permuted scalar to vector nodes for the source(s) that come from
13927     // ISD::SCALAR_TO_VECTOR.
13928     if (SToVLHS) {
13929       // Set up the values for the shuffle vector fixup.
13930       LHSMaxIdx = NumEltsOut / NumEltsIn;
13931       SToVLHS = getSToVPermuted(SToVLHS, DAG);
13932       if (SToVLHS.getValueType() != LHS.getValueType())
13933         SToVLHS = DAG.getBitcast(LHS.getValueType(), SToVLHS);
13934       LHS = SToVLHS;
13935     }
13936     if (SToVRHS) {
13937       RHSMinIdx = NumEltsOut;
13938       RHSMaxIdx = NumEltsOut / NumEltsIn + RHSMinIdx;
13939       SToVRHS = getSToVPermuted(SToVRHS, DAG);
13940       if (SToVRHS.getValueType() != RHS.getValueType())
13941         SToVRHS = DAG.getBitcast(RHS.getValueType(), SToVRHS);
13942       RHS = SToVRHS;
13943     }
13944 
13945     // Fix up the shuffle mask to reflect where the desired element actually is.
13946     // The minimum and maximum indices that correspond to element zero for both
13947     // the LHS and RHS are computed and will control which shuffle mask entries
13948     // are to be changed. For example, if the RHS is permuted, any shuffle mask
13949     // entries in the range [RHSMinIdx,RHSMaxIdx) will be incremented by
13950     // HalfVec to refer to the corresponding element in the permuted vector.
13951     fixupShuffleMaskForPermutedSToV(ShuffV, LHSMaxIdx, RHSMinIdx, RHSMaxIdx,
13952                                     HalfVec);
13953     Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
13954 
13955     // We may have simplified away the shuffle. We won't be able to do anything
13956     // further with it here.
13957     if (!isa<ShuffleVectorSDNode>(Res))
13958       return Res;
13959     Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
13960   }
13961 
13962   // The common case after we commuted the shuffle is that the RHS is a splat
13963   // and we have elements coming in from the splat at indices that are not
13964   // conducive to using a merge.
13965   // Example:
13966   // vector_shuffle<0,17,1,19,2,21,3,23,4,25,5,27,6,29,7,31> t1, <zero>
13967   if (!isSplatBV(RHS))
13968     return Res;
13969 
13970   // We are looking for a mask such that all even elements are from
13971   // one vector and all odd elements from the other.
13972   if (!isAlternatingShuffMask(Mask, NumElts))
13973     return Res;
13974 
13975   // Adjust the mask so we are pulling in the same index from the splat
13976   // as the index from the interesting vector in consecutive elements.
13977   // Example (even elements from first vector):
13978   // vector_shuffle<0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23> t1, <zero>
13979   if (Mask[0] < NumElts)
13980     for (int i = 1, e = Mask.size(); i < e; i += 2)
13981       ShuffV[i] = (ShuffV[i - 1] + NumElts);
13982   // Example (odd elements from first vector):
13983   // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7> t1, <zero>
13984   else
13985     for (int i = 0, e = Mask.size(); i < e; i += 2)
13986       ShuffV[i] = (ShuffV[i + 1] + NumElts);
13987 
13988   // If the RHS has undefs, we need to remove them since we may have created
13989   // a shuffle that adds those instead of the splat value.
13990   SDValue SplatVal = cast<BuildVectorSDNode>(RHS.getNode())->getSplatValue();
13991   RHS = DAG.getSplatBuildVector(RHS.getValueType(), dl, SplatVal);
13992 
13993   Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
13994   return Res;
13995 }
13996 
13997 SDValue PPCTargetLowering::combineVReverseMemOP(ShuffleVectorSDNode *SVN,
13998                                                 LSBaseSDNode *LSBase,
13999                                                 DAGCombinerInfo &DCI) const {
14000   assert((ISD::isNormalLoad(LSBase) || ISD::isNormalStore(LSBase)) &&
14001         "Not a reverse memop pattern!");
14002 
14003   auto IsElementReverse = [](const ShuffleVectorSDNode *SVN) -> bool {
14004     auto Mask = SVN->getMask();
14005     int i = 0;
14006     auto I = Mask.rbegin();
14007     auto E = Mask.rend();
14008 
14009     for (; I != E; ++I) {
14010       if (*I != i)
14011         return false;
14012       i++;
14013     }
14014     return true;
14015   };
14016 
14017   SelectionDAG &DAG = DCI.DAG;
14018   EVT VT = SVN->getValueType(0);
14019 
14020   if (!isTypeLegal(VT) || !Subtarget.isLittleEndian() || !Subtarget.hasVSX())
14021     return SDValue();
14022 
14023   // Before P9, we have PPCVSXSwapRemoval pass to hack the element order.
14024   // See comment in PPCVSXSwapRemoval.cpp.
14025   // It is conflict with PPCVSXSwapRemoval opt. So we don't do it.
14026   if (!Subtarget.hasP9Vector())
14027     return SDValue();
14028 
14029   if(!IsElementReverse(SVN))
14030     return SDValue();
14031 
14032   if (LSBase->getOpcode() == ISD::LOAD) {
14033     SDLoc dl(SVN);
14034     SDValue LoadOps[] = {LSBase->getChain(), LSBase->getBasePtr()};
14035     return DAG.getMemIntrinsicNode(
14036         PPCISD::LOAD_VEC_BE, dl, DAG.getVTList(VT, MVT::Other), LoadOps,
14037         LSBase->getMemoryVT(), LSBase->getMemOperand());
14038   }
14039 
14040   if (LSBase->getOpcode() == ISD::STORE) {
14041     SDLoc dl(LSBase);
14042     SDValue StoreOps[] = {LSBase->getChain(), SVN->getOperand(0),
14043                           LSBase->getBasePtr()};
14044     return DAG.getMemIntrinsicNode(
14045         PPCISD::STORE_VEC_BE, dl, DAG.getVTList(MVT::Other), StoreOps,
14046         LSBase->getMemoryVT(), LSBase->getMemOperand());
14047   }
14048 
14049   llvm_unreachable("Expected a load or store node here");
14050 }
14051 
14052 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
14053                                              DAGCombinerInfo &DCI) const {
14054   SelectionDAG &DAG = DCI.DAG;
14055   SDLoc dl(N);
14056   switch (N->getOpcode()) {
14057   default: break;
14058   case ISD::ADD:
14059     return combineADD(N, DCI);
14060   case ISD::SHL:
14061     return combineSHL(N, DCI);
14062   case ISD::SRA:
14063     return combineSRA(N, DCI);
14064   case ISD::SRL:
14065     return combineSRL(N, DCI);
14066   case ISD::MUL:
14067     return combineMUL(N, DCI);
14068   case ISD::FMA:
14069   case PPCISD::FNMSUB:
14070     return combineFMALike(N, DCI);
14071   case PPCISD::SHL:
14072     if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
14073         return N->getOperand(0);
14074     break;
14075   case PPCISD::SRL:
14076     if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
14077         return N->getOperand(0);
14078     break;
14079   case PPCISD::SRA:
14080     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
14081       if (C->isNullValue() ||   //  0 >>s V -> 0.
14082           C->isAllOnesValue())    // -1 >>s V -> -1.
14083         return N->getOperand(0);
14084     }
14085     break;
14086   case ISD::SIGN_EXTEND:
14087   case ISD::ZERO_EXTEND:
14088   case ISD::ANY_EXTEND:
14089     return DAGCombineExtBoolTrunc(N, DCI);
14090   case ISD::TRUNCATE:
14091     return combineTRUNCATE(N, DCI);
14092   case ISD::SETCC:
14093     if (SDValue CSCC = combineSetCC(N, DCI))
14094       return CSCC;
14095     LLVM_FALLTHROUGH;
14096   case ISD::SELECT_CC:
14097     return DAGCombineTruncBoolExt(N, DCI);
14098   case ISD::SINT_TO_FP:
14099   case ISD::UINT_TO_FP:
14100     return combineFPToIntToFP(N, DCI);
14101   case ISD::VECTOR_SHUFFLE:
14102     if (ISD::isNormalLoad(N->getOperand(0).getNode())) {
14103       LSBaseSDNode* LSBase = cast<LSBaseSDNode>(N->getOperand(0));
14104       return combineVReverseMemOP(cast<ShuffleVectorSDNode>(N), LSBase, DCI);
14105     }
14106     return combineVectorShuffle(cast<ShuffleVectorSDNode>(N), DCI.DAG);
14107   case ISD::STORE: {
14108 
14109     EVT Op1VT = N->getOperand(1).getValueType();
14110     unsigned Opcode = N->getOperand(1).getOpcode();
14111 
14112     if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) {
14113       SDValue Val= combineStoreFPToInt(N, DCI);
14114       if (Val)
14115         return Val;
14116     }
14117 
14118     if (Opcode == ISD::VECTOR_SHUFFLE && ISD::isNormalStore(N)) {
14119       ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N->getOperand(1));
14120       SDValue Val= combineVReverseMemOP(SVN, cast<LSBaseSDNode>(N), DCI);
14121       if (Val)
14122         return Val;
14123     }
14124 
14125     // Turn STORE (BSWAP) -> sthbrx/stwbrx.
14126     if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&
14127         N->getOperand(1).getNode()->hasOneUse() &&
14128         (Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
14129          (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {
14130 
14131       // STBRX can only handle simple types and it makes no sense to store less
14132       // two bytes in byte-reversed order.
14133       EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
14134       if (mVT.isExtended() || mVT.getSizeInBits() < 16)
14135         break;
14136 
14137       SDValue BSwapOp = N->getOperand(1).getOperand(0);
14138       // Do an any-extend to 32-bits if this is a half-word input.
14139       if (BSwapOp.getValueType() == MVT::i16)
14140         BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
14141 
14142       // If the type of BSWAP operand is wider than stored memory width
14143       // it need to be shifted to the right side before STBRX.
14144       if (Op1VT.bitsGT(mVT)) {
14145         int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
14146         BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
14147                               DAG.getConstant(Shift, dl, MVT::i32));
14148         // Need to truncate if this is a bswap of i64 stored as i32/i16.
14149         if (Op1VT == MVT::i64)
14150           BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
14151       }
14152 
14153       SDValue Ops[] = {
14154         N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
14155       };
14156       return
14157         DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
14158                                 Ops, cast<StoreSDNode>(N)->getMemoryVT(),
14159                                 cast<StoreSDNode>(N)->getMemOperand());
14160     }
14161 
14162     // STORE Constant:i32<0>  ->  STORE<trunc to i32> Constant:i64<0>
14163     // So it can increase the chance of CSE constant construction.
14164     if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
14165         isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
14166       // Need to sign-extended to 64-bits to handle negative values.
14167       EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
14168       uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
14169                                     MemVT.getSizeInBits());
14170       SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);
14171 
14172       // DAG.getTruncStore() can't be used here because it doesn't accept
14173       // the general (base + offset) addressing mode.
14174       // So we use UpdateNodeOperands and setTruncatingStore instead.
14175       DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
14176                              N->getOperand(3));
14177       cast<StoreSDNode>(N)->setTruncatingStore(true);
14178       return SDValue(N, 0);
14179     }
14180 
14181     // For little endian, VSX stores require generating xxswapd/lxvd2x.
14182     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
14183     if (Op1VT.isSimple()) {
14184       MVT StoreVT = Op1VT.getSimpleVT();
14185       if (Subtarget.needsSwapsForVSXMemOps() &&
14186           (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
14187            StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
14188         return expandVSXStoreForLE(N, DCI);
14189     }
14190     break;
14191   }
14192   case ISD::LOAD: {
14193     LoadSDNode *LD = cast<LoadSDNode>(N);
14194     EVT VT = LD->getValueType(0);
14195 
14196     // For little endian, VSX loads require generating lxvd2x/xxswapd.
14197     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
14198     if (VT.isSimple()) {
14199       MVT LoadVT = VT.getSimpleVT();
14200       if (Subtarget.needsSwapsForVSXMemOps() &&
14201           (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
14202            LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
14203         return expandVSXLoadForLE(N, DCI);
14204     }
14205 
14206     // We sometimes end up with a 64-bit integer load, from which we extract
14207     // two single-precision floating-point numbers. This happens with
14208     // std::complex<float>, and other similar structures, because of the way we
14209     // canonicalize structure copies. However, if we lack direct moves,
14210     // then the final bitcasts from the extracted integer values to the
14211     // floating-point numbers turn into store/load pairs. Even with direct moves,
14212     // just loading the two floating-point numbers is likely better.
14213     auto ReplaceTwoFloatLoad = [&]() {
14214       if (VT != MVT::i64)
14215         return false;
14216 
14217       if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
14218           LD->isVolatile())
14219         return false;
14220 
14221       //  We're looking for a sequence like this:
14222       //  t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64
14223       //      t16: i64 = srl t13, Constant:i32<32>
14224       //    t17: i32 = truncate t16
14225       //  t18: f32 = bitcast t17
14226       //    t19: i32 = truncate t13
14227       //  t20: f32 = bitcast t19
14228 
14229       if (!LD->hasNUsesOfValue(2, 0))
14230         return false;
14231 
14232       auto UI = LD->use_begin();
14233       while (UI.getUse().getResNo() != 0) ++UI;
14234       SDNode *Trunc = *UI++;
14235       while (UI.getUse().getResNo() != 0) ++UI;
14236       SDNode *RightShift = *UI;
14237       if (Trunc->getOpcode() != ISD::TRUNCATE)
14238         std::swap(Trunc, RightShift);
14239 
14240       if (Trunc->getOpcode() != ISD::TRUNCATE ||
14241           Trunc->getValueType(0) != MVT::i32 ||
14242           !Trunc->hasOneUse())
14243         return false;
14244       if (RightShift->getOpcode() != ISD::SRL ||
14245           !isa<ConstantSDNode>(RightShift->getOperand(1)) ||
14246           RightShift->getConstantOperandVal(1) != 32 ||
14247           !RightShift->hasOneUse())
14248         return false;
14249 
14250       SDNode *Trunc2 = *RightShift->use_begin();
14251       if (Trunc2->getOpcode() != ISD::TRUNCATE ||
14252           Trunc2->getValueType(0) != MVT::i32 ||
14253           !Trunc2->hasOneUse())
14254         return false;
14255 
14256       SDNode *Bitcast = *Trunc->use_begin();
14257       SDNode *Bitcast2 = *Trunc2->use_begin();
14258 
14259       if (Bitcast->getOpcode() != ISD::BITCAST ||
14260           Bitcast->getValueType(0) != MVT::f32)
14261         return false;
14262       if (Bitcast2->getOpcode() != ISD::BITCAST ||
14263           Bitcast2->getValueType(0) != MVT::f32)
14264         return false;
14265 
14266       if (Subtarget.isLittleEndian())
14267         std::swap(Bitcast, Bitcast2);
14268 
14269       // Bitcast has the second float (in memory-layout order) and Bitcast2
14270       // has the first one.
14271 
14272       SDValue BasePtr = LD->getBasePtr();
14273       if (LD->isIndexed()) {
14274         assert(LD->getAddressingMode() == ISD::PRE_INC &&
14275                "Non-pre-inc AM on PPC?");
14276         BasePtr =
14277           DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
14278                       LD->getOffset());
14279       }
14280 
14281       auto MMOFlags =
14282           LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
14283       SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
14284                                       LD->getPointerInfo(), LD->getAlignment(),
14285                                       MMOFlags, LD->getAAInfo());
14286       SDValue AddPtr =
14287         DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
14288                     BasePtr, DAG.getIntPtrConstant(4, dl));
14289       SDValue FloatLoad2 = DAG.getLoad(
14290           MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
14291           LD->getPointerInfo().getWithOffset(4),
14292           MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo());
14293 
14294       if (LD->isIndexed()) {
14295         // Note that DAGCombine should re-form any pre-increment load(s) from
14296         // what is produced here if that makes sense.
14297         DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr);
14298       }
14299 
14300       DCI.CombineTo(Bitcast2, FloatLoad);
14301       DCI.CombineTo(Bitcast, FloatLoad2);
14302 
14303       DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 2 : 1),
14304                                     SDValue(FloatLoad2.getNode(), 1));
14305       return true;
14306     };
14307 
14308     if (ReplaceTwoFloatLoad())
14309       return SDValue(N, 0);
14310 
14311     EVT MemVT = LD->getMemoryVT();
14312     Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
14313     Align ABIAlignment = DAG.getDataLayout().getABITypeAlign(Ty);
14314     if (LD->isUnindexed() && VT.isVector() &&
14315         ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
14316           // P8 and later hardware should just use LOAD.
14317           !Subtarget.hasP8Vector() &&
14318           (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
14319            VT == MVT::v4f32))) &&
14320         LD->getAlign() < ABIAlignment) {
14321       // This is a type-legal unaligned Altivec load.
14322       SDValue Chain = LD->getChain();
14323       SDValue Ptr = LD->getBasePtr();
14324       bool isLittleEndian = Subtarget.isLittleEndian();
14325 
14326       // This implements the loading of unaligned vectors as described in
14327       // the venerable Apple Velocity Engine overview. Specifically:
14328       // https://developer.apple.com/hardwaredrivers/ve/alignment.html
14329       // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
14330       //
14331       // The general idea is to expand a sequence of one or more unaligned
14332       // loads into an alignment-based permutation-control instruction (lvsl
14333       // or lvsr), a series of regular vector loads (which always truncate
14334       // their input address to an aligned address), and a series of
14335       // permutations.  The results of these permutations are the requested
14336       // loaded values.  The trick is that the last "extra" load is not taken
14337       // from the address you might suspect (sizeof(vector) bytes after the
14338       // last requested load), but rather sizeof(vector) - 1 bytes after the
14339       // last requested vector. The point of this is to avoid a page fault if
14340       // the base address happened to be aligned. This works because if the
14341       // base address is aligned, then adding less than a full vector length
14342       // will cause the last vector in the sequence to be (re)loaded.
14343       // Otherwise, the next vector will be fetched as you might suspect was
14344       // necessary.
14345 
14346       // We might be able to reuse the permutation generation from
14347       // a different base address offset from this one by an aligned amount.
14348       // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
14349       // optimization later.
14350       Intrinsic::ID Intr, IntrLD, IntrPerm;
14351       MVT PermCntlTy, PermTy, LDTy;
14352       Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr
14353                             : Intrinsic::ppc_altivec_lvsl;
14354       IntrLD = Intrinsic::ppc_altivec_lvx;
14355       IntrPerm = Intrinsic::ppc_altivec_vperm;
14356       PermCntlTy = MVT::v16i8;
14357       PermTy = MVT::v4i32;
14358       LDTy = MVT::v4i32;
14359 
14360       SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);
14361 
14362       // Create the new MMO for the new base load. It is like the original MMO,
14363       // but represents an area in memory almost twice the vector size centered
14364       // on the original address. If the address is unaligned, we might start
14365       // reading up to (sizeof(vector)-1) bytes below the address of the
14366       // original unaligned load.
14367       MachineFunction &MF = DAG.getMachineFunction();
14368       MachineMemOperand *BaseMMO =
14369         MF.getMachineMemOperand(LD->getMemOperand(),
14370                                 -(long)MemVT.getStoreSize()+1,
14371                                 2*MemVT.getStoreSize()-1);
14372 
14373       // Create the new base load.
14374       SDValue LDXIntID =
14375           DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout()));
14376       SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
14377       SDValue BaseLoad =
14378         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
14379                                 DAG.getVTList(PermTy, MVT::Other),
14380                                 BaseLoadOps, LDTy, BaseMMO);
14381 
14382       // Note that the value of IncOffset (which is provided to the next
14383       // load's pointer info offset value, and thus used to calculate the
14384       // alignment), and the value of IncValue (which is actually used to
14385       // increment the pointer value) are different! This is because we
14386       // require the next load to appear to be aligned, even though it
14387       // is actually offset from the base pointer by a lesser amount.
14388       int IncOffset = VT.getSizeInBits() / 8;
14389       int IncValue = IncOffset;
14390 
14391       // Walk (both up and down) the chain looking for another load at the real
14392       // (aligned) offset (the alignment of the other load does not matter in
14393       // this case). If found, then do not use the offset reduction trick, as
14394       // that will prevent the loads from being later combined (as they would
14395       // otherwise be duplicates).
14396       if (!findConsecutiveLoad(LD, DAG))
14397         --IncValue;
14398 
14399       SDValue Increment =
14400           DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout()));
14401       Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
14402 
14403       MachineMemOperand *ExtraMMO =
14404         MF.getMachineMemOperand(LD->getMemOperand(),
14405                                 1, 2*MemVT.getStoreSize()-1);
14406       SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
14407       SDValue ExtraLoad =
14408         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
14409                                 DAG.getVTList(PermTy, MVT::Other),
14410                                 ExtraLoadOps, LDTy, ExtraMMO);
14411 
14412       SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
14413         BaseLoad.getValue(1), ExtraLoad.getValue(1));
14414 
14415       // Because vperm has a big-endian bias, we must reverse the order
14416       // of the input vectors and complement the permute control vector
14417       // when generating little endian code.  We have already handled the
14418       // latter by using lvsr instead of lvsl, so just reverse BaseLoad
14419       // and ExtraLoad here.
14420       SDValue Perm;
14421       if (isLittleEndian)
14422         Perm = BuildIntrinsicOp(IntrPerm,
14423                                 ExtraLoad, BaseLoad, PermCntl, DAG, dl);
14424       else
14425         Perm = BuildIntrinsicOp(IntrPerm,
14426                                 BaseLoad, ExtraLoad, PermCntl, DAG, dl);
14427 
14428       if (VT != PermTy)
14429         Perm = Subtarget.hasAltivec()
14430                    ? DAG.getNode(ISD::BITCAST, dl, VT, Perm)
14431                    : DAG.getNode(ISD::FP_ROUND, dl, VT, Perm,
14432                                  DAG.getTargetConstant(1, dl, MVT::i64));
14433                                // second argument is 1 because this rounding
14434                                // is always exact.
14435 
14436       // The output of the permutation is our loaded result, the TokenFactor is
14437       // our new chain.
14438       DCI.CombineTo(N, Perm, TF);
14439       return SDValue(N, 0);
14440     }
14441     }
14442     break;
14443     case ISD::INTRINSIC_WO_CHAIN: {
14444       bool isLittleEndian = Subtarget.isLittleEndian();
14445       unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
14446       Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
14447                                            : Intrinsic::ppc_altivec_lvsl);
14448       if (IID == Intr && N->getOperand(1)->getOpcode() == ISD::ADD) {
14449         SDValue Add = N->getOperand(1);
14450 
14451         int Bits = 4 /* 16 byte alignment */;
14452 
14453         if (DAG.MaskedValueIsZero(Add->getOperand(1),
14454                                   APInt::getAllOnesValue(Bits /* alignment */)
14455                                       .zext(Add.getScalarValueSizeInBits()))) {
14456           SDNode *BasePtr = Add->getOperand(0).getNode();
14457           for (SDNode::use_iterator UI = BasePtr->use_begin(),
14458                                     UE = BasePtr->use_end();
14459                UI != UE; ++UI) {
14460             if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
14461                 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() ==
14462                     IID) {
14463               // We've found another LVSL/LVSR, and this address is an aligned
14464               // multiple of that one. The results will be the same, so use the
14465               // one we've just found instead.
14466 
14467               return SDValue(*UI, 0);
14468             }
14469           }
14470         }
14471 
14472         if (isa<ConstantSDNode>(Add->getOperand(1))) {
14473           SDNode *BasePtr = Add->getOperand(0).getNode();
14474           for (SDNode::use_iterator UI = BasePtr->use_begin(),
14475                UE = BasePtr->use_end(); UI != UE; ++UI) {
14476             if (UI->getOpcode() == ISD::ADD &&
14477                 isa<ConstantSDNode>(UI->getOperand(1)) &&
14478                 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
14479                  cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
14480                 (1ULL << Bits) == 0) {
14481               SDNode *OtherAdd = *UI;
14482               for (SDNode::use_iterator VI = OtherAdd->use_begin(),
14483                    VE = OtherAdd->use_end(); VI != VE; ++VI) {
14484                 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
14485                     cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) {
14486                   return SDValue(*VI, 0);
14487                 }
14488               }
14489             }
14490           }
14491         }
14492       }
14493 
14494       // Combine vmaxsw/h/b(a, a's negation) to abs(a)
14495       // Expose the vabsduw/h/b opportunity for down stream
14496       if (!DCI.isAfterLegalizeDAG() && Subtarget.hasP9Altivec() &&
14497           (IID == Intrinsic::ppc_altivec_vmaxsw ||
14498            IID == Intrinsic::ppc_altivec_vmaxsh ||
14499            IID == Intrinsic::ppc_altivec_vmaxsb)) {
14500         SDValue V1 = N->getOperand(1);
14501         SDValue V2 = N->getOperand(2);
14502         if ((V1.getSimpleValueType() == MVT::v4i32 ||
14503              V1.getSimpleValueType() == MVT::v8i16 ||
14504              V1.getSimpleValueType() == MVT::v16i8) &&
14505             V1.getSimpleValueType() == V2.getSimpleValueType()) {
14506           // (0-a, a)
14507           if (V1.getOpcode() == ISD::SUB &&
14508               ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
14509               V1.getOperand(1) == V2) {
14510             return DAG.getNode(ISD::ABS, dl, V2.getValueType(), V2);
14511           }
14512           // (a, 0-a)
14513           if (V2.getOpcode() == ISD::SUB &&
14514               ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
14515               V2.getOperand(1) == V1) {
14516             return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
14517           }
14518           // (x-y, y-x)
14519           if (V1.getOpcode() == ISD::SUB && V2.getOpcode() == ISD::SUB &&
14520               V1.getOperand(0) == V2.getOperand(1) &&
14521               V1.getOperand(1) == V2.getOperand(0)) {
14522             return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
14523           }
14524         }
14525       }
14526     }
14527 
14528     break;
14529   case ISD::INTRINSIC_W_CHAIN:
14530     // For little endian, VSX loads require generating lxvd2x/xxswapd.
14531     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
14532     if (Subtarget.needsSwapsForVSXMemOps()) {
14533       switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
14534       default:
14535         break;
14536       case Intrinsic::ppc_vsx_lxvw4x:
14537       case Intrinsic::ppc_vsx_lxvd2x:
14538         return expandVSXLoadForLE(N, DCI);
14539       }
14540     }
14541     break;
14542   case ISD::INTRINSIC_VOID:
14543     // For little endian, VSX stores require generating xxswapd/stxvd2x.
14544     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
14545     if (Subtarget.needsSwapsForVSXMemOps()) {
14546       switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
14547       default:
14548         break;
14549       case Intrinsic::ppc_vsx_stxvw4x:
14550       case Intrinsic::ppc_vsx_stxvd2x:
14551         return expandVSXStoreForLE(N, DCI);
14552       }
14553     }
14554     break;
14555   case ISD::BSWAP:
14556     // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
14557     if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
14558         N->getOperand(0).hasOneUse() &&
14559         (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
14560          (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
14561           N->getValueType(0) == MVT::i64))) {
14562       SDValue Load = N->getOperand(0);
14563       LoadSDNode *LD = cast<LoadSDNode>(Load);
14564       // Create the byte-swapping load.
14565       SDValue Ops[] = {
14566         LD->getChain(),    // Chain
14567         LD->getBasePtr(),  // Ptr
14568         DAG.getValueType(N->getValueType(0)) // VT
14569       };
14570       SDValue BSLoad =
14571         DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
14572                                 DAG.getVTList(N->getValueType(0) == MVT::i64 ?
14573                                               MVT::i64 : MVT::i32, MVT::Other),
14574                                 Ops, LD->getMemoryVT(), LD->getMemOperand());
14575 
14576       // If this is an i16 load, insert the truncate.
14577       SDValue ResVal = BSLoad;
14578       if (N->getValueType(0) == MVT::i16)
14579         ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
14580 
14581       // First, combine the bswap away.  This makes the value produced by the
14582       // load dead.
14583       DCI.CombineTo(N, ResVal);
14584 
14585       // Next, combine the load away, we give it a bogus result value but a real
14586       // chain result.  The result value is dead because the bswap is dead.
14587       DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
14588 
14589       // Return N so it doesn't get rechecked!
14590       return SDValue(N, 0);
14591     }
14592     break;
14593   case PPCISD::VCMP:
14594     // If a VCMPo node already exists with exactly the same operands as this
14595     // node, use its result instead of this node (VCMPo computes both a CR6 and
14596     // a normal output).
14597     //
14598     if (!N->getOperand(0).hasOneUse() &&
14599         !N->getOperand(1).hasOneUse() &&
14600         !N->getOperand(2).hasOneUse()) {
14601 
14602       // Scan all of the users of the LHS, looking for VCMPo's that match.
14603       SDNode *VCMPoNode = nullptr;
14604 
14605       SDNode *LHSN = N->getOperand(0).getNode();
14606       for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
14607            UI != E; ++UI)
14608         if (UI->getOpcode() == PPCISD::VCMPo &&
14609             UI->getOperand(1) == N->getOperand(1) &&
14610             UI->getOperand(2) == N->getOperand(2) &&
14611             UI->getOperand(0) == N->getOperand(0)) {
14612           VCMPoNode = *UI;
14613           break;
14614         }
14615 
14616       // If there is no VCMPo node, or if the flag value has a single use, don't
14617       // transform this.
14618       if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
14619         break;
14620 
14621       // Look at the (necessarily single) use of the flag value.  If it has a
14622       // chain, this transformation is more complex.  Note that multiple things
14623       // could use the value result, which we should ignore.
14624       SDNode *FlagUser = nullptr;
14625       for (SDNode::use_iterator UI = VCMPoNode->use_begin();
14626            FlagUser == nullptr; ++UI) {
14627         assert(UI != VCMPoNode->use_end() && "Didn't find user!");
14628         SDNode *User = *UI;
14629         for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
14630           if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {
14631             FlagUser = User;
14632             break;
14633           }
14634         }
14635       }
14636 
14637       // If the user is a MFOCRF instruction, we know this is safe.
14638       // Otherwise we give up for right now.
14639       if (FlagUser->getOpcode() == PPCISD::MFOCRF)
14640         return SDValue(VCMPoNode, 0);
14641     }
14642     break;
14643   case ISD::BRCOND: {
14644     SDValue Cond = N->getOperand(1);
14645     SDValue Target = N->getOperand(2);
14646 
14647     if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
14648         cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
14649           Intrinsic::loop_decrement) {
14650 
14651       // We now need to make the intrinsic dead (it cannot be instruction
14652       // selected).
14653       DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0));
14654       assert(Cond.getNode()->hasOneUse() &&
14655              "Counter decrement has more than one use");
14656 
14657       return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other,
14658                          N->getOperand(0), Target);
14659     }
14660   }
14661   break;
14662   case ISD::BR_CC: {
14663     // If this is a branch on an altivec predicate comparison, lower this so
14664     // that we don't have to do a MFOCRF: instead, branch directly on CR6.  This
14665     // lowering is done pre-legalize, because the legalizer lowers the predicate
14666     // compare down to code that is difficult to reassemble.
14667     ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
14668     SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);
14669 
14670     // Sometimes the promoted value of the intrinsic is ANDed by some non-zero
14671     // value. If so, pass-through the AND to get to the intrinsic.
14672     if (LHS.getOpcode() == ISD::AND &&
14673         LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
14674         cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() ==
14675           Intrinsic::loop_decrement &&
14676         isa<ConstantSDNode>(LHS.getOperand(1)) &&
14677         !isNullConstant(LHS.getOperand(1)))
14678       LHS = LHS.getOperand(0);
14679 
14680     if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
14681         cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
14682           Intrinsic::loop_decrement &&
14683         isa<ConstantSDNode>(RHS)) {
14684       assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
14685              "Counter decrement comparison is not EQ or NE");
14686 
14687       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
14688       bool isBDNZ = (CC == ISD::SETEQ && Val) ||
14689                     (CC == ISD::SETNE && !Val);
14690 
14691       // We now need to make the intrinsic dead (it cannot be instruction
14692       // selected).
14693       DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
14694       assert(LHS.getNode()->hasOneUse() &&
14695              "Counter decrement has more than one use");
14696 
14697       return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
14698                          N->getOperand(0), N->getOperand(4));
14699     }
14700 
14701     int CompareOpc;
14702     bool isDot;
14703 
14704     if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
14705         isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
14706         getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
14707       assert(isDot && "Can't compare against a vector result!");
14708 
14709       // If this is a comparison against something other than 0/1, then we know
14710       // that the condition is never/always true.
14711       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
14712       if (Val != 0 && Val != 1) {
14713         if (CC == ISD::SETEQ)      // Cond never true, remove branch.
14714           return N->getOperand(0);
14715         // Always !=, turn it into an unconditional branch.
14716         return DAG.getNode(ISD::BR, dl, MVT::Other,
14717                            N->getOperand(0), N->getOperand(4));
14718       }
14719 
14720       bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
14721 
14722       // Create the PPCISD altivec 'dot' comparison node.
14723       SDValue Ops[] = {
14724         LHS.getOperand(2),  // LHS of compare
14725         LHS.getOperand(3),  // RHS of compare
14726         DAG.getConstant(CompareOpc, dl, MVT::i32)
14727       };
14728       EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
14729       SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);
14730 
14731       // Unpack the result based on how the target uses it.
14732       PPC::Predicate CompOpc;
14733       switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
14734       default:  // Can't happen, don't crash on invalid number though.
14735       case 0:   // Branch on the value of the EQ bit of CR6.
14736         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
14737         break;
14738       case 1:   // Branch on the inverted value of the EQ bit of CR6.
14739         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
14740         break;
14741       case 2:   // Branch on the value of the LT bit of CR6.
14742         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
14743         break;
14744       case 3:   // Branch on the inverted value of the LT bit of CR6.
14745         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
14746         break;
14747       }
14748 
14749       return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
14750                          DAG.getConstant(CompOpc, dl, MVT::i32),
14751                          DAG.getRegister(PPC::CR6, MVT::i32),
14752                          N->getOperand(4), CompNode.getValue(1));
14753     }
14754     break;
14755   }
14756   case ISD::BUILD_VECTOR:
14757     return DAGCombineBuildVector(N, DCI);
14758   case ISD::ABS:
14759     return combineABS(N, DCI);
14760   case ISD::VSELECT:
14761     return combineVSelect(N, DCI);
14762   }
14763 
14764   return SDValue();
14765 }
14766 
14767 SDValue
14768 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
14769                                  SelectionDAG &DAG,
14770                                  SmallVectorImpl<SDNode *> &Created) const {
14771   // fold (sdiv X, pow2)
14772   EVT VT = N->getValueType(0);
14773   if (VT == MVT::i64 && !Subtarget.isPPC64())
14774     return SDValue();
14775   if ((VT != MVT::i32 && VT != MVT::i64) ||
14776       !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
14777     return SDValue();
14778 
14779   SDLoc DL(N);
14780   SDValue N0 = N->getOperand(0);
14781 
14782   bool IsNegPow2 = (-Divisor).isPowerOf2();
14783   unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
14784   SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);
14785 
14786   SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
14787   Created.push_back(Op.getNode());
14788 
14789   if (IsNegPow2) {
14790     Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
14791     Created.push_back(Op.getNode());
14792   }
14793 
14794   return Op;
14795 }
14796 
14797 //===----------------------------------------------------------------------===//
14798 // Inline Assembly Support
14799 //===----------------------------------------------------------------------===//
14800 
14801 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
14802                                                       KnownBits &Known,
14803                                                       const APInt &DemandedElts,
14804                                                       const SelectionDAG &DAG,
14805                                                       unsigned Depth) const {
14806   Known.resetAll();
14807   switch (Op.getOpcode()) {
14808   default: break;
14809   case PPCISD::LBRX: {
14810     // lhbrx is known to have the top bits cleared out.
14811     if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
14812       Known.Zero = 0xFFFF0000;
14813     break;
14814   }
14815   case ISD::INTRINSIC_WO_CHAIN: {
14816     switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
14817     default: break;
14818     case Intrinsic::ppc_altivec_vcmpbfp_p:
14819     case Intrinsic::ppc_altivec_vcmpeqfp_p:
14820     case Intrinsic::ppc_altivec_vcmpequb_p:
14821     case Intrinsic::ppc_altivec_vcmpequh_p:
14822     case Intrinsic::ppc_altivec_vcmpequw_p:
14823     case Intrinsic::ppc_altivec_vcmpequd_p:
14824     case Intrinsic::ppc_altivec_vcmpgefp_p:
14825     case Intrinsic::ppc_altivec_vcmpgtfp_p:
14826     case Intrinsic::ppc_altivec_vcmpgtsb_p:
14827     case Intrinsic::ppc_altivec_vcmpgtsh_p:
14828     case Intrinsic::ppc_altivec_vcmpgtsw_p:
14829     case Intrinsic::ppc_altivec_vcmpgtsd_p:
14830     case Intrinsic::ppc_altivec_vcmpgtub_p:
14831     case Intrinsic::ppc_altivec_vcmpgtuh_p:
14832     case Intrinsic::ppc_altivec_vcmpgtuw_p:
14833     case Intrinsic::ppc_altivec_vcmpgtud_p:
14834       Known.Zero = ~1U;  // All bits but the low one are known to be zero.
14835       break;
14836     }
14837   }
14838   }
14839 }
14840 
14841 Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
14842   switch (Subtarget.getCPUDirective()) {
14843   default: break;
14844   case PPC::DIR_970:
14845   case PPC::DIR_PWR4:
14846   case PPC::DIR_PWR5:
14847   case PPC::DIR_PWR5X:
14848   case PPC::DIR_PWR6:
14849   case PPC::DIR_PWR6X:
14850   case PPC::DIR_PWR7:
14851   case PPC::DIR_PWR8:
14852   case PPC::DIR_PWR9:
14853   case PPC::DIR_PWR10:
14854   case PPC::DIR_PWR_FUTURE: {
14855     if (!ML)
14856       break;
14857 
14858     if (!DisableInnermostLoopAlign32) {
14859       // If the nested loop is an innermost loop, prefer to a 32-byte alignment,
14860       // so that we can decrease cache misses and branch-prediction misses.
14861       // Actual alignment of the loop will depend on the hotness check and other
14862       // logic in alignBlocks.
14863       if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
14864         return Align(32);
14865     }
14866 
14867     const PPCInstrInfo *TII = Subtarget.getInstrInfo();
14868 
14869     // For small loops (between 5 and 8 instructions), align to a 32-byte
14870     // boundary so that the entire loop fits in one instruction-cache line.
14871     uint64_t LoopSize = 0;
14872     for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
14873       for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
14874         LoopSize += TII->getInstSizeInBytes(*J);
14875         if (LoopSize > 32)
14876           break;
14877       }
14878 
14879     if (LoopSize > 16 && LoopSize <= 32)
14880       return Align(32);
14881 
14882     break;
14883   }
14884   }
14885 
14886   return TargetLowering::getPrefLoopAlignment(ML);
14887 }
14888 
14889 /// getConstraintType - Given a constraint, return the type of
14890 /// constraint it is for this target.
14891 PPCTargetLowering::ConstraintType
14892 PPCTargetLowering::getConstraintType(StringRef Constraint) const {
14893   if (Constraint.size() == 1) {
14894     switch (Constraint[0]) {
14895     default: break;
14896     case 'b':
14897     case 'r':
14898     case 'f':
14899     case 'd':
14900     case 'v':
14901     case 'y':
14902       return C_RegisterClass;
14903     case 'Z':
14904       // FIXME: While Z does indicate a memory constraint, it specifically
14905       // indicates an r+r address (used in conjunction with the 'y' modifier
14906       // in the replacement string). Currently, we're forcing the base
14907       // register to be r0 in the asm printer (which is interpreted as zero)
14908       // and forming the complete address in the second register. This is
14909       // suboptimal.
14910       return C_Memory;
14911     }
14912   } else if (Constraint == "wc") { // individual CR bits.
14913     return C_RegisterClass;
14914   } else if (Constraint == "wa" || Constraint == "wd" ||
14915              Constraint == "wf" || Constraint == "ws" ||
14916              Constraint == "wi" || Constraint == "ww") {
14917     return C_RegisterClass; // VSX registers.
14918   }
14919   return TargetLowering::getConstraintType(Constraint);
14920 }
14921 
14922 /// Examine constraint type and operand type and determine a weight value.
14923 /// This object must already have been set up with the operand type
14924 /// and the current alternative constraint selected.
14925 TargetLowering::ConstraintWeight
14926 PPCTargetLowering::getSingleConstraintMatchWeight(
14927     AsmOperandInfo &info, const char *constraint) const {
14928   ConstraintWeight weight = CW_Invalid;
14929   Value *CallOperandVal = info.CallOperandVal;
14930     // If we don't have a value, we can't do a match,
14931     // but allow it at the lowest weight.
14932   if (!CallOperandVal)
14933     return CW_Default;
14934   Type *type = CallOperandVal->getType();
14935 
14936   // Look at the constraint type.
14937   if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
14938     return CW_Register; // an individual CR bit.
14939   else if ((StringRef(constraint) == "wa" ||
14940             StringRef(constraint) == "wd" ||
14941             StringRef(constraint) == "wf") &&
14942            type->isVectorTy())
14943     return CW_Register;
14944   else if (StringRef(constraint) == "wi" && type->isIntegerTy(64))
14945     return CW_Register; // just hold 64-bit integers data.
14946   else if (StringRef(constraint) == "ws" && type->isDoubleTy())
14947     return CW_Register;
14948   else if (StringRef(constraint) == "ww" && type->isFloatTy())
14949     return CW_Register;
14950 
14951   switch (*constraint) {
14952   default:
14953     weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
14954     break;
14955   case 'b':
14956     if (type->isIntegerTy())
14957       weight = CW_Register;
14958     break;
14959   case 'f':
14960     if (type->isFloatTy())
14961       weight = CW_Register;
14962     break;
14963   case 'd':
14964     if (type->isDoubleTy())
14965       weight = CW_Register;
14966     break;
14967   case 'v':
14968     if (type->isVectorTy())
14969       weight = CW_Register;
14970     break;
14971   case 'y':
14972     weight = CW_Register;
14973     break;
14974   case 'Z':
14975     weight = CW_Memory;
14976     break;
14977   }
14978   return weight;
14979 }
14980 
14981 std::pair<unsigned, const TargetRegisterClass *>
14982 PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
14983                                                 StringRef Constraint,
14984                                                 MVT VT) const {
14985   if (Constraint.size() == 1) {
14986     // GCC RS6000 Constraint Letters
14987     switch (Constraint[0]) {
14988     case 'b':   // R1-R31
14989       if (VT == MVT::i64 && Subtarget.isPPC64())
14990         return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
14991       return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
14992     case 'r':   // R0-R31
14993       if (VT == MVT::i64 && Subtarget.isPPC64())
14994         return std::make_pair(0U, &PPC::G8RCRegClass);
14995       return std::make_pair(0U, &PPC::GPRCRegClass);
14996     // 'd' and 'f' constraints are both defined to be "the floating point
14997     // registers", where one is for 32-bit and the other for 64-bit. We don't
14998     // really care overly much here so just give them all the same reg classes.
14999     case 'd':
15000     case 'f':
15001       if (Subtarget.hasSPE()) {
15002         if (VT == MVT::f32 || VT == MVT::i32)
15003           return std::make_pair(0U, &PPC::GPRCRegClass);
15004         if (VT == MVT::f64 || VT == MVT::i64)
15005           return std::make_pair(0U, &PPC::SPERCRegClass);
15006       } else {
15007         if (VT == MVT::f32 || VT == MVT::i32)
15008           return std::make_pair(0U, &PPC::F4RCRegClass);
15009         if (VT == MVT::f64 || VT == MVT::i64)
15010           return std::make_pair(0U, &PPC::F8RCRegClass);
15011       }
15012       break;
15013     case 'v':
15014       if (Subtarget.hasAltivec())
15015         return std::make_pair(0U, &PPC::VRRCRegClass);
15016       break;
15017     case 'y':   // crrc
15018       return std::make_pair(0U, &PPC::CRRCRegClass);
15019     }
15020   } else if (Constraint == "wc" && Subtarget.useCRBits()) {
15021     // An individual CR bit.
15022     return std::make_pair(0U, &PPC::CRBITRCRegClass);
15023   } else if ((Constraint == "wa" || Constraint == "wd" ||
15024              Constraint == "wf" || Constraint == "wi") &&
15025              Subtarget.hasVSX()) {
15026     return std::make_pair(0U, &PPC::VSRCRegClass);
15027   } else if ((Constraint == "ws" || Constraint == "ww") && Subtarget.hasVSX()) {
15028     if (VT == MVT::f32 && Subtarget.hasP8Vector())
15029       return std::make_pair(0U, &PPC::VSSRCRegClass);
15030     else
15031       return std::make_pair(0U, &PPC::VSFRCRegClass);
15032   }
15033 
15034   // If we name a VSX register, we can't defer to the base class because it
15035   // will not recognize the correct register (their names will be VSL{0-31}
15036   // and V{0-31} so they won't match). So we match them here.
15037   if (Constraint.size() > 3 && Constraint[1] == 'v' && Constraint[2] == 's') {
15038     int VSNum = atoi(Constraint.data() + 3);
15039     assert(VSNum >= 0 && VSNum <= 63 &&
15040            "Attempted to access a vsr out of range");
15041     if (VSNum < 32)
15042       return std::make_pair(PPC::VSL0 + VSNum, &PPC::VSRCRegClass);
15043     return std::make_pair(PPC::V0 + VSNum - 32, &PPC::VSRCRegClass);
15044   }
15045   std::pair<unsigned, const TargetRegisterClass *> R =
15046       TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
15047 
15048   // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
15049   // (which we call X[0-9]+). If a 64-bit value has been requested, and a
15050   // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent
15051   // register.
15052   // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
15053   // the AsmName field from *RegisterInfo.td, then this would not be necessary.
15054   if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
15055       PPC::GPRCRegClass.contains(R.first))
15056     return std::make_pair(TRI->getMatchingSuperReg(R.first,
15057                             PPC::sub_32, &PPC::G8RCRegClass),
15058                           &PPC::G8RCRegClass);
15059 
15060   // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
15061   if (!R.second && StringRef("{cc}").equals_lower(Constraint)) {
15062     R.first = PPC::CR0;
15063     R.second = &PPC::CRRCRegClass;
15064   }
15065 
15066   return R;
15067 }
15068 
15069 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
15070 /// vector.  If it is invalid, don't add anything to Ops.
15071 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
15072                                                      std::string &Constraint,
15073                                                      std::vector<SDValue>&Ops,
15074                                                      SelectionDAG &DAG) const {
15075   SDValue Result;
15076 
15077   // Only support length 1 constraints.
15078   if (Constraint.length() > 1) return;
15079 
15080   char Letter = Constraint[0];
15081   switch (Letter) {
15082   default: break;
15083   case 'I':
15084   case 'J':
15085   case 'K':
15086   case 'L':
15087   case 'M':
15088   case 'N':
15089   case 'O':
15090   case 'P': {
15091     ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
15092     if (!CST) return; // Must be an immediate to match.
15093     SDLoc dl(Op);
15094     int64_t Value = CST->getSExtValue();
15095     EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
15096                          // numbers are printed as such.
15097     switch (Letter) {
15098     default: llvm_unreachable("Unknown constraint letter!");
15099     case 'I':  // "I" is a signed 16-bit constant.
15100       if (isInt<16>(Value))
15101         Result = DAG.getTargetConstant(Value, dl, TCVT);
15102       break;
15103     case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
15104       if (isShiftedUInt<16, 16>(Value))
15105         Result = DAG.getTargetConstant(Value, dl, TCVT);
15106       break;
15107     case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
15108       if (isShiftedInt<16, 16>(Value))
15109         Result = DAG.getTargetConstant(Value, dl, TCVT);
15110       break;
15111     case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
15112       if (isUInt<16>(Value))
15113         Result = DAG.getTargetConstant(Value, dl, TCVT);
15114       break;
15115     case 'M':  // "M" is a constant that is greater than 31.
15116       if (Value > 31)
15117         Result = DAG.getTargetConstant(Value, dl, TCVT);
15118       break;
15119     case 'N':  // "N" is a positive constant that is an exact power of two.
15120       if (Value > 0 && isPowerOf2_64(Value))
15121         Result = DAG.getTargetConstant(Value, dl, TCVT);
15122       break;
15123     case 'O':  // "O" is the constant zero.
15124       if (Value == 0)
15125         Result = DAG.getTargetConstant(Value, dl, TCVT);
15126       break;
15127     case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
15128       if (isInt<16>(-Value))
15129         Result = DAG.getTargetConstant(Value, dl, TCVT);
15130       break;
15131     }
15132     break;
15133   }
15134   }
15135 
15136   if (Result.getNode()) {
15137     Ops.push_back(Result);
15138     return;
15139   }
15140 
15141   // Handle standard constraint letters.
15142   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
15143 }
15144 
15145 // isLegalAddressingMode - Return true if the addressing mode represented
15146 // by AM is legal for this target, for a load/store of the specified type.
15147 bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
15148                                               const AddrMode &AM, Type *Ty,
15149                                               unsigned AS,
15150                                               Instruction *I) const {
15151   // Vector type r+i form is supported since power9 as DQ form. We don't check
15152   // the offset matching DQ form requirement(off % 16 == 0), because on PowerPC,
15153   // imm form is preferred and the offset can be adjusted to use imm form later
15154   // in pass PPCLoopInstrFormPrep. Also in LSR, for one LSRUse, it uses min and
15155   // max offset to check legal addressing mode, we should be a little aggressive
15156   // to contain other offsets for that LSRUse.
15157   if (Ty->isVectorTy() && AM.BaseOffs != 0 && !Subtarget.hasP9Vector())
15158     return false;
15159 
15160   // PPC allows a sign-extended 16-bit immediate field.
15161   if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
15162     return false;
15163 
15164   // No global is ever allowed as a base.
15165   if (AM.BaseGV)
15166     return false;
15167 
15168   // PPC only support r+r,
15169   switch (AM.Scale) {
15170   case 0:  // "r+i" or just "i", depending on HasBaseReg.
15171     break;
15172   case 1:
15173     if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
15174       return false;
15175     // Otherwise we have r+r or r+i.
15176     break;
15177   case 2:
15178     if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r  or  2*r+i is not allowed.
15179       return false;
15180     // Allow 2*r as r+r.
15181     break;
15182   default:
15183     // No other scales are supported.
15184     return false;
15185   }
15186 
15187   return true;
15188 }
15189 
15190 SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
15191                                            SelectionDAG &DAG) const {
15192   MachineFunction &MF = DAG.getMachineFunction();
15193   MachineFrameInfo &MFI = MF.getFrameInfo();
15194   MFI.setReturnAddressIsTaken(true);
15195 
15196   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
15197     return SDValue();
15198 
15199   SDLoc dl(Op);
15200   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
15201 
15202   // Make sure the function does not optimize away the store of the RA to
15203   // the stack.
15204   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
15205   FuncInfo->setLRStoreRequired();
15206   bool isPPC64 = Subtarget.isPPC64();
15207   auto PtrVT = getPointerTy(MF.getDataLayout());
15208 
15209   if (Depth > 0) {
15210     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
15211     SDValue Offset =
15212         DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
15213                         isPPC64 ? MVT::i64 : MVT::i32);
15214     return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
15215                        DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
15216                        MachinePointerInfo());
15217   }
15218 
15219   // Just load the return address off the stack.
15220   SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
15221   return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
15222                      MachinePointerInfo());
15223 }
15224 
15225 SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
15226                                           SelectionDAG &DAG) const {
15227   SDLoc dl(Op);
15228   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
15229 
15230   MachineFunction &MF = DAG.getMachineFunction();
15231   MachineFrameInfo &MFI = MF.getFrameInfo();
15232   MFI.setFrameAddressIsTaken(true);
15233 
15234   EVT PtrVT = getPointerTy(MF.getDataLayout());
15235   bool isPPC64 = PtrVT == MVT::i64;
15236 
15237   // Naked functions never have a frame pointer, and so we use r1. For all
15238   // other functions, this decision must be delayed until during PEI.
15239   unsigned FrameReg;
15240   if (MF.getFunction().hasFnAttribute(Attribute::Naked))
15241     FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
15242   else
15243     FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;
15244 
15245   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
15246                                          PtrVT);
15247   while (Depth--)
15248     FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
15249                             FrameAddr, MachinePointerInfo());
15250   return FrameAddr;
15251 }
15252 
15253 // FIXME? Maybe this could be a TableGen attribute on some registers and
15254 // this table could be generated automatically from RegInfo.
15255 Register PPCTargetLowering::getRegisterByName(const char* RegName, LLT VT,
15256                                               const MachineFunction &MF) const {
15257   bool isPPC64 = Subtarget.isPPC64();
15258 
15259   bool is64Bit = isPPC64 && VT == LLT::scalar(64);
15260   if (!is64Bit && VT != LLT::scalar(32))
15261     report_fatal_error("Invalid register global variable type");
15262 
15263   Register Reg = StringSwitch<Register>(RegName)
15264                      .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
15265                      .Case("r2", isPPC64 ? Register() : PPC::R2)
15266                      .Case("r13", (is64Bit ? PPC::X13 : PPC::R13))
15267                      .Default(Register());
15268 
15269   if (Reg)
15270     return Reg;
15271   report_fatal_error("Invalid register name global variable");
15272 }
15273 
15274 bool PPCTargetLowering::isAccessedAsGotIndirect(SDValue GA) const {
15275   // 32-bit SVR4 ABI access everything as got-indirect.
15276   if (Subtarget.is32BitELFABI())
15277     return true;
15278 
15279   // AIX accesses everything indirectly through the TOC, which is similar to
15280   // the GOT.
15281   if (Subtarget.isAIXABI())
15282     return true;
15283 
15284   CodeModel::Model CModel = getTargetMachine().getCodeModel();
15285   // If it is small or large code model, module locals are accessed
15286   // indirectly by loading their address from .toc/.got.
15287   if (CModel == CodeModel::Small || CModel == CodeModel::Large)
15288     return true;
15289 
15290   // JumpTable and BlockAddress are accessed as got-indirect.
15291   if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA))
15292     return true;
15293 
15294   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA))
15295     return Subtarget.isGVIndirectSymbol(G->getGlobal());
15296 
15297   return false;
15298 }
15299 
15300 bool
15301 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
15302   // The PowerPC target isn't yet aware of offsets.
15303   return false;
15304 }
15305 
15306 bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
15307                                            const CallInst &I,
15308                                            MachineFunction &MF,
15309                                            unsigned Intrinsic) const {
15310   switch (Intrinsic) {
15311   case Intrinsic::ppc_altivec_lvx:
15312   case Intrinsic::ppc_altivec_lvxl:
15313   case Intrinsic::ppc_altivec_lvebx:
15314   case Intrinsic::ppc_altivec_lvehx:
15315   case Intrinsic::ppc_altivec_lvewx:
15316   case Intrinsic::ppc_vsx_lxvd2x:
15317   case Intrinsic::ppc_vsx_lxvw4x: {
15318     EVT VT;
15319     switch (Intrinsic) {
15320     case Intrinsic::ppc_altivec_lvebx:
15321       VT = MVT::i8;
15322       break;
15323     case Intrinsic::ppc_altivec_lvehx:
15324       VT = MVT::i16;
15325       break;
15326     case Intrinsic::ppc_altivec_lvewx:
15327       VT = MVT::i32;
15328       break;
15329     case Intrinsic::ppc_vsx_lxvd2x:
15330       VT = MVT::v2f64;
15331       break;
15332     default:
15333       VT = MVT::v4i32;
15334       break;
15335     }
15336 
15337     Info.opc = ISD::INTRINSIC_W_CHAIN;
15338     Info.memVT = VT;
15339     Info.ptrVal = I.getArgOperand(0);
15340     Info.offset = -VT.getStoreSize()+1;
15341     Info.size = 2*VT.getStoreSize()-1;
15342     Info.align = Align(1);
15343     Info.flags = MachineMemOperand::MOLoad;
15344     return true;
15345   }
15346   case Intrinsic::ppc_altivec_stvx:
15347   case Intrinsic::ppc_altivec_stvxl:
15348   case Intrinsic::ppc_altivec_stvebx:
15349   case Intrinsic::ppc_altivec_stvehx:
15350   case Intrinsic::ppc_altivec_stvewx:
15351   case Intrinsic::ppc_vsx_stxvd2x:
15352   case Intrinsic::ppc_vsx_stxvw4x: {
15353     EVT VT;
15354     switch (Intrinsic) {
15355     case Intrinsic::ppc_altivec_stvebx:
15356       VT = MVT::i8;
15357       break;
15358     case Intrinsic::ppc_altivec_stvehx:
15359       VT = MVT::i16;
15360       break;
15361     case Intrinsic::ppc_altivec_stvewx:
15362       VT = MVT::i32;
15363       break;
15364     case Intrinsic::ppc_vsx_stxvd2x:
15365       VT = MVT::v2f64;
15366       break;
15367     default:
15368       VT = MVT::v4i32;
15369       break;
15370     }
15371 
15372     Info.opc = ISD::INTRINSIC_VOID;
15373     Info.memVT = VT;
15374     Info.ptrVal = I.getArgOperand(1);
15375     Info.offset = -VT.getStoreSize()+1;
15376     Info.size = 2*VT.getStoreSize()-1;
15377     Info.align = Align(1);
15378     Info.flags = MachineMemOperand::MOStore;
15379     return true;
15380   }
15381   default:
15382     break;
15383   }
15384 
15385   return false;
15386 }
15387 
15388 /// It returns EVT::Other if the type should be determined using generic
15389 /// target-independent logic.
15390 EVT PPCTargetLowering::getOptimalMemOpType(
15391     const MemOp &Op, const AttributeList &FuncAttributes) const {
15392   if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
15393     // We should use Altivec/VSX loads and stores when available. For unaligned
15394     // addresses, unaligned VSX loads are only fast starting with the P8.
15395     if (Subtarget.hasAltivec() && Op.size() >= 16 &&
15396         (Op.isAligned(Align(16)) ||
15397          ((Op.isMemset() && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
15398       return MVT::v4i32;
15399   }
15400 
15401   if (Subtarget.isPPC64()) {
15402     return MVT::i64;
15403   }
15404 
15405   return MVT::i32;
15406 }
15407 
15408 /// Returns true if it is beneficial to convert a load of a constant
15409 /// to just the constant itself.
15410 bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
15411                                                           Type *Ty) const {
15412   assert(Ty->isIntegerTy());
15413 
15414   unsigned BitSize = Ty->getPrimitiveSizeInBits();
15415   return !(BitSize == 0 || BitSize > 64);
15416 }
15417 
15418 bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
15419   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
15420     return false;
15421   unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
15422   unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
15423   return NumBits1 == 64 && NumBits2 == 32;
15424 }
15425 
15426 bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
15427   if (!VT1.isInteger() || !VT2.isInteger())
15428     return false;
15429   unsigned NumBits1 = VT1.getSizeInBits();
15430   unsigned NumBits2 = VT2.getSizeInBits();
15431   return NumBits1 == 64 && NumBits2 == 32;
15432 }
15433 
15434 bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
15435   // Generally speaking, zexts are not free, but they are free when they can be
15436   // folded with other operations.
15437   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
15438     EVT MemVT = LD->getMemoryVT();
15439     if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
15440          (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
15441         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
15442          LD->getExtensionType() == ISD::ZEXTLOAD))
15443       return true;
15444   }
15445 
15446   // FIXME: Add other cases...
15447   //  - 32-bit shifts with a zext to i64
15448   //  - zext after ctlz, bswap, etc.
15449   //  - zext after and by a constant mask
15450 
15451   return TargetLowering::isZExtFree(Val, VT2);
15452 }
15453 
15454 bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
15455   assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
15456          "invalid fpext types");
15457   // Extending to float128 is not free.
15458   if (DestVT == MVT::f128)
15459     return false;
15460   return true;
15461 }
15462 
15463 bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
15464   return isInt<16>(Imm) || isUInt<16>(Imm);
15465 }
15466 
15467 bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
15468   return isInt<16>(Imm) || isUInt<16>(Imm);
15469 }
15470 
15471 bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
15472                                                        unsigned,
15473                                                        unsigned,
15474                                                        MachineMemOperand::Flags,
15475                                                        bool *Fast) const {
15476   if (DisablePPCUnaligned)
15477     return false;
15478 
15479   // PowerPC supports unaligned memory access for simple non-vector types.
15480   // Although accessing unaligned addresses is not as efficient as accessing
15481   // aligned addresses, it is generally more efficient than manual expansion,
15482   // and generally only traps for software emulation when crossing page
15483   // boundaries.
15484 
15485   if (!VT.isSimple())
15486     return false;
15487 
15488   if (VT.isFloatingPoint() && !VT.isVector() &&
15489       !Subtarget.allowsUnalignedFPAccess())
15490     return false;
15491 
15492   if (VT.getSimpleVT().isVector()) {
15493     if (Subtarget.hasVSX()) {
15494       if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
15495           VT != MVT::v4f32 && VT != MVT::v4i32)
15496         return false;
15497     } else {
15498       return false;
15499     }
15500   }
15501 
15502   if (VT == MVT::ppcf128)
15503     return false;
15504 
15505   if (Fast)
15506     *Fast = true;
15507 
15508   return true;
15509 }
15510 
15511 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
15512                                                    EVT VT) const {
15513   return isFMAFasterThanFMulAndFAdd(
15514       MF.getFunction(), VT.getTypeForEVT(MF.getFunction().getContext()));
15515 }
15516 
15517 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const Function &F,
15518                                                    Type *Ty) const {
15519   switch (Ty->getScalarType()->getTypeID()) {
15520   case Type::FloatTyID:
15521   case Type::DoubleTyID:
15522     return true;
15523   case Type::FP128TyID:
15524     return Subtarget.hasP9Vector();
15525   default:
15526     return false;
15527   }
15528 }
15529 
15530 // FIXME: add more patterns which are not profitable to hoist.
15531 bool PPCTargetLowering::isProfitableToHoist(Instruction *I) const {
15532   if (!I->hasOneUse())
15533     return true;
15534 
15535   Instruction *User = I->user_back();
15536   assert(User && "A single use instruction with no uses.");
15537 
15538   switch (I->getOpcode()) {
15539   case Instruction::FMul: {
15540     // Don't break FMA, PowerPC prefers FMA.
15541     if (User->getOpcode() != Instruction::FSub &&
15542         User->getOpcode() != Instruction::FAdd)
15543       return true;
15544 
15545     const TargetOptions &Options = getTargetMachine().Options;
15546     const Function *F = I->getFunction();
15547     const DataLayout &DL = F->getParent()->getDataLayout();
15548     Type *Ty = User->getOperand(0)->getType();
15549 
15550     return !(
15551         isFMAFasterThanFMulAndFAdd(*F, Ty) &&
15552         isOperationLegalOrCustom(ISD::FMA, getValueType(DL, Ty)) &&
15553         (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath));
15554   }
15555   case Instruction::Load: {
15556     // Don't break "store (load float*)" pattern, this pattern will be combined
15557     // to "store (load int32)" in later InstCombine pass. See function
15558     // combineLoadToOperationType. On PowerPC, loading a float point takes more
15559     // cycles than loading a 32 bit integer.
15560     LoadInst *LI = cast<LoadInst>(I);
15561     // For the loads that combineLoadToOperationType does nothing, like
15562     // ordered load, it should be profitable to hoist them.
15563     // For swifterror load, it can only be used for pointer to pointer type, so
15564     // later type check should get rid of this case.
15565     if (!LI->isUnordered())
15566       return true;
15567 
15568     if (User->getOpcode() != Instruction::Store)
15569       return true;
15570 
15571     if (I->getType()->getTypeID() != Type::FloatTyID)
15572       return true;
15573 
15574     return false;
15575   }
15576   default:
15577     return true;
15578   }
15579   return true;
15580 }
15581 
15582 const MCPhysReg *
15583 PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
15584   // LR is a callee-save register, but we must treat it as clobbered by any call
15585   // site. Hence we include LR in the scratch registers, which are in turn added
15586   // as implicit-defs for stackmaps and patchpoints. The same reasoning applies
15587   // to CTR, which is used by any indirect call.
15588   static const MCPhysReg ScratchRegs[] = {
15589     PPC::X12, PPC::LR8, PPC::CTR8, 0
15590   };
15591 
15592   return ScratchRegs;
15593 }
15594 
15595 Register PPCTargetLowering::getExceptionPointerRegister(
15596     const Constant *PersonalityFn) const {
15597   return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
15598 }
15599 
15600 Register PPCTargetLowering::getExceptionSelectorRegister(
15601     const Constant *PersonalityFn) const {
15602   return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
15603 }
15604 
15605 bool
15606 PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
15607                      EVT VT , unsigned DefinedValues) const {
15608   if (VT == MVT::v2i64)
15609     return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves
15610 
15611   if (Subtarget.hasVSX())
15612     return true;
15613 
15614   return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
15615 }
15616 
15617 Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
15618   if (DisableILPPref || Subtarget.enableMachineScheduler())
15619     return TargetLowering::getSchedulingPreference(N);
15620 
15621   return Sched::ILP;
15622 }
15623 
15624 // Create a fast isel object.
15625 FastISel *
15626 PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
15627                                   const TargetLibraryInfo *LibInfo) const {
15628   return PPC::createFastISel(FuncInfo, LibInfo);
15629 }
15630 
15631 // 'Inverted' means the FMA opcode after negating one multiplicand.
15632 // For example, (fma -a b c) = (fnmsub a b c)
15633 static unsigned invertFMAOpcode(unsigned Opc) {
15634   switch (Opc) {
15635   default:
15636     llvm_unreachable("Invalid FMA opcode for PowerPC!");
15637   case ISD::FMA:
15638     return PPCISD::FNMSUB;
15639   case PPCISD::FNMSUB:
15640     return ISD::FMA;
15641   }
15642 }
15643 
15644 SDValue PPCTargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
15645                                                 bool LegalOps, bool OptForSize,
15646                                                 NegatibleCost &Cost,
15647                                                 unsigned Depth) const {
15648   if (Depth > SelectionDAG::MaxRecursionDepth)
15649     return SDValue();
15650 
15651   unsigned Opc = Op.getOpcode();
15652   EVT VT = Op.getValueType();
15653   SDNodeFlags Flags = Op.getNode()->getFlags();
15654 
15655   switch (Opc) {
15656   case PPCISD::FNMSUB:
15657     if (!Op.hasOneUse() || !isTypeLegal(VT))
15658       break;
15659 
15660     const TargetOptions &Options = getTargetMachine().Options;
15661     SDValue N0 = Op.getOperand(0);
15662     SDValue N1 = Op.getOperand(1);
15663     SDValue N2 = Op.getOperand(2);
15664     SDLoc Loc(Op);
15665 
15666     NegatibleCost N2Cost = NegatibleCost::Expensive;
15667     SDValue NegN2 =
15668         getNegatedExpression(N2, DAG, LegalOps, OptForSize, N2Cost, Depth + 1);
15669 
15670     if (!NegN2)
15671       return SDValue();
15672 
15673     // (fneg (fnmsub a b c)) => (fnmsub (fneg a) b (fneg c))
15674     // (fneg (fnmsub a b c)) => (fnmsub a (fneg b) (fneg c))
15675     // These transformations may change sign of zeroes. For example,
15676     // -(-ab-(-c))=-0 while -(-(ab-c))=+0 when a=b=c=1.
15677     if (Flags.hasNoSignedZeros() || Options.NoSignedZerosFPMath) {
15678       // Try and choose the cheaper one to negate.
15679       NegatibleCost N0Cost = NegatibleCost::Expensive;
15680       SDValue NegN0 = getNegatedExpression(N0, DAG, LegalOps, OptForSize,
15681                                            N0Cost, Depth + 1);
15682 
15683       NegatibleCost N1Cost = NegatibleCost::Expensive;
15684       SDValue NegN1 = getNegatedExpression(N1, DAG, LegalOps, OptForSize,
15685                                            N1Cost, Depth + 1);
15686 
15687       if (NegN0 && N0Cost <= N1Cost) {
15688         Cost = std::min(N0Cost, N2Cost);
15689         return DAG.getNode(Opc, Loc, VT, NegN0, N1, NegN2, Flags);
15690       } else if (NegN1) {
15691         Cost = std::min(N1Cost, N2Cost);
15692         return DAG.getNode(Opc, Loc, VT, N0, NegN1, NegN2, Flags);
15693       }
15694     }
15695 
15696     // (fneg (fnmsub a b c)) => (fma a b (fneg c))
15697     if (isOperationLegal(ISD::FMA, VT)) {
15698       Cost = N2Cost;
15699       return DAG.getNode(ISD::FMA, Loc, VT, N0, N1, NegN2, Flags);
15700     }
15701 
15702     break;
15703   }
15704 
15705   return TargetLowering::getNegatedExpression(Op, DAG, LegalOps, OptForSize,
15706                                               Cost, Depth);
15707 }
15708 
15709 // Override to enable LOAD_STACK_GUARD lowering on Linux.
15710 bool PPCTargetLowering::useLoadStackGuardNode() const {
15711   if (!Subtarget.isTargetLinux())
15712     return TargetLowering::useLoadStackGuardNode();
15713   return true;
15714 }
15715 
15716 // Override to disable global variable loading on Linux.
15717 void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
15718   if (!Subtarget.isTargetLinux())
15719     return TargetLowering::insertSSPDeclarations(M);
15720 }
15721 
15722 bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
15723                                      bool ForCodeSize) const {
15724   if (!VT.isSimple() || !Subtarget.hasVSX())
15725     return false;
15726 
15727   switch(VT.getSimpleVT().SimpleTy) {
15728   default:
15729     // For FP types that are currently not supported by PPC backend, return
15730     // false. Examples: f16, f80.
15731     return false;
15732   case MVT::f32:
15733   case MVT::f64:
15734     if (Subtarget.hasPrefixInstrs()) {
15735       // With prefixed instructions, we can materialize anything that can be
15736       // represented with a 32-bit immediate, not just positive zero.
15737       APFloat APFloatOfImm = Imm;
15738       return convertToNonDenormSingle(APFloatOfImm);
15739     }
15740     LLVM_FALLTHROUGH;
15741   case MVT::ppcf128:
15742     return Imm.isPosZero();
15743   }
15744 }
15745 
15746 // For vector shift operation op, fold
15747 // (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
15748 static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
15749                                   SelectionDAG &DAG) {
15750   SDValue N0 = N->getOperand(0);
15751   SDValue N1 = N->getOperand(1);
15752   EVT VT = N0.getValueType();
15753   unsigned OpSizeInBits = VT.getScalarSizeInBits();
15754   unsigned Opcode = N->getOpcode();
15755   unsigned TargetOpcode;
15756 
15757   switch (Opcode) {
15758   default:
15759     llvm_unreachable("Unexpected shift operation");
15760   case ISD::SHL:
15761     TargetOpcode = PPCISD::SHL;
15762     break;
15763   case ISD::SRL:
15764     TargetOpcode = PPCISD::SRL;
15765     break;
15766   case ISD::SRA:
15767     TargetOpcode = PPCISD::SRA;
15768     break;
15769   }
15770 
15771   if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
15772       N1->getOpcode() == ISD::AND)
15773     if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
15774       if (Mask->getZExtValue() == OpSizeInBits - 1)
15775         return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));
15776 
15777   return SDValue();
15778 }
15779 
15780 SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
15781   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
15782     return Value;
15783 
15784   SDValue N0 = N->getOperand(0);
15785   ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
15786   if (!Subtarget.isISA3_0() ||
15787       N0.getOpcode() != ISD::SIGN_EXTEND ||
15788       N0.getOperand(0).getValueType() != MVT::i32 ||
15789       CN1 == nullptr || N->getValueType(0) != MVT::i64)
15790     return SDValue();
15791 
15792   // We can't save an operation here if the value is already extended, and
15793   // the existing shift is easier to combine.
15794   SDValue ExtsSrc = N0.getOperand(0);
15795   if (ExtsSrc.getOpcode() == ISD::TRUNCATE &&
15796       ExtsSrc.getOperand(0).getOpcode() == ISD::AssertSext)
15797     return SDValue();
15798 
15799   SDLoc DL(N0);
15800   SDValue ShiftBy = SDValue(CN1, 0);
15801   // We want the shift amount to be i32 on the extswli, but the shift could
15802   // have an i64.
15803   if (ShiftBy.getValueType() == MVT::i64)
15804     ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32);
15805 
15806   return DCI.DAG.getNode(PPCISD::EXTSWSLI, DL, MVT::i64, N0->getOperand(0),
15807                          ShiftBy);
15808 }
15809 
15810 SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
15811   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
15812     return Value;
15813 
15814   return SDValue();
15815 }
15816 
15817 SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
15818   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
15819     return Value;
15820 
15821   return SDValue();
15822 }
15823 
15824 // Transform (add X, (zext(setne Z, C))) -> (addze X, (addic (addi Z, -C), -1))
15825 // Transform (add X, (zext(sete  Z, C))) -> (addze X, (subfic (addi Z, -C), 0))
15826 // When C is zero, the equation (addi Z, -C) can be simplified to Z
15827 // Requirement: -C in [-32768, 32767], X and Z are MVT::i64 types
15828 static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG,
15829                                  const PPCSubtarget &Subtarget) {
15830   if (!Subtarget.isPPC64())
15831     return SDValue();
15832 
15833   SDValue LHS = N->getOperand(0);
15834   SDValue RHS = N->getOperand(1);
15835 
15836   auto isZextOfCompareWithConstant = [](SDValue Op) {
15837     if (Op.getOpcode() != ISD::ZERO_EXTEND || !Op.hasOneUse() ||
15838         Op.getValueType() != MVT::i64)
15839       return false;
15840 
15841     SDValue Cmp = Op.getOperand(0);
15842     if (Cmp.getOpcode() != ISD::SETCC || !Cmp.hasOneUse() ||
15843         Cmp.getOperand(0).getValueType() != MVT::i64)
15844       return false;
15845 
15846     if (auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1))) {
15847       int64_t NegConstant = 0 - Constant->getSExtValue();
15848       // Due to the limitations of the addi instruction,
15849       // -C is required to be [-32768, 32767].
15850       return isInt<16>(NegConstant);
15851     }
15852 
15853     return false;
15854   };
15855 
15856   bool LHSHasPattern = isZextOfCompareWithConstant(LHS);
15857   bool RHSHasPattern = isZextOfCompareWithConstant(RHS);
15858 
15859   // If there is a pattern, canonicalize a zext operand to the RHS.
15860   if (LHSHasPattern && !RHSHasPattern)
15861     std::swap(LHS, RHS);
15862   else if (!LHSHasPattern && !RHSHasPattern)
15863     return SDValue();
15864 
15865   SDLoc DL(N);
15866   SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Glue);
15867   SDValue Cmp = RHS.getOperand(0);
15868   SDValue Z = Cmp.getOperand(0);
15869   auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1));
15870 
15871   assert(Constant && "Constant Should not be a null pointer.");
15872   int64_t NegConstant = 0 - Constant->getSExtValue();
15873 
15874   switch(cast<CondCodeSDNode>(Cmp.getOperand(2))->get()) {
15875   default: break;
15876   case ISD::SETNE: {
15877     //                                 when C == 0
15878     //                             --> addze X, (addic Z, -1).carry
15879     //                            /
15880     // add X, (zext(setne Z, C))--
15881     //                            \    when -32768 <= -C <= 32767 && C != 0
15882     //                             --> addze X, (addic (addi Z, -C), -1).carry
15883     SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
15884                               DAG.getConstant(NegConstant, DL, MVT::i64));
15885     SDValue AddOrZ = NegConstant != 0 ? Add : Z;
15886     SDValue Addc = DAG.getNode(ISD::ADDC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
15887                                AddOrZ, DAG.getConstant(-1ULL, DL, MVT::i64));
15888     return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
15889                        SDValue(Addc.getNode(), 1));
15890     }
15891   case ISD::SETEQ: {
15892     //                                 when C == 0
15893     //                             --> addze X, (subfic Z, 0).carry
15894     //                            /
15895     // add X, (zext(sete  Z, C))--
15896     //                            \    when -32768 <= -C <= 32767 && C != 0
15897     //                             --> addze X, (subfic (addi Z, -C), 0).carry
15898     SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
15899                               DAG.getConstant(NegConstant, DL, MVT::i64));
15900     SDValue AddOrZ = NegConstant != 0 ? Add : Z;
15901     SDValue Subc = DAG.getNode(ISD::SUBC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
15902                                DAG.getConstant(0, DL, MVT::i64), AddOrZ);
15903     return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
15904                        SDValue(Subc.getNode(), 1));
15905     }
15906   }
15907 
15908   return SDValue();
15909 }
15910 
15911 // Transform
15912 // (add C1, (MAT_PCREL_ADDR GlobalAddr+C2)) to
15913 // (MAT_PCREL_ADDR GlobalAddr+(C1+C2))
15914 // In this case both C1 and C2 must be known constants.
15915 // C1+C2 must fit into a 34 bit signed integer.
15916 static SDValue combineADDToMAT_PCREL_ADDR(SDNode *N, SelectionDAG &DAG,
15917                                           const PPCSubtarget &Subtarget) {
15918   if (!Subtarget.isUsingPCRelativeCalls())
15919     return SDValue();
15920 
15921   // Check both Operand 0 and Operand 1 of the ADD node for the PCRel node.
15922   // If we find that node try to cast the Global Address and the Constant.
15923   SDValue LHS = N->getOperand(0);
15924   SDValue RHS = N->getOperand(1);
15925 
15926   if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
15927     std::swap(LHS, RHS);
15928 
15929   if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
15930     return SDValue();
15931 
15932   // Operand zero of PPCISD::MAT_PCREL_ADDR is the GA node.
15933   GlobalAddressSDNode *GSDN = dyn_cast<GlobalAddressSDNode>(LHS.getOperand(0));
15934   ConstantSDNode* ConstNode = dyn_cast<ConstantSDNode>(RHS);
15935 
15936   // Check that both casts succeeded.
15937   if (!GSDN || !ConstNode)
15938     return SDValue();
15939 
15940   int64_t NewOffset = GSDN->getOffset() + ConstNode->getSExtValue();
15941   SDLoc DL(GSDN);
15942 
15943   // The signed int offset needs to fit in 34 bits.
15944   if (!isInt<34>(NewOffset))
15945     return SDValue();
15946 
15947   // The new global address is a copy of the old global address except
15948   // that it has the updated Offset.
15949   SDValue GA =
15950       DAG.getTargetGlobalAddress(GSDN->getGlobal(), DL, GSDN->getValueType(0),
15951                                  NewOffset, GSDN->getTargetFlags());
15952   SDValue MatPCRel =
15953       DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, GSDN->getValueType(0), GA);
15954   return MatPCRel;
15955 }
15956 
15957 SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {
15958   if (auto Value = combineADDToADDZE(N, DCI.DAG, Subtarget))
15959     return Value;
15960 
15961   if (auto Value = combineADDToMAT_PCREL_ADDR(N, DCI.DAG, Subtarget))
15962     return Value;
15963 
15964   return SDValue();
15965 }
15966 
15967 // Detect TRUNCATE operations on bitcasts of float128 values.
15968 // What we are looking for here is the situtation where we extract a subset
15969 // of bits from a 128 bit float.
15970 // This can be of two forms:
15971 // 1) BITCAST of f128 feeding TRUNCATE
15972 // 2) BITCAST of f128 feeding SRL (a shift) feeding TRUNCATE
15973 // The reason this is required is because we do not have a legal i128 type
15974 // and so we want to prevent having to store the f128 and then reload part
15975 // of it.
15976 SDValue PPCTargetLowering::combineTRUNCATE(SDNode *N,
15977                                            DAGCombinerInfo &DCI) const {
15978   // If we are using CRBits then try that first.
15979   if (Subtarget.useCRBits()) {
15980     // Check if CRBits did anything and return that if it did.
15981     if (SDValue CRTruncValue = DAGCombineTruncBoolExt(N, DCI))
15982       return CRTruncValue;
15983   }
15984 
15985   SDLoc dl(N);
15986   SDValue Op0 = N->getOperand(0);
15987 
15988   // fold (truncate (abs (sub (zext a), (zext b)))) -> (vabsd a, b)
15989   if (Subtarget.hasP9Altivec() && Op0.getOpcode() == ISD::ABS) {
15990     EVT VT = N->getValueType(0);
15991     if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
15992       return SDValue();
15993     SDValue Sub = Op0.getOperand(0);
15994     if (Sub.getOpcode() == ISD::SUB) {
15995       SDValue SubOp0 = Sub.getOperand(0);
15996       SDValue SubOp1 = Sub.getOperand(1);
15997       if ((SubOp0.getOpcode() == ISD::ZERO_EXTEND) &&
15998           (SubOp1.getOpcode() == ISD::ZERO_EXTEND)) {
15999         return DCI.DAG.getNode(PPCISD::VABSD, dl, VT, SubOp0.getOperand(0),
16000                                SubOp1.getOperand(0),
16001                                DCI.DAG.getTargetConstant(0, dl, MVT::i32));
16002       }
16003     }
16004   }
16005 
16006   // Looking for a truncate of i128 to i64.
16007   if (Op0.getValueType() != MVT::i128 || N->getValueType(0) != MVT::i64)
16008     return SDValue();
16009 
16010   int EltToExtract = DCI.DAG.getDataLayout().isBigEndian() ? 1 : 0;
16011 
16012   // SRL feeding TRUNCATE.
16013   if (Op0.getOpcode() == ISD::SRL) {
16014     ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
16015     // The right shift has to be by 64 bits.
16016     if (!ConstNode || ConstNode->getZExtValue() != 64)
16017       return SDValue();
16018 
16019     // Switch the element number to extract.
16020     EltToExtract = EltToExtract ? 0 : 1;
16021     // Update Op0 past the SRL.
16022     Op0 = Op0.getOperand(0);
16023   }
16024 
16025   // BITCAST feeding a TRUNCATE possibly via SRL.
16026   if (Op0.getOpcode() == ISD::BITCAST &&
16027       Op0.getValueType() == MVT::i128 &&
16028       Op0.getOperand(0).getValueType() == MVT::f128) {
16029     SDValue Bitcast = DCI.DAG.getBitcast(MVT::v2i64, Op0.getOperand(0));
16030     return DCI.DAG.getNode(
16031         ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Bitcast,
16032         DCI.DAG.getTargetConstant(EltToExtract, dl, MVT::i32));
16033   }
16034   return SDValue();
16035 }
16036 
16037 SDValue PPCTargetLowering::combineMUL(SDNode *N, DAGCombinerInfo &DCI) const {
16038   SelectionDAG &DAG = DCI.DAG;
16039 
16040   ConstantSDNode *ConstOpOrElement = isConstOrConstSplat(N->getOperand(1));
16041   if (!ConstOpOrElement)
16042     return SDValue();
16043 
16044   // An imul is usually smaller than the alternative sequence for legal type.
16045   if (DAG.getMachineFunction().getFunction().hasMinSize() &&
16046       isOperationLegal(ISD::MUL, N->getValueType(0)))
16047     return SDValue();
16048 
16049   auto IsProfitable = [this](bool IsNeg, bool IsAddOne, EVT VT) -> bool {
16050     switch (this->Subtarget.getCPUDirective()) {
16051     default:
16052       // TODO: enhance the condition for subtarget before pwr8
16053       return false;
16054     case PPC::DIR_PWR8:
16055       //  type        mul     add    shl
16056       // scalar        4       1      1
16057       // vector        7       2      2
16058       return true;
16059     case PPC::DIR_PWR9:
16060     case PPC::DIR_PWR10:
16061     case PPC::DIR_PWR_FUTURE:
16062       //  type        mul     add    shl
16063       // scalar        5       2      2
16064       // vector        7       2      2
16065 
16066       // The cycle RATIO of related operations are showed as a table above.
16067       // Because mul is 5(scalar)/7(vector), add/sub/shl are all 2 for both
16068       // scalar and vector type. For 2 instrs patterns, add/sub + shl
16069       // are 4, it is always profitable; but for 3 instrs patterns
16070       // (mul x, -(2^N + 1)) => -(add (shl x, N), x), sub + add + shl are 6.
16071       // So we should only do it for vector type.
16072       return IsAddOne && IsNeg ? VT.isVector() : true;
16073     }
16074   };
16075 
16076   EVT VT = N->getValueType(0);
16077   SDLoc DL(N);
16078 
16079   const APInt &MulAmt = ConstOpOrElement->getAPIntValue();
16080   bool IsNeg = MulAmt.isNegative();
16081   APInt MulAmtAbs = MulAmt.abs();
16082 
16083   if ((MulAmtAbs - 1).isPowerOf2()) {
16084     // (mul x, 2^N + 1) => (add (shl x, N), x)
16085     // (mul x, -(2^N + 1)) => -(add (shl x, N), x)
16086 
16087     if (!IsProfitable(IsNeg, true, VT))
16088       return SDValue();
16089 
16090     SDValue Op0 = N->getOperand(0);
16091     SDValue Op1 =
16092         DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
16093                     DAG.getConstant((MulAmtAbs - 1).logBase2(), DL, VT));
16094     SDValue Res = DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);
16095 
16096     if (!IsNeg)
16097       return Res;
16098 
16099     return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
16100   } else if ((MulAmtAbs + 1).isPowerOf2()) {
16101     // (mul x, 2^N - 1) => (sub (shl x, N), x)
16102     // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
16103 
16104     if (!IsProfitable(IsNeg, false, VT))
16105       return SDValue();
16106 
16107     SDValue Op0 = N->getOperand(0);
16108     SDValue Op1 =
16109         DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
16110                     DAG.getConstant((MulAmtAbs + 1).logBase2(), DL, VT));
16111 
16112     if (!IsNeg)
16113       return DAG.getNode(ISD::SUB, DL, VT, Op1, Op0);
16114     else
16115       return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);
16116 
16117   } else {
16118     return SDValue();
16119   }
16120 }
16121 
16122 // Combine fma-like op (like fnmsub) with fnegs to appropriate op. Do this
16123 // in combiner since we need to check SD flags and other subtarget features.
16124 SDValue PPCTargetLowering::combineFMALike(SDNode *N,
16125                                           DAGCombinerInfo &DCI) const {
16126   SDValue N0 = N->getOperand(0);
16127   SDValue N1 = N->getOperand(1);
16128   SDValue N2 = N->getOperand(2);
16129   SDNodeFlags Flags = N->getFlags();
16130   EVT VT = N->getValueType(0);
16131   SelectionDAG &DAG = DCI.DAG;
16132   const TargetOptions &Options = getTargetMachine().Options;
16133   unsigned Opc = N->getOpcode();
16134   bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
16135   bool LegalOps = !DCI.isBeforeLegalizeOps();
16136   SDLoc Loc(N);
16137 
16138   if (!isOperationLegal(ISD::FMA, VT))
16139     return SDValue();
16140 
16141   // Allowing transformation to FNMSUB may change sign of zeroes when ab-c=0
16142   // since (fnmsub a b c)=-0 while c-ab=+0.
16143   if (!Flags.hasNoSignedZeros() && !Options.NoSignedZerosFPMath)
16144     return SDValue();
16145 
16146   // (fma (fneg a) b c) => (fnmsub a b c)
16147   // (fnmsub (fneg a) b c) => (fma a b c)
16148   if (SDValue NegN0 = getCheaperNegatedExpression(N0, DAG, LegalOps, CodeSize))
16149     return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, NegN0, N1, N2, Flags);
16150 
16151   // (fma a (fneg b) c) => (fnmsub a b c)
16152   // (fnmsub a (fneg b) c) => (fma a b c)
16153   if (SDValue NegN1 = getCheaperNegatedExpression(N1, DAG, LegalOps, CodeSize))
16154     return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, N0, NegN1, N2, Flags);
16155 
16156   return SDValue();
16157 }
16158 
16159 bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
16160   // Only duplicate to increase tail-calls for the 64bit SysV ABIs.
16161   if (!Subtarget.is64BitELFABI())
16162     return false;
16163 
16164   // If not a tail call then no need to proceed.
16165   if (!CI->isTailCall())
16166     return false;
16167 
16168   // If sibling calls have been disabled and tail-calls aren't guaranteed
16169   // there is no reason to duplicate.
16170   auto &TM = getTargetMachine();
16171   if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
16172     return false;
16173 
16174   // Can't tail call a function called indirectly, or if it has variadic args.
16175   const Function *Callee = CI->getCalledFunction();
16176   if (!Callee || Callee->isVarArg())
16177     return false;
16178 
16179   // Make sure the callee and caller calling conventions are eligible for tco.
16180   const Function *Caller = CI->getParent()->getParent();
16181   if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
16182                                            CI->getCallingConv()))
16183       return false;
16184 
16185   // If the function is local then we have a good chance at tail-calling it
16186   return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
16187 }
16188 
16189 bool PPCTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
16190   if (!Subtarget.hasVSX())
16191     return false;
16192   if (Subtarget.hasP9Vector() && VT == MVT::f128)
16193     return true;
16194   return VT == MVT::f32 || VT == MVT::f64 ||
16195     VT == MVT::v4f32 || VT == MVT::v2f64;
16196 }
16197 
16198 bool PPCTargetLowering::
16199 isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
16200   const Value *Mask = AndI.getOperand(1);
16201   // If the mask is suitable for andi. or andis. we should sink the and.
16202   if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
16203     // Can't handle constants wider than 64-bits.
16204     if (CI->getBitWidth() > 64)
16205       return false;
16206     int64_t ConstVal = CI->getZExtValue();
16207     return isUInt<16>(ConstVal) ||
16208       (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
16209   }
16210 
16211   // For non-constant masks, we can always use the record-form and.
16212   return true;
16213 }
16214 
16215 // Transform (abs (sub (zext a), (zext b))) to (vabsd a b 0)
16216 // Transform (abs (sub (zext a), (zext_invec b))) to (vabsd a b 0)
16217 // Transform (abs (sub (zext_invec a), (zext_invec b))) to (vabsd a b 0)
16218 // Transform (abs (sub (zext_invec a), (zext b))) to (vabsd a b 0)
16219 // Transform (abs (sub a, b) to (vabsd a b 1)) if a & b of type v4i32
16220 SDValue PPCTargetLowering::combineABS(SDNode *N, DAGCombinerInfo &DCI) const {
16221   assert((N->getOpcode() == ISD::ABS) && "Need ABS node here");
16222   assert(Subtarget.hasP9Altivec() &&
16223          "Only combine this when P9 altivec supported!");
16224   EVT VT = N->getValueType(0);
16225   if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
16226     return SDValue();
16227 
16228   SelectionDAG &DAG = DCI.DAG;
16229   SDLoc dl(N);
16230   if (N->getOperand(0).getOpcode() == ISD::SUB) {
16231     // Even for signed integers, if it's known to be positive (as signed
16232     // integer) due to zero-extended inputs.
16233     unsigned SubOpcd0 = N->getOperand(0)->getOperand(0).getOpcode();
16234     unsigned SubOpcd1 = N->getOperand(0)->getOperand(1).getOpcode();
16235     if ((SubOpcd0 == ISD::ZERO_EXTEND ||
16236          SubOpcd0 == ISD::ZERO_EXTEND_VECTOR_INREG) &&
16237         (SubOpcd1 == ISD::ZERO_EXTEND ||
16238          SubOpcd1 == ISD::ZERO_EXTEND_VECTOR_INREG)) {
16239       return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
16240                          N->getOperand(0)->getOperand(0),
16241                          N->getOperand(0)->getOperand(1),
16242                          DAG.getTargetConstant(0, dl, MVT::i32));
16243     }
16244 
16245     // For type v4i32, it can be optimized with xvnegsp + vabsduw
16246     if (N->getOperand(0).getValueType() == MVT::v4i32 &&
16247         N->getOperand(0).hasOneUse()) {
16248       return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
16249                          N->getOperand(0)->getOperand(0),
16250                          N->getOperand(0)->getOperand(1),
16251                          DAG.getTargetConstant(1, dl, MVT::i32));
16252     }
16253   }
16254 
16255   return SDValue();
16256 }
16257 
16258 // For type v4i32/v8ii16/v16i8, transform
16259 // from (vselect (setcc a, b, setugt), (sub a, b), (sub b, a)) to (vabsd a, b)
16260 // from (vselect (setcc a, b, setuge), (sub a, b), (sub b, a)) to (vabsd a, b)
16261 // from (vselect (setcc a, b, setult), (sub b, a), (sub a, b)) to (vabsd a, b)
16262 // from (vselect (setcc a, b, setule), (sub b, a), (sub a, b)) to (vabsd a, b)
16263 SDValue PPCTargetLowering::combineVSelect(SDNode *N,
16264                                           DAGCombinerInfo &DCI) const {
16265   assert((N->getOpcode() == ISD::VSELECT) && "Need VSELECT node here");
16266   assert(Subtarget.hasP9Altivec() &&
16267          "Only combine this when P9 altivec supported!");
16268 
16269   SelectionDAG &DAG = DCI.DAG;
16270   SDLoc dl(N);
16271   SDValue Cond = N->getOperand(0);
16272   SDValue TrueOpnd = N->getOperand(1);
16273   SDValue FalseOpnd = N->getOperand(2);
16274   EVT VT = N->getOperand(1).getValueType();
16275 
16276   if (Cond.getOpcode() != ISD::SETCC || TrueOpnd.getOpcode() != ISD::SUB ||
16277       FalseOpnd.getOpcode() != ISD::SUB)
16278     return SDValue();
16279 
16280   // ABSD only available for type v4i32/v8i16/v16i8
16281   if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
16282     return SDValue();
16283 
16284   // At least to save one more dependent computation
16285   if (!(Cond.hasOneUse() || TrueOpnd.hasOneUse() || FalseOpnd.hasOneUse()))
16286     return SDValue();
16287 
16288   ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
16289 
16290   // Can only handle unsigned comparison here
16291   switch (CC) {
16292   default:
16293     return SDValue();
16294   case ISD::SETUGT:
16295   case ISD::SETUGE:
16296     break;
16297   case ISD::SETULT:
16298   case ISD::SETULE:
16299     std::swap(TrueOpnd, FalseOpnd);
16300     break;
16301   }
16302 
16303   SDValue CmpOpnd1 = Cond.getOperand(0);
16304   SDValue CmpOpnd2 = Cond.getOperand(1);
16305 
16306   // SETCC CmpOpnd1 CmpOpnd2 cond
16307   // TrueOpnd = CmpOpnd1 - CmpOpnd2
16308   // FalseOpnd = CmpOpnd2 - CmpOpnd1
16309   if (TrueOpnd.getOperand(0) == CmpOpnd1 &&
16310       TrueOpnd.getOperand(1) == CmpOpnd2 &&
16311       FalseOpnd.getOperand(0) == CmpOpnd2 &&
16312       FalseOpnd.getOperand(1) == CmpOpnd1) {
16313     return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(1).getValueType(),
16314                        CmpOpnd1, CmpOpnd2,
16315                        DAG.getTargetConstant(0, dl, MVT::i32));
16316   }
16317 
16318   return SDValue();
16319 }
16320