//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCCCState.h"
#include "PPCCallingConv.h"
#include "PPCFrameLowering.h"
#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCRegisterInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionXCOFF.h"
#include "llvm/MC/MCSymbolXCOFF.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
"llvm/Target/TargetMachine.h" 93 #include "llvm/Target/TargetOptions.h" 94 #include <algorithm> 95 #include <cassert> 96 #include <cstdint> 97 #include <iterator> 98 #include <list> 99 #include <utility> 100 #include <vector> 101 102 using namespace llvm; 103 104 #define DEBUG_TYPE "ppc-lowering" 105 106 static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc", 107 cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden); 108 109 static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref", 110 cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden); 111 112 static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned", 113 cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden); 114 115 static cl::opt<bool> DisableSCO("disable-ppc-sco", 116 cl::desc("disable sibling call optimization on ppc"), cl::Hidden); 117 118 static cl::opt<bool> DisableInnermostLoopAlign32("disable-ppc-innermost-loop-align32", 119 cl::desc("don't always align innermost loop to 32 bytes on ppc"), cl::Hidden); 120 121 static cl::opt<bool> UseAbsoluteJumpTables("ppc-use-absolute-jumptables", 122 cl::desc("use absolute jump tables on ppc"), cl::Hidden); 123 124 // TODO - Remove this option if soft fp128 has been fully supported . 125 static cl::opt<bool> 126 EnableSoftFP128("enable-soft-fp128", 127 cl::desc("temp option to enable soft fp128"), cl::Hidden); 128 129 STATISTIC(NumTailCalls, "Number of tail calls"); 130 STATISTIC(NumSiblingCalls, "Number of sibling calls"); 131 STATISTIC(ShufflesHandledWithVPERM, "Number of shuffles lowered to a VPERM"); 132 STATISTIC(NumDynamicAllocaProbed, "Number of dynamic stack allocation probed"); 133 134 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int); 135 136 static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl); 137 138 // FIXME: Remove this once the bug has been fixed! 139 extern cl::opt<bool> ANDIGlueBug; 140 141 PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM, 142 const PPCSubtarget &STI) 143 : TargetLowering(TM), Subtarget(STI) { 144 // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all 145 // arguments are at least 4/8 bytes aligned. 146 bool isPPC64 = Subtarget.isPPC64(); 147 setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4)); 148 149 // Set up the register classes. 150 addRegisterClass(MVT::i32, &PPC::GPRCRegClass); 151 if (!useSoftFloat()) { 152 if (hasSPE()) { 153 addRegisterClass(MVT::f32, &PPC::GPRCRegClass); 154 // EFPU2 APU only supports f32 155 if (!Subtarget.hasEFPU2()) 156 addRegisterClass(MVT::f64, &PPC::SPERCRegClass); 157 } else { 158 addRegisterClass(MVT::f32, &PPC::F4RCRegClass); 159 addRegisterClass(MVT::f64, &PPC::F8RCRegClass); 160 } 161 } 162 163 // Match BITREVERSE to customized fast code sequence in the td file. 164 setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); 165 setOperationAction(ISD::BITREVERSE, MVT::i64, Legal); 166 167 // Sub-word ATOMIC_CMP_SWAP need to ensure that the input is zero-extended. 168 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom); 169 170 // PowerPC has an i16 but no i8 (or i1) SEXTLOAD. 

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
  }

  if (Subtarget.isISA3_0()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Legal);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Legal);
    setTruncStoreAction(MVT::f64, MVT::f16, Legal);
    setTruncStoreAction(MVT::f32, MVT::f16, Legal);
  } else {
    // No extending loads from f16 or HW conversions back and forth.
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
    setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  }

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-increment loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  if (!Subtarget.hasSPE()) {
    setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
  }

  // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }
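
  // Note: with useCRBits(), i1 becomes a legal type carried in individual
  // condition-register bits (the CRBITRC class registered at the end of the
  // block below), so i1 values only reach the GPRs via these promotions and
  // custom lowerings.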

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::STRICT_SINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::STRICT_UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);

      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::SINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);

      setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i1, Promote);
      AddPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i1, Promote);
      AddPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);

      setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
      AddPromotedToType(ISD::FP_TO_SINT, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
      AddPromotedToType(ISD::FP_TO_UINT, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
  // PPC (the libcall is not available).
  setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions unless we are on P9. On P9, we may
  // use a hardware instruction to compute the remainder. When both the
  // remainder and the quotient are required, it is more efficient to compute
  // the remainder from the result of the division rather than to use the
  // remainder instruction. The instructions are legalized directly because
  // the DivRemPairsPass performs the transformation at the IR level.
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::SREM, MVT::i32, Legal);
    setOperationAction(ISD::UREM, MVT::i32, Legal);
    setOperationAction(ISD::SREM, MVT::i64, Legal);
    setOperationAction(ISD::UREM, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::SREM, MVT::i32, Expand);
    setOperationAction(ISD::UREM, MVT::i32, Expand);
    setOperationAction(ISD::SREM, MVT::i64, Expand);
    setOperationAction(ISD::UREM, MVT::i64, Expand);
  }

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
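
  // For example, with SREM marked Expand on a pre-Power9 subtarget, the
  // legalizer rewrites "r = srem a, b" into roughly:
  //   t = sdiv a, b
  //   r = sub a, (mul t, b)
  // (it cannot fall back on SDIVREM or SMUL_LOHI, since those are Expand too).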

  // Handle constrained floating-point operations on scalars.
  // TODO: Handle SPE-specific operations.
  setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FMA, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);

  setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FMA, MVT::f64, Legal);
  if (Subtarget.hasVSX()) {
    setOperationAction(ISD::STRICT_FRINT, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FRINT, MVT::f64, Legal);
  }

  if (Subtarget.hasFSQRT()) {
    setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::STRICT_FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::f32, Legal);

    setOperationAction(ISD::STRICT_FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::f64, Legal);
  }

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  if (Subtarget.hasSPE()) {
    setOperationAction(ISD::FMA, MVT::f64, Expand);
    setOperationAction(ISD::FMA, MVT::f32, Expand);
  } else {
    setOperationAction(ISD::FMA, MVT::f64, Legal);
    setOperationAction(ISD::FMA, MVT::f32, Legal);
  }

  if (Subtarget.hasSPE())
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);
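
  // Note: when FSQRT is left non-Expand here despite the lack of a hardware
  // sqrt instruction, the unsafe-math path relies on the reciprocal
  // square-root estimate instructions (frsqrte/fre, or frsqrtes/fres for
  // f32) refined by Newton-Raphson iterations during DAG combining.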

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP, but we can use the vector BSWAP instruction
  // xxbrd to speed up scalar BSWAP64.
  // CTPOP and CTTZ were introduced in P8 and P9, respectively.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  if (Subtarget.hasP9Vector())
    setOperationAction(ISD::BSWAP, MVT::i64, Custom);
  else
    setOperationAction(ISD::BSWAP, MVT::i64, Expand);
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::CTTZ, MVT::i32, Legal);
    setOperationAction(ISD::CTTZ, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTTZ, MVT::i32, Expand);
    setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  }

  if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have Select.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  if (Subtarget.hasFPU()) {
    setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Legal);

    setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Legal);
  }

  // PowerPC does not have BRCOND, which requires SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  if (Subtarget.hasSPE()) {
    // SPE has built-in conversions.
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
  } else {
    // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

    // PowerPC does not have [U|S]INT_TO_FP.
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
  }

  if (Subtarget.hasDirectMove() && isPPC64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i64, Legal);
    setOperationAction(ISD::BITCAST, MVT::f64, Legal);
    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::LRINT, MVT::f64, Legal);
      setOperationAction(ISD::LRINT, MVT::f32, Legal);
      setOperationAction(ISD::LLRINT, MVT::f64, Legal);
      setOperationAction(ISD::LLRINT, MVT::f32, Legal);
      setOperationAction(ISD::LROUND, MVT::f64, Legal);
      setOperationAction(ISD::LROUND, MVT::f32, Legal);
      setOperationAction(ISD::LLROUND, MVT::f64, Legal);
      setOperationAction(ISD::LLROUND, MVT::f32, Legal);
    }
  } else {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
  }

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP here is NOT intended to support SjLj
  // exception handling; it is a light-weight setjmp/longjmp replacement used
  // to support continuations, user-level threading, etc. As a result, no
  // other SjLj exception interfaces are implemented; please don't build your
  // own exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
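
  // Note: under the 64-bit ELF ABI every va_arg slot occupies a full
  // doubleword, so the promotions below make small-integer va_arg values
  // load as i64 and truncate.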

  if (Subtarget.is64BitELFABI()) {
    // VAARG always uses double-word chunks, so promote anything smaller.
    setOperationAction(ISD::VAARG, MVT::i1, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i8, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i16, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i32, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
  } else if (Subtarget.is32BitELFABI()) {
    // VAARG is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VAARG, MVT::i64, Custom);
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  // VACOPY is custom lowered with the 32-bit SVR4 ABI.
  if (Subtarget.is32BitELFABI())
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  // Comparisons that require checking two conditions.
  if (Subtarget.hasSPE()) {
    setCondCodeAction(ISD::SETO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::f64, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
  }
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);

  if (Subtarget.has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
    }
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    if (Subtarget.hasSPE()) {
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
    } else {
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Expand);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    }
  }

  // With the instructions enabled under FPCVT, we can do everything.
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }
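
  // Note: the *_PARTS nodes represent a shift of a value split across two
  // registers; Custom lowering lets PPC emit its own multi-instruction
  // sequence instead of the generic expansion.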

  // PowerPC has better expansions for funnel shifts than the generic
  // TargetLowering::expandFunnelShift.
  if (Subtarget.has64BitSupport()) {
    setOperationAction(ISD::FSHL, MVT::i64, Custom);
    setOperationAction(ISD::FSHR, MVT::i64, Custom);
  }
  setOperationAction(ISD::FSHL, MVT::i32, Custom);
  setOperationAction(ISD::FSHR, MVT::i32, Custom);

  if (Subtarget.hasVSX()) {
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
  }

  if (Subtarget.hasAltivec()) {
    for (MVT VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
      setOperationAction(ISD::SADDSAT, VT, Legal);
      setOperationAction(ISD::SSUBSAT, VT, Legal);
      setOperationAction(ISD::UADDSAT, VT, Legal);
      setOperationAction(ISD::USUBSAT, VT, Legal);
    }
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // For v2i64, these are only valid with P8Vector. This is corrected after
      // the loop.
      if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) {
        setOperationAction(ISD::SMAX, VT, Legal);
        setOperationAction(ISD::SMIN, VT, Legal);
        setOperationAction(ISD::UMAX, VT, Legal);
        setOperationAction(ISD::UMIN, VT, Legal);
      } else {
        setOperationAction(ISD::SMAX, VT, Expand);
        setOperationAction(ISD::SMIN, VT, Expand);
        setOperationAction(ISD::UMAX, VT, Expand);
        setOperationAction(ISD::UMIN, VT, Expand);
      }

      if (Subtarget.hasVSX()) {
        setOperationAction(ISD::FMAXNUM, VT, Legal);
        setOperationAction(ISD::FMINNUM, VT, Legal);
      }

      // Vector instructions introduced in P8
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      } else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // Vector instructions introduced in P9
      if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
        setOperationAction(ISD::CTTZ, VT, Legal);
      else
        setOperationAction(ISD::CTTZ, VT, Expand);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType(ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v4i32);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v4i32);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v4i32);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType(ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType(ISD::STORE, VT, MVT::v4i32);
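
      // Note: promoting these bitwise ops, loads/stores and selects to v4i32
      // wraps a single v4i32 node in bitcasts, so one set of Altivec patterns
      // serves every 128-bit vector type.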

      // No other operations are legal.
      setOperationAction(ISD::MUL, VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL, VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }
    setOperationAction(ISD::SELECT_CC, MVT::v4i32, Expand);
    if (!Subtarget.hasP8Vector()) {
      setOperationAction(ISD::SMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::SMIN, MVT::v2i64, Expand);
      setOperationAction(ISD::UMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::UMIN, MVT::v2i64, Expand);
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM; others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    // Vector truncates to sub-word integers that fit in an Altivec/VSX
    // register are cheap, so handle them before they get expanded to scalar.
    setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);

    setOperationAction(ISD::AND, MVT::v4i32, Legal);
    setOperationAction(ISD::OR, MVT::v4i32, Legal);
    setOperationAction(ISD::XOR, MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    // Custom lower ROTL of v1i128 to VECTOR_SHUFFLE of v16i8.
    setOperationAction(ISD::ROTL, MVT::v1i128, Custom);
    // With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w).
    if (Subtarget.hasAltivec())
      for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})
        setOperationAction(ISD::ROTL, VT, Legal);
    // With hasP8Altivec set, we can lower ISD::ROTL to vrld.
    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::ROTL, MVT::v2i64, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    if (Subtarget.isISA3_1()) {
      setOperationAction(ISD::MUL, MVT::v2i64, Legal);
      setOperationAction(ISD::MULHS, MVT::v2i64, Legal);
      setOperationAction(ISD::MULHU, MVT::v2i64, Legal);
      setOperationAction(ISD::MULHS, MVT::v4i32, Legal);
      setOperationAction(ISD::MULHU, MVT::v4i32, Legal);
      setOperationAction(ISD::UDIV, MVT::v2i64, Legal);
      setOperationAction(ISD::SDIV, MVT::v2i64, Legal);
      setOperationAction(ISD::UDIV, MVT::v4i32, Legal);
      setOperationAction(ISD::SDIV, MVT::v4i32, Legal);
      setOperationAction(ISD::UREM, MVT::v2i64, Legal);
      setOperationAction(ISD::SREM, MVT::v2i64, Legal);
      setOperationAction(ISD::UREM, MVT::v4i32, Legal);
      setOperationAction(ISD::SREM, MVT::v4i32, Legal);
      setOperationAction(ISD::UREM, MVT::v1i128, Legal);
      setOperationAction(ISD::SREM, MVT::v1i128, Legal);
      setOperationAction(ISD::UDIV, MVT::v1i128, Legal);
      setOperationAction(ISD::SDIV, MVT::v1i128, Legal);
      setOperationAction(ISD::ROTL, MVT::v1i128, Legal);
    }

    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
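
    // Note: marking a floating-point condition code Expand below means SETCC
    // with that predicate is rewritten by the legalizer in terms of the
    // predicates that remain supported (e.g. building an unordered compare
    // from an ordered compare plus a NaN check); it does not become a libcall.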

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
      }
      if (Subtarget.hasDirectMove() && isPPC64) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
      }
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      // The nearbyint variants are not allowed to raise the inexact exception
      // so we can only code-gen them with unsafe math.
      if (TM.Options.UnsafeFPMath) {
        setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
        setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
      }

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::f64, Legal);
      setOperationAction(ISD::FRINT, MVT::f64, Legal);

      setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
      setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::f32, Legal);
      setOperationAction(ISD::FRINT, MVT::f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        // 128 bit shifts can be accomplished via 3 instructions for SHL and
        // SRL, but not for SRA because of the instructions available:
        // VS{RL} and VS{RL}O. However, due to direct-move costs, it's not
        // worth doing.
        setOperationAction(ISD::SHL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRA, MVT::v1i128, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      } else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      if (Subtarget.isISA3_1())
        setOperationAction(ISD::SETCC, MVT::v1i128, Legal);
      else
        setOperationAction(ISD::SETCC, MVT::v1i128, Expand);

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i64, Legal);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Custom handling for partial vectors of integers converted to
      // floating point. We already have optimal handling for v2i32 through
      // the DAG combine, so those aren't necessary.
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);

      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v2f64, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Legal);

      if (Subtarget.hasDirectMove())
        setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

      // Handle constrained floating-point operations on vectors. The
      // predicate is hasVSX, because Altivec instructions do not raise
      // floating-point exceptions but VSX vector instructions do.
      setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMA, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMAXNUM, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMINNUM, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FRINT, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FCEIL, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMA, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMAXNUM, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMINNUM, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FRINT, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FROUND, MVT::v2f64, Legal);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }

    if (Subtarget.hasP9Vector()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

      // 128 bit shifts can be accomplished via 3 instructions for SHL and
      // SRL, but not for SRA because of the instructions available:
      // VS{RL} and VS{RL}O.
      setOperationAction(ISD::SHL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRA, MVT::v1i128, Expand);

      addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
      setOperationAction(ISD::FADD, MVT::f128, Legal);
      setOperationAction(ISD::FSUB, MVT::f128, Legal);
      setOperationAction(ISD::FDIV, MVT::f128, Legal);
      setOperationAction(ISD::FMUL, MVT::f128, Legal);
      setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
      // No extending loads to f128 on PPC.
      for (MVT FPT : MVT::fp_valuetypes())
        setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
      setOperationAction(ISD::FMA, MVT::f128, Legal);
      setCondCodeAction(ISD::SETULT, MVT::f128, Expand);
      setCondCodeAction(ISD::SETUGT, MVT::f128, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand);
      setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
      setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
      setCondCodeAction(ISD::SETONE, MVT::f128, Expand);

      setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
      setOperationAction(ISD::FRINT, MVT::f128, Legal);
      setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
      setOperationAction(ISD::FCEIL, MVT::f128, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
      setOperationAction(ISD::FROUND, MVT::f128, Legal);

      setOperationAction(ISD::SELECT, MVT::f128, Expand);
      setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
      setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
      setTruncStoreAction(MVT::f128, MVT::f64, Expand);
      setTruncStoreAction(MVT::f128, MVT::f32, Expand);
      setOperationAction(ISD::BITCAST, MVT::i128, Custom);
      // No implementation for these ops on PowerPC.
      setOperationAction(ISD::FSIN, MVT::f128, Expand);
      setOperationAction(ISD::FCOS, MVT::f128, Expand);
      setOperationAction(ISD::FPOW, MVT::f128, Expand);
      setOperationAction(ISD::FPOWI, MVT::f128, Expand);
      setOperationAction(ISD::FREM, MVT::f128, Expand);

      // Handle constrained floating-point operations on fp128.
      setOperationAction(ISD::STRICT_FADD, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FSUB, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FMUL, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FDIV, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FMA, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FSQRT, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Legal);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
      setOperationAction(ISD::STRICT_FRINT, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FCEIL, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FROUND, MVT::f128, Legal);
      setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
      setOperationAction(ISD::BSWAP, MVT::v8i16, Legal);
      setOperationAction(ISD::BSWAP, MVT::v4i32, Legal);
      setOperationAction(ISD::BSWAP, MVT::v2i64, Legal);
      setOperationAction(ISD::BSWAP, MVT::v1i128, Legal);
    } else if (Subtarget.hasAltivec() && EnableSoftFP128) {
      addRegisterClass(MVT::f128, &PPC::VRRCRegClass);

      for (MVT FPT : MVT::fp_valuetypes())
        setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);

      setOperationAction(ISD::LOAD, MVT::f128, Promote);
      setOperationAction(ISD::STORE, MVT::f128, Promote);

      AddPromotedToType(ISD::LOAD, MVT::f128, MVT::v4i32);
      AddPromotedToType(ISD::STORE, MVT::f128, MVT::v4i32);

      // Set FADD/FSUB as libcall to avoid having the legalizer expand the
      // fp_to_uint and int_to_fp.
      setOperationAction(ISD::FADD, MVT::f128, LibCall);
      setOperationAction(ISD::FSUB, MVT::f128, LibCall);

      setOperationAction(ISD::FMUL, MVT::f128, Expand);
      setOperationAction(ISD::FDIV, MVT::f128, Expand);
      setOperationAction(ISD::FNEG, MVT::f128, Expand);
      setOperationAction(ISD::FABS, MVT::f128, Expand);
      setOperationAction(ISD::FSIN, MVT::f128, Expand);
      setOperationAction(ISD::FCOS, MVT::f128, Expand);
      setOperationAction(ISD::FPOW, MVT::f128, Expand);
      setOperationAction(ISD::FPOWI, MVT::f128, Expand);
      setOperationAction(ISD::FREM, MVT::f128, Expand);
      setOperationAction(ISD::FSQRT, MVT::f128, Expand);
      setOperationAction(ISD::FMA, MVT::f128, Expand);
      setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);

      setTruncStoreAction(MVT::f128, MVT::f64, Expand);
      setTruncStoreAction(MVT::f128, MVT::f32, Expand);

      // Expand the fp_extend if the target type is fp128.
      setOperationAction(ISD::FP_EXTEND, MVT::f128, Expand);
      setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Expand);

      // Expand the fp_round if the source type is fp128.
      for (MVT VT : {MVT::f32, MVT::f64}) {
        setOperationAction(ISD::FP_ROUND, VT, Custom);
        setOperationAction(ISD::STRICT_FP_ROUND, VT, Custom);
      }
    }

    if (Subtarget.hasP9Altivec()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);

      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
    }
  }

  if (Subtarget.pairedVectorMemops()) {
    addRegisterClass(MVT::v256i1, &PPC::VSRpRCRegClass);
    setOperationAction(ISD::LOAD, MVT::v256i1, Custom);
    setOperationAction(ISD::STORE, MVT::v256i1, Custom);
  }
  if (Subtarget.hasMMA()) {
    addRegisterClass(MVT::v512i1, &PPC::UACCRCRegClass);
    setOperationAction(ISD::LOAD, MVT::v512i1, Custom);
    setOperationAction(ISD::STORE, MVT::v512i1, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v512i1, Custom);
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  if (Subtarget.isISA3_1())
    setOperationAction(ISD::SRA, MVT::v1i128, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasAltivec()) {
    // Altivec instructions set fields to all zeros or all ones.
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  }

  if (!isPPC64) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  if (!isPPC64)
    setMaxAtomicSizeInBitsSupported(32);

  setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);
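
  // Note: R1 is the register the PPC ABIs use as the stack pointer; X1 is
  // the same register viewed as 64 bits wide.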
PPC::X1 : PPC::R1); 1281 1282 // We have target-specific dag combine patterns for the following nodes: 1283 setTargetDAGCombine(ISD::ADD); 1284 setTargetDAGCombine(ISD::SHL); 1285 setTargetDAGCombine(ISD::SRA); 1286 setTargetDAGCombine(ISD::SRL); 1287 setTargetDAGCombine(ISD::MUL); 1288 setTargetDAGCombine(ISD::FMA); 1289 setTargetDAGCombine(ISD::SINT_TO_FP); 1290 setTargetDAGCombine(ISD::BUILD_VECTOR); 1291 if (Subtarget.hasFPCVT()) 1292 setTargetDAGCombine(ISD::UINT_TO_FP); 1293 setTargetDAGCombine(ISD::LOAD); 1294 setTargetDAGCombine(ISD::STORE); 1295 setTargetDAGCombine(ISD::BR_CC); 1296 if (Subtarget.useCRBits()) 1297 setTargetDAGCombine(ISD::BRCOND); 1298 setTargetDAGCombine(ISD::BSWAP); 1299 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN); 1300 setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN); 1301 setTargetDAGCombine(ISD::INTRINSIC_VOID); 1302 1303 setTargetDAGCombine(ISD::SIGN_EXTEND); 1304 setTargetDAGCombine(ISD::ZERO_EXTEND); 1305 setTargetDAGCombine(ISD::ANY_EXTEND); 1306 1307 setTargetDAGCombine(ISD::TRUNCATE); 1308 setTargetDAGCombine(ISD::VECTOR_SHUFFLE); 1309 1310 1311 if (Subtarget.useCRBits()) { 1312 setTargetDAGCombine(ISD::TRUNCATE); 1313 setTargetDAGCombine(ISD::SETCC); 1314 setTargetDAGCombine(ISD::SELECT_CC); 1315 } 1316 1317 if (Subtarget.hasP9Altivec()) { 1318 setTargetDAGCombine(ISD::ABS); 1319 setTargetDAGCombine(ISD::VSELECT); 1320 } 1321 1322 setLibcallName(RTLIB::LOG_F128, "logf128"); 1323 setLibcallName(RTLIB::LOG2_F128, "log2f128"); 1324 setLibcallName(RTLIB::LOG10_F128, "log10f128"); 1325 setLibcallName(RTLIB::EXP_F128, "expf128"); 1326 setLibcallName(RTLIB::EXP2_F128, "exp2f128"); 1327 setLibcallName(RTLIB::SIN_F128, "sinf128"); 1328 setLibcallName(RTLIB::COS_F128, "cosf128"); 1329 setLibcallName(RTLIB::POW_F128, "powf128"); 1330 setLibcallName(RTLIB::FMIN_F128, "fminf128"); 1331 setLibcallName(RTLIB::FMAX_F128, "fmaxf128"); 1332 setLibcallName(RTLIB::REM_F128, "fmodf128"); 1333 setLibcallName(RTLIB::SQRT_F128, "sqrtf128"); 1334 setLibcallName(RTLIB::CEIL_F128, "ceilf128"); 1335 setLibcallName(RTLIB::FLOOR_F128, "floorf128"); 1336 setLibcallName(RTLIB::TRUNC_F128, "truncf128"); 1337 setLibcallName(RTLIB::ROUND_F128, "roundf128"); 1338 setLibcallName(RTLIB::LROUND_F128, "lroundf128"); 1339 setLibcallName(RTLIB::LLROUND_F128, "llroundf128"); 1340 setLibcallName(RTLIB::RINT_F128, "rintf128"); 1341 setLibcallName(RTLIB::LRINT_F128, "lrintf128"); 1342 setLibcallName(RTLIB::LLRINT_F128, "llrintf128"); 1343 setLibcallName(RTLIB::NEARBYINT_F128, "nearbyintf128"); 1344 setLibcallName(RTLIB::FMA_F128, "fmaf128"); 1345 1346 // With 32 condition bits, we don't need to sink (and duplicate) compares 1347 // aggressively in CodeGenPrep. 
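// (The 32 condition bits are the eight 4-bit CR fields CR0-CR7; with
// useCRBits() each i1 value can live in its own CR bit, so a compare result
// is cheap to keep live across blocks rather than re-materialize.)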
1348 if (Subtarget.useCRBits()) {
1349 setHasMultipleConditionRegisters();
1350 setJumpIsExpensive();
1351 }
1352
1353 setMinFunctionAlignment(Align(4));
1354
1355 switch (Subtarget.getCPUDirective()) {
1356 default: break;
1357 case PPC::DIR_970:
1358 case PPC::DIR_A2:
1359 case PPC::DIR_E500:
1360 case PPC::DIR_E500mc:
1361 case PPC::DIR_E5500:
1362 case PPC::DIR_PWR4:
1363 case PPC::DIR_PWR5:
1364 case PPC::DIR_PWR5X:
1365 case PPC::DIR_PWR6:
1366 case PPC::DIR_PWR6X:
1367 case PPC::DIR_PWR7:
1368 case PPC::DIR_PWR8:
1369 case PPC::DIR_PWR9:
1370 case PPC::DIR_PWR10:
1371 case PPC::DIR_PWR_FUTURE:
1372 setPrefLoopAlignment(Align(16));
1373 setPrefFunctionAlignment(Align(16));
1374 break;
1375 }
1376
1377 if (Subtarget.enableMachineScheduler())
1378 setSchedulingPreference(Sched::Source);
1379 else
1380 setSchedulingPreference(Sched::Hybrid);
1381
1382 computeRegisterProperties(STI.getRegisterInfo());
1383
1384 // The Freescale cores do better with aggressive inlining of memcpy and
1385 // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
1386 if (Subtarget.getCPUDirective() == PPC::DIR_E500mc ||
1387 Subtarget.getCPUDirective() == PPC::DIR_E5500) {
1388 MaxStoresPerMemset = 32;
1389 MaxStoresPerMemsetOptSize = 16;
1390 MaxStoresPerMemcpy = 32;
1391 MaxStoresPerMemcpyOptSize = 8;
1392 MaxStoresPerMemmove = 32;
1393 MaxStoresPerMemmoveOptSize = 8;
1394 } else if (Subtarget.getCPUDirective() == PPC::DIR_A2) {
1395 // The A2 also benefits from (very) aggressive inlining of memcpy and
1396 // friends. The overhead of the function call, even when warm, can be
1397 // over one hundred cycles.
1398 MaxStoresPerMemset = 128;
1399 MaxStoresPerMemcpy = 128;
1400 MaxStoresPerMemmove = 128;
1401 MaxLoadsPerMemcmp = 128;
1402 } else {
1403 MaxLoadsPerMemcmp = 8;
1404 MaxLoadsPerMemcmpOptSize = 4;
1405 }
1406
1407 IsStrictFPEnabled = true;
1408
1409 // Let the subtarget (CPU) decide if a predictable select is more expensive
1410 // than the corresponding branch. This information is used in CGP to decide
1411 // when to convert selects into branches.
1412 PredictableSelectIsExpensive = Subtarget.isPredictableSelectIsExpensive();
1413 }
1414
1415 /// getMaxByValAlign - Helper for getByValTypeAlignment to determine
1416 /// the desired ByVal argument alignment.
1417 static void getMaxByValAlign(Type *Ty, Align &MaxAlign, Align MaxMaxAlign) {
1418 if (MaxAlign == MaxMaxAlign)
1419 return;
1420 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1421 if (MaxMaxAlign >= 32 &&
1422 VTy->getPrimitiveSizeInBits().getFixedSize() >= 256)
1423 MaxAlign = Align(32);
1424 else if (VTy->getPrimitiveSizeInBits().getFixedSize() >= 128 &&
1425 MaxAlign < 16)
1426 MaxAlign = Align(16);
1427 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1428 Align EltAlign;
1429 getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
1430 if (EltAlign > MaxAlign)
1431 MaxAlign = EltAlign;
1432 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1433 for (auto *EltTy : STy->elements()) {
1434 Align EltAlign;
1435 getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
1436 if (EltAlign > MaxAlign)
1437 MaxAlign = EltAlign;
1438 if (MaxAlign == MaxMaxAlign)
1439 break;
1440 }
1441 }
1442 }
1443
1444 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1445 /// function arguments in the caller parameter area.
1446 unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
1447 const DataLayout &DL) const {
1448 // 16-byte and wider vectors are passed on a 16-byte boundary.
1449 // The rest are aligned to an 8-byte boundary on PPC64 and 4-byte on PPC32.
1450 Align Alignment = Subtarget.isPPC64() ? Align(8) : Align(4);
1451 if (Subtarget.hasAltivec())
1452 getMaxByValAlign(Ty, Alignment, Align(16));
1453 return Alignment.value();
1454 }
1455
1456 bool PPCTargetLowering::useSoftFloat() const {
1457 return Subtarget.useSoftFloat();
1458 }
1459
1460 bool PPCTargetLowering::hasSPE() const {
1461 return Subtarget.hasSPE();
1462 }
1463
1464 bool PPCTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
1465 return VT.isScalarInteger();
1466 }
1467
1468 const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
1469 switch ((PPCISD::NodeType)Opcode) {
1470 case PPCISD::FIRST_NUMBER: break;
1471 case PPCISD::FSEL: return "PPCISD::FSEL";
1472 case PPCISD::XSMAXCDP: return "PPCISD::XSMAXCDP";
1473 case PPCISD::XSMINCDP: return "PPCISD::XSMINCDP";
1474 case PPCISD::FCFID: return "PPCISD::FCFID";
1475 case PPCISD::FCFIDU: return "PPCISD::FCFIDU";
1476 case PPCISD::FCFIDS: return "PPCISD::FCFIDS";
1477 case PPCISD::FCFIDUS: return "PPCISD::FCFIDUS";
1478 case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
1479 case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
1480 case PPCISD::FCTIDUZ: return "PPCISD::FCTIDUZ";
1481 case PPCISD::FCTIWUZ: return "PPCISD::FCTIWUZ";
1482 case PPCISD::FP_TO_UINT_IN_VSR:
1483 return "PPCISD::FP_TO_UINT_IN_VSR";
1484 case PPCISD::FP_TO_SINT_IN_VSR:
1485 return "PPCISD::FP_TO_SINT_IN_VSR";
1486 case PPCISD::FRE: return "PPCISD::FRE";
1487 case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE";
1488 case PPCISD::FTSQRT:
1489 return "PPCISD::FTSQRT";
1490 case PPCISD::FSQRT:
1491 return "PPCISD::FSQRT";
1492 case PPCISD::STFIWX: return "PPCISD::STFIWX";
1493 case PPCISD::VPERM: return "PPCISD::VPERM";
1494 case PPCISD::XXSPLT: return "PPCISD::XXSPLT";
1495 case PPCISD::XXSPLTI_SP_TO_DP:
1496 return "PPCISD::XXSPLTI_SP_TO_DP";
1497 case PPCISD::XXSPLTI32DX:
1498 return "PPCISD::XXSPLTI32DX";
1499 case PPCISD::VECINSERT: return "PPCISD::VECINSERT";
1500 case PPCISD::XXPERMDI: return "PPCISD::XXPERMDI";
1501 case PPCISD::VECSHL: return "PPCISD::VECSHL";
1502 case PPCISD::CMPB: return "PPCISD::CMPB";
1503 case PPCISD::Hi: return "PPCISD::Hi";
1504 case PPCISD::Lo: return "PPCISD::Lo";
1505 case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY";
1506 case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
1507 case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
1508 case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC";
1509 case PPCISD::DYNAREAOFFSET: return "PPCISD::DYNAREAOFFSET";
1510 case PPCISD::PROBED_ALLOCA: return "PPCISD::PROBED_ALLOCA";
1511 case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
1512 case PPCISD::SRL: return "PPCISD::SRL";
1513 case PPCISD::SRA: return "PPCISD::SRA";
1514 case PPCISD::SHL: return "PPCISD::SHL";
1515 case PPCISD::SRA_ADDZE: return "PPCISD::SRA_ADDZE";
1516 case PPCISD::CALL: return "PPCISD::CALL";
1517 case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP";
1518 case PPCISD::CALL_NOTOC: return "PPCISD::CALL_NOTOC";
1519 case PPCISD::MTCTR: return "PPCISD::MTCTR";
1520 case PPCISD::BCTRL: return "PPCISD::BCTRL";
1521 case PPCISD::BCTRL_LOAD_TOC: return "PPCISD::BCTRL_LOAD_TOC";
1522 case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG";
1523 case PPCISD::READ_TIME_BASE: return "PPCISD::READ_TIME_BASE";
1524 case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP";
1525 case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
1526 case PPCISD::MFOCRF: return "PPCISD::MFOCRF";
1527 case PPCISD::MFVSR: return
"PPCISD::MFVSR"; 1528 case PPCISD::MTVSRA: return "PPCISD::MTVSRA"; 1529 case PPCISD::MTVSRZ: return "PPCISD::MTVSRZ"; 1530 case PPCISD::SINT_VEC_TO_FP: return "PPCISD::SINT_VEC_TO_FP"; 1531 case PPCISD::UINT_VEC_TO_FP: return "PPCISD::UINT_VEC_TO_FP"; 1532 case PPCISD::SCALAR_TO_VECTOR_PERMUTED: 1533 return "PPCISD::SCALAR_TO_VECTOR_PERMUTED"; 1534 case PPCISD::ANDI_rec_1_EQ_BIT: 1535 return "PPCISD::ANDI_rec_1_EQ_BIT"; 1536 case PPCISD::ANDI_rec_1_GT_BIT: 1537 return "PPCISD::ANDI_rec_1_GT_BIT"; 1538 case PPCISD::VCMP: return "PPCISD::VCMP"; 1539 case PPCISD::VCMP_rec: return "PPCISD::VCMP_rec"; 1540 case PPCISD::LBRX: return "PPCISD::LBRX"; 1541 case PPCISD::STBRX: return "PPCISD::STBRX"; 1542 case PPCISD::LFIWAX: return "PPCISD::LFIWAX"; 1543 case PPCISD::LFIWZX: return "PPCISD::LFIWZX"; 1544 case PPCISD::LXSIZX: return "PPCISD::LXSIZX"; 1545 case PPCISD::STXSIX: return "PPCISD::STXSIX"; 1546 case PPCISD::VEXTS: return "PPCISD::VEXTS"; 1547 case PPCISD::LXVD2X: return "PPCISD::LXVD2X"; 1548 case PPCISD::STXVD2X: return "PPCISD::STXVD2X"; 1549 case PPCISD::LOAD_VEC_BE: return "PPCISD::LOAD_VEC_BE"; 1550 case PPCISD::STORE_VEC_BE: return "PPCISD::STORE_VEC_BE"; 1551 case PPCISD::ST_VSR_SCAL_INT: 1552 return "PPCISD::ST_VSR_SCAL_INT"; 1553 case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH"; 1554 case PPCISD::BDNZ: return "PPCISD::BDNZ"; 1555 case PPCISD::BDZ: return "PPCISD::BDZ"; 1556 case PPCISD::MFFS: return "PPCISD::MFFS"; 1557 case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ"; 1558 case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN"; 1559 case PPCISD::CR6SET: return "PPCISD::CR6SET"; 1560 case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET"; 1561 case PPCISD::PPC32_GOT: return "PPCISD::PPC32_GOT"; 1562 case PPCISD::PPC32_PICGOT: return "PPCISD::PPC32_PICGOT"; 1563 case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA"; 1564 case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L"; 1565 case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS"; 1566 case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA"; 1567 case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L"; 1568 case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR"; 1569 case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR"; 1570 case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA"; 1571 case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L"; 1572 case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR"; 1573 case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR"; 1574 case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA"; 1575 case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L"; 1576 case PPCISD::PADDI_DTPREL: 1577 return "PPCISD::PADDI_DTPREL"; 1578 case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT"; 1579 case PPCISD::SC: return "PPCISD::SC"; 1580 case PPCISD::CLRBHRB: return "PPCISD::CLRBHRB"; 1581 case PPCISD::MFBHRBE: return "PPCISD::MFBHRBE"; 1582 case PPCISD::RFEBB: return "PPCISD::RFEBB"; 1583 case PPCISD::XXSWAPD: return "PPCISD::XXSWAPD"; 1584 case PPCISD::SWAP_NO_CHAIN: return "PPCISD::SWAP_NO_CHAIN"; 1585 case PPCISD::VABSD: return "PPCISD::VABSD"; 1586 case PPCISD::BUILD_FP128: return "PPCISD::BUILD_FP128"; 1587 case PPCISD::BUILD_SPE64: return "PPCISD::BUILD_SPE64"; 1588 case PPCISD::EXTRACT_SPE: return "PPCISD::EXTRACT_SPE"; 1589 case PPCISD::EXTSWSLI: return "PPCISD::EXTSWSLI"; 1590 case PPCISD::LD_VSX_LH: return "PPCISD::LD_VSX_LH"; 1591 case PPCISD::FP_EXTEND_HALF: return "PPCISD::FP_EXTEND_HALF"; 1592 case PPCISD::MAT_PCREL_ADDR: return 
"PPCISD::MAT_PCREL_ADDR"; 1593 case PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR: 1594 return "PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR"; 1595 case PPCISD::TLS_LOCAL_EXEC_MAT_ADDR: 1596 return "PPCISD::TLS_LOCAL_EXEC_MAT_ADDR"; 1597 case PPCISD::ACC_BUILD: return "PPCISD::ACC_BUILD"; 1598 case PPCISD::PAIR_BUILD: return "PPCISD::PAIR_BUILD"; 1599 case PPCISD::EXTRACT_VSX_REG: return "PPCISD::EXTRACT_VSX_REG"; 1600 case PPCISD::XXMFACC: return "PPCISD::XXMFACC"; 1601 case PPCISD::LD_SPLAT: return "PPCISD::LD_SPLAT"; 1602 case PPCISD::FNMSUB: return "PPCISD::FNMSUB"; 1603 case PPCISD::STRICT_FADDRTZ: 1604 return "PPCISD::STRICT_FADDRTZ"; 1605 case PPCISD::STRICT_FCTIDZ: 1606 return "PPCISD::STRICT_FCTIDZ"; 1607 case PPCISD::STRICT_FCTIWZ: 1608 return "PPCISD::STRICT_FCTIWZ"; 1609 case PPCISD::STRICT_FCTIDUZ: 1610 return "PPCISD::STRICT_FCTIDUZ"; 1611 case PPCISD::STRICT_FCTIWUZ: 1612 return "PPCISD::STRICT_FCTIWUZ"; 1613 case PPCISD::STRICT_FCFID: 1614 return "PPCISD::STRICT_FCFID"; 1615 case PPCISD::STRICT_FCFIDU: 1616 return "PPCISD::STRICT_FCFIDU"; 1617 case PPCISD::STRICT_FCFIDS: 1618 return "PPCISD::STRICT_FCFIDS"; 1619 case PPCISD::STRICT_FCFIDUS: 1620 return "PPCISD::STRICT_FCFIDUS"; 1621 case PPCISD::LXVRZX: return "PPCISD::LXVRZX"; 1622 } 1623 return nullptr; 1624 } 1625 1626 EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C, 1627 EVT VT) const { 1628 if (!VT.isVector()) 1629 return Subtarget.useCRBits() ? MVT::i1 : MVT::i32; 1630 1631 return VT.changeVectorElementTypeToInteger(); 1632 } 1633 1634 bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const { 1635 assert(VT.isFloatingPoint() && "Non-floating-point FMA?"); 1636 return true; 1637 } 1638 1639 //===----------------------------------------------------------------------===// 1640 // Node matching predicates, for use by the tblgen matching code. 1641 //===----------------------------------------------------------------------===// 1642 1643 /// isFloatingPointZero - Return true if this is 0.0 or -0.0. 1644 static bool isFloatingPointZero(SDValue Op) { 1645 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 1646 return CFP->getValueAPF().isZero(); 1647 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { 1648 // Maybe this has already been legalized into the constant pool? 1649 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1))) 1650 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 1651 return CFP->getValueAPF().isZero(); 1652 } 1653 return false; 1654 } 1655 1656 /// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return 1657 /// true if Op is undef or if it matches the specified value. 1658 static bool isConstantOrUndef(int Op, int Val) { 1659 return Op < 0 || Op == Val; 1660 } 1661 1662 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a 1663 /// VPKUHUM instruction. 1664 /// The ShuffleKind distinguishes between big-endian operations with 1665 /// two different inputs (0), either-endian operations with two identical 1666 /// inputs (1), and little-endian operations with two different inputs (2). 1667 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td). 
1668 bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, 1669 SelectionDAG &DAG) { 1670 bool IsLE = DAG.getDataLayout().isLittleEndian(); 1671 if (ShuffleKind == 0) { 1672 if (IsLE) 1673 return false; 1674 for (unsigned i = 0; i != 16; ++i) 1675 if (!isConstantOrUndef(N->getMaskElt(i), i*2+1)) 1676 return false; 1677 } else if (ShuffleKind == 2) { 1678 if (!IsLE) 1679 return false; 1680 for (unsigned i = 0; i != 16; ++i) 1681 if (!isConstantOrUndef(N->getMaskElt(i), i*2)) 1682 return false; 1683 } else if (ShuffleKind == 1) { 1684 unsigned j = IsLE ? 0 : 1; 1685 for (unsigned i = 0; i != 8; ++i) 1686 if (!isConstantOrUndef(N->getMaskElt(i), i*2+j) || 1687 !isConstantOrUndef(N->getMaskElt(i+8), i*2+j)) 1688 return false; 1689 } 1690 return true; 1691 } 1692 1693 /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a 1694 /// VPKUWUM instruction. 1695 /// The ShuffleKind distinguishes between big-endian operations with 1696 /// two different inputs (0), either-endian operations with two identical 1697 /// inputs (1), and little-endian operations with two different inputs (2). 1698 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td). 1699 bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, 1700 SelectionDAG &DAG) { 1701 bool IsLE = DAG.getDataLayout().isLittleEndian(); 1702 if (ShuffleKind == 0) { 1703 if (IsLE) 1704 return false; 1705 for (unsigned i = 0; i != 16; i += 2) 1706 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+2) || 1707 !isConstantOrUndef(N->getMaskElt(i+1), i*2+3)) 1708 return false; 1709 } else if (ShuffleKind == 2) { 1710 if (!IsLE) 1711 return false; 1712 for (unsigned i = 0; i != 16; i += 2) 1713 if (!isConstantOrUndef(N->getMaskElt(i ), i*2) || 1714 !isConstantOrUndef(N->getMaskElt(i+1), i*2+1)) 1715 return false; 1716 } else if (ShuffleKind == 1) { 1717 unsigned j = IsLE ? 0 : 2; 1718 for (unsigned i = 0; i != 8; i += 2) 1719 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+j) || 1720 !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) || 1721 !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) || 1722 !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1)) 1723 return false; 1724 } 1725 return true; 1726 } 1727 1728 /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a 1729 /// VPKUDUM instruction, AND the VPKUDUM instruction exists for the 1730 /// current subtarget. 1731 /// 1732 /// The ShuffleKind distinguishes between big-endian operations with 1733 /// two different inputs (0), either-endian operations with two identical 1734 /// inputs (1), and little-endian operations with two different inputs (2). 1735 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td). 
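/// For example, the big-endian two-input form (ShuffleKind 0) accepts
/// <4,5,6,7,12,13,14,15,20,21,22,23,28,29,30,31>: vpkudum keeps the low
/// word of each doubleword from both inputs.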
1736 bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, 1737 SelectionDAG &DAG) { 1738 const PPCSubtarget& Subtarget = 1739 static_cast<const PPCSubtarget&>(DAG.getSubtarget()); 1740 if (!Subtarget.hasP8Vector()) 1741 return false; 1742 1743 bool IsLE = DAG.getDataLayout().isLittleEndian(); 1744 if (ShuffleKind == 0) { 1745 if (IsLE) 1746 return false; 1747 for (unsigned i = 0; i != 16; i += 4) 1748 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+4) || 1749 !isConstantOrUndef(N->getMaskElt(i+1), i*2+5) || 1750 !isConstantOrUndef(N->getMaskElt(i+2), i*2+6) || 1751 !isConstantOrUndef(N->getMaskElt(i+3), i*2+7)) 1752 return false; 1753 } else if (ShuffleKind == 2) { 1754 if (!IsLE) 1755 return false; 1756 for (unsigned i = 0; i != 16; i += 4) 1757 if (!isConstantOrUndef(N->getMaskElt(i ), i*2) || 1758 !isConstantOrUndef(N->getMaskElt(i+1), i*2+1) || 1759 !isConstantOrUndef(N->getMaskElt(i+2), i*2+2) || 1760 !isConstantOrUndef(N->getMaskElt(i+3), i*2+3)) 1761 return false; 1762 } else if (ShuffleKind == 1) { 1763 unsigned j = IsLE ? 0 : 4; 1764 for (unsigned i = 0; i != 8; i += 4) 1765 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+j) || 1766 !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) || 1767 !isConstantOrUndef(N->getMaskElt(i+2), i*2+j+2) || 1768 !isConstantOrUndef(N->getMaskElt(i+3), i*2+j+3) || 1769 !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) || 1770 !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1) || 1771 !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) || 1772 !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3)) 1773 return false; 1774 } 1775 return true; 1776 } 1777 1778 /// isVMerge - Common function, used to match vmrg* shuffles. 1779 /// 1780 static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize, 1781 unsigned LHSStart, unsigned RHSStart) { 1782 if (N->getValueType(0) != MVT::v16i8) 1783 return false; 1784 assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) && 1785 "Unsupported merge size!"); 1786 1787 for (unsigned i = 0; i != 8/UnitSize; ++i) // Step over units 1788 for (unsigned j = 0; j != UnitSize; ++j) { // Step over bytes within unit 1789 if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j), 1790 LHSStart+j+i*UnitSize) || 1791 !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j), 1792 RHSStart+j+i*UnitSize)) 1793 return false; 1794 } 1795 return true; 1796 } 1797 1798 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for 1799 /// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes). 1800 /// The ShuffleKind distinguishes between big-endian merges with two 1801 /// different inputs (0), either-endian merges with two identical inputs (1), 1802 /// and little-endian merges with two different inputs (2). For the latter, 1803 /// the input operands are swapped (see PPCInstrAltivec.td). 
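/// For example, a big-endian vmrglb with two different inputs (ShuffleKind 0,
/// UnitSize 1) matches <8,24,9,25,10,26,11,27,12,28,13,29,14,30,15,31>,
/// interleaving the low halves of the two inputs byte by byte.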
1804 bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1805 unsigned ShuffleKind, SelectionDAG &DAG) {
1806 if (DAG.getDataLayout().isLittleEndian()) {
1807 if (ShuffleKind == 1) // unary
1808 return isVMerge(N, UnitSize, 0, 0);
1809 else if (ShuffleKind == 2) // swapped
1810 return isVMerge(N, UnitSize, 0, 16);
1811 else
1812 return false;
1813 } else {
1814 if (ShuffleKind == 1) // unary
1815 return isVMerge(N, UnitSize, 8, 8);
1816 else if (ShuffleKind == 0) // normal
1817 return isVMerge(N, UnitSize, 8, 24);
1818 else
1819 return false;
1820 }
1821 }
1822
1823 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
1824 /// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
1825 /// The ShuffleKind distinguishes between big-endian merges with two
1826 /// different inputs (0), either-endian merges with two identical inputs (1),
1827 /// and little-endian merges with two different inputs (2). For the latter,
1828 /// the input operands are swapped (see PPCInstrAltivec.td).
1829 bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1830 unsigned ShuffleKind, SelectionDAG &DAG) {
1831 if (DAG.getDataLayout().isLittleEndian()) {
1832 if (ShuffleKind == 1) // unary
1833 return isVMerge(N, UnitSize, 8, 8);
1834 else if (ShuffleKind == 2) // swapped
1835 return isVMerge(N, UnitSize, 8, 24);
1836 else
1837 return false;
1838 } else {
1839 if (ShuffleKind == 1) // unary
1840 return isVMerge(N, UnitSize, 0, 0);
1841 else if (ShuffleKind == 0) // normal
1842 return isVMerge(N, UnitSize, 0, 16);
1843 else
1844 return false;
1845 }
1846 }
1847
1848 /**
1849 * Common function used to match vmrgew and vmrgow shuffles
1850 *
1851 * The indexOffset determines whether to look for even or odd words in
1852 * the shuffle mask. This depends on the endianness of the target machine.
1853 * - Little Endian:
1854 * - Use offset of 0 to check for odd elements
1855 * - Use offset of 4 to check for even elements
1856 * - Big Endian:
1857 * - Use offset of 0 to check for even elements
1858 * - Use offset of 4 to check for odd elements
1859 * A detailed description of the vector element ordering for little endian and
1860 * big endian can be found at
1861 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
1862 * Targeting your applications - what little endian and big endian IBM XL C/C++
1863 * compiler differences mean to you
1864 *
1865 * The mask to the shuffle vector instruction specifies the indices of the
1866 * elements from the two input vectors to place in the result. The elements are
1867 * numbered in array-access order, starting with the first vector. These vectors
1868 * are always of type v16i8, thus each vector will contain 16 elements of
1869 * 8 bits each. More info on the shuffle vector can be found in the
1870 * http://llvm.org/docs/LangRef.html#shufflevector-instruction
1871 * Language Reference.
1872 *
1873 * The RHSStartValue indicates whether the same input vectors are used (unary)
1874 * or two different input vectors are used, based on the following:
1875 * - If the instruction uses the same vector for both inputs, the range of the
1876 * indices will be 0 to 15. In this case, the RHSStart value passed should
1877 * be 0.
1878 * - If the instruction has two different vectors then the range of the
1879 * indices will be 0 to 31. In this case, the RHSStart value passed should
1880 * be 16 (indices 0-15 specify elements in the first vector while indices 16
1881 * to 31 specify elements in the second vector).
1882 *
1883 * \param[in] N The shuffle vector SD Node to analyze
1884 * \param[in] IndexOffset Specifies whether to look for even or odd elements
1885 * \param[in] RHSStartValue Specifies the starting index for the righthand input
1886 * vector to the shuffle_vector instruction
1887 * \return true iff this shuffle vector represents an even or odd word merge
1888 */
1890 static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
1891 unsigned RHSStartValue) {
1892 if (N->getValueType(0) != MVT::v16i8)
1893 return false;
1894
1895 for (unsigned i = 0; i < 2; ++i)
1896 for (unsigned j = 0; j < 4; ++j)
1897 if (!isConstantOrUndef(N->getMaskElt(i*4+j),
1898 i*RHSStartValue+j+IndexOffset) ||
1899 !isConstantOrUndef(N->getMaskElt(i*4+j+8),
1900 i*RHSStartValue+j+IndexOffset+8))
1901 return false;
1902 return true;
1903 }
1904
1905 /**
1906 * Determine if the specified shuffle mask is suitable for the vmrgew or
1907 * vmrgow instructions.
1908 *
1909 * \param[in] N The shuffle vector SD Node to analyze
1910 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
1911 * \param[in] ShuffleKind Identify the type of merge:
1912 * - 0 = big-endian merge with two different inputs;
1913 * - 1 = either-endian merge with two identical inputs;
1914 * - 2 = little-endian merge with two different inputs (inputs are swapped for
1915 * little-endian merges).
1916 * \param[in] DAG The current SelectionDAG
1917 * \return true iff this shuffle mask is suitable for a vmrgew or vmrgow
1918 */
1919 bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
1920 unsigned ShuffleKind, SelectionDAG &DAG) {
1921 if (DAG.getDataLayout().isLittleEndian()) {
1922 unsigned indexOffset = CheckEven ? 4 : 0;
1923 if (ShuffleKind == 1) // Unary
1924 return isVMerge(N, indexOffset, 0);
1925 else if (ShuffleKind == 2) // swapped
1926 return isVMerge(N, indexOffset, 16);
1927 else
1928 return false;
1929 }
1930 else {
1931 unsigned indexOffset = CheckEven ? 0 : 4;
1932 if (ShuffleKind == 1) // Unary
1933 return isVMerge(N, indexOffset, 0);
1934 else if (ShuffleKind == 0) // Normal
1935 return isVMerge(N, indexOffset, 16);
1936 else
1937 return false;
1938 }
1939 return false;
1940 }
1941
1942 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
1943 /// amount, otherwise return -1.
1944 /// The ShuffleKind distinguishes between big-endian operations with two
1945 /// different inputs (0), either-endian operations with two identical inputs
1946 /// (1), and little-endian operations with two different inputs (2). For the
1947 /// latter, the input operands are swapped (see PPCInstrAltivec.td).
1948 int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
1949 SelectionDAG &DAG) {
1950 if (N->getValueType(0) != MVT::v16i8)
1951 return -1;
1952
1953 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1954
1955 // Find the first non-undef value in the shuffle mask.
1956 unsigned i;
1957 for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
1958 /*search*/;
1959
1960 if (i == 16) return -1; // all undef.
1961
1962 // Otherwise, check to see if the rest of the elements are consecutively
1963 // numbered from this value.
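// For example, the big-endian two-input mask <3,4,5,...,18> starts at 3 and
// stays consecutive, so the checks below accept it and we return a vsldoi
// shift amount of 3 bytes.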
1964 unsigned ShiftAmt = SVOp->getMaskElt(i);
1965 if (ShiftAmt < i) return -1;
1966
1967 ShiftAmt -= i;
1968 bool isLE = DAG.getDataLayout().isLittleEndian();
1969
1970 if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
1971 // Check the rest of the elements to see if they are consecutive.
1972 for (++i; i != 16; ++i)
1973 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
1974 return -1;
1975 } else if (ShuffleKind == 1) {
1976 // Check the rest of the elements to see if they are consecutive.
1977 for (++i; i != 16; ++i)
1978 if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
1979 return -1;
1980 } else
1981 return -1;
1982
1983 if (isLE)
1984 ShiftAmt = 16 - ShiftAmt;
1985
1986 return ShiftAmt;
1987 }
1988
1989 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
1990 /// specifies a splat of a single element that is suitable for input to
1991 /// one of the splat operations (VSPLTB/VSPLTH/VSPLTW/XXSPLTW/LXVDSX/etc.).
1992 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
1993 assert(N->getValueType(0) == MVT::v16i8 && isPowerOf2_32(EltSize) &&
1994 EltSize <= 8 && "Can only handle 1,2,4,8 byte element sizes");
1995
1996 // The consecutive indices need to specify an element, not part of two
1997 // different elements. So abandon ship early if this isn't the case.
1998 if (N->getMaskElt(0) % EltSize != 0)
1999 return false;
2000
2001 // This is a splat operation if each element of the permute is the same, and
2002 // if the value doesn't reference the second vector.
2003 unsigned ElementBase = N->getMaskElt(0);
2004
2005 // FIXME: Handle UNDEF elements too!
2006 if (ElementBase >= 16)
2007 return false;
2008
2009 // Check that the indices are consecutive, in the case of a multi-byte element
2010 // splatted with a v16i8 mask.
2011 for (unsigned i = 1; i != EltSize; ++i)
2012 if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
2013 return false;
2014
2015 for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
2016 if (N->getMaskElt(i) < 0) continue;
2017 for (unsigned j = 0; j != EltSize; ++j)
2018 if (N->getMaskElt(i+j) != N->getMaskElt(j))
2019 return false;
2020 }
2021 return true;
2022 }
2023
2024 /// Check that the mask is shuffling N byte elements. Within each N byte
2025 /// element of the mask, the indices could be either in increasing or
2026 /// decreasing order as long as they are consecutive.
2027 /// \param[in] N the shuffle vector SD Node to analyze
2028 /// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/
2029 /// Word/DoubleWord/QuadWord).
2030 /// \param[in] StepLen the delta between adjacent indices within an element;
2031 /// 1 if the indices are increasing, -1 if they are decreasing.
2032 /// \return true iff the mask is shuffling N byte elements.
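/// For example, with Width == 4 and StepLen == 1 the mask
/// <4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11> qualifies: each word's four
/// byte indices start at a multiple of 4 and ascend consecutively.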
2033 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
2034 int StepLen) {
2035 assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
2036 "Unexpected element width.");
2037 assert((StepLen == 1 || StepLen == -1) && "Unexpected step length.");
2038
2039 unsigned NumOfElem = 16 / Width;
2040 unsigned MaskVal[16]; // Width is never greater than 16
2041 for (unsigned i = 0; i < NumOfElem; ++i) {
2042 MaskVal[0] = N->getMaskElt(i * Width);
2043 if ((StepLen == 1) && (MaskVal[0] % Width)) {
2044 return false;
2045 } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
2046 return false;
2047 }
2048
2049 for (unsigned int j = 1; j < Width; ++j) {
2050 MaskVal[j] = N->getMaskElt(i * Width + j);
2051 if (MaskVal[j] != MaskVal[j-1] + StepLen) {
2052 return false;
2053 }
2054 }
2055 }
2056
2057 return true;
2058 }
2059
2060 bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
2061 unsigned &InsertAtByte, bool &Swap, bool IsLE) {
2062 if (!isNByteElemShuffleMask(N, 4, 1))
2063 return false;
2064
2065 // Now we look at mask elements 0,4,8,12
2066 unsigned M0 = N->getMaskElt(0) / 4;
2067 unsigned M1 = N->getMaskElt(4) / 4;
2068 unsigned M2 = N->getMaskElt(8) / 4;
2069 unsigned M3 = N->getMaskElt(12) / 4;
2070 unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
2071 unsigned BigEndianShifts[] = { 3, 0, 1, 2 };
2072
2073 // Below, let H and L be arbitrary elements of the shuffle mask
2074 // where H is in the range [4,7] and L is in the range [0,3].
2075 // H, 1, 2, 3 or L, 5, 6, 7
2076 if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
2077 (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
2078 ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
2079 InsertAtByte = IsLE ? 12 : 0;
2080 Swap = M0 < 4;
2081 return true;
2082 }
2083 // 0, H, 2, 3 or 4, L, 6, 7
2084 if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
2085 (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
2086 ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
2087 InsertAtByte = IsLE ? 8 : 4;
2088 Swap = M1 < 4;
2089 return true;
2090 }
2091 // 0, 1, H, 3 or 4, 5, L, 7
2092 if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
2093 (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
2094 ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
2095 InsertAtByte = IsLE ? 4 : 8;
2096 Swap = M2 < 4;
2097 return true;
2098 }
2099 // 0, 1, 2, H or 4, 5, 6, L
2100 if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
2101 (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
2102 ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
2103 InsertAtByte = IsLE ? 0 : 12;
2104 Swap = M3 < 4;
2105 return true;
2106 }
2107
2108 // If both vector operands for the shuffle are the same vector, the mask will
2109 // contain only elements from the first one and the second one will be undef.
2110 if (N->getOperand(1).isUndef()) {
2111 ShiftElts = 0;
2112 Swap = true;
2113 unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
2114 if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
2115 InsertAtByte = IsLE ? 12 : 0;
2116 return true;
2117 }
2118 if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
2119 InsertAtByte = IsLE ? 8 : 4;
2120 return true;
2121 }
2122 if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
2123 InsertAtByte = IsLE ? 4 : 8;
2124 return true;
2125 }
2126 if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
2127 InsertAtByte = IsLE ?
0 : 12; 2128 return true; 2129 } 2130 } 2131 2132 return false; 2133 } 2134 2135 bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, 2136 bool &Swap, bool IsLE) { 2137 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8"); 2138 // Ensure each byte index of the word is consecutive. 2139 if (!isNByteElemShuffleMask(N, 4, 1)) 2140 return false; 2141 2142 // Now we look at mask elements 0,4,8,12, which are the beginning of words. 2143 unsigned M0 = N->getMaskElt(0) / 4; 2144 unsigned M1 = N->getMaskElt(4) / 4; 2145 unsigned M2 = N->getMaskElt(8) / 4; 2146 unsigned M3 = N->getMaskElt(12) / 4; 2147 2148 // If both vector operands for the shuffle are the same vector, the mask will 2149 // contain only elements from the first one and the second one will be undef. 2150 if (N->getOperand(1).isUndef()) { 2151 assert(M0 < 4 && "Indexing into an undef vector?"); 2152 if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4) 2153 return false; 2154 2155 ShiftElts = IsLE ? (4 - M0) % 4 : M0; 2156 Swap = false; 2157 return true; 2158 } 2159 2160 // Ensure each word index of the ShuffleVector Mask is consecutive. 2161 if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8) 2162 return false; 2163 2164 if (IsLE) { 2165 if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) { 2166 // Input vectors don't need to be swapped if the leading element 2167 // of the result is one of the 3 left elements of the second vector 2168 // (or if there is no shift to be done at all). 2169 Swap = false; 2170 ShiftElts = (8 - M0) % 8; 2171 } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) { 2172 // Input vectors need to be swapped if the leading element 2173 // of the result is one of the 3 left elements of the first vector 2174 // (or if we're shifting by 4 - thereby simply swapping the vectors). 2175 Swap = true; 2176 ShiftElts = (4 - M0) % 4; 2177 } 2178 2179 return true; 2180 } else { // BE 2181 if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) { 2182 // Input vectors don't need to be swapped if the leading element 2183 // of the result is one of the 4 elements of the first vector. 2184 Swap = false; 2185 ShiftElts = M0; 2186 } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) { 2187 // Input vectors need to be swapped if the leading element 2188 // of the result is one of the 4 elements of the right vector. 2189 Swap = true; 2190 ShiftElts = M0 - 4; 2191 } 2192 2193 return true; 2194 } 2195 } 2196 2197 bool static isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) { 2198 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8"); 2199 2200 if (!isNByteElemShuffleMask(N, Width, -1)) 2201 return false; 2202 2203 for (int i = 0; i < 16; i += Width) 2204 if (N->getMaskElt(i) != i + Width - 1) 2205 return false; 2206 2207 return true; 2208 } 2209 2210 bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) { 2211 return isXXBRShuffleMaskHelper(N, 2); 2212 } 2213 2214 bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) { 2215 return isXXBRShuffleMaskHelper(N, 4); 2216 } 2217 2218 bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) { 2219 return isXXBRShuffleMaskHelper(N, 8); 2220 } 2221 2222 bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) { 2223 return isXXBRShuffleMaskHelper(N, 16); 2224 } 2225 2226 /// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap 2227 /// if the inputs to the instruction should be swapped and set \p DM to the 2228 /// value for the immediate. 
2229 /// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI
2230 /// AND element 0 of the result comes from the first input (LE) or second input
2231 /// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered.
2232 /// \return true iff the given mask of shuffle node \p N is an XXPERMDI shuffle
2233 /// mask.
2234 bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
2235 bool &Swap, bool IsLE) {
2236 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2237
2238 // Ensure each byte index of the double word is consecutive.
2239 if (!isNByteElemShuffleMask(N, 8, 1))
2240 return false;
2241
2242 unsigned M0 = N->getMaskElt(0) / 8;
2243 unsigned M1 = N->getMaskElt(8) / 8;
2244 assert(((M0 | M1) < 4) && "A mask element out of bounds?");
2245
2246 // If both vector operands for the shuffle are the same vector, the mask will
2247 // contain only elements from the first one and the second one will be undef.
2248 if (N->getOperand(1).isUndef()) {
2249 if ((M0 | M1) < 2) {
2250 DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
2251 Swap = false;
2252 return true;
2253 } else
2254 return false;
2255 }
2256
2257 if (IsLE) {
2258 if (M0 > 1 && M1 < 2) {
2259 Swap = false;
2260 } else if (M0 < 2 && M1 > 1) {
2261 M0 = (M0 + 2) % 4;
2262 M1 = (M1 + 2) % 4;
2263 Swap = true;
2264 } else
2265 return false;
2266
2267 // Note: if control flow comes here that means Swap is already set above
2268 DM = (((~M1) & 1) << 1) + ((~M0) & 1);
2269 return true;
2270 } else { // BE
2271 if (M0 < 2 && M1 > 1) {
2272 Swap = false;
2273 } else if (M0 > 1 && M1 < 2) {
2274 M0 = (M0 + 2) % 4;
2275 M1 = (M1 + 2) % 4;
2276 Swap = true;
2277 } else
2278 return false;
2279
2280 // Note: if control flow comes here that means Swap is already set above
2281 DM = (M0 << 1) + (M1 & 1);
2282 return true;
2283 }
2284 }
2285
2286
2287 /// getSplatIdxForPPCMnemonics - Return the splat index as a value that is
2288 /// appropriate for PPC mnemonics (which have a big endian bias - namely
2289 /// elements are counted from the left of the vector register).
2290 unsigned PPC::getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
2291 SelectionDAG &DAG) {
2292 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2293 assert(isSplatShuffleMask(SVOp, EltSize));
2294 if (DAG.getDataLayout().isLittleEndian())
2295 return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
2296 else
2297 return SVOp->getMaskElt(0) / EltSize;
2298 }
2299
2300 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
2301 /// by using a vspltis[bhw] instruction of the specified element size, return
2302 /// the constant being splatted. The ByteSize field indicates the number of
2303 /// bytes of each element [124] -> [bhw].
2304 SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
2305 SDValue OpVal(nullptr, 0);
2306
2307 // If ByteSize of the splat is bigger than the element size of the
2308 // build_vector, then we have a case where we are checking for a splat where
2309 // multiple elements of the buildvector are folded together into a single
2310 // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
2311 unsigned EltSize = 16/N->getNumOperands();
2312 if (EltSize < ByteSize) {
2313 unsigned Multiple = ByteSize/EltSize; // Number of BV entries per spltval.
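// For example, a v8i16 build_vector probed with ByteSize == 4 (vspltisw) has
// EltSize == 2, so Multiple == 2 and operands are compared pairwise below.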
2314 SDValue UniquedVals[4];
2315 assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
2316
2317 // See if all of the elements in the buildvector agree across chunks.
2318 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2319 if (N->getOperand(i).isUndef()) continue;
2320 // If the element isn't a constant, bail fully out.
2321 if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();
2322
2323 if (!UniquedVals[i&(Multiple-1)].getNode())
2324 UniquedVals[i&(Multiple-1)] = N->getOperand(i);
2325 else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
2326 return SDValue(); // no match.
2327 }
2328
2329 // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
2330 // either constant or undef values that are identical for each chunk. See
2331 // if these chunks can form into a larger vspltis*.
2332
2333 // Check to see if all of the leading entries are either 0 or -1. If
2334 // neither, then this won't fit into the immediate field.
2335 bool LeadingZero = true;
2336 bool LeadingOnes = true;
2337 for (unsigned i = 0; i != Multiple-1; ++i) {
2338 if (!UniquedVals[i].getNode()) continue; // Must have been undefs.
2339
2340 LeadingZero &= isNullConstant(UniquedVals[i]);
2341 LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
2342 }
2343 // Finally, check the least significant entry.
2344 if (LeadingZero) {
2345 if (!UniquedVals[Multiple-1].getNode())
2346 return DAG.getTargetConstant(0, SDLoc(N), MVT::i32); // 0,0,0,undef
2347 int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
2348 if (Val < 16) // 0,0,0,4 -> vspltisw(4)
2349 return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2350 }
2351 if (LeadingOnes) {
2352 if (!UniquedVals[Multiple-1].getNode())
2353 return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
2354 int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
2355 if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2)
2356 return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2357 }
2358
2359 return SDValue();
2360 }
2361
2362 // Check to see if this buildvec has a single non-undef value in its elements.
2363 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2364 if (N->getOperand(i).isUndef()) continue;
2365 if (!OpVal.getNode())
2366 OpVal = N->getOperand(i);
2367 else if (OpVal != N->getOperand(i))
2368 return SDValue();
2369 }
2370
2371 if (!OpVal.getNode()) return SDValue(); // All UNDEF: use implicit def.
2372
2373 unsigned ValSizeInBytes = EltSize;
2374 uint64_t Value = 0;
2375 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
2376 Value = CN->getZExtValue();
2377 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
2378 assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
2379 Value = FloatToBits(CN->getValueAPF().convertToFloat());
2380 }
2381
2382 // If the splat value is larger than the element value, then we can never do
2383 // this splat. The only case that we could fit the replicated bits into our
2384 // immediate field for would be zero, and we prefer to use vxor for it.
2385 if (ValSizeInBytes < ByteSize) return SDValue();
2386
2387 // If the element value is larger than the splat value, check if it consists
2388 // of a repeated bit pattern of size ByteSize.
2389 if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
2390 return SDValue();
2391
2392 // Properly sign extend the value.
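// For example, with ByteSize == 2 a splatted 0xFFFE sign-extends to -2, which
// passes the 5-bit check below and can be materialized as "vspltish -2".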
2393 int MaskVal = SignExtend32(Value, ByteSize * 8);
2394
2395 // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
2396 if (MaskVal == 0) return SDValue();
2397
2398 // Finally, if this value fits in a 5 bit sext field, return it
2399 if (SignExtend32<5>(MaskVal) == MaskVal)
2400 return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
2401 return SDValue();
2402 }
2403
2404 //===----------------------------------------------------------------------===//
2405 // Addressing Mode Selection
2406 //===----------------------------------------------------------------------===//
2407
2408 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit
2409 /// or 64-bit immediate, and if the value can be accurately represented as a
2410 /// sign extension from a 16-bit value. If so, this returns true and the
2411 /// immediate.
2412 bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
2413 if (!isa<ConstantSDNode>(N))
2414 return false;
2415
2416 Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
2417 if (N->getValueType(0) == MVT::i32)
2418 return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
2419 else
2420 return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
2421 }
2422 bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
2423 return isIntS16Immediate(Op.getNode(), Imm);
2424 }
2425
2426
2427 /// SelectAddressEVXRegReg - Given the specified address, check to see if it can
2428 /// be represented as an indexed [r+r] operation.
2429 bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base,
2430 SDValue &Index,
2431 SelectionDAG &DAG) const {
2432 for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
2433 UI != E; ++UI) {
2434 if (MemSDNode *Memop = dyn_cast<MemSDNode>(*UI)) {
2435 if (Memop->getMemoryVT() == MVT::f64) {
2436 Base = N.getOperand(0);
2437 Index = N.getOperand(1);
2438 return true;
2439 }
2440 }
2441 }
2442 return false;
2443 }
2444
2445 /// isIntS34Immediate - This method tests if the value of the given node can be
2446 /// accurately represented as a sign extension from a 34-bit value. If so,
2447 /// this returns true and the immediate.
2448 bool llvm::isIntS34Immediate(SDNode *N, int64_t &Imm) {
2449 if (!isa<ConstantSDNode>(N))
2450 return false;
2451
2452 Imm = (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
2453 return isInt<34>(Imm);
2454 }
2455 bool llvm::isIntS34Immediate(SDValue Op, int64_t &Imm) {
2456 return isIntS34Immediate(Op.getNode(), Imm);
2457 }
2458
2459 /// SelectAddressRegReg - Given the specified address, check to see if it
2460 /// can be represented as an indexed [r+r] operation. Returns false if it
2461 /// can be more efficiently represented as [r+imm]. If \p EncodingAlignment is
2462 /// non-zero and N can be represented by a base register plus a signed 16-bit
2463 /// displacement, make a more precise judgement by checking (displacement % \p
2464 /// EncodingAlignment).
2465 bool PPCTargetLowering::SelectAddressRegReg(
2466 SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG,
2467 MaybeAlign EncodingAlignment) const {
2468 // If we have a PC Relative target flag don't select as [reg+reg]. It will be
2469 // a [pc+imm].
2470 if (SelectAddressPCRel(N, Base))
2471 return false;
2472
2473 int16_t Imm = 0;
2474 if (N.getOpcode() == ISD::ADD) {
2475 // SPE load/store (f64) can only handle an 8-bit offset, not a 16-bit one,
2476 // so check for the EVX [r+r] form first.
2477 if (hasSPE() && SelectAddressEVXRegReg(N, Base, Index, DAG))
2478 return true;
2479 if (isIntS16Immediate(N.getOperand(1), Imm) &&
2480 (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
2481 return false; // r+i
2482 if (N.getOperand(1).getOpcode() == PPCISD::Lo)
2483 return false; // r+i
2484
2485 Base = N.getOperand(0);
2486 Index = N.getOperand(1);
2487 return true;
2488 } else if (N.getOpcode() == ISD::OR) {
2489 if (isIntS16Immediate(N.getOperand(1), Imm) &&
2490 (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
2491 return false; // r+i can fold it if we can.
2492
2493 // If this is an or of disjoint bitfields, we can codegen this as an add
2494 // (for better address arithmetic) if the LHS and RHS of the OR are provably
2495 // disjoint.
2496 KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2497
2498 if (LHSKnown.Zero.getBoolValue()) {
2499 KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
2500 // If all of the bits are known zero on the LHS or RHS, the add won't
2501 // carry.
2502 if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
2503 Base = N.getOperand(0);
2504 Index = N.getOperand(1);
2505 return true;
2506 }
2507 }
2508 }
2509
2510 return false;
2511 }
2512
2513 // If we happen to be doing an i64 load or store into a stack slot that has
2514 // less than a 4-byte alignment, then the frame-index elimination may need to
2515 // use an indexed load or store instruction (because the offset may not be a
2516 // multiple of 4). The extra register needed to hold the offset comes from the
2517 // register scavenger, and it is possible that the scavenger will need to use
2518 // an emergency spill slot. As a result, we need to make sure that a spill slot
2519 // is allocated when doing an i64 load/store into a less-than-4-byte-aligned
2520 // stack slot.
2521 static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
2522 // FIXME: This does not handle the LWA case.
2523 if (VT != MVT::i64)
2524 return;
2525
2526 // NOTE: We'll exclude negative FIs here, which come from argument
2527 // lowering, because there are no known test cases triggering this problem
2528 // using packed structures (or similar). We can remove this exclusion if
2529 // we find such a test case. The reason why this is so test-case driven is
2530 // because this entire 'fixup' is only to prevent crashes (from the
2531 // register scavenger) on not-really-valid inputs. For example, if we have:
2532 // %a = alloca i1
2533 // %b = bitcast i1* %a to i64*
2534 // store i64 %v, i64* %b
2535 // then the store should really be marked as 'align 1', but is not. If it
2536 // were marked as 'align 1' then the indexed form would have been
2537 // instruction-selected initially, and the problem this 'fixup' is preventing
2538 // won't happen regardless.
2539 if (FrameIdx < 0)
2540 return;
2541
2542 MachineFunction &MF = DAG.getMachineFunction();
2543 MachineFrameInfo &MFI = MF.getFrameInfo();
2544
2545 if (MFI.getObjectAlign(FrameIdx) >= Align(4))
2546 return;
2547
2548 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2549 FuncInfo->setHasNonRISpills();
2550 }
2551
2552 /// Returns true if the address N can be represented by a base register plus
2553 /// a signed 16-bit displacement [r+imm], and if it is not better
2554 /// represented as reg+reg. If \p EncodingAlignment is non-zero, only accept
2555 /// displacements that are multiples of that value.
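/// For example, DS-form instructions such as ld/std encode a displacement
/// that must be a multiple of 4, so callers for those forms pass Align(4).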
2556 bool PPCTargetLowering::SelectAddressRegImm( 2557 SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG, 2558 MaybeAlign EncodingAlignment) const { 2559 // FIXME dl should come from parent load or store, not from address 2560 SDLoc dl(N); 2561 2562 // If we have a PC Relative target flag don't select as [reg+imm]. It will be 2563 // a [pc+imm]. 2564 if (SelectAddressPCRel(N, Base)) 2565 return false; 2566 2567 // If this can be more profitably realized as r+r, fail. 2568 if (SelectAddressRegReg(N, Disp, Base, DAG, EncodingAlignment)) 2569 return false; 2570 2571 if (N.getOpcode() == ISD::ADD) { 2572 int16_t imm = 0; 2573 if (isIntS16Immediate(N.getOperand(1), imm) && 2574 (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) { 2575 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 2576 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 2577 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 2578 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 2579 } else { 2580 Base = N.getOperand(0); 2581 } 2582 return true; // [r+i] 2583 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) { 2584 // Match LOAD (ADD (X, Lo(G))). 2585 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue() 2586 && "Cannot handle constant offsets yet!"); 2587 Disp = N.getOperand(1).getOperand(0); // The global address. 2588 assert(Disp.getOpcode() == ISD::TargetGlobalAddress || 2589 Disp.getOpcode() == ISD::TargetGlobalTLSAddress || 2590 Disp.getOpcode() == ISD::TargetConstantPool || 2591 Disp.getOpcode() == ISD::TargetJumpTable); 2592 Base = N.getOperand(0); 2593 return true; // [&g+r] 2594 } 2595 } else if (N.getOpcode() == ISD::OR) { 2596 int16_t imm = 0; 2597 if (isIntS16Immediate(N.getOperand(1), imm) && 2598 (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) { 2599 // If this is an or of disjoint bitfields, we can codegen this as an add 2600 // (for better address arithmetic) if the LHS and RHS of the OR are 2601 // provably disjoint. 2602 KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0)); 2603 2604 if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) { 2605 // If all of the bits are known zero on the LHS or RHS, the add won't 2606 // carry. 2607 if (FrameIndexSDNode *FI = 2608 dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 2609 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 2610 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 2611 } else { 2612 Base = N.getOperand(0); 2613 } 2614 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 2615 return true; 2616 } 2617 } 2618 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { 2619 // Loading from a constant address. 2620 2621 // If this address fits entirely in a 16-bit sext immediate field, codegen 2622 // this as "d, 0" 2623 int16_t Imm; 2624 if (isIntS16Immediate(CN, Imm) && 2625 (!EncodingAlignment || isAligned(*EncodingAlignment, Imm))) { 2626 Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0)); 2627 Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO, 2628 CN->getValueType(0)); 2629 return true; 2630 } 2631 2632 // Handle 32-bit sext immediates with LIS + addr mode. 2633 if ((CN->getValueType(0) == MVT::i32 || 2634 (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) && 2635 (!EncodingAlignment || 2636 isAligned(*EncodingAlignment, CN->getZExtValue()))) { 2637 int Addr = (int)CN->getZExtValue(); 2638 2639 // Otherwise, break this down into an LIS + disp. 
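// For example, the constant address 0x12348000 becomes "lis 0x1235" plus a
// -0x8000 displacement: (short)Addr is negative here, and subtracting it
// back out below carries into the high halfword.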
2640 Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);
2641
2642 Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
2643 MVT::i32);
2644 unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
2645 Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
2646 return true;
2647 }
2648 }
2649
2650 Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
2651 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
2652 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2653 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2654 } else
2655 Base = N;
2656 return true; // [r+0]
2657 }
2658
2659 /// Similar to the 16-bit case but for instructions that take a 34-bit
2660 /// displacement field (prefixed loads/stores).
2661 bool PPCTargetLowering::SelectAddressRegImm34(SDValue N, SDValue &Disp,
2662 SDValue &Base,
2663 SelectionDAG &DAG) const {
2664 // Only on 64-bit targets.
2665 if (N.getValueType() != MVT::i64)
2666 return false;
2667
2668 SDLoc dl(N);
2669 int64_t Imm = 0;
2670
2671 if (N.getOpcode() == ISD::ADD) {
2672 if (!isIntS34Immediate(N.getOperand(1), Imm))
2673 return false;
2674 Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
2675 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0)))
2676 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2677 else
2678 Base = N.getOperand(0);
2679 return true;
2680 }
2681
2682 if (N.getOpcode() == ISD::OR) {
2683 if (!isIntS34Immediate(N.getOperand(1), Imm))
2684 return false;
2685 // If this is an or of disjoint bitfields, we can codegen this as an add
2686 // (for better address arithmetic) if the LHS and RHS of the OR are
2687 // provably disjoint.
2688 KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2689 if ((LHSKnown.Zero.getZExtValue() | ~(uint64_t)Imm) != ~0ULL)
2690 return false;
2691 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0)))
2692 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2693 else
2694 Base = N.getOperand(0);
2695 Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
2696 return true;
2697 }
2698
2699 if (isIntS34Immediate(N, Imm)) { // If the address is a 34-bit const.
2700 Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
2701 Base = DAG.getRegister(PPC::ZERO8, N.getValueType());
2702 return true;
2703 }
2704
2705 return false;
2706 }
2707
2708 /// SelectAddressRegRegOnly - Given the specified address, force it to be
2709 /// represented as an indexed [r+r] operation.
2710 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
2711 SDValue &Index,
2712 SelectionDAG &DAG) const {
2713 // Check to see if we can easily represent this as an [r+r] address. This
2714 // will fail if it thinks that the address is more profitably represented as
2715 // reg+imm, e.g. where imm = 0.
2716 if (SelectAddressRegReg(N, Base, Index, DAG))
2717 return true;
2718
2719 // If the address is the result of an add, we will utilize the fact that the
2720 // address calculation includes an implicit add. However, we can reduce
2721 // register pressure if we do not materialize a constant just for use as the
2722 // index register. We only get rid of the add if it is not an add of a
2723 // value and a 16-bit signed constant where both operands have a single use.
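// For example, (add %x, 100000) cannot fold 100000 into a 16-bit
// displacement, so we hand back %x and the materialized constant as [r+r];
// a single-use (add %x, 8), by contrast, falls through below and the whole
// add (one addi) becomes the index with the zero register as base.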
  int16_t imm = 0;
  if (N.getOpcode() == ISD::ADD &&
      (!isIntS16Immediate(N.getOperand(1), imm) ||
       !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

  // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                         N.getValueType());
  Index = N;
  return true;
}

template <typename Ty> static bool isValidPCRelNode(SDValue N) {
  Ty *PCRelCand = dyn_cast<Ty>(N);
  return PCRelCand && (PCRelCand->getTargetFlags() & PPCII::MO_PCREL_FLAG);
}

/// Returns true if this address is a PC Relative address.
/// PC Relative addresses are marked with the flag PPCII::MO_PCREL_FLAG
/// or if the node opcode is PPCISD::MAT_PCREL_ADDR.
bool PPCTargetLowering::SelectAddressPCRel(SDValue N, SDValue &Base) const {
  // This is a materialize PC Relative node. Always select this as PC
  // Relative.
  Base = N;
  if (N.getOpcode() == PPCISD::MAT_PCREL_ADDR)
    return true;
  if (isValidPCRelNode<ConstantPoolSDNode>(N) ||
      isValidPCRelNode<GlobalAddressSDNode>(N) ||
      isValidPCRelNode<JumpTableSDNode>(N) ||
      isValidPCRelNode<BlockAddressSDNode>(N))
    return true;
  return false;
}

/// Returns true if we should use a direct load into vector instruction
/// (such as lxsd or lfd), instead of a load into gpr + direct move sequence.
static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget &ST) {

  // If there are any uses other than scalar to vector, then we should keep it
  // as a scalar load -> direct move pattern to prevent multiple loads.
  LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
  if (!LD)
    return false;

  EVT MemVT = LD->getMemoryVT();
  if (!MemVT.isSimple())
    return false;
  switch (MemVT.getSimpleVT().SimpleTy) {
  case MVT::i64:
    break;
  case MVT::i32:
    if (!ST.hasP8Vector())
      return false;
    break;
  case MVT::i16:
  case MVT::i8:
    if (!ST.hasP9Vector())
      return false;
    break;
  default:
    return false;
  }

  SDValue LoadedVal(N, 0);
  if (!LoadedVal.hasOneUse())
    return false;

  for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end();
       UI != UE; ++UI)
    if (UI.getUse().get().getResNo() == 0 &&
        UI->getOpcode() != ISD::SCALAR_TO_VECTOR &&
        UI->getOpcode() != PPCISD::SCALAR_TO_VECTOR_PERMUTED)
      return false;

  return true;
}

/// getPreIndexedAddressParts - Returns true, and sets the base pointer, the
/// offset, and the addressing mode by reference, if this node's address can
/// be legally represented as a pre-indexed load / store address.
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  if (DisablePPCPreinc) return false;

  bool isLoad = true;
  SDValue Ptr;
  EVT VT;
  unsigned Alignment;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    Alignment = LD->getAlignment();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
    Alignment = ST->getAlignment();
    isLoad = false;
  } else
    return false;

  // Do not generate pre-inc forms for specific loads that feed
  // scalar_to_vector instructions, because we can instead fold these into a
  // more efficient instruction (such as LXSD).
  if (isLoad && usePartialVectorLoads(N, Subtarget)) {
    return false;
  }

  // PowerPC doesn't have preinc load/store instructions for vectors.
  if (VT.isVector())
    return false;

  if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
    // Common code will reject creating a pre-inc form if the base pointer
    // is a frame index, or if N is a store and the base pointer is either
    // the same as or a predecessor of the value being stored. Check for
    // those situations here, and try with swapped Base/Offset instead.
    bool Swap = false;

    if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
      Swap = true;
    else if (!isLoad) {
      SDValue Val = cast<StoreSDNode>(N)->getValue();
      if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
        Swap = true;
    }

    if (Swap)
      std::swap(Base, Offset);

    AM = ISD::PRE_INC;
    return true;
  }

  // LDU/STU can only handle immediates that are a multiple of 4.
  if (VT != MVT::i64) {
    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, None))
      return false;
  } else {
    // LDU/STU need an address with at least 4-byte alignment.
    if (Alignment < 4)
      return false;

    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, Align(4)))
      return false;
  }

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of
    // sext i32 to i64 when addr mode is r+i.
    if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
        LD->getExtensionType() == ISD::SEXTLOAD &&
        isa<ConstantSDNode>(Offset))
      return false;
  }

  AM = ISD::PRE_INC;
  return true;
}

//===----------------------------------------------------------------------===//
//  LowerOperation implementation
//===----------------------------------------------------------------------===//

/// Sets HiOpFlags and LoOpFlags to the target MO flags to be used for hi/lo
/// label references, adding the PIC flag when compiling position-independent
/// code.
static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
                               unsigned &HiOpFlags, unsigned &LoOpFlags,
                               const GlobalValue *GV = nullptr) {
  HiOpFlags = PPCII::MO_HA;
  LoOpFlags = PPCII::MO_LO;

  // Don't use the pic base if not in PIC relocation model.
2902 if (IsPIC) { 2903 HiOpFlags |= PPCII::MO_PIC_FLAG; 2904 LoOpFlags |= PPCII::MO_PIC_FLAG; 2905 } 2906 } 2907 2908 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC, 2909 SelectionDAG &DAG) { 2910 SDLoc DL(HiPart); 2911 EVT PtrVT = HiPart.getValueType(); 2912 SDValue Zero = DAG.getConstant(0, DL, PtrVT); 2913 2914 SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero); 2915 SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero); 2916 2917 // With PIC, the first instruction is actually "GR+hi(&G)". 2918 if (isPIC) 2919 Hi = DAG.getNode(ISD::ADD, DL, PtrVT, 2920 DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi); 2921 2922 // Generate non-pic code that has direct accesses to the constant pool. 2923 // The address of the global is just (hi(&g)+lo(&g)). 2924 return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo); 2925 } 2926 2927 static void setUsesTOCBasePtr(MachineFunction &MF) { 2928 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2929 FuncInfo->setUsesTOCBasePtr(); 2930 } 2931 2932 static void setUsesTOCBasePtr(SelectionDAG &DAG) { 2933 setUsesTOCBasePtr(DAG.getMachineFunction()); 2934 } 2935 2936 SDValue PPCTargetLowering::getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, 2937 SDValue GA) const { 2938 const bool Is64Bit = Subtarget.isPPC64(); 2939 EVT VT = Is64Bit ? MVT::i64 : MVT::i32; 2940 SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT) 2941 : Subtarget.isAIXABI() 2942 ? DAG.getRegister(PPC::R2, VT) 2943 : DAG.getNode(PPCISD::GlobalBaseReg, dl, VT); 2944 SDValue Ops[] = { GA, Reg }; 2945 return DAG.getMemIntrinsicNode( 2946 PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT, 2947 MachinePointerInfo::getGOT(DAG.getMachineFunction()), None, 2948 MachineMemOperand::MOLoad); 2949 } 2950 2951 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op, 2952 SelectionDAG &DAG) const { 2953 EVT PtrVT = Op.getValueType(); 2954 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 2955 const Constant *C = CP->getConstVal(); 2956 2957 // 64-bit SVR4 ABI and AIX ABI code are always position-independent. 2958 // The actual address of the GlobalValue is stored in the TOC. 2959 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { 2960 if (Subtarget.isUsingPCRelativeCalls()) { 2961 SDLoc DL(CP); 2962 EVT Ty = getPointerTy(DAG.getDataLayout()); 2963 SDValue ConstPool = DAG.getTargetConstantPool( 2964 C, Ty, CP->getAlign(), CP->getOffset(), PPCII::MO_PCREL_FLAG); 2965 return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, ConstPool); 2966 } 2967 setUsesTOCBasePtr(DAG); 2968 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0); 2969 return getTOCEntry(DAG, SDLoc(CP), GA); 2970 } 2971 2972 unsigned MOHiFlag, MOLoFlag; 2973 bool IsPIC = isPositionIndependent(); 2974 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2975 2976 if (IsPIC && Subtarget.isSVR4ABI()) { 2977 SDValue GA = 2978 DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), PPCII::MO_PIC_FLAG); 2979 return getTOCEntry(DAG, SDLoc(CP), GA); 2980 } 2981 2982 SDValue CPIHi = 2983 DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOHiFlag); 2984 SDValue CPILo = 2985 DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOLoFlag); 2986 return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG); 2987 } 2988 2989 // For 64-bit PowerPC, prefer the more compact relative encodings. 2990 // This trades 32 bits per jump table entry for one or two instructions 2991 // on the jump site. 
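// For example, with EK_LabelDifference32 each jump table entry is emitted as
// a 32-bit label difference such as
//   .long .LBB0_3-.LJTI0_0
// and the dispatch sequence adds the loaded entry back to the table base,
// rather than loading a full 64-bit absolute address from the table.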
2992 unsigned PPCTargetLowering::getJumpTableEncoding() const { 2993 if (isJumpTableRelative()) 2994 return MachineJumpTableInfo::EK_LabelDifference32; 2995 2996 return TargetLowering::getJumpTableEncoding(); 2997 } 2998 2999 bool PPCTargetLowering::isJumpTableRelative() const { 3000 if (UseAbsoluteJumpTables) 3001 return false; 3002 if (Subtarget.isPPC64() || Subtarget.isAIXABI()) 3003 return true; 3004 return TargetLowering::isJumpTableRelative(); 3005 } 3006 3007 SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table, 3008 SelectionDAG &DAG) const { 3009 if (!Subtarget.isPPC64() || Subtarget.isAIXABI()) 3010 return TargetLowering::getPICJumpTableRelocBase(Table, DAG); 3011 3012 switch (getTargetMachine().getCodeModel()) { 3013 case CodeModel::Small: 3014 case CodeModel::Medium: 3015 return TargetLowering::getPICJumpTableRelocBase(Table, DAG); 3016 default: 3017 return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(), 3018 getPointerTy(DAG.getDataLayout())); 3019 } 3020 } 3021 3022 const MCExpr * 3023 PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF, 3024 unsigned JTI, 3025 MCContext &Ctx) const { 3026 if (!Subtarget.isPPC64() || Subtarget.isAIXABI()) 3027 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); 3028 3029 switch (getTargetMachine().getCodeModel()) { 3030 case CodeModel::Small: 3031 case CodeModel::Medium: 3032 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); 3033 default: 3034 return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx); 3035 } 3036 } 3037 3038 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 3039 EVT PtrVT = Op.getValueType(); 3040 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 3041 3042 // isUsingPCRelativeCalls() returns true when PCRelative is enabled 3043 if (Subtarget.isUsingPCRelativeCalls()) { 3044 SDLoc DL(JT); 3045 EVT Ty = getPointerTy(DAG.getDataLayout()); 3046 SDValue GA = 3047 DAG.getTargetJumpTable(JT->getIndex(), Ty, PPCII::MO_PCREL_FLAG); 3048 SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA); 3049 return MatAddr; 3050 } 3051 3052 // 64-bit SVR4 ABI and AIX ABI code are always position-independent. 3053 // The actual address of the GlobalValue is stored in the TOC. 
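  // (With the medium code model, such a TOC access typically materializes as
  //  an addis/ld pair against the TOC pointer in X2, e.g.
  //    addis 3, 2, .LC0@toc@ha
  //    ld 3, .LC0@toc@l(3)
  //  where .LC0 is the TOC entry holding the jump table's address.)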
3054 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { 3055 setUsesTOCBasePtr(DAG); 3056 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 3057 return getTOCEntry(DAG, SDLoc(JT), GA); 3058 } 3059 3060 unsigned MOHiFlag, MOLoFlag; 3061 bool IsPIC = isPositionIndependent(); 3062 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 3063 3064 if (IsPIC && Subtarget.isSVR4ABI()) { 3065 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, 3066 PPCII::MO_PIC_FLAG); 3067 return getTOCEntry(DAG, SDLoc(GA), GA); 3068 } 3069 3070 SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag); 3071 SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag); 3072 return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG); 3073 } 3074 3075 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op, 3076 SelectionDAG &DAG) const { 3077 EVT PtrVT = Op.getValueType(); 3078 BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op); 3079 const BlockAddress *BA = BASDN->getBlockAddress(); 3080 3081 // isUsingPCRelativeCalls() returns true when PCRelative is enabled 3082 if (Subtarget.isUsingPCRelativeCalls()) { 3083 SDLoc DL(BASDN); 3084 EVT Ty = getPointerTy(DAG.getDataLayout()); 3085 SDValue GA = DAG.getTargetBlockAddress(BA, Ty, BASDN->getOffset(), 3086 PPCII::MO_PCREL_FLAG); 3087 SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA); 3088 return MatAddr; 3089 } 3090 3091 // 64-bit SVR4 ABI and AIX ABI code are always position-independent. 3092 // The actual BlockAddress is stored in the TOC. 3093 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { 3094 setUsesTOCBasePtr(DAG); 3095 SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()); 3096 return getTOCEntry(DAG, SDLoc(BASDN), GA); 3097 } 3098 3099 // 32-bit position-independent ELF stores the BlockAddress in the .got. 3100 if (Subtarget.is32BitELFABI() && isPositionIndependent()) 3101 return getTOCEntry( 3102 DAG, SDLoc(BASDN), 3103 DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset())); 3104 3105 unsigned MOHiFlag, MOLoFlag; 3106 bool IsPIC = isPositionIndependent(); 3107 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 3108 SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag); 3109 SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag); 3110 return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG); 3111 } 3112 3113 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, 3114 SelectionDAG &DAG) const { 3115 // FIXME: TLS addresses currently use medium model code sequences, 3116 // which is the most useful form. Eventually support for small and 3117 // large models could be added if users need it, at the cost of 3118 // additional complexity. 
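  // For reference, the medium-model local-exec sequence built below is
  // roughly:
  //   addis 3, 13, x@tprel@ha
  //   addi 3, 3, x@tprel@l
  // i.e. a 32-bit offset from the thread pointer, which lives in X13 on
  // 64-bit targets and R2 on 32-bit targets.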
3119 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 3120 if (DAG.getTarget().useEmulatedTLS()) 3121 return LowerToTLSEmulatedModel(GA, DAG); 3122 3123 SDLoc dl(GA); 3124 const GlobalValue *GV = GA->getGlobal(); 3125 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3126 bool is64bit = Subtarget.isPPC64(); 3127 const Module *M = DAG.getMachineFunction().getFunction().getParent(); 3128 PICLevel::Level picLevel = M->getPICLevel(); 3129 3130 const TargetMachine &TM = getTargetMachine(); 3131 TLSModel::Model Model = TM.getTLSModel(GV); 3132 3133 if (Model == TLSModel::LocalExec) { 3134 if (Subtarget.isUsingPCRelativeCalls()) { 3135 SDValue TLSReg = DAG.getRegister(PPC::X13, MVT::i64); 3136 SDValue TGA = DAG.getTargetGlobalAddress( 3137 GV, dl, PtrVT, 0, (PPCII::MO_PCREL_FLAG | PPCII::MO_TPREL_FLAG)); 3138 SDValue MatAddr = 3139 DAG.getNode(PPCISD::TLS_LOCAL_EXEC_MAT_ADDR, dl, PtrVT, TGA); 3140 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TLSReg, MatAddr); 3141 } 3142 3143 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 3144 PPCII::MO_TPREL_HA); 3145 SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 3146 PPCII::MO_TPREL_LO); 3147 SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64) 3148 : DAG.getRegister(PPC::R2, MVT::i32); 3149 3150 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg); 3151 return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi); 3152 } 3153 3154 if (Model == TLSModel::InitialExec) { 3155 bool IsPCRel = Subtarget.isUsingPCRelativeCalls(); 3156 SDValue TGA = DAG.getTargetGlobalAddress( 3157 GV, dl, PtrVT, 0, IsPCRel ? PPCII::MO_GOT_TPREL_PCREL_FLAG : 0); 3158 SDValue TGATLS = DAG.getTargetGlobalAddress( 3159 GV, dl, PtrVT, 0, 3160 IsPCRel ? (PPCII::MO_TLS | PPCII::MO_PCREL_FLAG) : PPCII::MO_TLS); 3161 SDValue TPOffset; 3162 if (IsPCRel) { 3163 SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, dl, PtrVT, TGA); 3164 TPOffset = DAG.getLoad(MVT::i64, dl, DAG.getEntryNode(), MatPCRel, 3165 MachinePointerInfo()); 3166 } else { 3167 SDValue GOTPtr; 3168 if (is64bit) { 3169 setUsesTOCBasePtr(DAG); 3170 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 3171 GOTPtr = 3172 DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, PtrVT, GOTReg, TGA); 3173 } else { 3174 if (!TM.isPositionIndependent()) 3175 GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT); 3176 else if (picLevel == PICLevel::SmallPIC) 3177 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 3178 else 3179 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 3180 } 3181 TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, PtrVT, TGA, GOTPtr); 3182 } 3183 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS); 3184 } 3185 3186 if (Model == TLSModel::GeneralDynamic) { 3187 if (Subtarget.isUsingPCRelativeCalls()) { 3188 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 3189 PPCII::MO_GOT_TLSGD_PCREL_FLAG); 3190 return DAG.getNode(PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR, dl, PtrVT, TGA); 3191 } 3192 3193 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 3194 SDValue GOTPtr; 3195 if (is64bit) { 3196 setUsesTOCBasePtr(DAG); 3197 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 3198 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT, 3199 GOTReg, TGA); 3200 } else { 3201 if (picLevel == PICLevel::SmallPIC) 3202 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 3203 else 3204 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 3205 } 3206 return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT, 3207 GOTPtr, TGA, TGA); 3208 } 3209 3210 if 
(Model == TLSModel::LocalDynamic) { 3211 if (Subtarget.isUsingPCRelativeCalls()) { 3212 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 3213 PPCII::MO_GOT_TLSLD_PCREL_FLAG); 3214 SDValue MatPCRel = 3215 DAG.getNode(PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR, dl, PtrVT, TGA); 3216 return DAG.getNode(PPCISD::PADDI_DTPREL, dl, PtrVT, MatPCRel, TGA); 3217 } 3218 3219 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 3220 SDValue GOTPtr; 3221 if (is64bit) { 3222 setUsesTOCBasePtr(DAG); 3223 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 3224 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT, 3225 GOTReg, TGA); 3226 } else { 3227 if (picLevel == PICLevel::SmallPIC) 3228 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 3229 else 3230 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 3231 } 3232 SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl, 3233 PtrVT, GOTPtr, TGA, TGA); 3234 SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl, 3235 PtrVT, TLSAddr, TGA); 3236 return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA); 3237 } 3238 3239 llvm_unreachable("Unknown TLS model!"); 3240 } 3241 3242 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op, 3243 SelectionDAG &DAG) const { 3244 EVT PtrVT = Op.getValueType(); 3245 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); 3246 SDLoc DL(GSDN); 3247 const GlobalValue *GV = GSDN->getGlobal(); 3248 3249 // 64-bit SVR4 ABI & AIX ABI code is always position-independent. 3250 // The actual address of the GlobalValue is stored in the TOC. 3251 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { 3252 if (Subtarget.isUsingPCRelativeCalls()) { 3253 EVT Ty = getPointerTy(DAG.getDataLayout()); 3254 if (isAccessedAsGotIndirect(Op)) { 3255 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(), 3256 PPCII::MO_PCREL_FLAG | 3257 PPCII::MO_GOT_FLAG); 3258 SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA); 3259 SDValue Load = DAG.getLoad(MVT::i64, DL, DAG.getEntryNode(), MatPCRel, 3260 MachinePointerInfo()); 3261 return Load; 3262 } else { 3263 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(), 3264 PPCII::MO_PCREL_FLAG); 3265 return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA); 3266 } 3267 } 3268 setUsesTOCBasePtr(DAG); 3269 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset()); 3270 return getTOCEntry(DAG, DL, GA); 3271 } 3272 3273 unsigned MOHiFlag, MOLoFlag; 3274 bool IsPIC = isPositionIndependent(); 3275 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV); 3276 3277 if (IsPIC && Subtarget.isSVR4ABI()) { 3278 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 3279 GSDN->getOffset(), 3280 PPCII::MO_PIC_FLAG); 3281 return getTOCEntry(DAG, DL, GA); 3282 } 3283 3284 SDValue GAHi = 3285 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag); 3286 SDValue GALo = 3287 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag); 3288 3289 return LowerLabelRef(GAHi, GALo, IsPIC, DAG); 3290 } 3291 3292 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 3293 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 3294 SDLoc dl(Op); 3295 3296 if (Op.getValueType() == MVT::v2i64) { 3297 // When the operands themselves are v2i64 values, we need to do something 3298 // special because VSX has no underlying comparison operations for these. 
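    // Equality decomposes cleanly: a 64-bit lane is equal exactly when both
    // of its 32-bit halves are equal. Ordered comparisons (setlt, setgt,
    // etc.) would need carry/borrow logic across the two halves, so they are
    // left to be expanded.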
3299 if (Op.getOperand(0).getValueType() == MVT::v2i64) { 3300 // Equality can be handled by casting to the legal type for Altivec 3301 // comparisons, everything else needs to be expanded. 3302 if (CC == ISD::SETEQ || CC == ISD::SETNE) { 3303 return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, 3304 DAG.getSetCC(dl, MVT::v4i32, 3305 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)), 3306 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)), 3307 CC)); 3308 } 3309 3310 return SDValue(); 3311 } 3312 3313 // We handle most of these in the usual way. 3314 return Op; 3315 } 3316 3317 // If we're comparing for equality to zero, expose the fact that this is 3318 // implemented as a ctlz/srl pair on ppc, so that the dag combiner can 3319 // fold the new nodes. 3320 if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG)) 3321 return V; 3322 3323 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 3324 // Leave comparisons against 0 and -1 alone for now, since they're usually 3325 // optimized. FIXME: revisit this when we can custom lower all setcc 3326 // optimizations. 3327 if (C->isAllOnesValue() || C->isNullValue()) 3328 return SDValue(); 3329 } 3330 3331 // If we have an integer seteq/setne, turn it into a compare against zero 3332 // by xor'ing the rhs with the lhs, which is faster than setting a 3333 // condition register, reading it back out, and masking the correct bit. The 3334 // normal approach here uses sub to do this instead of xor. Using xor exposes 3335 // the result to other bit-twiddling opportunities. 3336 EVT LHSVT = Op.getOperand(0).getValueType(); 3337 if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) { 3338 EVT VT = Op.getValueType(); 3339 SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0), 3340 Op.getOperand(1)); 3341 return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC); 3342 } 3343 return SDValue(); 3344 } 3345 3346 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { 3347 SDNode *Node = Op.getNode(); 3348 EVT VT = Node->getValueType(0); 3349 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3350 SDValue InChain = Node->getOperand(0); 3351 SDValue VAListPtr = Node->getOperand(1); 3352 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 3353 SDLoc dl(Node); 3354 3355 assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only"); 3356 3357 // gpr_index 3358 SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 3359 VAListPtr, MachinePointerInfo(SV), MVT::i8); 3360 InChain = GprIndex.getValue(1); 3361 3362 if (VT == MVT::i64) { 3363 // Check if GprIndex is even 3364 SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex, 3365 DAG.getConstant(1, dl, MVT::i32)); 3366 SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd, 3367 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE); 3368 SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex, 3369 DAG.getConstant(1, dl, MVT::i32)); 3370 // Align GprIndex to be even if it isn't 3371 GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne, 3372 GprIndex); 3373 } 3374 3375 // fpr index is 1 byte after gpr 3376 SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 3377 DAG.getConstant(1, dl, MVT::i32)); 3378 3379 // fpr 3380 SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 3381 FprPtr, MachinePointerInfo(SV), MVT::i8); 3382 InChain = FprIndex.getValue(1); 3383 3384 SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 3385 DAG.getConstant(8, 
dl, MVT::i32));

  SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
                                        DAG.getConstant(4, dl, MVT::i32));

  // areas
  SDValue OverflowArea =
      DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
  InChain = OverflowArea.getValue(1);

  SDValue RegSaveArea =
      DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
  InChain = RegSaveArea.getValue(1);

  // Select overflow_area if index >= 8 (all register slots for this class
  // are used); CC is true while index < 8.
  SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
                            DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);

  // adjustment constant gpr_index * 4/8
  SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
                                    VT.isInteger() ? GprIndex : FprIndex,
                                    DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
                                                    MVT::i32));

  // OurReg = RegSaveArea + RegConstant
  SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
                               RegConstant);

  // Floating types are 32 bytes into RegSaveArea
  if (VT.isFloatingPoint())
    OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
                         DAG.getConstant(32, dl, MVT::i32));

  // increase {f,g}pr_index by 1 (or 2 if VT is i64)
  SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
                                   VT.isInteger() ? GprIndex : FprIndex,
                                   DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
                                                   MVT::i32));

  InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
                              VT.isInteger() ? VAListPtr : FprPtr,
                              MachinePointerInfo(SV), MVT::i8);

  // determine if we should load from reg_save_area or overflow_area
  SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);

  // increase overflow_area by 4/8 if gpr/fpr index >= 8
  SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
                                          DAG.getConstant(VT.isInteger() ?
4 : 8, 3434 dl, MVT::i32)); 3435 3436 OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea, 3437 OverflowAreaPlusN); 3438 3439 InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr, 3440 MachinePointerInfo(), MVT::i32); 3441 3442 return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo()); 3443 } 3444 3445 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const { 3446 assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only"); 3447 3448 // We have to copy the entire va_list struct: 3449 // 2*sizeof(char) + 2 Byte alignment + 2*sizeof(char*) = 12 Byte 3450 return DAG.getMemcpy(Op.getOperand(0), Op, Op.getOperand(1), Op.getOperand(2), 3451 DAG.getConstant(12, SDLoc(Op), MVT::i32), Align(8), 3452 false, true, false, MachinePointerInfo(), 3453 MachinePointerInfo()); 3454 } 3455 3456 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op, 3457 SelectionDAG &DAG) const { 3458 if (Subtarget.isAIXABI()) 3459 report_fatal_error("ADJUST_TRAMPOLINE operation is not supported on AIX."); 3460 3461 return Op.getOperand(0); 3462 } 3463 3464 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, 3465 SelectionDAG &DAG) const { 3466 if (Subtarget.isAIXABI()) 3467 report_fatal_error("INIT_TRAMPOLINE operation is not supported on AIX."); 3468 3469 SDValue Chain = Op.getOperand(0); 3470 SDValue Trmp = Op.getOperand(1); // trampoline 3471 SDValue FPtr = Op.getOperand(2); // nested function 3472 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 3473 SDLoc dl(Op); 3474 3475 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3476 bool isPPC64 = (PtrVT == MVT::i64); 3477 Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext()); 3478 3479 TargetLowering::ArgListTy Args; 3480 TargetLowering::ArgListEntry Entry; 3481 3482 Entry.Ty = IntPtrTy; 3483 Entry.Node = Trmp; Args.push_back(Entry); 3484 3485 // TrampSize == (isPPC64 ? 48 : 40); 3486 Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl, 3487 isPPC64 ? MVT::i64 : MVT::i32); 3488 Args.push_back(Entry); 3489 3490 Entry.Node = FPtr; Args.push_back(Entry); 3491 Entry.Node = Nest; Args.push_back(Entry); 3492 3493 // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg) 3494 TargetLowering::CallLoweringInfo CLI(DAG); 3495 CLI.setDebugLoc(dl).setChain(Chain).setLibCallee( 3496 CallingConv::C, Type::getVoidTy(*DAG.getContext()), 3497 DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args)); 3498 3499 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 3500 return CallResult.second; 3501 } 3502 3503 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { 3504 MachineFunction &MF = DAG.getMachineFunction(); 3505 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3506 EVT PtrVT = getPointerTy(MF.getDataLayout()); 3507 3508 SDLoc dl(Op); 3509 3510 if (Subtarget.isPPC64() || Subtarget.isAIXABI()) { 3511 // vastart just stores the address of the VarArgsFrameIndex slot into the 3512 // memory location argument. 3513 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 3514 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 3515 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 3516 MachinePointerInfo(SV)); 3517 } 3518 3519 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct. 3520 // We suppose the given va_list is already allocated. 
3521 // 3522 // typedef struct { 3523 // char gpr; /* index into the array of 8 GPRs 3524 // * stored in the register save area 3525 // * gpr=0 corresponds to r3, 3526 // * gpr=1 to r4, etc. 3527 // */ 3528 // char fpr; /* index into the array of 8 FPRs 3529 // * stored in the register save area 3530 // * fpr=0 corresponds to f1, 3531 // * fpr=1 to f2, etc. 3532 // */ 3533 // char *overflow_arg_area; 3534 // /* location on stack that holds 3535 // * the next overflow argument 3536 // */ 3537 // char *reg_save_area; 3538 // /* where r3:r10 and f1:f8 (if saved) 3539 // * are stored 3540 // */ 3541 // } va_list[1]; 3542 3543 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32); 3544 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32); 3545 SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(), 3546 PtrVT); 3547 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 3548 PtrVT); 3549 3550 uint64_t FrameOffset = PtrVT.getSizeInBits()/8; 3551 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT); 3552 3553 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; 3554 SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT); 3555 3556 uint64_t FPROffset = 1; 3557 SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT); 3558 3559 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 3560 3561 // Store first byte : number of int regs 3562 SDValue firstStore = 3563 DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1), 3564 MachinePointerInfo(SV), MVT::i8); 3565 uint64_t nextOffset = FPROffset; 3566 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1), 3567 ConstFPROffset); 3568 3569 // Store second byte : number of float regs 3570 SDValue secondStore = 3571 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, 3572 MachinePointerInfo(SV, nextOffset), MVT::i8); 3573 nextOffset += StackOffset; 3574 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset); 3575 3576 // Store second word : arguments given on stack 3577 SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, 3578 MachinePointerInfo(SV, nextOffset)); 3579 nextOffset += FrameOffset; 3580 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset); 3581 3582 // Store third word : arguments given in registers 3583 return DAG.getStore(thirdStore, dl, FR, nextPtr, 3584 MachinePointerInfo(SV, nextOffset)); 3585 } 3586 3587 /// FPR - The set of FP registers that should be allocated for arguments 3588 /// on Darwin and AIX. 3589 static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, 3590 PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10, 3591 PPC::F11, PPC::F12, PPC::F13}; 3592 3593 /// CalculateStackSlotSize - Calculates the size reserved for this argument on 3594 /// the stack. 3595 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, 3596 unsigned PtrByteSize) { 3597 unsigned ArgSize = ArgVT.getStoreSize(); 3598 if (Flags.isByVal()) 3599 ArgSize = Flags.getByValSize(); 3600 3601 // Round up to multiples of the pointer size, except for array members, 3602 // which are always packed. 3603 if (!Flags.isInConsecutiveRegs()) 3604 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3605 3606 return ArgSize; 3607 } 3608 3609 /// CalculateStackSlotAlignment - Calculates the alignment of this argument 3610 /// on the stack. 
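/// For example, an Altivec vector argument (v4i32, v2f64, etc.) is aligned
/// to 16 bytes, while a plain i64 keeps the default pointer-size alignment
/// of 8 bytes.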
3611 static Align CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT, 3612 ISD::ArgFlagsTy Flags, 3613 unsigned PtrByteSize) { 3614 Align Alignment(PtrByteSize); 3615 3616 // Altivec parameters are padded to a 16 byte boundary. 3617 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 3618 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 3619 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 3620 ArgVT == MVT::v1i128 || ArgVT == MVT::f128) 3621 Alignment = Align(16); 3622 3623 // ByVal parameters are aligned as requested. 3624 if (Flags.isByVal()) { 3625 auto BVAlign = Flags.getNonZeroByValAlign(); 3626 if (BVAlign > PtrByteSize) { 3627 if (BVAlign.value() % PtrByteSize != 0) 3628 llvm_unreachable( 3629 "ByVal alignment is not a multiple of the pointer size"); 3630 3631 Alignment = BVAlign; 3632 } 3633 } 3634 3635 // Array members are always packed to their original alignment. 3636 if (Flags.isInConsecutiveRegs()) { 3637 // If the array member was split into multiple registers, the first 3638 // needs to be aligned to the size of the full type. (Except for 3639 // ppcf128, which is only aligned as its f64 components.) 3640 if (Flags.isSplit() && OrigVT != MVT::ppcf128) 3641 Alignment = Align(OrigVT.getStoreSize()); 3642 else 3643 Alignment = Align(ArgVT.getStoreSize()); 3644 } 3645 3646 return Alignment; 3647 } 3648 3649 /// CalculateStackSlotUsed - Return whether this argument will use its 3650 /// stack slot (instead of being passed in registers). ArgOffset, 3651 /// AvailableFPRs, and AvailableVRs must hold the current argument 3652 /// position, and will be updated to account for this argument. 3653 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, ISD::ArgFlagsTy Flags, 3654 unsigned PtrByteSize, unsigned LinkageSize, 3655 unsigned ParamAreaSize, unsigned &ArgOffset, 3656 unsigned &AvailableFPRs, 3657 unsigned &AvailableVRs) { 3658 bool UseMemory = false; 3659 3660 // Respect alignment of argument on the stack. 3661 Align Alignment = 3662 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 3663 ArgOffset = alignTo(ArgOffset, Alignment); 3664 // If there's no space left in the argument save area, we must 3665 // use memory (this check also catches zero-sized arguments). 3666 if (ArgOffset >= LinkageSize + ParamAreaSize) 3667 UseMemory = true; 3668 3669 // Allocate argument on the stack. 3670 ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 3671 if (Flags.isInConsecutiveRegsLast()) 3672 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3673 // If we overran the argument save area, we must use memory 3674 // (this check catches arguments passed partially in memory) 3675 if (ArgOffset > LinkageSize + ParamAreaSize) 3676 UseMemory = true; 3677 3678 // However, if the argument is actually passed in an FPR or a VR, 3679 // we don't use memory after all. 3680 if (!Flags.isByVal()) { 3681 if (ArgVT == MVT::f32 || ArgVT == MVT::f64) 3682 if (AvailableFPRs > 0) { 3683 --AvailableFPRs; 3684 return false; 3685 } 3686 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 3687 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 3688 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 3689 ArgVT == MVT::v1i128 || ArgVT == MVT::f128) 3690 if (AvailableVRs > 0) { 3691 --AvailableVRs; 3692 return false; 3693 } 3694 } 3695 3696 return UseMemory; 3697 } 3698 3699 /// EnsureStackAlignment - Round stack frame size up from NumBytes to 3700 /// ensure minimum alignment required for target. 
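/// For example, with a 16-byte target stack alignment, a frame size of 116
/// bytes is rounded up to 128.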
3701 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering, 3702 unsigned NumBytes) { 3703 return alignTo(NumBytes, Lowering->getStackAlign()); 3704 } 3705 3706 SDValue PPCTargetLowering::LowerFormalArguments( 3707 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3708 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3709 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3710 if (Subtarget.isAIXABI()) 3711 return LowerFormalArguments_AIX(Chain, CallConv, isVarArg, Ins, dl, DAG, 3712 InVals); 3713 if (Subtarget.is64BitELFABI()) 3714 return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG, 3715 InVals); 3716 assert(Subtarget.is32BitELFABI()); 3717 return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG, 3718 InVals); 3719 } 3720 3721 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4( 3722 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3723 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3724 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3725 3726 // 32-bit SVR4 ABI Stack Frame Layout: 3727 // +-----------------------------------+ 3728 // +--> | Back chain | 3729 // | +-----------------------------------+ 3730 // | | Floating-point register save area | 3731 // | +-----------------------------------+ 3732 // | | General register save area | 3733 // | +-----------------------------------+ 3734 // | | CR save word | 3735 // | +-----------------------------------+ 3736 // | | VRSAVE save word | 3737 // | +-----------------------------------+ 3738 // | | Alignment padding | 3739 // | +-----------------------------------+ 3740 // | | Vector register save area | 3741 // | +-----------------------------------+ 3742 // | | Local variable space | 3743 // | +-----------------------------------+ 3744 // | | Parameter list area | 3745 // | +-----------------------------------+ 3746 // | | LR save word | 3747 // | +-----------------------------------+ 3748 // SP--> +--- | Back chain | 3749 // +-----------------------------------+ 3750 // 3751 // Specifications: 3752 // System V Application Binary Interface PowerPC Processor Supplement 3753 // AltiVec Technology Programming Interface Manual 3754 3755 MachineFunction &MF = DAG.getMachineFunction(); 3756 MachineFrameInfo &MFI = MF.getFrameInfo(); 3757 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3758 3759 EVT PtrVT = getPointerTy(MF.getDataLayout()); 3760 // Potential tail calls could cause overwriting of argument stack slots. 3761 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 3762 (CallConv == CallingConv::Fast)); 3763 const Align PtrAlign(4); 3764 3765 // Assign locations to all of the incoming arguments. 3766 SmallVector<CCValAssign, 16> ArgLocs; 3767 PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 3768 *DAG.getContext()); 3769 3770 // Reserve space for the linkage area on the stack. 3771 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3772 CCInfo.AllocateStack(LinkageSize, PtrAlign); 3773 if (useSoftFloat()) 3774 CCInfo.PreAnalyzeFormalArguments(Ins); 3775 3776 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4); 3777 CCInfo.clearWasPPCF128(); 3778 3779 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 3780 CCValAssign &VA = ArgLocs[i]; 3781 3782 // Arguments stored in registers. 
3783 if (VA.isRegLoc()) { 3784 const TargetRegisterClass *RC; 3785 EVT ValVT = VA.getValVT(); 3786 3787 switch (ValVT.getSimpleVT().SimpleTy) { 3788 default: 3789 llvm_unreachable("ValVT not supported by formal arguments Lowering"); 3790 case MVT::i1: 3791 case MVT::i32: 3792 RC = &PPC::GPRCRegClass; 3793 break; 3794 case MVT::f32: 3795 if (Subtarget.hasP8Vector()) 3796 RC = &PPC::VSSRCRegClass; 3797 else if (Subtarget.hasSPE()) 3798 RC = &PPC::GPRCRegClass; 3799 else 3800 RC = &PPC::F4RCRegClass; 3801 break; 3802 case MVT::f64: 3803 if (Subtarget.hasVSX()) 3804 RC = &PPC::VSFRCRegClass; 3805 else if (Subtarget.hasSPE()) 3806 // SPE passes doubles in GPR pairs. 3807 RC = &PPC::GPRCRegClass; 3808 else 3809 RC = &PPC::F8RCRegClass; 3810 break; 3811 case MVT::v16i8: 3812 case MVT::v8i16: 3813 case MVT::v4i32: 3814 RC = &PPC::VRRCRegClass; 3815 break; 3816 case MVT::v4f32: 3817 RC = &PPC::VRRCRegClass; 3818 break; 3819 case MVT::v2f64: 3820 case MVT::v2i64: 3821 RC = &PPC::VRRCRegClass; 3822 break; 3823 } 3824 3825 SDValue ArgValue; 3826 // Transform the arguments stored in physical registers into 3827 // virtual ones. 3828 if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) { 3829 assert(i + 1 < e && "No second half of double precision argument"); 3830 unsigned RegLo = MF.addLiveIn(VA.getLocReg(), RC); 3831 unsigned RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC); 3832 SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32); 3833 SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32); 3834 if (!Subtarget.isLittleEndian()) 3835 std::swap (ArgValueLo, ArgValueHi); 3836 ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo, 3837 ArgValueHi); 3838 } else { 3839 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 3840 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, 3841 ValVT == MVT::i1 ? MVT::i32 : ValVT); 3842 if (ValVT == MVT::i1) 3843 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue); 3844 } 3845 3846 InVals.push_back(ArgValue); 3847 } else { 3848 // Argument stored in memory. 3849 assert(VA.isMemLoc()); 3850 3851 // Get the extended size of the argument type in stack 3852 unsigned ArgSize = VA.getLocVT().getStoreSize(); 3853 // Get the actual size of the argument type 3854 unsigned ObjSize = VA.getValVT().getStoreSize(); 3855 unsigned ArgOffset = VA.getLocMemOffset(); 3856 // Stack objects in PPC32 are right justified. 3857 ArgOffset += ArgSize - ObjSize; 3858 int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, isImmutable); 3859 3860 // Create load nodes to retrieve arguments from the stack. 3861 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3862 InVals.push_back( 3863 DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo())); 3864 } 3865 } 3866 3867 // Assign locations to all of the incoming aggregate by value arguments. 3868 // Aggregates passed by value are stored in the local variable space of the 3869 // caller's stack frame, right above the parameter list area. 3870 SmallVector<CCValAssign, 16> ByValArgLocs; 3871 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), 3872 ByValArgLocs, *DAG.getContext()); 3873 3874 // Reserve stack space for the allocations in CCInfo. 3875 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign); 3876 3877 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal); 3878 3879 // Area that is at least reserved in the caller of this function. 
3880 unsigned MinReservedArea = CCByValInfo.getNextStackOffset(); 3881 MinReservedArea = std::max(MinReservedArea, LinkageSize); 3882 3883 // Set the size that is at least reserved in caller of this function. Tail 3884 // call optimized function's reserved stack space needs to be aligned so that 3885 // taking the difference between two stack areas will result in an aligned 3886 // stack. 3887 MinReservedArea = 3888 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 3889 FuncInfo->setMinReservedArea(MinReservedArea); 3890 3891 SmallVector<SDValue, 8> MemOps; 3892 3893 // If the function takes variable number of arguments, make a frame index for 3894 // the start of the first vararg value... for expansion of llvm.va_start. 3895 if (isVarArg) { 3896 static const MCPhysReg GPArgRegs[] = { 3897 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 3898 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 3899 }; 3900 const unsigned NumGPArgRegs = array_lengthof(GPArgRegs); 3901 3902 static const MCPhysReg FPArgRegs[] = { 3903 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 3904 PPC::F8 3905 }; 3906 unsigned NumFPArgRegs = array_lengthof(FPArgRegs); 3907 3908 if (useSoftFloat() || hasSPE()) 3909 NumFPArgRegs = 0; 3910 3911 FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs)); 3912 FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs)); 3913 3914 // Make room for NumGPArgRegs and NumFPArgRegs. 3915 int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 + 3916 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8; 3917 3918 FuncInfo->setVarArgsStackOffset( 3919 MFI.CreateFixedObject(PtrVT.getSizeInBits()/8, 3920 CCInfo.getNextStackOffset(), true)); 3921 3922 FuncInfo->setVarArgsFrameIndex( 3923 MFI.CreateStackObject(Depth, Align(8), false)); 3924 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 3925 3926 // The fixed integer arguments of a variadic function are stored to the 3927 // VarArgsFrameIndex on the stack so that they may be loaded by 3928 // dereferencing the result of va_next. 3929 for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) { 3930 // Get an existing live-in vreg, or add a new one. 3931 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]); 3932 if (!VReg) 3933 VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass); 3934 3935 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3936 SDValue Store = 3937 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 3938 MemOps.push_back(Store); 3939 // Increment the address by four for the next argument to store 3940 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT); 3941 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3942 } 3943 3944 // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6 3945 // is set. 3946 // The double arguments are stored to the VarArgsFrameIndex 3947 // on the stack. 3948 for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) { 3949 // Get an existing live-in vreg, or add a new one. 
3950 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]); 3951 if (!VReg) 3952 VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass); 3953 3954 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64); 3955 SDValue Store = 3956 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 3957 MemOps.push_back(Store); 3958 // Increment the address by eight for the next argument to store 3959 SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl, 3960 PtrVT); 3961 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3962 } 3963 } 3964 3965 if (!MemOps.empty()) 3966 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 3967 3968 return Chain; 3969 } 3970 3971 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3972 // value to MVT::i64 and then truncate to the correct register size. 3973 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, 3974 EVT ObjectVT, SelectionDAG &DAG, 3975 SDValue ArgVal, 3976 const SDLoc &dl) const { 3977 if (Flags.isSExt()) 3978 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal, 3979 DAG.getValueType(ObjectVT)); 3980 else if (Flags.isZExt()) 3981 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal, 3982 DAG.getValueType(ObjectVT)); 3983 3984 return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal); 3985 } 3986 3987 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4( 3988 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3989 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3990 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3991 // TODO: add description of PPC stack frame format, or at least some docs. 3992 // 3993 bool isELFv2ABI = Subtarget.isELFv2ABI(); 3994 bool isLittleEndian = Subtarget.isLittleEndian(); 3995 MachineFunction &MF = DAG.getMachineFunction(); 3996 MachineFrameInfo &MFI = MF.getFrameInfo(); 3997 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3998 3999 assert(!(CallConv == CallingConv::Fast && isVarArg) && 4000 "fastcc not supported on varargs functions"); 4001 4002 EVT PtrVT = getPointerTy(MF.getDataLayout()); 4003 // Potential tail calls could cause overwriting of argument stack slots. 4004 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 4005 (CallConv == CallingConv::Fast)); 4006 unsigned PtrByteSize = 8; 4007 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 4008 4009 static const MCPhysReg GPR[] = { 4010 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 4011 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 4012 }; 4013 static const MCPhysReg VR[] = { 4014 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 4015 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 4016 }; 4017 4018 const unsigned Num_GPR_Regs = array_lengthof(GPR); 4019 const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13; 4020 const unsigned Num_VR_Regs = array_lengthof(VR); 4021 4022 // Do a first pass over the arguments to determine whether the ABI 4023 // guarantees that our caller has allocated the parameter save area 4024 // on its stack frame. In the ELFv1 ABI, this is always the case; 4025 // in the ELFv2 ABI, it is true if this is a vararg function or if 4026 // any parameter is located in a stack slot. 
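  // For example, under ELFv2 a function taking only a couple of integer
  // arguments receives them in GPRs, and its caller need not allocate the
  // parameter save area at all; a vararg function, or any argument assigned
  // a stack slot, implies the full 64-byte (8 doubleword) save area exists
  // right after the linkage area.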
4027 4028 bool HasParameterArea = !isELFv2ABI || isVarArg; 4029 unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize; 4030 unsigned NumBytes = LinkageSize; 4031 unsigned AvailableFPRs = Num_FPR_Regs; 4032 unsigned AvailableVRs = Num_VR_Regs; 4033 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 4034 if (Ins[i].Flags.isNest()) 4035 continue; 4036 4037 if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags, 4038 PtrByteSize, LinkageSize, ParamAreaSize, 4039 NumBytes, AvailableFPRs, AvailableVRs)) 4040 HasParameterArea = true; 4041 } 4042 4043 // Add DAG nodes to load the arguments or copy them out of registers. On 4044 // entry to a function on PPC, the arguments start after the linkage area, 4045 // although the first ones are often in registers. 4046 4047 unsigned ArgOffset = LinkageSize; 4048 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 4049 SmallVector<SDValue, 8> MemOps; 4050 Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin(); 4051 unsigned CurArgIdx = 0; 4052 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 4053 SDValue ArgVal; 4054 bool needsLoad = false; 4055 EVT ObjectVT = Ins[ArgNo].VT; 4056 EVT OrigVT = Ins[ArgNo].ArgVT; 4057 unsigned ObjSize = ObjectVT.getStoreSize(); 4058 unsigned ArgSize = ObjSize; 4059 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 4060 if (Ins[ArgNo].isOrigArg()) { 4061 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); 4062 CurArgIdx = Ins[ArgNo].getOrigArgIndex(); 4063 } 4064 // We re-align the argument offset for each argument, except when using the 4065 // fast calling convention, when we need to make sure we do that only when 4066 // we'll actually use a stack slot. 4067 unsigned CurArgOffset; 4068 Align Alignment; 4069 auto ComputeArgOffset = [&]() { 4070 /* Respect alignment of argument on the stack. */ 4071 Alignment = 4072 CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize); 4073 ArgOffset = alignTo(ArgOffset, Alignment); 4074 CurArgOffset = ArgOffset; 4075 }; 4076 4077 if (CallConv != CallingConv::Fast) { 4078 ComputeArgOffset(); 4079 4080 /* Compute GPR index associated with argument offset. */ 4081 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 4082 GPR_idx = std::min(GPR_idx, Num_GPR_Regs); 4083 } 4084 4085 // FIXME the codegen can be much improved in some cases. 4086 // We do not have to keep everything in memory. 4087 if (Flags.isByVal()) { 4088 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit"); 4089 4090 if (CallConv == CallingConv::Fast) 4091 ComputeArgOffset(); 4092 4093 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 4094 ObjSize = Flags.getByValSize(); 4095 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 4096 // Empty aggregate parameters do not take up registers. Examples: 4097 // struct { } a; 4098 // union { } b; 4099 // int c[0]; 4100 // etc. However, we have to provide a place-holder in InVals, so 4101 // pretend we have an 8-byte item at the current address for that 4102 // purpose. 4103 if (!ObjSize) { 4104 int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true); 4105 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4106 InVals.push_back(FIN); 4107 continue; 4108 } 4109 4110 // Create a stack object covering all stack doublewords occupied 4111 // by the argument. If the argument is (fully or partially) on 4112 // the stack, or if the argument is fully in registers but the 4113 // caller has allocated the parameter save anyway, we can refer 4114 // directly to the caller's stack frame. 
Otherwise, create a 4115 // local copy in our own frame. 4116 int FI; 4117 if (HasParameterArea || 4118 ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize) 4119 FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true); 4120 else 4121 FI = MFI.CreateStackObject(ArgSize, Alignment, false); 4122 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4123 4124 // Handle aggregates smaller than 8 bytes. 4125 if (ObjSize < PtrByteSize) { 4126 // The value of the object is its address, which differs from the 4127 // address of the enclosing doubleword on big-endian systems. 4128 SDValue Arg = FIN; 4129 if (!isLittleEndian) { 4130 SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT); 4131 Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff); 4132 } 4133 InVals.push_back(Arg); 4134 4135 if (GPR_idx != Num_GPR_Regs) { 4136 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 4137 FuncInfo->addLiveInAttr(VReg, Flags); 4138 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 4139 SDValue Store; 4140 4141 if (ObjSize==1 || ObjSize==2 || ObjSize==4) { 4142 EVT ObjType = (ObjSize == 1 ? MVT::i8 : 4143 (ObjSize == 2 ? MVT::i16 : MVT::i32)); 4144 Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg, 4145 MachinePointerInfo(&*FuncArg), ObjType); 4146 } else { 4147 // For sizes that don't fit a truncating store (3, 5, 6, 7), 4148 // store the whole register as-is to the parameter save area 4149 // slot. 4150 Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 4151 MachinePointerInfo(&*FuncArg)); 4152 } 4153 4154 MemOps.push_back(Store); 4155 } 4156 // Whether we copied from a register or not, advance the offset 4157 // into the parameter save area by a full doubleword. 4158 ArgOffset += PtrByteSize; 4159 continue; 4160 } 4161 4162 // The value of the object is its address, which is the address of 4163 // its first stack doubleword. 4164 InVals.push_back(FIN); 4165 4166 // Store whatever pieces of the object are in registers to memory. 4167 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 4168 if (GPR_idx == Num_GPR_Regs) 4169 break; 4170 4171 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 4172 FuncInfo->addLiveInAttr(VReg, Flags); 4173 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 4174 SDValue Addr = FIN; 4175 if (j) { 4176 SDValue Off = DAG.getConstant(j, dl, PtrVT); 4177 Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off); 4178 } 4179 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr, 4180 MachinePointerInfo(&*FuncArg, j)); 4181 MemOps.push_back(Store); 4182 ++GPR_idx; 4183 } 4184 ArgOffset += ArgSize; 4185 continue; 4186 } 4187 4188 switch (ObjectVT.getSimpleVT().SimpleTy) { 4189 default: llvm_unreachable("Unhandled argument type!"); 4190 case MVT::i1: 4191 case MVT::i32: 4192 case MVT::i64: 4193 if (Flags.isNest()) { 4194 // The 'nest' parameter, if any, is passed in R11. 4195 unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass); 4196 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 4197 4198 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 4199 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 4200 4201 break; 4202 } 4203 4204 // These can be scalar arguments or elements of an integer array type 4205 // passed directly. Clang may use those instead of "byval" aggregate 4206 // types to avoid forcing arguments to memory unnecessarily. 
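      // (For instance, Clang may pass a small aggregate as one or two direct
      // i64 values rather than as a "byval" pointer; such pieces land in
      // this case and consume GPRs while any remain.)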

      // These can be scalar arguments or elements of an integer array type
      // passed directly.  Clang may use those instead of "byval" aggregate
      // types to avoid forcing arguments to memory unnecessarily.
      if (GPR_idx != Num_GPR_Regs) {
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
        FuncInfo->addLiveInAttr(VReg, Flags);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
          // PPC64 passes i8, i16, and i32 values in i64 registers.  Promote
          // the value to MVT::i64 and then truncate to the correct register
          // size.
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
      } else {
        if (CallConv == CallingConv::Fast)
          ComputeArgOffset();

        needsLoad = true;
        ArgSize = PtrByteSize;
      }
      if (CallConv != CallingConv::Fast || needsLoad)
        ArgOffset += 8;
      break;

    case MVT::f32:
    case MVT::f64:
      // These can be scalar arguments or elements of a float array type
      // passed directly.  The latter are used to implement ELFv2 homogeneous
      // float aggregates.
      if (FPR_idx != Num_FPR_Regs) {
        unsigned VReg;

        if (ObjectVT == MVT::f32)
          VReg = MF.addLiveIn(FPR[FPR_idx],
                              Subtarget.hasP8Vector()
                                  ? &PPC::VSSRCRegClass
                                  : &PPC::F4RCRegClass);
        else
          VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
                                                ? &PPC::VSFRCRegClass
                                                : &PPC::F8RCRegClass);

        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        ++FPR_idx;
      } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
        // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
        // once we support fp <-> gpr moves.

        // This can only ever happen in the presence of f32 array types,
        // since otherwise we never run out of FPRs before running out
        // of GPRs.
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
        FuncInfo->addLiveInAttr(VReg, Flags);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::f32) {
          if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
            ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
                                 DAG.getConstant(32, dl, MVT::i32));
          ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
        }

        ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
      } else {
        if (CallConv == CallingConv::Fast)
          ComputeArgOffset();

        needsLoad = true;
      }

      // When passing an array of floats, the array occupies consecutive
      // space in the argument area; only round up to the next doubleword
      // at the end of the array.  Otherwise, each float takes 8 bytes.
      if (CallConv != CallingConv::Fast || needsLoad) {
        ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
        ArgOffset += ArgSize;
        if (Flags.isInConsecutiveRegsLast())
          ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      }
      break;
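      // Note (illustrative): in the f32-array case above, two consecutive
      // floats of a homogeneous aggregate share one doubleword-sized GPR.
      // The conditional SRL selects whichever 32-bit half holds the current
      // element: the element at the lower address sits in the high half on
      // big-endian targets and in the low half on little-endian ones.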
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
    case MVT::v2f64:
    case MVT::v2i64:
    case MVT::v1i128:
    case MVT::f128:
      // These can be scalar arguments or elements of a vector array type
      // passed directly.  The latter are used to implement ELFv2 homogeneous
      // vector aggregates.
      if (VR_idx != Num_VR_Regs) {
        unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        ++VR_idx;
      } else {
        if (CallConv == CallingConv::Fast)
          ComputeArgOffset();
        needsLoad = true;
      }
      if (CallConv != CallingConv::Fast || needsLoad)
        ArgOffset += 16;
      break;
    }

    // We need to load the argument to a virtual register if we determined
    // above that we ran out of physical registers of the appropriate type.
    if (needsLoad) {
      if (ObjSize < ArgSize && !isLittleEndian)
        CurArgOffset += ArgSize - ObjSize;
      int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
    }

    InVals.push_back(ArgVal);
  }

  // Area that is at least reserved in the caller of this function.
  unsigned MinReservedArea;
  if (HasParameterArea)
    MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
  else
    MinReservedArea = LinkageSize;

  // Set the size that is at least reserved in the caller of this function.
  // Tail call optimized functions' reserved stack space needs to be aligned
  // so that taking the difference between two stack areas will result in an
  // aligned stack.
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
  FuncInfo->setMinReservedArea(MinReservedArea);

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of
  // llvm.va_start.  As the ELFv2 ABI spec puts it:
  //   C programs that are intended to be *portable* across different
  //   compilers and architectures must use the header file <stdarg.h> to
  //   deal with variable argument lists.
  if (isVarArg && MFI.hasVAStart()) {
    int Depth = ArgOffset;

    FuncInfo->setVarArgsFrameIndex(
        MFI.CreateFixedObject(PtrByteSize, Depth, true));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // If this function is vararg, store any remaining integer argument regs
    // to their spots on the stack so that they may be loaded by dereferencing
    // the result of va_next.
    for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
         GPR_idx < Num_GPR_Regs; ++GPR_idx) {
      unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address by the size of a pointer for the next
      // argument to store.
      SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }
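  // Illustrative example: for  long f(long a, ...)  the named argument 'a'
  // arrives in the first GPR, so the loop above starts at GPR_idx == 1 and
  // spills the remaining argument registers into their parameter save area
  // slots; va_arg then walks those slots in pointer-size steps.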

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}

/// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
/// adjusted to accommodate the arguments for the tail call.
static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
                                   unsigned ParamSize) {

  if (!isTailCall) return 0;

  PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
  unsigned CallerMinReservedArea = FI->getMinReservedArea();
  int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
  // Remember only if the new adjustment is bigger.
  if (SPDiff < FI->getTailCallSPDelta())
    FI->setTailCallSPDelta(SPDiff);

  return SPDiff;
}
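// Worked example (illustrative): if the caller reserved 112 bytes for its
// own incoming arguments but the tail-called function needs 176 bytes of
// parameter area, SPDiff is -64, i.e. the stack must be grown by 64 bytes
// before the tail call; a positive SPDiff needs no growth and is never
// recorded as the tail call SP delta.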

static bool isFunctionGlobalAddress(SDValue Callee);

static bool callsShareTOCBase(const Function *Caller, SDValue Callee,
                              const TargetMachine &TM) {
  // It does not make sense to call callsShareTOCBase() with a caller that
  // is PC Relative since PC Relative callers do not have a TOC.
#ifndef NDEBUG
  const PPCSubtarget *STICaller = &TM.getSubtarget<PPCSubtarget>(*Caller);
  assert(!STICaller->isUsingPCRelativeCalls() &&
         "PC Relative callers do not have a TOC and cannot share a TOC Base");
#endif

  // Callee is either a GlobalAddress or an ExternalSymbol.  ExternalSymbols
  // don't have enough information to determine if the caller and callee share
  // the same TOC base, so we have to pessimistically assume they don't for
  // correctness.
  GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
  if (!G)
    return false;

  const GlobalValue *GV = G->getGlobal();

  // If the callee is preemptible, then the static linker will use a plt-stub
  // which saves the toc to the stack, and needs a nop after the call
  // instruction to convert to a toc-restore.
  if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
    return false;

  // Functions with PC Relative enabled may clobber the TOC in the same DSO.
  // We may need a TOC restore in the situation where the caller requires a
  // valid TOC but the callee is PC Relative and does not.
  const Function *F = dyn_cast<Function>(GV);
  const GlobalAlias *Alias = dyn_cast<GlobalAlias>(GV);

  // If we have an Alias we can try to get the function from there.
  if (Alias) {
    const GlobalObject *GlobalObj = Alias->getBaseObject();
    F = dyn_cast<Function>(GlobalObj);
  }

  // If we still have no valid function pointer we do not have enough
  // information to determine if the callee uses PC Relative calls so we must
  // assume that it does.
  if (!F)
    return false;

  // If the callee uses PC Relative we cannot guarantee that the callee won't
  // clobber the TOC of the caller and so we must assume that the two
  // functions do not share a TOC base.
  const PPCSubtarget *STICallee = &TM.getSubtarget<PPCSubtarget>(*F);
  if (STICallee->isUsingPCRelativeCalls())
    return false;

  // If the GV is not a strong definition then we need to assume it can be
  // replaced by another function at link time.  The function that replaces
  // it may not share the same TOC as the caller since the callee may be
  // replaced by a PC Relative version of the same function.
  if (!GV->isStrongDefinitionForLinker())
    return false;

  // The medium and large code models are expected to provide a sufficiently
  // large TOC to provide all data addressing needs of a module with a
  // single TOC.
  if (CodeModel::Medium == TM.getCodeModel() ||
      CodeModel::Large == TM.getCodeModel())
    return true;

  // Any explicitly-specified sections and section prefixes must also match.
  // Also, if we're using -ffunction-sections, then each function is always in
  // a different section (the same is true for COMDAT functions).
  if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
      GV->getSection() != Caller->getSection())
    return false;
  if (const auto *F = dyn_cast<Function>(GV)) {
    if (F->getSectionPrefix() != Caller->getSectionPrefix())
      return false;
  }

  return true;
}
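// Illustrative summary of the checks above: with the small code model, a
// call from a() to b() is treated as sharing a TOC base only when b() is a
// strong, non-preemptible definition placed in the same section as a();
// building with -ffunction-sections, for instance, is already enough to
// make this return false.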

static bool
needStackSlotPassParameters(const PPCSubtarget &Subtarget,
                            const SmallVectorImpl<ISD::OutputArg> &Outs) {
  assert(Subtarget.is64BitELFABI());

  const unsigned PtrByteSize = 8;
  const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();

  static const MCPhysReg GPR[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned NumGPRs = array_lengthof(GPR);
  const unsigned NumFPRs = 13;
  const unsigned NumVRs = array_lengthof(VR);
  const unsigned ParamAreaSize = NumGPRs * PtrByteSize;

  unsigned NumBytes = LinkageSize;
  unsigned AvailableFPRs = NumFPRs;
  unsigned AvailableVRs = NumVRs;

  for (const ISD::OutputArg& Param : Outs) {
    if (Param.Flags.isNest()) continue;

    if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags, PtrByteSize,
                               LinkageSize, ParamAreaSize, NumBytes,
                               AvailableFPRs, AvailableVRs))
      return true;
  }
  return false;
}

static bool hasSameArgumentList(const Function *CallerFn, const CallBase &CB) {
  if (CB.arg_size() != CallerFn->arg_size())
    return false;

  auto CalleeArgIter = CB.arg_begin();
  auto CalleeArgEnd = CB.arg_end();
  Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();

  for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
    const Value* CalleeArg = *CalleeArgIter;
    const Value* CallerArg = &(*CallerArgIter);
    if (CalleeArg == CallerArg)
      continue;

    // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
    //        tail call @callee([4 x i64] undef, [4 x i64] %b)
    //      }
    // The 1st argument of the callee is undef and has the same type as the
    // caller's.
    if (CalleeArg->getType() == CallerArg->getType() &&
        isa<UndefValue>(CalleeArg))
      continue;

    return false;
  }

  return true;
}

// Returns true if TCO is possible between the caller's and callee's
// calling conventions.
static bool
areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
                                    CallingConv::ID CalleeCC) {
  // Tail calls are possible with fastcc and ccc.
  auto isTailCallableCC = [] (CallingConv::ID CC){
    return CC == CallingConv::C || CC == CallingConv::Fast;
  };
  if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
    return false;

  // We can safely tail call both fastcc and ccc callees from a c calling
  // convention caller.  If the caller is fastcc, we may have less stack space
  // than a non-fastcc caller with the same signature so disable tail-calls in
  // that case.
  return CallerCC == CallingConv::C || CallerCC == CalleeCC;
}

bool PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
    SDValue Callee, CallingConv::ID CalleeCC, const CallBase *CB, bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
  bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;

  if (DisableSCO && !TailCallOpt) return false;

  // Variadic argument functions are not supported.
  if (isVarArg) return false;

  auto &Caller = DAG.getMachineFunction().getFunction();
  // Check that the calling conventions are compatible for tco.
  if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
    return false;

  // Callers with byval parameters are not supported.
  if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
    return false;

  // Callees with byval parameters are not supported either.
  // Note: This is a quick work around, because in some cases, e.g.
  // caller's stack size > callee's stack size, we are still able to apply
  // sibling call optimization.  For example, gcc is able to do SCO for caller1
  // in the following example, but not for caller2.
  //   struct test {
  //     long int a;
  //     char ary[56];
  //   } gTest;
  //   __attribute__((noinline)) int callee(struct test v, struct test *b) {
  //     b->a = v.a;
  //     return 0;
  //   }
  //   void caller1(struct test a, struct test c, struct test *b) {
  //     callee(gTest, b); }
  //   void caller2(struct test *b) { callee(gTest, b); }
  if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
    return false;

  // If the callee and caller use different calling conventions, we cannot
  // pass parameters on the stack since offsets for the parameter area may
  // differ.
  if (Caller.getCallingConv() != CalleeCC &&
      needStackSlotPassParameters(Subtarget, Outs))
    return false;

  // All variants of 64-bit ELF ABIs without PC-Relative addressing require
  // that the caller and callee share the same TOC for TCO/SCO. If the caller
  // and callee potentially have different TOC bases then we cannot tail call
  // since we need to restore the TOC pointer after the call.
  // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
  // We cannot guarantee this for indirect calls or calls to external
  // functions.  When PC-Relative addressing is used, the concept of the TOC
  // is no longer applicable so this check is not required.
  // Check first for indirect calls.
  if (!Subtarget.isUsingPCRelativeCalls() &&
      !isFunctionGlobalAddress(Callee) && !isa<ExternalSymbolSDNode>(Callee))
    return false;

  // Check if we share the TOC base.
  if (!Subtarget.isUsingPCRelativeCalls() &&
      !callsShareTOCBase(&Caller, Callee, getTargetMachine()))
    return false;

  // TCO allows altering callee ABI, so we don't have to check further.
  if (CalleeCC == CallingConv::Fast && TailCallOpt)
    return true;

  if (DisableSCO) return false;

  // If the callee uses the same argument list as the caller, then we can
  // apply SCO in this case.  If it does not, then we need to check whether
  // the callee needs stack for passing arguments.
  // PC Relative tail calls may not have a CallBase.
  // If there is no CallBase we cannot verify if we have the same argument
  // list so assume that we don't have the same argument list.
  if (CB && !hasSameArgumentList(&Caller, *CB) &&
      needStackSlotPassParameters(Subtarget, Outs))
    return false;
  else if (!CB && needStackSlotPassParameters(Subtarget, Outs))
    return false;

  return true;
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
bool
PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
                                                     CallingConv::ID CalleeCC,
                                                     bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                                     SelectionDAG& DAG) const {
  if (!getTargetMachine().Options.GuaranteedTailCallOpt)
    return false;

  // Variable argument functions are not supported.
  if (isVarArg)
    return false;

  MachineFunction &MF = DAG.getMachineFunction();
  CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
  if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
    // Functions containing by val parameters are not supported.
    for (unsigned i = 0; i != Ins.size(); i++) {
      ISD::ArgFlagsTy Flags = Ins[i].Flags;
      if (Flags.isByVal()) return false;
    }

    // Non-PIC/GOT tail calls are supported.
    if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
      return true;

    // At the moment we can only do local tail calls (in same module, hidden
    // or protected) if we are generating PIC.
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
      return G->getGlobal()->hasHiddenVisibility()
          || G->getGlobal()->hasProtectedVisibility();
  }

  return false;
}

/// isBLACompatibleAddress - Return the immediate to use if the specified
/// 32-bit value is representable in the immediate field of a BxA instruction.
static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
  if (!C) return nullptr;

  int Addr = C->getZExtValue();
  if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
      SignExtend32<26>(Addr) != Addr)
    return nullptr;  // Top 6 bits have to be sext of immediate.

  return DAG
      .getConstant(
          (int)C->getZExtValue() >> 2, SDLoc(Op),
          DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
      .getNode();
}
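// Worked example (illustrative): an absolute target of 0x2000 has its low
// two bits clear and sign-extends from 26 bits, so it is encodable and the
// returned constant is 0x2000 >> 2 == 0x800.  A target of 0x2002
// (misaligned) or 0x4000000 (needs more than 26 bits) yields nullptr.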

namespace {

struct TailCallArgumentInfo {
  SDValue Arg;
  SDValue FrameIdxOp;
  int FrameIdx = 0;

  TailCallArgumentInfo() = default;
};

} // end anonymous namespace

/// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
static void StoreTailCallArgumentsToStackSlot(
    SelectionDAG &DAG, SDValue Chain,
    const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
    SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
  for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
    SDValue Arg = TailCallArgs[i].Arg;
    SDValue FIN = TailCallArgs[i].FrameIdxOp;
    int FI = TailCallArgs[i].FrameIdx;
    // Store relative to framepointer.
    MemOpChains.push_back(DAG.getStore(
        Chain, dl, Arg, FIN,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
  }
}

/// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
/// the appropriate stack slot for the tail call optimized function call.
static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
                                             SDValue OldRetAddr, SDValue OldFP,
                                             int SPDiff, const SDLoc &dl) {
  if (SPDiff) {
    // Calculate the new stack slot for the return address.
    MachineFunction &MF = DAG.getMachineFunction();
    const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
    const PPCFrameLowering *FL = Subtarget.getFrameLowering();
    bool isPPC64 = Subtarget.isPPC64();
    int SlotSize = isPPC64 ? 8 : 4;
    int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
    int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize,
                                                         NewRetAddrLoc, true);
    EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
    SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
    Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
                         MachinePointerInfo::getFixedStack(MF, NewRetAddr));
  }
  return Chain;
}

/// CalculateTailCallArgDest - Remember Argument for later processing.
/// Calculate the position of the argument.
static void
CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
                         SDValue Arg, int SPDiff, unsigned ArgOffset,
                     SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
  int Offset = ArgOffset + SPDiff;
  uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
  int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
  EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
  SDValue FIN = DAG.getFrameIndex(FI, VT);
  TailCallArgumentInfo Info;
  Info.Arg = Arg;
  Info.FrameIdxOp = FIN;
  Info.FrameIdx = FI;
  TailCallArguments.push_back(Info);
}

/// EmitTailCallLoadFPAndRetAddr - Emit a load from the frame pointer and
/// return address stack slot. Returns the chain as result and the loaded
/// values in LROpOut/FPOpOut. Used when tail calling.
SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
    SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
    SDValue &FPOpOut, const SDLoc &dl) const {
  if (SPDiff) {
    // Load the LR and FP stack slot for later adjusting.
    EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
    LROpOut = getReturnAddrFrameIndex(DAG);
    LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
    Chain = SDValue(LROpOut.getNode(), 1);
  }
  return Chain;
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
/// by "Src" to address "Dst" of size "Size".  Alignment information is
/// specified by the specific parameter attribute. The copy will be passed as
/// a byval function parameter.
/// Sometimes what we are copying is the end of a larger object, the part that
/// does not fit in registers.
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
                                         SDValue Chain, ISD::ArgFlagsTy Flags,
                                         SelectionDAG &DAG, const SDLoc &dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode,
                       Flags.getNonZeroByValAlign(), false, false, false,
                       MachinePointerInfo(), MachinePointerInfo());
}
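// Illustrative example: for  void f(struct S s)  where S is 24 bytes with
// 8-byte alignment, Flags.getByValSize() is 24 and getNonZeroByValAlign()
// is 8, so this emits a 24-byte, 8-byte-aligned memcpy of the caller's copy
// into the outgoing argument area.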

/// LowerMemOpCallTo - Store the argument to the stack or remember it in case
/// of tail calls.
static void LowerMemOpCallTo(
    SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
    SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
    bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
    SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  if (!isTailCall) {
    if (isVector) {
      SDValue StackPtr;
      if (isPPC64)
        StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
      else
        StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
      PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
                           DAG.getConstant(ArgOffset, dl, PtrVT));
    }
    MemOpChains.push_back(
        DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
    // Calculate and remember argument location.
  } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
                                  TailCallArguments);
}

static void
PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
                const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp,
                SDValue FPOp,
                SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
  // Emit a sequence of copyto/copyfrom virtual registers for arguments that
  // might overwrite each other in case of tail call optimization.
  SmallVector<SDValue, 8> MemOpChains2;
  // Do not flag preceding copytoreg stuff together with the following stuff.
  InFlag = SDValue();
  StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
                                    MemOpChains2, dl);
  if (!MemOpChains2.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);

  // Store the return address to the appropriate stack slot.
  Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl);

  // Emit callseq_end just before tailcall node.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                             DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
  InFlag = Chain.getValue(1);
}

// Is this global address that of a function that can be called by name? (as
// opposed to something that must hold a descriptor for an indirect call).
static bool isFunctionGlobalAddress(SDValue Callee) {
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
        Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
      return false;

    return G->getGlobal()->getValueType()->isFunctionTy();
  }

  return false;
}

SDValue PPCTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                    *DAG.getContext());

  CCRetInfo.AnalyzeCallResult(
      Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
               ? RetCC_PPC_Cold
               : RetCC_PPC);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Val;

    if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      if (!Subtarget.isLittleEndian())
        std::swap (Lo, Hi);
      Val = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, Lo, Hi);
    } else {
      Val = DAG.getCopyFromReg(Chain, dl,
                               VA.getLocReg(), VA.getLocVT(), InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::AExt:
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    case CCValAssign::ZExt:
      Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
                        DAG.getValueType(VA.getValVT()));
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    case CCValAssign::SExt:
      Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
                        DAG.getValueType(VA.getValVT()));
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}
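// Note on the SPE path above (illustrative): an f64 value returned on an
// SPE target arrives as two i32 halves in consecutive return locations;
// the halves are swapped on big-endian targets before PPCISD::BUILD_SPE64
// reassembles the 64-bit value.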

static bool isIndirectCall(const SDValue &Callee, SelectionDAG &DAG,
                           const PPCSubtarget &Subtarget, bool isPatchPoint) {
  // PatchPoint calls are not indirect.
  if (isPatchPoint)
    return false;

  if (isFunctionGlobalAddress(Callee) || dyn_cast<ExternalSymbolSDNode>(Callee))
    return false;

  // Darwin, and 32-bit ELF can use a BLA. The descriptor based ABIs cannot
  // because the immediate function pointer points to a descriptor instead of
  // a function entry point. The ELFv2 ABI cannot use a BLA because the
  // function pointer immediate points to the global entry point, while the
  // BLA would need to jump to the local entry point (see rL211174).
  if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI() &&
      isBLACompatibleAddress(Callee, DAG))
    return false;

  return true;
}

// AIX and 64-bit ELF ABIs w/o PCRel require a TOC save/restore around calls.
static inline bool isTOCSaveRestoreRequired(const PPCSubtarget &Subtarget) {
  return Subtarget.isAIXABI() ||
         (Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls());
}

static unsigned getCallOpcode(PPCTargetLowering::CallFlags CFlags,
                              const Function &Caller,
                              const SDValue &Callee,
                              const PPCSubtarget &Subtarget,
                              const TargetMachine &TM) {
  if (CFlags.IsTailCall)
    return PPCISD::TC_RETURN;

  // This is a call through a function pointer.
  if (CFlags.IsIndirect) {
    // AIX and the 64-bit ELF ABIs need to maintain the TOC pointer across
    // indirect calls. The save of the caller's TOC pointer to the stack will
    // be inserted into the DAG as part of call lowering. The restore of the
    // TOC pointer is modeled by using a pseudo instruction for the call
    // opcode that represents the 2 instruction sequence of an indirect branch
    // and link, immediately followed by a load of the TOC pointer from the
    // stack save slot into gpr2. For 64-bit ELFv2 ABI with PCRel, do not
    // restore the TOC as it is not saved or used.
    return isTOCSaveRestoreRequired(Subtarget) ? PPCISD::BCTRL_LOAD_TOC
                                               : PPCISD::BCTRL;
  }

  if (Subtarget.isUsingPCRelativeCalls()) {
    assert(Subtarget.is64BitELFABI() && "PC Relative is only on ELF ABI.");
    return PPCISD::CALL_NOTOC;
  }

  // The ABIs that maintain a TOC pointer across calls need to have a nop
  // immediately following the call instruction if the caller and callee may
  // have different TOC bases. At link time if the linker determines the calls
  // may not share a TOC base, the call is redirected to a trampoline inserted
  // by the linker. The trampoline will (among other things) save the caller's
  // TOC pointer at an ABI designated offset in the linkage area and the
  // linker will rewrite the nop to be a load of the TOC pointer from the
  // linkage area into gpr2.
  if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI())
    return callsShareTOCBase(&Caller, Callee, TM) ? PPCISD::CALL
                                                  : PPCISD::CALL_NOP;

  return PPCISD::CALL;
}
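// Illustrative lowering of the CALL_NOP case above (assuming 64-bit ELFv2,
// where the TOC save doubleword sits at r1+24):
//   bl callee        # may be redirected to a linker-inserted stub
//   nop              # rewritten by the linker to "ld r2, 24(r1)" when a
//                    # TOC restore is needed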

static SDValue transformCallee(const SDValue &Callee, SelectionDAG &DAG,
                               const SDLoc &dl, const PPCSubtarget &Subtarget) {
  if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI())
    if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
      return SDValue(Dest, 0);

  // Returns true if the callee is local, and false otherwise.
  auto isLocalCallee = [&]() {
    const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
    const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
    const GlobalValue *GV = G ? G->getGlobal() : nullptr;

    return DAG.getTarget().shouldAssumeDSOLocal(*Mod, GV) &&
           !dyn_cast_or_null<GlobalIFunc>(GV);
  };

  // The PLT is only used in 32-bit ELF PIC mode.  Attempting to use the PLT in
  // a static relocation model causes some versions of GNU LD (2.17.50, at
  // least) to force BSS-PLT, instead of secure-PLT, even if all objects are
  // built with secure-PLT.
  bool UsePlt =
      Subtarget.is32BitELFABI() && !isLocalCallee() &&
      Subtarget.getTargetMachine().getRelocationModel() == Reloc::PIC_;

  const auto getAIXFuncEntryPointSymbolSDNode = [&](const GlobalValue *GV) {
    const TargetMachine &TM = Subtarget.getTargetMachine();
    const TargetLoweringObjectFile *TLOF = TM.getObjFileLowering();
    MCSymbolXCOFF *S =
        cast<MCSymbolXCOFF>(TLOF->getFunctionEntryPointSymbol(GV, TM));

    MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
    return DAG.getMCSymbol(S, PtrVT);
  };

  if (isFunctionGlobalAddress(Callee)) {
    const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();

    if (Subtarget.isAIXABI()) {
      assert(!isa<GlobalIFunc>(GV) && "IFunc is not supported on AIX.");
      return getAIXFuncEntryPointSymbolSDNode(GV);
    }
    return DAG.getTargetGlobalAddress(GV, dl, Callee.getValueType(), 0,
                                      UsePlt ? PPCII::MO_PLT : 0);
  }

  if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const char *SymName = S->getSymbol();
    if (Subtarget.isAIXABI()) {
      // If there exists a user-declared function whose name is the same as the
      // ExternalSymbol's, then we pick up the user-declared version.
      const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
      if (const Function *F =
              dyn_cast_or_null<Function>(Mod->getNamedValue(SymName)))
        return getAIXFuncEntryPointSymbolSDNode(F);

      // On AIX, direct function calls reference the symbol for the function's
      // entry point, which is named by prepending a "." before the function's
      // C-linkage name. A Qualname is returned here because an external
      // function entry point is a csect with XTY_ER property.
      const auto getExternalFunctionEntryPointSymbol = [&](StringRef SymName) {
        auto &Context = DAG.getMachineFunction().getMMI().getContext();
        MCSectionXCOFF *Sec = Context.getXCOFFSection(
            (Twine(".") + Twine(SymName)).str(), XCOFF::XMC_PR, XCOFF::XTY_ER,
            SectionKind::getMetadata());
        return Sec->getQualNameSymbol();
      };

      SymName = getExternalFunctionEntryPointSymbol(SymName)->getName().data();
    }
    return DAG.getTargetExternalSymbol(SymName, Callee.getValueType(),
                                       UsePlt ? PPCII::MO_PLT : 0);
  }

  // No transformation needed.
  assert(Callee.getNode() && "What no callee?");
  return Callee;
}

static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart) {
  assert(CallSeqStart.getOpcode() == ISD::CALLSEQ_START &&
         "Expected a CALLSEQ_STARTSDNode.");

  // The last operand is the chain, except when the node has glue. If the node
  // has glue, then the last operand is the glue, and the chain is the second
  // last operand.
  SDValue LastValue = CallSeqStart.getValue(CallSeqStart->getNumValues() - 1);
  if (LastValue.getValueType() != MVT::Glue)
    return LastValue;

  return CallSeqStart.getValue(CallSeqStart->getNumValues() - 2);
}

// Creates the node that moves a function's address into the count register
// to prepare for an indirect call instruction.
static void prepareIndirectCall(SelectionDAG &DAG, SDValue &Callee,
                                SDValue &Glue, SDValue &Chain,
                                const SDLoc &dl) {
  SDValue MTCTROps[] = {Chain, Callee, Glue};
  EVT ReturnTypes[] = {MVT::Other, MVT::Glue};
  Chain = DAG.getNode(PPCISD::MTCTR, dl, makeArrayRef(ReturnTypes, 2),
                      makeArrayRef(MTCTROps, Glue.getNode() ? 3 : 2));
  // The glue is the second value produced.
  Glue = Chain.getValue(1);
}

static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee,
                                          SDValue &Glue, SDValue &Chain,
                                          SDValue CallSeqStart,
                                          const CallBase *CB, const SDLoc &dl,
                                          bool hasNest,
                                          const PPCSubtarget &Subtarget) {
  // Function pointers in the 64-bit SVR4 ABI do not point to the function
  // entry point, but to the function descriptor (the function entry point
  // address is part of the function descriptor though).
  // The function descriptor is a three doubleword structure with the
  // following fields: function entry point, TOC base address and
  // environment pointer.
  // Thus for a call through a function pointer, the following actions need
  // to be performed:
  //   1. Save the TOC of the caller in the TOC save area of its stack
  //      frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
  //   2. Load the address of the function entry point from the function
  //      descriptor.
  //   3. Load the TOC of the callee from the function descriptor into r2.
  //   4. Load the environment pointer from the function descriptor into
  //      r11.
  //   5. Branch to the function entry point address.
  //   6. On return of the callee, the TOC of the caller needs to be
  //      restored (this is done in FinishCall()).
  //
  // The loads are scheduled at the beginning of the call sequence, and the
  // register copies are flagged together to ensure that no other
  // operations can be scheduled in between. E.g. without flagging the
  // copies together, a TOC access in the caller could be scheduled between
  // the assignment of the callee TOC and the branch to the callee, which
  // leads to incorrect code.

  // Start by loading the function address from the descriptor.
  SDValue LDChain = getOutputChainFromCallSeq(CallSeqStart);
  auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
                      ? (MachineMemOperand::MODereferenceable |
                         MachineMemOperand::MOInvariant)
                      : MachineMemOperand::MONone;

  MachinePointerInfo MPI(CB ? CB->getCalledOperand() : nullptr);

  // Registers used in building the DAG.
  const MCRegister EnvPtrReg = Subtarget.getEnvironmentPointerRegister();
  const MCRegister TOCReg = Subtarget.getTOCPointerRegister();

  // Offsets of descriptor members.
  const unsigned TOCAnchorOffset = Subtarget.descriptorTOCAnchorOffset();
  const unsigned EnvPtrOffset = Subtarget.descriptorEnvironmentPointerOffset();

  const MVT RegVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
  const unsigned Alignment = Subtarget.isPPC64() ? 8 : 4;

  // One load for the function's entry point address.
  SDValue LoadFuncPtr = DAG.getLoad(RegVT, dl, LDChain, Callee, MPI,
                                    Alignment, MMOFlags);
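  // Concrete layout (illustrative, 64-bit descriptor ABIs): the descriptor
  // is three doublewords, so TOCAnchorOffset is 8 and EnvPtrOffset is 16;
  // the loads below therefore read Callee+8 and Callee+16 to fetch the
  // callee's TOC anchor and environment pointer.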

  // One for loading the TOC anchor for the module that contains the called
  // function.
  SDValue TOCOff = DAG.getIntPtrConstant(TOCAnchorOffset, dl);
  SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, Callee, TOCOff);
  SDValue TOCPtr =
      DAG.getLoad(RegVT, dl, LDChain, AddTOC,
                  MPI.getWithOffset(TOCAnchorOffset), Alignment, MMOFlags);

  // One for loading the environment pointer.
  SDValue PtrOff = DAG.getIntPtrConstant(EnvPtrOffset, dl);
  SDValue AddPtr = DAG.getNode(ISD::ADD, dl, RegVT, Callee, PtrOff);
  SDValue LoadEnvPtr =
      DAG.getLoad(RegVT, dl, LDChain, AddPtr,
                  MPI.getWithOffset(EnvPtrOffset), Alignment, MMOFlags);

  // Then copy the newly loaded TOC anchor to the TOC pointer.
  SDValue TOCVal = DAG.getCopyToReg(Chain, dl, TOCReg, TOCPtr, Glue);
  Chain = TOCVal.getValue(0);
  Glue = TOCVal.getValue(1);

  // If the function call has an explicit 'nest' parameter, it takes the
  // place of the environment pointer.
  assert((!hasNest || !Subtarget.isAIXABI()) &&
         "Nest parameter is not supported on AIX.");
  if (!hasNest) {
    SDValue EnvVal = DAG.getCopyToReg(Chain, dl, EnvPtrReg, LoadEnvPtr, Glue);
    Chain = EnvVal.getValue(0);
    Glue = EnvVal.getValue(1);
  }

  // The rest of the indirect call sequence is the same as the non-descriptor
  // DAG.
  prepareIndirectCall(DAG, LoadFuncPtr, Glue, Chain, dl);
}

static void
buildCallOperands(SmallVectorImpl<SDValue> &Ops,
                  PPCTargetLowering::CallFlags CFlags, const SDLoc &dl,
                  SelectionDAG &DAG,
                  SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
                  SDValue Glue, SDValue Chain, SDValue &Callee, int SPDiff,
                  const PPCSubtarget &Subtarget) {
  const bool IsPPC64 = Subtarget.isPPC64();
  // MVT for a general purpose register.
  const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;

  // First operand is always the chain.
  Ops.push_back(Chain);

  // If it's a direct call pass the callee as the second operand.
  if (!CFlags.IsIndirect)
    Ops.push_back(Callee);
  else {
    assert(!CFlags.IsPatchPoint && "Patch point calls are not indirect.");

    // For the TOC based ABIs, we have saved the TOC pointer to the linkage
    // area on the stack (this would have been done in `LowerCall_64SVR4` or
    // `LowerCall_AIX`). The call instruction is a pseudo instruction that
    // represents both the indirect branch and a load that restores the TOC
    // pointer from the linkage area. The operand for the TOC restore is an
    // add of the TOC save offset to the stack pointer. This must be the
    // second operand: after the chain input but before any other variadic
    // arguments. For 64-bit ELFv2 ABI with PCRel, do not restore the TOC as
    // it is not saved or used.
    if (isTOCSaveRestoreRequired(Subtarget)) {
      const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();

      SDValue StackPtr = DAG.getRegister(StackPtrReg, RegVT);
      unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
      SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
      SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, StackPtr, TOCOff);
      Ops.push_back(AddTOC);
    }

    // Add the register used for the environment pointer.
    if (Subtarget.usesFunctionDescriptors() && !CFlags.HasNest)
      Ops.push_back(DAG.getRegister(Subtarget.getEnvironmentPointerRegister(),
                                    RegVT));

    // Add CTR register as callee so a bctr can be emitted later.
    if (CFlags.IsTailCall)
      Ops.push_back(DAG.getRegister(IsPPC64 ? PPC::CTR8 : PPC::CTR, RegVT));
  }

  // If this is a tail call add stack pointer delta.
  if (CFlags.IsTailCall)
    Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32));

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // We cannot add R2/X2 as an operand here for PATCHPOINT, because there is
  // no way to mark dependencies as implicit here.
  // We will add the R2/X2 dependency in EmitInstrWithCustomInserter.
  if ((Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) &&
      !CFlags.IsPatchPoint && !Subtarget.isUsingPCRelativeCalls())
    Ops.push_back(DAG.getRegister(Subtarget.getTOCPointerRegister(), RegVT));

  // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls.
  if (CFlags.IsVarArg && Subtarget.is32BitELFABI())
    Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask =
      TRI->getCallPreservedMask(DAG.getMachineFunction(), CFlags.CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // If the glue is valid, it is the last operand.
  if (Glue.getNode())
    Ops.push_back(Glue);
}
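// Illustrative operand order produced above for a 64-bit ELFv1 indirect,
// non-tail call:
//   { chain, r1+TOCSaveOffset, r11, <arg regs...>, X2, regmask, glue }
// and for a direct tail call:
//   { chain, callee, SPDiff, <arg regs...>, X2, regmask, glue }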

SDValue PPCTargetLowering::FinishCall(
    CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG,
    SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue Glue,
    SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
    unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
    SmallVectorImpl<SDValue> &InVals, const CallBase *CB) const {

  if ((Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls()) ||
      Subtarget.isAIXABI())
    setUsesTOCBasePtr(DAG);

  unsigned CallOpc =
      getCallOpcode(CFlags, DAG.getMachineFunction().getFunction(), Callee,
                    Subtarget, DAG.getTarget());

  if (!CFlags.IsIndirect)
    Callee = transformCallee(Callee, DAG, dl, Subtarget);
  else if (Subtarget.usesFunctionDescriptors())
    prepareDescriptorIndirectCall(DAG, Callee, Glue, Chain, CallSeqStart, CB,
                                  dl, CFlags.HasNest, Subtarget);
  else
    prepareIndirectCall(DAG, Callee, Glue, Chain, dl);

  // Build the operand list for the call instruction.
  SmallVector<SDValue, 8> Ops;
  buildCallOperands(Ops, CFlags, dl, DAG, RegsToPass, Glue, Chain, Callee,
                    SPDiff, Subtarget);

  // Emit tail call.
  if (CFlags.IsTailCall) {
    // Indirect tail calls when using PC Relative calls do not have the same
    // constraints.
    assert(((Callee.getOpcode() == ISD::Register &&
             cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
            Callee.getOpcode() == ISD::TargetExternalSymbol ||
            Callee.getOpcode() == ISD::TargetGlobalAddress ||
            isa<ConstantSDNode>(Callee) ||
            (CFlags.IsIndirect && Subtarget.isUsingPCRelativeCalls())) &&
           "Expecting a global address, external symbol, absolute value, "
           "register or an indirect tail call when PC Relative calls are "
           "used.");
    // PC Relative calls also use TC_RETURN as the way to mark tail calls.
    assert(CallOpc == PPCISD::TC_RETURN &&
           "Unexpected call opcode for a tail call.");
    DAG.getMachineFunction().getFrameInfo().setHasTailCall();
    return DAG.getNode(CallOpc, dl, MVT::Other, Ops);
  }

  std::array<EVT, 2> ReturnTypes = {{MVT::Other, MVT::Glue}};
  Chain = DAG.getNode(CallOpc, dl, ReturnTypes, Ops);
  DAG.addNoMergeSiteInfo(Chain.getNode(), CFlags.NoMerge);
  Glue = Chain.getValue(1);

  // When performing tail call optimization the callee pops its arguments off
  // the stack. Account for this here so these bytes can be pushed back on in
  // PPCFrameLowering::eliminateCallFramePseudoInstr.
  int BytesCalleePops = (CFlags.CallConv == CallingConv::Fast &&
                         getTargetMachine().Options.GuaranteedTailCallOpt)
                            ? NumBytes
                            : 0;

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                             DAG.getIntPtrConstant(BytesCalleePops, dl, true),
                             Glue, dl);
  Glue = Chain.getValue(1);

  return LowerCallResult(Chain, Glue, CFlags.CallConv, CFlags.IsVarArg, Ins, dl,
                         DAG, InVals);
}

SDValue
PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;
  bool isPatchPoint = CLI.IsPatchPoint;
  const CallBase *CB = CLI.CB;

  if (isTailCall) {
    if (Subtarget.useLongCalls() && !(CB && CB->isMustTailCall()))
      isTailCall = false;
    else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
      isTailCall = IsEligibleForTailCallOptimization_64SVR4(
          Callee, CallConv, CB, isVarArg, Outs, Ins, DAG);
    else
      isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
                                                     Ins, DAG);
    if (isTailCall) {
      ++NumTailCalls;
      if (!getTargetMachine().Options.GuaranteedTailCallOpt)
        ++NumSiblingCalls;

      // PC Relative calls no longer guarantee that the callee is a Global
      // Address Node. The callee could be an indirect tail call in which
      // case the SDValue for the callee could be a load (to load the address
      // of a function pointer) or it may be a register copy (to move the
      // address of the callee from a function parameter into a virtual
      // register). It may also be an ExternalSymbolSDNode (e.g. memcpy).
      assert((Subtarget.isUsingPCRelativeCalls() ||
              isa<GlobalAddressSDNode>(Callee)) &&
             "Callee should be an llvm::Function object.");

      LLVM_DEBUG(dbgs() << "TCO caller: " << DAG.getMachineFunction().getName()
                        << "\nTCO callee: ");
      LLVM_DEBUG(Callee.dump());
    }
  }

  if (!isTailCall && CB && CB->isMustTailCall())
    report_fatal_error("failed to perform tail call elimination on a call "
                       "site marked musttail");

  // When long calls (i.e. indirect calls) are always used, calls are always
  // made via function pointer. If we have a function name, first translate it
  // into a pointer.
  if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
      !isTailCall)
    Callee = LowerGlobalAddress(Callee, DAG);

  CallFlags CFlags(
      CallConv, isTailCall, isVarArg, isPatchPoint,
      isIndirectCall(Callee, DAG, Subtarget, isPatchPoint),
      // hasNest
      Subtarget.is64BitELFABI() &&
          any_of(Outs, [](ISD::OutputArg Arg) { return Arg.Flags.isNest(); }),
      CLI.NoMerge);

  if (Subtarget.isAIXABI())
    return LowerCall_AIX(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
                         InVals, CB);

  assert(Subtarget.isSVR4ABI());
  if (Subtarget.isPPC64())
    return LowerCall_64SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
                            InVals, CB);
  return LowerCall_32SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
                          InVals, CB);
}

SDValue PPCTargetLowering::LowerCall_32SVR4(
    SDValue Chain, SDValue Callee, CallFlags CFlags,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    const CallBase *CB) const {
  // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
  // of the 32-bit SVR4 ABI stack frame layout.

  const CallingConv::ID CallConv = CFlags.CallConv;
  const bool IsVarArg = CFlags.IsVarArg;
  const bool IsTailCall = CFlags.IsTailCall;

  assert((CallConv == CallingConv::C ||
          CallConv == CallingConv::Cold ||
          CallConv == CallingConv::Fast) && "Unknown calling convention!");

  const Align PtrAlign(4);

  MachineFunction &MF = DAG.getMachineFunction();

  // Mark this function as potentially containing a function that contains a
  // tail call. As a consequence the frame pointer will be used for dynamic
  // allocations and for restoring the caller's stack pointer in this
  // function's epilog. This is done because by tail calling the called
  // function might overwrite the value in this function's (MF) stack pointer
  // stack slot 0(SP).
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area, parameter list area and the part of the local variable space which
  // contains copies of aggregates which are passed by value.

  // Assign locations to all of the outgoing arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  PPCCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  // Reserve space for the linkage area on the stack.
  CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
                       PtrAlign);
  if (useSoftFloat())
    CCInfo.PreAnalyzeCallOperands(Outs);

  if (IsVarArg) {
    // Handle fixed and variable vector arguments differently.
    // Fixed vector arguments go into registers as long as registers are
    // available. Variable vector arguments always go into memory.
    unsigned NumArgs = Outs.size();

    for (unsigned i = 0; i != NumArgs; ++i) {
      MVT ArgVT = Outs[i].VT;
      ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
      bool Result;

      if (Outs[i].IsFixed) {
        Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
                               CCInfo);
      } else {
        Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
                                      ArgFlags, CCInfo);
      }

      if (Result) {
#ifndef NDEBUG
        errs() << "Call operand #" << i << " has unhandled type "
               << EVT(ArgVT).getEVTString() << "\n";
#endif
        llvm_unreachable(nullptr);
      }
    }
  } else {
    // All arguments are treated the same.
    CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
  }
  CCInfo.clearWasPPCF128();
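  // Illustrative example: for a variadic callee such as
  //   void f(vector int fixed, ...);
  // the named vector argument goes through CC_PPC32_SVR4 and may land in a
  // vector register, while a vector passed in the "..." part goes through
  // CC_PPC32_SVR4_VarArg and is always assigned a memory slot.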
5549 assert((j < ByValArgLocs.size()) && "Index out of bounds!"); 5550 CCValAssign &ByValVA = ByValArgLocs[j++]; 5551 assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!"); 5552 5553 // Memory reserved in the local variable space of the callers stack frame. 5554 unsigned LocMemOffset = ByValVA.getLocMemOffset(); 5555 5556 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 5557 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()), 5558 StackPtr, PtrOff); 5559 5560 // Create a copy of the argument in the local area of the current 5561 // stack frame. 5562 SDValue MemcpyCall = 5563 CreateCopyOfByValArgument(Arg, PtrOff, 5564 CallSeqStart.getNode()->getOperand(0), 5565 Flags, DAG, dl); 5566 5567 // This must go outside the CALLSEQ_START..END. 5568 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0, 5569 SDLoc(MemcpyCall)); 5570 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 5571 NewCallSeqStart.getNode()); 5572 Chain = CallSeqStart = NewCallSeqStart; 5573 5574 // Pass the address of the aggregate copy on the stack either in a 5575 // physical register or in the parameter list area of the current stack 5576 // frame to the callee. 5577 Arg = PtrOff; 5578 } 5579 5580 // When useCRBits() is true, there can be i1 arguments. 5581 // It is because getRegisterType(MVT::i1) => MVT::i1, 5582 // and for other integer types getRegisterType() => MVT::i32. 5583 // Extend i1 and ensure callee will get i32. 5584 if (Arg.getValueType() == MVT::i1) 5585 Arg = DAG.getNode(Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, 5586 dl, MVT::i32, Arg); 5587 5588 if (VA.isRegLoc()) { 5589 seenFloatArg |= VA.getLocVT().isFloatingPoint(); 5590 // Put argument in a physical register. 5591 if (Subtarget.hasSPE() && Arg.getValueType() == MVT::f64) { 5592 bool IsLE = Subtarget.isLittleEndian(); 5593 SDValue SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg, 5594 DAG.getIntPtrConstant(IsLE ? 0 : 1, dl)); 5595 RegsToPass.push_back(std::make_pair(VA.getLocReg(), SVal.getValue(0))); 5596 SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg, 5597 DAG.getIntPtrConstant(IsLE ? 1 : 0, dl)); 5598 RegsToPass.push_back(std::make_pair(ArgLocs[++i].getLocReg(), 5599 SVal.getValue(0))); 5600 } else 5601 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 5602 } else { 5603 // Put argument in the parameter list area of the current stack frame. 5604 assert(VA.isMemLoc()); 5605 unsigned LocMemOffset = VA.getLocMemOffset(); 5606 5607 if (!IsTailCall) { 5608 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 5609 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()), 5610 StackPtr, PtrOff); 5611 5612 MemOpChains.push_back( 5613 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo())); 5614 } else { 5615 // Calculate and remember argument location. 5616 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset, 5617 TailCallArguments); 5618 } 5619 } 5620 } 5621 5622 if (!MemOpChains.empty()) 5623 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 5624 5625 // Build a sequence of copy-to-reg nodes chained together with token chain 5626 // and flag operands which copy the outgoing args into the appropriate regs. 
5627   SDValue InFlag;
5628   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
5629     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
5630                              RegsToPass[i].second, InFlag);
5631     InFlag = Chain.getValue(1);
5632   }
5633
5634   // Set CR bit 6 to true if this is a vararg call with floating args passed in
5635   // registers.
5636   if (IsVarArg) {
5637     SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
5638     SDValue Ops[] = { Chain, InFlag };
5639
5640     Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
5641                         dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));
5642
5643     InFlag = Chain.getValue(1);
5644   }
5645
5646   if (IsTailCall)
5647     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
5648                     TailCallArguments);
5649
5650   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
5651                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
5652 }
5653
5654 // Copy an argument into memory, being careful to do this outside the
5655 // call sequence for the call to which the argument belongs.
5656 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
5657     SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
5658     SelectionDAG &DAG, const SDLoc &dl) const {
5659   SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
5660                         CallSeqStart.getNode()->getOperand(0),
5661                         Flags, DAG, dl);
5662   // The MEMCPY must go outside the CALLSEQ_START..END.
5663   int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
5664   SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
5665                                                  SDLoc(MemcpyCall));
5666   DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5667                          NewCallSeqStart.getNode());
5668   return NewCallSeqStart;
5669 }
5670
5671 SDValue PPCTargetLowering::LowerCall_64SVR4(
5672     SDValue Chain, SDValue Callee, CallFlags CFlags,
5673     const SmallVectorImpl<ISD::OutputArg> &Outs,
5674     const SmallVectorImpl<SDValue> &OutVals,
5675     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5676     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5677     const CallBase *CB) const {
5678   bool isELFv2ABI = Subtarget.isELFv2ABI();
5679   bool isLittleEndian = Subtarget.isLittleEndian();
5680   unsigned NumOps = Outs.size();
5681   bool IsSibCall = false;
5682   bool IsFastCall = CFlags.CallConv == CallingConv::Fast;
5683
5684   EVT PtrVT = getPointerTy(DAG.getDataLayout());
5685   unsigned PtrByteSize = 8;
5686
5687   MachineFunction &MF = DAG.getMachineFunction();
5688
5689   if (CFlags.IsTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
5690     IsSibCall = true;
5691
5692   // Mark this function as potentially containing a tail call. As a
5693   // consequence, the frame pointer will be used for dynamic allocation and
5694   // for restoring the caller's stack pointer in this function's epilog. This
5695   // is done because a tail-called function might overwrite the value in this
5696   // function's (MF) stack pointer stack slot 0(SP).
5697   if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
5698     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5699
5700   assert(!(IsFastCall && CFlags.IsVarArg) &&
5701          "fastcc not supported on varargs functions");
5702
5703   // Count how many bytes are to be pushed on the stack, including the linkage
5704   // area, and parameter passing area. On ELFv1, the linkage area is 48 bytes
5705   // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
5706   // area is 32 bytes reserved space for [SP][CR][LR][TOC].
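  // For illustration, the ELFv1 linkage area occupies (offsets from SP):
  //   0(SP) back chain, 8(SP) CR save, 16(SP) LR save,
  //   24(SP) and 32(SP) reserved, 40(SP) TOC save   -- 48 bytes in total,
  // while ELFv2 drops the two reserved doublewords:
  //   0(SP) back chain, 8(SP) CR save, 16(SP) LR save, 24(SP) TOC save.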
5707   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
5708   unsigned NumBytes = LinkageSize;
5709   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
5710
5711   static const MCPhysReg GPR[] = {
5712     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
5713     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
5714   };
5715   static const MCPhysReg VR[] = {
5716     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
5717     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
5718   };
5719
5720   const unsigned NumGPRs = array_lengthof(GPR);
5721   const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
5722   const unsigned NumVRs = array_lengthof(VR);
5723
5724   // On ELFv2, we can avoid allocating the parameter area if all the arguments
5725   // can be passed to the callee in registers.
5726   // For the fast calling convention, there is another check below.
5727   // Note: We should keep this consistent with LowerFormalArguments_64SVR4().
5728   bool HasParameterArea = !isELFv2ABI || CFlags.IsVarArg || IsFastCall;
5729   if (!HasParameterArea) {
5730     unsigned ParamAreaSize = NumGPRs * PtrByteSize;
5731     unsigned AvailableFPRs = NumFPRs;
5732     unsigned AvailableVRs = NumVRs;
5733     unsigned NumBytesTmp = NumBytes;
5734     for (unsigned i = 0; i != NumOps; ++i) {
5735       if (Outs[i].Flags.isNest()) continue;
5736       if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags,
5737                                  PtrByteSize, LinkageSize, ParamAreaSize,
5738                                  NumBytesTmp, AvailableFPRs, AvailableVRs))
5739         HasParameterArea = true;
5740     }
5741   }
5742
5743   // When using the fast calling convention, we don't provide backing for
5744   // arguments that will be in registers.
5745   unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
5746
5747   // Avoid allocating parameter area for fastcc functions if all the arguments
5748   // can be passed in registers.
5749   if (IsFastCall)
5750     HasParameterArea = false;
5751
5752   // Add up all the space actually used.
5753   for (unsigned i = 0; i != NumOps; ++i) {
5754     ISD::ArgFlagsTy Flags = Outs[i].Flags;
5755     EVT ArgVT = Outs[i].VT;
5756     EVT OrigVT = Outs[i].ArgVT;
5757
5758     if (Flags.isNest())
5759       continue;
5760
5761     if (IsFastCall) {
5762       if (Flags.isByVal()) {
5763         NumGPRsUsed += (Flags.getByValSize()+7)/8;
5764         if (NumGPRsUsed > NumGPRs)
5765           HasParameterArea = true;
5766       } else {
5767         switch (ArgVT.getSimpleVT().SimpleTy) {
5768         default: llvm_unreachable("Unexpected ValueType for argument!");
5769         case MVT::i1:
5770         case MVT::i32:
5771         case MVT::i64:
5772           if (++NumGPRsUsed <= NumGPRs)
5773             continue;
5774           break;
5775         case MVT::v4i32:
5776         case MVT::v8i16:
5777         case MVT::v16i8:
5778         case MVT::v2f64:
5779         case MVT::v2i64:
5780         case MVT::v1i128:
5781         case MVT::f128:
5782           if (++NumVRsUsed <= NumVRs)
5783             continue;
5784           break;
5785         case MVT::v4f32:
5786           if (++NumVRsUsed <= NumVRs)
5787             continue;
5788           break;
5789         case MVT::f32:
5790         case MVT::f64:
5791           if (++NumFPRsUsed <= NumFPRs)
5792             continue;
5793           break;
5794         }
5795         HasParameterArea = true;
5796       }
5797     }
5798
5799     /* Respect alignment of argument on the stack. */
5800     auto Alignment =
5801         CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
5802     NumBytes = alignTo(NumBytes, Alignment);
5803
5804     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
5805     if (Flags.isInConsecutiveRegsLast())
5806       NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
5807   }
5808
5809   unsigned NumBytesActuallyUsed = NumBytes;
5810
5811   // In the old ELFv1 ABI,
5812   // the prolog code of the callee may store up to 8 GPR argument registers to
5813   // the stack, allowing va_start to index over them in memory if the callee is variadic.
5814   // Because we cannot tell if this is needed on the caller side, we have to
5815   // conservatively assume that it is needed. As such, make sure we have at
5816   // least enough stack space for the caller to store the 8 GPRs.
5817   // In the ELFv2 ABI, we allocate the parameter area iff a callee
5818   // really requires memory operands, e.g. a vararg function.
5819   if (HasParameterArea)
5820     NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
5821   else
5822     NumBytes = LinkageSize;
5823
5824   // Tail call needs the stack to be aligned.
5825   if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
5826     NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
5827
5828   int SPDiff = 0;
5829
5830   // Calculate by how many bytes the stack has to be adjusted in case of tail
5831   // call optimization.
5832   if (!IsSibCall)
5833     SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);
5834
5835   // To protect arguments on the stack from being clobbered in a tail call,
5836   // force all the loads to happen before doing any other lowering.
5837   if (CFlags.IsTailCall)
5838     Chain = DAG.getStackArgumentTokenFactor(Chain);
5839
5840   // Adjust the stack pointer for the new arguments...
5841   // These operations are automatically eliminated by the prolog/epilog pass.
5842   if (!IsSibCall)
5843     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5844   SDValue CallSeqStart = Chain;
5845
5846   // Load the return address and frame pointer so they can be moved somewhere
5847   // else later.
5848   SDValue LROp, FPOp;
5849   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5850
5851   // Set up a copy of the stack pointer for use loading and storing any
5852   // arguments that may not fit in the registers available for argument
5853   // passing.
5854   SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
5855
5856   // Figure out which arguments are going to go in registers, and which in
5857   // memory. Also, if this is a vararg function, floating point operations
5858   // must be stored to our stack, and loaded into integer regs as well, if
5859   // any integer regs are available for argument passing.
5860   unsigned ArgOffset = LinkageSize;
5861
5862   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5863   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5864
5865   SmallVector<SDValue, 8> MemOpChains;
5866   for (unsigned i = 0; i != NumOps; ++i) {
5867     SDValue Arg = OutVals[i];
5868     ISD::ArgFlagsTy Flags = Outs[i].Flags;
5869     EVT ArgVT = Outs[i].VT;
5870     EVT OrigVT = Outs[i].ArgVT;
5871
5872     // PtrOff will be used to store the current argument to the stack if a
5873     // register cannot be found for it.
5874     SDValue PtrOff;
5875
5876     // We re-align the argument offset for each argument, except when using the
5877     // fast calling convention, when we need to make sure we do that only when
5878     // we'll actually use a stack slot.
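    // For example (illustrative): if ArgOffset is 56 after an i64 argument,
    // a following 16-byte vector argument is placed at alignTo(56, 16) == 64,
    // and bytes 56..63 become padding.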
5879 auto ComputePtrOff = [&]() { 5880 /* Respect alignment of argument on the stack. */ 5881 auto Alignment = 5882 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 5883 ArgOffset = alignTo(ArgOffset, Alignment); 5884 5885 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType()); 5886 5887 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 5888 }; 5889 5890 if (!IsFastCall) { 5891 ComputePtrOff(); 5892 5893 /* Compute GPR index associated with argument offset. */ 5894 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 5895 GPR_idx = std::min(GPR_idx, NumGPRs); 5896 } 5897 5898 // Promote integers to 64-bit values. 5899 if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) { 5900 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 5901 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 5902 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 5903 } 5904 5905 // FIXME memcpy is used way more than necessary. Correctness first. 5906 // Note: "by value" is code for passing a structure by value, not 5907 // basic types. 5908 if (Flags.isByVal()) { 5909 // Note: Size includes alignment padding, so 5910 // struct x { short a; char b; } 5911 // will have Size = 4. With #pragma pack(1), it will have Size = 3. 5912 // These are the proper values we need for right-justifying the 5913 // aggregate in a parameter register. 5914 unsigned Size = Flags.getByValSize(); 5915 5916 // An empty aggregate parameter takes up no storage and no 5917 // registers. 5918 if (Size == 0) 5919 continue; 5920 5921 if (IsFastCall) 5922 ComputePtrOff(); 5923 5924 // All aggregates smaller than 8 bytes must be passed right-justified. 5925 if (Size==1 || Size==2 || Size==4) { 5926 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32); 5927 if (GPR_idx != NumGPRs) { 5928 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 5929 MachinePointerInfo(), VT); 5930 MemOpChains.push_back(Load.getValue(1)); 5931 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5932 5933 ArgOffset += PtrByteSize; 5934 continue; 5935 } 5936 } 5937 5938 if (GPR_idx == NumGPRs && Size < 8) { 5939 SDValue AddPtr = PtrOff; 5940 if (!isLittleEndian) { 5941 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 5942 PtrOff.getValueType()); 5943 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5944 } 5945 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5946 CallSeqStart, 5947 Flags, DAG, dl); 5948 ArgOffset += PtrByteSize; 5949 continue; 5950 } 5951 // Copy entire object into memory. There are cases where gcc-generated 5952 // code assumes it is there, even if it could be put entirely into 5953 // registers. (This is not what the doc says.) 5954 5955 // FIXME: The above statement is likely due to a misunderstanding of the 5956 // documents. All arguments must be copied into the parameter area BY 5957 // THE CALLEE in the event that the callee takes the address of any 5958 // formal argument. That has not yet been implemented. However, it is 5959 // reasonable to use the stack area as a staging area for the register 5960 // load. 5961 5962 // Skip this for small aggregates, as we will use the same slot for a 5963 // right-justified copy, below. 5964 if (Size >= 8) 5965 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 5966 CallSeqStart, 5967 Flags, DAG, dl); 5968 5969 // When a register is available, pass a small aggregate right-justified. 
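    // For example (big-endian, illustrative): a 3-byte aggregate is copied
    // to bytes 5..7 of its 8-byte slot (AddPtr = PtrOff + (8 - 3)), so the
    // doubleword load below picks it up right-justified in the GPR.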
5970     if (Size < 8 && GPR_idx != NumGPRs) {
5971       // The easiest way to get this right-justified in a register
5972       // is to copy the structure into the rightmost portion of a
5973       // local variable slot, then load the whole slot into the
5974       // register.
5975       // FIXME: The memcpy seems to produce pretty awful code for
5976       // small aggregates, particularly for packed ones.
5977       // FIXME: It would be preferable to use the slot in the
5978       // parameter save area instead of a new local variable.
5979       SDValue AddPtr = PtrOff;
5980       if (!isLittleEndian) {
5981         SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType());
5982         AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
5983       }
5984       Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
5985                                                         CallSeqStart,
5986                                                         Flags, DAG, dl);
5987
5988       // Load the slot into the register.
5989       SDValue Load =
5990           DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo());
5991       MemOpChains.push_back(Load.getValue(1));
5992       RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5993
5994       // Done with this argument.
5995       ArgOffset += PtrByteSize;
5996       continue;
5997     }
5998
5999     // For aggregates larger than PtrByteSize, copy the pieces of the
6000     // object that fit into registers from the parameter save area.
6001     for (unsigned j=0; j<Size; j+=PtrByteSize) {
6002       SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6003       SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6004       if (GPR_idx != NumGPRs) {
6005         SDValue Load =
6006             DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
6007         MemOpChains.push_back(Load.getValue(1));
6008         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6009         ArgOffset += PtrByteSize;
6010       } else {
6011         ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
6012         break;
6013       }
6014     }
6015     continue;
6016   }
6017
6018   switch (Arg.getSimpleValueType().SimpleTy) {
6019   default: llvm_unreachable("Unexpected ValueType for argument!");
6020   case MVT::i1:
6021   case MVT::i32:
6022   case MVT::i64:
6023     if (Flags.isNest()) {
6024       // The 'nest' parameter, if any, is passed in R11.
6025       RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
6026       break;
6027     }
6028
6029     // These can be scalar arguments or elements of an integer array type
6030     // passed directly. Clang may use those instead of "byval" aggregate
6031     // types to avoid forcing arguments to memory unnecessarily.
6032     if (GPR_idx != NumGPRs) {
6033       RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
6034     } else {
6035       if (IsFastCall)
6036         ComputePtrOff();
6037
6038       assert(HasParameterArea &&
6039              "Parameter area must exist to pass an argument in memory.");
6040       LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6041                        true, CFlags.IsTailCall, false, MemOpChains,
6042                        TailCallArguments, dl);
6043       if (IsFastCall)
6044         ArgOffset += PtrByteSize;
6045     }
6046     if (!IsFastCall)
6047       ArgOffset += PtrByteSize;
6048     break;
6049   case MVT::f32:
6050   case MVT::f64: {
6051     // These can be scalar arguments or elements of a float array type
6052     // passed directly. The latter are used to implement ELFv2 homogeneous
6053     // float aggregates.
6054
6055     // Named arguments go into FPRs first, and once they overflow, the
6056     // remaining arguments go into GPRs and then the parameter save area.
6057     // Unnamed arguments for vararg functions always go to GPRs and
6058     // then the parameter save area. For now, put all arguments to vararg
6059     // routines always in both locations (FPR *and* GPR or stack slot).
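    // For example (illustrative): when calling a variadic function such as
    // printf, a double argument is passed in the next available FPR and
    // additionally in a GPR or stack slot, because the callee may fetch it
    // with va_arg rather than from the FPR (both copies are emitted below).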
6060     bool NeedGPROrStack = CFlags.IsVarArg || FPR_idx == NumFPRs;
6061     bool NeededLoad = false;
6062
6063     // First load the argument into the next available FPR.
6064     if (FPR_idx != NumFPRs)
6065       RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6066
6067     // Next, load the argument into GPR or stack slot if needed.
6068     if (!NeedGPROrStack)
6069       ;
6070     else if (GPR_idx != NumGPRs && !IsFastCall) {
6071       // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
6072       // once we support fp <-> gpr moves.
6073
6074       // In the non-vararg case, this can only ever happen in the
6075       // presence of f32 array types, since otherwise we never run
6076       // out of FPRs before running out of GPRs.
6077       SDValue ArgVal;
6078
6079       // Double values are always passed in a single GPR.
6080       if (Arg.getValueType() != MVT::f32) {
6081         ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
6082
6083       // Non-array float values are extended and passed in a GPR.
6084       } else if (!Flags.isInConsecutiveRegs()) {
6085         ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6086         ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6087
6088       // If we have an array of floats, we collect every odd element
6089       // together with its predecessor into one GPR.
6090       } else if (ArgOffset % PtrByteSize != 0) {
6091         SDValue Lo, Hi;
6092         Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
6093         Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6094         if (!isLittleEndian)
6095           std::swap(Lo, Hi);
6096         ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
6097
6098       // The final element, if even, goes into the first half of a GPR.
6099       } else if (Flags.isInConsecutiveRegsLast()) {
6100         ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6101         ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6102         if (!isLittleEndian)
6103           ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
6104                                DAG.getConstant(32, dl, MVT::i32));
6105
6106       // Non-final even elements are skipped; they will be handled
6107       // together with the subsequent argument on the next go-around.
6108       } else
6109         ArgVal = SDValue();
6110
6111       if (ArgVal.getNode())
6112         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
6113     } else {
6114       if (IsFastCall)
6115         ComputePtrOff();
6116
6117       // Single-precision floating-point values are mapped to the
6118       // second (rightmost) word of the stack doubleword.
6119       if (Arg.getValueType() == MVT::f32 &&
6120           !isLittleEndian && !Flags.isInConsecutiveRegs()) {
6121         SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6122         PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6123       }
6124
6125       assert(HasParameterArea &&
6126              "Parameter area must exist to pass an argument in memory.");
6127       LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6128                        true, CFlags.IsTailCall, false, MemOpChains,
6129                        TailCallArguments, dl);
6130
6131       NeededLoad = true;
6132     }
6133     // When passing an array of floats, the array occupies consecutive
6134     // space in the argument area; only round up to the next doubleword
6135     // at the end of the array. Otherwise, each float takes 8 bytes.
6136     if (!IsFastCall || NeededLoad) {
6137       ArgOffset += (Arg.getValueType() == MVT::f32 &&
6138                     Flags.isInConsecutiveRegs()) ? 4 : 8;
6139       if (Flags.isInConsecutiveRegsLast())
6140         ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
6141     }
6142     break;
6143   }
6144   case MVT::v4f32:
6145   case MVT::v4i32:
6146   case MVT::v8i16:
6147   case MVT::v16i8:
6148   case MVT::v2f64:
6149   case MVT::v2i64:
6150   case MVT::v1i128:
6151   case MVT::f128:
6152     // These can be scalar arguments or elements of a vector array type
6153     // passed directly. The latter are used to implement ELFv2 homogeneous
6154     // vector aggregates.
6155
6156     // For a varargs call, named arguments go into VRs or on the stack as
6157     // usual; unnamed arguments always go to the stack or the corresponding
6158     // GPRs when within range. For now, we always put the value in both
6159     // locations (or even all three).
6160     if (CFlags.IsVarArg) {
6161       assert(HasParameterArea &&
6162              "Parameter area must exist if we have a varargs call.");
6163       // We could elide this store in the case where the object fits
6164       // entirely in R registers. Maybe later.
6165       SDValue Store =
6166           DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6167       MemOpChains.push_back(Store);
6168       if (VR_idx != NumVRs) {
6169         SDValue Load =
6170             DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6171         MemOpChains.push_back(Load.getValue(1));
6172         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6173       }
6174       ArgOffset += 16;
6175       for (unsigned i=0; i<16; i+=PtrByteSize) {
6176         if (GPR_idx == NumGPRs)
6177           break;
6178         SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6179                                  DAG.getConstant(i, dl, PtrVT));
6180         SDValue Load =
6181             DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6182         MemOpChains.push_back(Load.getValue(1));
6183         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6184       }
6185       break;
6186     }
6187
6188     // Non-varargs Altivec params go into VRs or on the stack.
6189     if (VR_idx != NumVRs) {
6190       RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6191     } else {
6192       if (IsFastCall)
6193         ComputePtrOff();
6194
6195       assert(HasParameterArea &&
6196              "Parameter area must exist to pass an argument in memory.");
6197       LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6198                        true, CFlags.IsTailCall, true, MemOpChains,
6199                        TailCallArguments, dl);
6200       if (IsFastCall)
6201         ArgOffset += 16;
6202     }
6203
6204     if (!IsFastCall)
6205       ArgOffset += 16;
6206     break;
6207   }
6208   }
6209
6210   assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
6211          "mismatch in size of parameter area");
6212   (void)NumBytesActuallyUsed;
6213
6214   if (!MemOpChains.empty())
6215     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6216
6217   // Check if this is an indirect call (MTCTR/BCTRL).
6218   // See prepareDescriptorIndirectCall and buildCallOperands for more
6219   // information about calls through function pointers in the 64-bit SVR4 ABI.
6220   if (CFlags.IsIndirect) {
6221     // For the 64-bit ELFv2 ABI with PCRel, do not save the TOC of the
6222     // caller in the TOC save area.
6223     if (isTOCSaveRestoreRequired(Subtarget)) {
6224       assert(!CFlags.IsTailCall && "Indirect tail calls not supported");
6225       // Load r2 into a virtual register and store it to the TOC save area.
6226       setUsesTOCBasePtr(DAG);
6227       SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
6228       // TOC save area offset.
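      // (40(SP) under ELFv1, 24(SP) under ELFv2; getTOCSaveOffset() below
      // returns the value for the current ABI.)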
6229 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset(); 6230 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl); 6231 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 6232 Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr, 6233 MachinePointerInfo::getStack( 6234 DAG.getMachineFunction(), TOCSaveOffset)); 6235 } 6236 // In the ELFv2 ABI, R12 must contain the address of an indirect callee. 6237 // This does not mean the MTCTR instruction must use R12; it's easier 6238 // to model this as an extra parameter, so do that. 6239 if (isELFv2ABI && !CFlags.IsPatchPoint) 6240 RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee)); 6241 } 6242 6243 // Build a sequence of copy-to-reg nodes chained together with token chain 6244 // and flag operands which copy the outgoing args into the appropriate regs. 6245 SDValue InFlag; 6246 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 6247 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 6248 RegsToPass[i].second, InFlag); 6249 InFlag = Chain.getValue(1); 6250 } 6251 6252 if (CFlags.IsTailCall && !IsSibCall) 6253 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp, 6254 TailCallArguments); 6255 6256 return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart, 6257 Callee, SPDiff, NumBytes, Ins, InVals, CB); 6258 } 6259 6260 static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT, 6261 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, 6262 CCState &State) { 6263 6264 const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>( 6265 State.getMachineFunction().getSubtarget()); 6266 const bool IsPPC64 = Subtarget.isPPC64(); 6267 const Align PtrAlign = IsPPC64 ? Align(8) : Align(4); 6268 const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32; 6269 6270 if (ValVT.isVector() && !State.getMachineFunction() 6271 .getTarget() 6272 .Options.EnableAIXExtendedAltivecABI) 6273 report_fatal_error("the default Altivec AIX ABI is not yet supported"); 6274 6275 if (ValVT == MVT::f128) 6276 report_fatal_error("f128 is unimplemented on AIX."); 6277 6278 if (ArgFlags.isNest()) 6279 report_fatal_error("Nest arguments are unimplemented."); 6280 6281 static const MCPhysReg GPR_32[] = {// 32-bit registers. 6282 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 6283 PPC::R7, PPC::R8, PPC::R9, PPC::R10}; 6284 static const MCPhysReg GPR_64[] = {// 64-bit registers. 6285 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 6286 PPC::X7, PPC::X8, PPC::X9, PPC::X10}; 6287 6288 static const MCPhysReg VR[] = {// Vector registers. 6289 PPC::V2, PPC::V3, PPC::V4, PPC::V5, 6290 PPC::V6, PPC::V7, PPC::V8, PPC::V9, 6291 PPC::V10, PPC::V11, PPC::V12, PPC::V13}; 6292 6293 if (ArgFlags.isByVal()) { 6294 if (ArgFlags.getNonZeroByValAlign() > PtrAlign) 6295 report_fatal_error("Pass-by-value arguments with alignment greater than " 6296 "register width are not supported."); 6297 6298 const unsigned ByValSize = ArgFlags.getByValSize(); 6299 6300 // An empty aggregate parameter takes up no storage and no registers, 6301 // but needs a MemLoc for a stack slot for the formal arguments side. 
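    // For example (illustrative): a zero-sized aggregate, such as an empty
    // struct in C, passes no bytes at all, yet the formal-argument side
    // still expects one location per ValNo, so a MemLoc is recorded anyway.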
6302 if (ByValSize == 0) { 6303 State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE, 6304 State.getNextStackOffset(), RegVT, 6305 LocInfo)); 6306 return false; 6307 } 6308 6309 const unsigned StackSize = alignTo(ByValSize, PtrAlign); 6310 unsigned Offset = State.AllocateStack(StackSize, PtrAlign); 6311 for (const unsigned E = Offset + StackSize; Offset < E; 6312 Offset += PtrAlign.value()) { 6313 if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) 6314 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo)); 6315 else { 6316 State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE, 6317 Offset, MVT::INVALID_SIMPLE_VALUE_TYPE, 6318 LocInfo)); 6319 break; 6320 } 6321 } 6322 return false; 6323 } 6324 6325 // Arguments always reserve parameter save area. 6326 switch (ValVT.SimpleTy) { 6327 default: 6328 report_fatal_error("Unhandled value type for argument."); 6329 case MVT::i64: 6330 // i64 arguments should have been split to i32 for PPC32. 6331 assert(IsPPC64 && "PPC32 should have split i64 values."); 6332 LLVM_FALLTHROUGH; 6333 case MVT::i1: 6334 case MVT::i32: { 6335 const unsigned Offset = State.AllocateStack(PtrAlign.value(), PtrAlign); 6336 // AIX integer arguments are always passed in register width. 6337 if (ValVT.getFixedSizeInBits() < RegVT.getFixedSizeInBits()) 6338 LocInfo = ArgFlags.isSExt() ? CCValAssign::LocInfo::SExt 6339 : CCValAssign::LocInfo::ZExt; 6340 if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) 6341 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo)); 6342 else 6343 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, RegVT, LocInfo)); 6344 6345 return false; 6346 } 6347 case MVT::f32: 6348 case MVT::f64: { 6349 // Parameter save area (PSA) is reserved even if the float passes in fpr. 6350 const unsigned StoreSize = LocVT.getStoreSize(); 6351 // Floats are always 4-byte aligned in the PSA on AIX. 6352 // This includes f64 in 64-bit mode for ABI compatibility. 6353 const unsigned Offset = 6354 State.AllocateStack(IsPPC64 ? 8 : StoreSize, Align(4)); 6355 unsigned FReg = State.AllocateReg(FPR); 6356 if (FReg) 6357 State.addLoc(CCValAssign::getReg(ValNo, ValVT, FReg, LocVT, LocInfo)); 6358 6359 // Reserve and initialize GPRs or initialize the PSA as required. 6360 for (unsigned I = 0; I < StoreSize; I += PtrAlign.value()) { 6361 if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) { 6362 assert(FReg && "An FPR should be available when a GPR is reserved."); 6363 if (State.isVarArg()) { 6364 // Successfully reserved GPRs are only initialized for vararg calls. 6365 // Custom handling is required for: 6366 // f64 in PPC32 needs to be split into 2 GPRs. 6367 // f32 in PPC64 needs to occupy only lower 32 bits of 64-bit GPR. 6368 State.addLoc( 6369 CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo)); 6370 } 6371 } else { 6372 // If there are insufficient GPRs, the PSA needs to be initialized. 6373 // Initialization occurs even if an FPR was initialized for 6374 // compatibility with the AIX XL compiler. The full memory for the 6375 // argument will be initialized even if a prior word is saved in GPR. 6376 // A custom memLoc is used when the argument also passes in FPR so 6377 // that the callee handling can skip over it easily. 6378 State.addLoc( 6379 FReg ? 
CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT, 6380 LocInfo) 6381 : CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo)); 6382 break; 6383 } 6384 } 6385 6386 return false; 6387 } 6388 case MVT::v4f32: 6389 case MVT::v4i32: 6390 case MVT::v8i16: 6391 case MVT::v16i8: 6392 case MVT::v2i64: 6393 case MVT::v2f64: 6394 case MVT::v1i128: { 6395 if (State.isVarArg()) 6396 report_fatal_error( 6397 "variadic arguments for vector types are unimplemented for AIX"); 6398 6399 if (unsigned VReg = State.AllocateReg(VR)) 6400 State.addLoc(CCValAssign::getReg(ValNo, ValVT, VReg, LocVT, LocInfo)); 6401 else { 6402 report_fatal_error( 6403 "passing vector parameters to the stack is unimplemented for AIX"); 6404 } 6405 return false; 6406 } 6407 } 6408 return true; 6409 } 6410 6411 static const TargetRegisterClass *getRegClassForSVT(MVT::SimpleValueType SVT, 6412 bool IsPPC64) { 6413 assert((IsPPC64 || SVT != MVT::i64) && 6414 "i64 should have been split for 32-bit codegen."); 6415 6416 switch (SVT) { 6417 default: 6418 report_fatal_error("Unexpected value type for formal argument"); 6419 case MVT::i1: 6420 case MVT::i32: 6421 case MVT::i64: 6422 return IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; 6423 case MVT::f32: 6424 return &PPC::F4RCRegClass; 6425 case MVT::f64: 6426 return &PPC::F8RCRegClass; 6427 case MVT::v4f32: 6428 case MVT::v4i32: 6429 case MVT::v8i16: 6430 case MVT::v16i8: 6431 case MVT::v2i64: 6432 case MVT::v2f64: 6433 case MVT::v1i128: 6434 return &PPC::VRRCRegClass; 6435 } 6436 } 6437 6438 static SDValue truncateScalarIntegerArg(ISD::ArgFlagsTy Flags, EVT ValVT, 6439 SelectionDAG &DAG, SDValue ArgValue, 6440 MVT LocVT, const SDLoc &dl) { 6441 assert(ValVT.isScalarInteger() && LocVT.isScalarInteger()); 6442 assert(ValVT.getFixedSizeInBits() < LocVT.getFixedSizeInBits()); 6443 6444 if (Flags.isSExt()) 6445 ArgValue = DAG.getNode(ISD::AssertSext, dl, LocVT, ArgValue, 6446 DAG.getValueType(ValVT)); 6447 else if (Flags.isZExt()) 6448 ArgValue = DAG.getNode(ISD::AssertZext, dl, LocVT, ArgValue, 6449 DAG.getValueType(ValVT)); 6450 6451 return DAG.getNode(ISD::TRUNCATE, dl, ValVT, ArgValue); 6452 } 6453 6454 static unsigned mapArgRegToOffsetAIX(unsigned Reg, const PPCFrameLowering *FL) { 6455 const unsigned LASize = FL->getLinkageSize(); 6456 6457 if (PPC::GPRCRegClass.contains(Reg)) { 6458 assert(Reg >= PPC::R3 && Reg <= PPC::R10 && 6459 "Reg must be a valid argument register!"); 6460 return LASize + 4 * (Reg - PPC::R3); 6461 } 6462 6463 if (PPC::G8RCRegClass.contains(Reg)) { 6464 assert(Reg >= PPC::X3 && Reg <= PPC::X10 && 6465 "Reg must be a valid argument register!"); 6466 return LASize + 8 * (Reg - PPC::X3); 6467 } 6468 6469 llvm_unreachable("Only general purpose registers expected."); 6470 } 6471 6472 // AIX ABI Stack Frame Layout: 6473 // 6474 // Low Memory +--------------------------------------------+ 6475 // SP +---> | Back chain | ---+ 6476 // | +--------------------------------------------+ | 6477 // | | Saved Condition Register | | 6478 // | +--------------------------------------------+ | 6479 // | | Saved Linkage Register | | 6480 // | +--------------------------------------------+ | Linkage Area 6481 // | | Reserved for compilers | | 6482 // | +--------------------------------------------+ | 6483 // | | Reserved for binders | | 6484 // | +--------------------------------------------+ | 6485 // | | Saved TOC pointer | ---+ 6486 // | +--------------------------------------------+ 6487 // | | Parameter save area | 6488 // | 
+--------------------------------------------+
6489 // |   | Alloca space                               |
6490 // |   +--------------------------------------------+
6491 // |   | Local variable space                       |
6492 // |   +--------------------------------------------+
6493 // |   | Float/int conversion temporary             |
6494 // |   +--------------------------------------------+
6495 // |   | Save area for AltiVec registers            |
6496 // |   +--------------------------------------------+
6497 // |   | AltiVec alignment padding                  |
6498 // |   +--------------------------------------------+
6499 // |   | Save area for VRSAVE register              |
6500 // |   +--------------------------------------------+
6501 // |   | Save area for General Purpose registers    |
6502 // |   +--------------------------------------------+
6503 // |   | Save area for Floating Point registers     |
6504 // |   +--------------------------------------------+
6505 // +---- | Back chain                                 |
6506 // High Memory  +--------------------------------------------+
6507 //
6508 // Specifications:
6509 //   AIX 7.2 Assembler Language Reference
6510 //   Subroutine linkage convention
6511
6512 SDValue PPCTargetLowering::LowerFormalArguments_AIX(
6513     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
6514     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6515     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
6516
6517   assert((CallConv == CallingConv::C || CallConv == CallingConv::Cold ||
6518           CallConv == CallingConv::Fast) &&
6519          "Unexpected calling convention!");
6520
6521   if (getTargetMachine().Options.GuaranteedTailCallOpt)
6522     report_fatal_error("Tail call support is unimplemented on AIX.");
6523
6524   if (useSoftFloat())
6525     report_fatal_error("Soft float support is unimplemented on AIX.");
6526
6527   const PPCSubtarget &Subtarget =
6528       static_cast<const PPCSubtarget &>(DAG.getSubtarget());
6529
6530   const bool IsPPC64 = Subtarget.isPPC64();
6531   const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
6532
6533   // Assign locations to all of the incoming arguments.
6534   SmallVector<CCValAssign, 16> ArgLocs;
6535   MachineFunction &MF = DAG.getMachineFunction();
6536   MachineFrameInfo &MFI = MF.getFrameInfo();
6537   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
6538   CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
6539
6540   const EVT PtrVT = getPointerTy(MF.getDataLayout());
6541   // Reserve space for the linkage area on the stack.
6542   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6543   CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
6544   CCInfo.AnalyzeFormalArguments(Ins, CC_AIX);
6545
6546   SmallVector<SDValue, 8> MemOps;
6547
6548   for (size_t I = 0, End = ArgLocs.size(); I != End; /* No increment here */) {
6549     CCValAssign &VA = ArgLocs[I++];
6550     MVT LocVT = VA.getLocVT();
6551     ISD::ArgFlagsTy Flags = Ins[VA.getValNo()].Flags;
6552     if (VA.isMemLoc() && VA.getValVT().isVector())
6553       report_fatal_error(
6554           "passing vector parameters to the stack is unimplemented for AIX");
6555
6556     // For compatibility with the AIX XL compiler, the float args in the
6557     // parameter save area are initialized even if the argument is available
6558     // in a register. The caller is required to initialize both the register
6559     // and memory; however, the callee can choose to read the argument from either.
6560     // The memloc is dismissed here because the argument is retrieved from
6561     // the register.
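    // For example (illustrative): for a 'double' formal argument passed in
    // FPR1, CC_AIX also recorded a custom MemLoc for its parameter-save-area
    // words; the value is taken from FPR1 here, so that MemLoc is skipped.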
6562     if (VA.isMemLoc() && VA.needsCustom())
6563       continue;
6564
6565     if (VA.isRegLoc()) {
6566       if (VA.getValVT().isScalarInteger())
6567         FuncInfo->appendParameterType(PPCFunctionInfo::FixedType);
6568       else if (VA.getValVT().isFloatingPoint() && !VA.getValVT().isVector())
6569         FuncInfo->appendParameterType(VA.getValVT().SimpleTy == MVT::f32
6570                                           ? PPCFunctionInfo::ShortFloatPoint
6571                                           : PPCFunctionInfo::LongFloatPoint);
6572     }
6573
6574     if (Flags.isByVal() && VA.isMemLoc()) {
6575       const unsigned Size =
6576           alignTo(Flags.getByValSize() ? Flags.getByValSize() : PtrByteSize,
6577                   PtrByteSize);
6578       const int FI = MF.getFrameInfo().CreateFixedObject(
6579           Size, VA.getLocMemOffset(), /* IsImmutable */ false,
6580           /* IsAliased */ true);
6581       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
6582       InVals.push_back(FIN);
6583
6584       continue;
6585     }
6586
6587     if (Flags.isByVal()) {
6588       assert(VA.isRegLoc() && "MemLocs should already be handled.");
6589
6590       const MCPhysReg ArgReg = VA.getLocReg();
6591       const PPCFrameLowering *FL = Subtarget.getFrameLowering();
6592
6593       if (Flags.getNonZeroByValAlign() > PtrByteSize)
6594         report_fatal_error("Over-aligned byvals not supported yet.");
6595
6596       const unsigned StackSize = alignTo(Flags.getByValSize(), PtrByteSize);
6597       const int FI = MF.getFrameInfo().CreateFixedObject(
6598           StackSize, mapArgRegToOffsetAIX(ArgReg, FL), /* IsImmutable */ false,
6599           /* IsAliased */ true);
6600       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
6601       InVals.push_back(FIN);
6602
6603       // Add live ins for all the RegLocs for the same ByVal.
6604       const TargetRegisterClass *RegClass =
6605           IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
6606
6607       auto HandleRegLoc = [&, RegClass, LocVT](const MCPhysReg PhysReg,
6608                                                unsigned Offset) {
6609         const unsigned VReg = MF.addLiveIn(PhysReg, RegClass);
6610         // Since the caller has left-justified the aggregate in the
6611         // register, we can simply store the entire register into the stack
6612         // slot.
6613         SDValue CopyFrom = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
6614         // The store to the fixedstack object is needed because accessing a
6615         // field of the ByVal will use a gep and load. Ideally we will optimize
6616         // to extracting the value from the register directly, and elide the
6617         // stores when the argument's address is not taken, but that will need
6618         // to be future work.
6619         SDValue Store = DAG.getStore(
6620             CopyFrom.getValue(1), dl, CopyFrom,
6621             DAG.getObjectPtrOffset(dl, FIN, TypeSize::Fixed(Offset)),
6622             MachinePointerInfo::getFixedStack(MF, FI, Offset));
6623
6624         MemOps.push_back(Store);
6625       };
6626
6627       unsigned Offset = 0;
6628       HandleRegLoc(VA.getLocReg(), Offset);
6629       Offset += PtrByteSize;
6630       for (; Offset != StackSize && ArgLocs[I].isRegLoc();
6631            Offset += PtrByteSize) {
6632         assert(ArgLocs[I].getValNo() == VA.getValNo() &&
6633                "RegLocs should be for ByVal argument.");
6634
6635         const CCValAssign RL = ArgLocs[I++];
6636         HandleRegLoc(RL.getLocReg(), Offset);
6637         FuncInfo->appendParameterType(PPCFunctionInfo::FixedType);
6638       }
6639
6640       if (Offset != StackSize) {
6641         assert(ArgLocs[I].getValNo() == VA.getValNo() &&
6642                "Expected MemLoc for remaining bytes.");
6643         assert(ArgLocs[I].isMemLoc() && "Expected MemLoc for remaining bytes.");
6644         // Consume the MemLoc. The InVal has already been emitted, so nothing
6645         // more needs to be done.
6646 ++I; 6647 } 6648 6649 continue; 6650 } 6651 6652 EVT ValVT = VA.getValVT(); 6653 if (VA.isRegLoc() && !VA.needsCustom()) { 6654 MVT::SimpleValueType SVT = ValVT.getSimpleVT().SimpleTy; 6655 unsigned VReg = 6656 MF.addLiveIn(VA.getLocReg(), getRegClassForSVT(SVT, IsPPC64)); 6657 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, LocVT); 6658 if (ValVT.isScalarInteger() && 6659 (ValVT.getFixedSizeInBits() < LocVT.getFixedSizeInBits())) { 6660 ArgValue = 6661 truncateScalarIntegerArg(Flags, ValVT, DAG, ArgValue, LocVT, dl); 6662 } 6663 InVals.push_back(ArgValue); 6664 continue; 6665 } 6666 if (VA.isMemLoc()) { 6667 const unsigned LocSize = LocVT.getStoreSize(); 6668 const unsigned ValSize = ValVT.getStoreSize(); 6669 assert((ValSize <= LocSize) && 6670 "Object size is larger than size of MemLoc"); 6671 int CurArgOffset = VA.getLocMemOffset(); 6672 // Objects are right-justified because AIX is big-endian. 6673 if (LocSize > ValSize) 6674 CurArgOffset += LocSize - ValSize; 6675 // Potential tail calls could cause overwriting of argument stack slots. 6676 const bool IsImmutable = 6677 !(getTargetMachine().Options.GuaranteedTailCallOpt && 6678 (CallConv == CallingConv::Fast)); 6679 int FI = MFI.CreateFixedObject(ValSize, CurArgOffset, IsImmutable); 6680 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 6681 SDValue ArgValue = 6682 DAG.getLoad(ValVT, dl, Chain, FIN, MachinePointerInfo()); 6683 InVals.push_back(ArgValue); 6684 continue; 6685 } 6686 } 6687 6688 // On AIX a minimum of 8 words is saved to the parameter save area. 6689 const unsigned MinParameterSaveArea = 8 * PtrByteSize; 6690 // Area that is at least reserved in the caller of this function. 6691 unsigned CallerReservedArea = 6692 std::max(CCInfo.getNextStackOffset(), LinkageSize + MinParameterSaveArea); 6693 6694 // Set the size that is at least reserved in caller of this function. Tail 6695 // call optimized function's reserved stack space needs to be aligned so 6696 // that taking the difference between two stack areas will result in an 6697 // aligned stack. 6698 CallerReservedArea = 6699 EnsureStackAlignment(Subtarget.getFrameLowering(), CallerReservedArea); 6700 FuncInfo->setMinReservedArea(CallerReservedArea); 6701 6702 if (isVarArg) { 6703 FuncInfo->setVarArgsFrameIndex( 6704 MFI.CreateFixedObject(PtrByteSize, CCInfo.getNextStackOffset(), true)); 6705 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 6706 6707 static const MCPhysReg GPR_32[] = {PPC::R3, PPC::R4, PPC::R5, PPC::R6, 6708 PPC::R7, PPC::R8, PPC::R9, PPC::R10}; 6709 6710 static const MCPhysReg GPR_64[] = {PPC::X3, PPC::X4, PPC::X5, PPC::X6, 6711 PPC::X7, PPC::X8, PPC::X9, PPC::X10}; 6712 const unsigned NumGPArgRegs = array_lengthof(IsPPC64 ? GPR_64 : GPR_32); 6713 6714 // The fixed integer arguments of a variadic function are stored to the 6715 // VarArgsFrameIndex on the stack so that they may be loaded by 6716 // dereferencing the result of va_next. 6717 for (unsigned GPRIndex = 6718 (CCInfo.getNextStackOffset() - LinkageSize) / PtrByteSize; 6719 GPRIndex < NumGPArgRegs; ++GPRIndex) { 6720 6721 const unsigned VReg = 6722 IsPPC64 ? MF.addLiveIn(GPR_64[GPRIndex], &PPC::G8RCRegClass) 6723 : MF.addLiveIn(GPR_32[GPRIndex], &PPC::GPRCRegClass); 6724 6725 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 6726 SDValue Store = 6727 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 6728 MemOps.push_back(Store); 6729 // Increment the address for the next argument to store. 
6730 SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT); 6731 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 6732 } 6733 } 6734 6735 if (!MemOps.empty()) 6736 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 6737 6738 return Chain; 6739 } 6740 6741 SDValue PPCTargetLowering::LowerCall_AIX( 6742 SDValue Chain, SDValue Callee, CallFlags CFlags, 6743 const SmallVectorImpl<ISD::OutputArg> &Outs, 6744 const SmallVectorImpl<SDValue> &OutVals, 6745 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 6746 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, 6747 const CallBase *CB) const { 6748 // See PPCTargetLowering::LowerFormalArguments_AIX() for a description of the 6749 // AIX ABI stack frame layout. 6750 6751 assert((CFlags.CallConv == CallingConv::C || 6752 CFlags.CallConv == CallingConv::Cold || 6753 CFlags.CallConv == CallingConv::Fast) && 6754 "Unexpected calling convention!"); 6755 6756 if (CFlags.IsPatchPoint) 6757 report_fatal_error("This call type is unimplemented on AIX."); 6758 6759 const PPCSubtarget& Subtarget = 6760 static_cast<const PPCSubtarget&>(DAG.getSubtarget()); 6761 6762 MachineFunction &MF = DAG.getMachineFunction(); 6763 SmallVector<CCValAssign, 16> ArgLocs; 6764 CCState CCInfo(CFlags.CallConv, CFlags.IsVarArg, MF, ArgLocs, 6765 *DAG.getContext()); 6766 6767 // Reserve space for the linkage save area (LSA) on the stack. 6768 // In both PPC32 and PPC64 there are 6 reserved slots in the LSA: 6769 // [SP][CR][LR][2 x reserved][TOC]. 6770 // The LSA is 24 bytes (6x4) in PPC32 and 48 bytes (6x8) in PPC64. 6771 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 6772 const bool IsPPC64 = Subtarget.isPPC64(); 6773 const EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6774 const unsigned PtrByteSize = IsPPC64 ? 8 : 4; 6775 CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize)); 6776 CCInfo.AnalyzeCallOperands(Outs, CC_AIX); 6777 6778 // The prolog code of the callee may store up to 8 GPR argument registers to 6779 // the stack, allowing va_start to index over them in memory if the callee 6780 // is variadic. 6781 // Because we cannot tell if this is needed on the caller side, we have to 6782 // conservatively assume that it is needed. As such, make sure we have at 6783 // least enough stack space for the caller to store the 8 GPRs. 6784 const unsigned MinParameterSaveAreaSize = 8 * PtrByteSize; 6785 const unsigned NumBytes = std::max(LinkageSize + MinParameterSaveAreaSize, 6786 CCInfo.getNextStackOffset()); 6787 6788 // Adjust the stack pointer for the new arguments... 6789 // These operations are automatically eliminated by the prolog/epilog pass. 6790 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl); 6791 SDValue CallSeqStart = Chain; 6792 6793 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 6794 SmallVector<SDValue, 8> MemOpChains; 6795 6796 // Set up a copy of the stack pointer for loading and storing any 6797 // arguments that may not fit in the registers available for argument 6798 // passing. 6799 const SDValue StackPtr = IsPPC64 ? DAG.getRegister(PPC::X1, MVT::i64) 6800 : DAG.getRegister(PPC::R1, MVT::i32); 6801 6802 for (unsigned I = 0, E = ArgLocs.size(); I != E;) { 6803 const unsigned ValNo = ArgLocs[I].getValNo(); 6804 SDValue Arg = OutVals[ValNo]; 6805 ISD::ArgFlagsTy Flags = Outs[ValNo].Flags; 6806 6807 if (Flags.isByVal()) { 6808 const unsigned ByValSize = Flags.getByValSize(); 6809 6810 // Nothing to do for zero-sized ByVals on the caller side. 
6811     if (!ByValSize) {
6812       ++I;
6813       continue;
6814     }
6815
6816     auto GetLoad = [&](EVT VT, unsigned LoadOffset) {
6817       return DAG.getExtLoad(
6818           ISD::ZEXTLOAD, dl, PtrVT, Chain,
6819           (LoadOffset != 0)
6820               ? DAG.getObjectPtrOffset(dl, Arg, TypeSize::Fixed(LoadOffset))
6821               : Arg,
6822           MachinePointerInfo(), VT);
6823     };
6824
6825     unsigned LoadOffset = 0;
6826
6827     // Initialize registers that are fully occupied by the by-val argument.
6828     while (LoadOffset + PtrByteSize <= ByValSize && ArgLocs[I].isRegLoc()) {
6829       SDValue Load = GetLoad(PtrVT, LoadOffset);
6830       MemOpChains.push_back(Load.getValue(1));
6831       LoadOffset += PtrByteSize;
6832       const CCValAssign &ByValVA = ArgLocs[I++];
6833       assert(ByValVA.getValNo() == ValNo &&
6834              "Unexpected location for pass-by-value argument.");
6835       RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), Load));
6836     }
6837
6838     if (LoadOffset == ByValSize)
6839       continue;
6840
6841     // There must be one more loc to handle the remainder.
6842     assert(ArgLocs[I].getValNo() == ValNo &&
6843            "Expected additional location for by-value argument.");
6844
6845     if (ArgLocs[I].isMemLoc()) {
6846       assert(LoadOffset < ByValSize && "Unexpected memloc for by-val arg.");
6847       const CCValAssign &ByValVA = ArgLocs[I++];
6848       ISD::ArgFlagsTy MemcpyFlags = Flags;
6849       // Only memcpy the bytes that are not passed in registers.
6850       MemcpyFlags.setByValSize(ByValSize - LoadOffset);
6851       Chain = CallSeqStart = createMemcpyOutsideCallSeq(
6852           (LoadOffset != 0)
6853               ? DAG.getObjectPtrOffset(dl, Arg, TypeSize::Fixed(LoadOffset))
6854               : Arg,
6855           DAG.getObjectPtrOffset(dl, StackPtr,
6856                                  TypeSize::Fixed(ByValVA.getLocMemOffset())),
6857           CallSeqStart, MemcpyFlags, DAG, dl);
6858       continue;
6859     }
6860
6861     // Initialize the final register residue.
6862     // Any residue that occupies the final by-val arg register must be
6863     // left-justified on AIX. Loads must be a power-of-2 size and cannot be
6864     // larger than the ByValSize. For example: a 7-byte by-val arg requires 4-,
6865     // 2- and 1-byte loads.
6866     const unsigned ResidueBytes = ByValSize % PtrByteSize;
6867     assert(ResidueBytes != 0 && LoadOffset + PtrByteSize > ByValSize &&
6868            "Unexpected register residue for by-value argument.");
6869     SDValue ResidueVal;
6870     for (unsigned Bytes = 0; Bytes != ResidueBytes;) {
6871       const unsigned N = PowerOf2Floor(ResidueBytes - Bytes);
6872       const MVT VT =
6873           N == 1 ? MVT::i8
6874                  : ((N == 2) ? MVT::i16 : (N == 4 ? MVT::i32 : MVT::i64));
6875       SDValue Load = GetLoad(VT, LoadOffset);
6876       MemOpChains.push_back(Load.getValue(1));
6877       LoadOffset += N;
6878       Bytes += N;
6879
6880       // By-val arguments are passed left-justified in registers.
6881       // Every load here needs to be shifted, otherwise a full register load
6882       // should have been used.
6883       assert(PtrVT.getSimpleVT().getSizeInBits() > (Bytes * 8) &&
6884              "Unexpected load emitted during handling of pass-by-value "
6885              "argument.");
6886       unsigned NumSHLBits = PtrVT.getSimpleVT().getSizeInBits() - (Bytes * 8);
6887       EVT ShiftAmountTy =
6888           getShiftAmountTy(Load->getValueType(0), DAG.getDataLayout());
6889       SDValue SHLAmt = DAG.getConstant(NumSHLBits, dl, ShiftAmountTy);
6890       SDValue ShiftedLoad =
6891           DAG.getNode(ISD::SHL, dl, Load.getValueType(), Load, SHLAmt);
6892       ResidueVal = ResidueVal ? DAG.getNode(ISD::OR, dl, PtrVT, ResidueVal,
6893                                             ShiftedLoad)
6894                               : ShiftedLoad;
6895     }
6896
6897     const CCValAssign &ByValVA = ArgLocs[I++];
6898     RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), ResidueVal));
6899     continue;
6900   }
6901
6902   CCValAssign &VA = ArgLocs[I++];
6903   const MVT LocVT = VA.getLocVT();
6904   const MVT ValVT = VA.getValVT();
6905
6906   if (VA.isMemLoc() && VA.getValVT().isVector())
6907     report_fatal_error(
6908         "passing vector parameters to the stack is unimplemented for AIX");
6909
6910   switch (VA.getLocInfo()) {
6911   default:
6912     report_fatal_error("Unexpected argument extension type.");
6913   case CCValAssign::Full:
6914     break;
6915   case CCValAssign::ZExt:
6916     Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
6917     break;
6918   case CCValAssign::SExt:
6919     Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
6920     break;
6921   }
6922
6923   if (VA.isRegLoc() && !VA.needsCustom()) {
6924     RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
6925     continue;
6926   }
6927
6928   if (VA.isMemLoc()) {
6929     SDValue PtrOff =
6930         DAG.getConstant(VA.getLocMemOffset(), dl, StackPtr.getValueType());
6931     PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6932     MemOpChains.push_back(
6933         DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
6934
6935     continue;
6936   }
6937
6938   // Custom handling is used for GPR initializations for vararg float
6939   // arguments.
6940   assert(VA.isRegLoc() && VA.needsCustom() && CFlags.IsVarArg &&
6941          ValVT.isFloatingPoint() && LocVT.isInteger() &&
6942          "Unexpected register handling for calling convention.");
6943
6944   SDValue ArgAsInt =
6945       DAG.getBitcast(MVT::getIntegerVT(ValVT.getSizeInBits()), Arg);
6946
6947   if (Arg.getValueType().getStoreSize() == LocVT.getStoreSize())
6948     // f32 in 32-bit GPR
6949     // f64 in 64-bit GPR
6950     RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgAsInt));
6951   else if (Arg.getValueType().getFixedSizeInBits() <
6952            LocVT.getFixedSizeInBits())
6953     // f32 in 64-bit GPR.
6954     RegsToPass.push_back(std::make_pair(
6955         VA.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, LocVT)));
6956   else {
6957     // f64 in two 32-bit GPRs
6958     // The 2 GPRs are marked custom and expected to be adjacent in ArgLocs.
6959     assert(Arg.getValueType() == MVT::f64 && CFlags.IsVarArg && !IsPPC64 &&
6960            "Unexpected custom register for argument!");
6961     CCValAssign &GPR1 = VA;
6962     SDValue MSWAsI64 = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgAsInt,
6963                                    DAG.getConstant(32, dl, MVT::i8));
6964     RegsToPass.push_back(std::make_pair(
6965         GPR1.getLocReg(), DAG.getZExtOrTrunc(MSWAsI64, dl, MVT::i32)));
6966
6967     if (I != E) {
6968       // If only 1 GPR was available, there will only be one custom GPR and
6969       // the argument will also pass in memory.
6970       CCValAssign &PeekArg = ArgLocs[I];
6971       if (PeekArg.isRegLoc() && PeekArg.getValNo() == ValNo) {
6972         assert(PeekArg.needsCustom() && "A second custom GPR is expected.");
6973         CCValAssign &GPR2 = ArgLocs[I++];
6974         RegsToPass.push_back(std::make_pair(
6975             GPR2.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, MVT::i32)));
6976       }
6977     }
6978   }
6979   }
6980
6981   if (!MemOpChains.empty())
6982     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6983
6984   // For indirect calls, we need to save the TOC base to the stack for
6985   // restoration after the call.
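  // A rough sketch of the resulting 64-bit sequence (registers and exact
  // scheduling are chosen by later passes; the slot is 20(1) on 32-bit AIX):
  //   std 2, 40(1)   ; save the caller's TOC pointer
  //   mtctr ...      ; CTR gets the callee's entry point
  //   bctrl          ; indirect call
  //   ld 2, 40(1)    ; the TOC restore is arranged by FinishCall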
6986 if (CFlags.IsIndirect) { 6987 assert(!CFlags.IsTailCall && "Indirect tail-calls not supported."); 6988 const MCRegister TOCBaseReg = Subtarget.getTOCPointerRegister(); 6989 const MCRegister StackPtrReg = Subtarget.getStackPointerRegister(); 6990 const MVT PtrVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32; 6991 const unsigned TOCSaveOffset = 6992 Subtarget.getFrameLowering()->getTOCSaveOffset(); 6993 6994 setUsesTOCBasePtr(DAG); 6995 SDValue Val = DAG.getCopyFromReg(Chain, dl, TOCBaseReg, PtrVT); 6996 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl); 6997 SDValue StackPtr = DAG.getRegister(StackPtrReg, PtrVT); 6998 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 6999 Chain = DAG.getStore( 7000 Val.getValue(1), dl, Val, AddPtr, 7001 MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset)); 7002 } 7003 7004 // Build a sequence of copy-to-reg nodes chained together with token chain 7005 // and flag operands which copy the outgoing args into the appropriate regs. 7006 SDValue InFlag; 7007 for (auto Reg : RegsToPass) { 7008 Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, InFlag); 7009 InFlag = Chain.getValue(1); 7010 } 7011 7012 const int SPDiff = 0; 7013 return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart, 7014 Callee, SPDiff, NumBytes, Ins, InVals, CB); 7015 } 7016 7017 bool 7018 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv, 7019 MachineFunction &MF, bool isVarArg, 7020 const SmallVectorImpl<ISD::OutputArg> &Outs, 7021 LLVMContext &Context) const { 7022 SmallVector<CCValAssign, 16> RVLocs; 7023 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); 7024 return CCInfo.CheckReturn( 7025 Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold) 7026 ? RetCC_PPC_Cold 7027 : RetCC_PPC); 7028 } 7029 7030 SDValue 7031 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 7032 bool isVarArg, 7033 const SmallVectorImpl<ISD::OutputArg> &Outs, 7034 const SmallVectorImpl<SDValue> &OutVals, 7035 const SDLoc &dl, SelectionDAG &DAG) const { 7036 SmallVector<CCValAssign, 16> RVLocs; 7037 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 7038 *DAG.getContext()); 7039 CCInfo.AnalyzeReturn(Outs, 7040 (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold) 7041 ? RetCC_PPC_Cold 7042 : RetCC_PPC); 7043 7044 SDValue Flag; 7045 SmallVector<SDValue, 4> RetOps(1, Chain); 7046 7047 // Copy the result values into the output registers. 7048 for (unsigned i = 0, RealResIdx = 0; i != RVLocs.size(); ++i, ++RealResIdx) { 7049 CCValAssign &VA = RVLocs[i]; 7050 assert(VA.isRegLoc() && "Can only return in registers!"); 7051 7052 SDValue Arg = OutVals[RealResIdx]; 7053 7054 switch (VA.getLocInfo()) { 7055 default: llvm_unreachable("Unknown loc info!"); 7056 case CCValAssign::Full: break; 7057 case CCValAssign::AExt: 7058 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 7059 break; 7060 case CCValAssign::ZExt: 7061 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 7062 break; 7063 case CCValAssign::SExt: 7064 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 7065 break; 7066 } 7067 if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) { 7068 bool isLittleEndian = Subtarget.isLittleEndian(); 7069 // Legalize ret f64 -> ret 2 x i32. 7070 SDValue SVal = 7071 DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg, 7072 DAG.getIntPtrConstant(isLittleEndian ? 
0 : 1, dl));
7073 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7074 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7075 SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7076 DAG.getIntPtrConstant(isLittleEndian ? 1 : 0, dl));
7077 Flag = Chain.getValue(1);
7078 VA = RVLocs[++i]; // skip ahead to next loc
7079 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7080 } else
7081 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
7082 Flag = Chain.getValue(1);
7083 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7084 }
7085
7086 RetOps[0] = Chain; // Update chain.
7087
7088 // Add the flag if we have it.
7089 if (Flag.getNode())
7090 RetOps.push_back(Flag);
7091
7092 return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
7093 }
7094
7095 SDValue
7096 PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
7097 SelectionDAG &DAG) const {
7098 SDLoc dl(Op);
7099
7100 // Get the correct type for integers.
7101 EVT IntVT = Op.getValueType();
7102
7103 // Get the inputs.
7104 SDValue Chain = Op.getOperand(0);
7105 SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7106 // Build a DYNAREAOFFSET node.
7107 SDValue Ops[2] = {Chain, FPSIdx};
7108 SDVTList VTs = DAG.getVTList(IntVT);
7109 return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
7110 }
7111
7112 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
7113 SelectionDAG &DAG) const {
7114 // When we pop the dynamic allocation we need to restore the SP link.
7115 SDLoc dl(Op);
7116
7117 // Get the correct type for pointers.
7118 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7119
7120 // Construct the stack pointer operand.
7121 bool isPPC64 = Subtarget.isPPC64();
7122 unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
7123 SDValue StackPtr = DAG.getRegister(SP, PtrVT);
7124
7125 // Get the operands for the STACKRESTORE.
7126 SDValue Chain = Op.getOperand(0);
7127 SDValue SaveSP = Op.getOperand(1);
7128
7129 // Load the old link SP.
7130 SDValue LoadLinkSP =
7131 DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());
7132
7133 // Restore the stack pointer.
7134 Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
7135
7136 // Store the old link SP.
7137 return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
7138 }
7139
7140 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
7141 MachineFunction &MF = DAG.getMachineFunction();
7142 bool isPPC64 = Subtarget.isPPC64();
7143 EVT PtrVT = getPointerTy(MF.getDataLayout());
7144
7145 // Get the current return address save index. The users of this index are
7146 // primarily the RETURNADDR lowering code.
7147 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7148 int RASI = FI->getReturnAddrSaveIndex();
7149
7150 // If the return address save index hasn't been defined yet, create it.
7151 if (!RASI) {
7152 // Find out the fixed offset of the return address save area.
7153 int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
7154 // Allocate the frame index for the return address save area.
7155 RASI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, LROffset, false);
7156 // Save the result.
7157 FI->setReturnAddrSaveIndex(RASI);
7158 }
7159 return DAG.getFrameIndex(RASI, PtrVT);
7160 }
7161
7162 SDValue
7163 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG &DAG) const {
7164 MachineFunction &MF = DAG.getMachineFunction();
7165 bool isPPC64 = Subtarget.isPPC64();
7166 EVT PtrVT = getPointerTy(MF.getDataLayout());
7167
7168 // Get the current frame pointer save index. The users of this index will be
7169 // primarily DYNALLOC instructions.
7170 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7171 int FPSI = FI->getFramePointerSaveIndex();
7172
7173 // If the frame pointer save index hasn't been defined yet, create it.
7174 if (!FPSI) {
7175 // Find out the fixed offset of the frame pointer save area.
7176 int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
7177 // Allocate the frame index for the frame pointer save area.
7178 FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, FPOffset, true);
7179 // Save the result.
7180 FI->setFramePointerSaveIndex(FPSI);
7181 }
7182 return DAG.getFrameIndex(FPSI, PtrVT);
7183 }
7184
7185 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
7186 SelectionDAG &DAG) const {
7187 MachineFunction &MF = DAG.getMachineFunction();
7188 // Get the inputs.
7189 SDValue Chain = Op.getOperand(0);
7190 SDValue Size = Op.getOperand(1);
7191 SDLoc dl(Op);
7192
7193 // Get the correct type for pointers.
7194 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7195 // Negate the size.
7196 SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
7197 DAG.getConstant(0, dl, PtrVT), Size);
7198 // Construct a node for the frame pointer save index.
7199 SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7200 SDValue Ops[3] = { Chain, NegSize, FPSIdx };
7201 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
7202 if (hasInlineStackProbe(MF))
7203 return DAG.getNode(PPCISD::PROBED_ALLOCA, dl, VTs, Ops);
7204 return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
7205 }
7206
7207 SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
7208 SelectionDAG &DAG) const {
7209 MachineFunction &MF = DAG.getMachineFunction();
7210
7211 bool isPPC64 = Subtarget.isPPC64();
7212 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7213
7214 int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false);
7215 return DAG.getFrameIndex(FI, PtrVT);
7216 }
7217
7218 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
7219 SelectionDAG &DAG) const {
7220 SDLoc DL(Op);
7221 return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
7222 DAG.getVTList(MVT::i32, MVT::Other),
7223 Op.getOperand(0), Op.getOperand(1));
7224 }
7225
7226 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
7227 SelectionDAG &DAG) const {
7228 SDLoc DL(Op);
7229 return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
7230 Op.getOperand(0), Op.getOperand(1));
7231 }
7232
7233 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
7234 if (Op.getValueType().isVector())
7235 return LowerVectorLoad(Op, DAG);
7236
7237 assert(Op.getValueType() == MVT::i1 &&
7238 "Custom lowering only for i1 loads");
7239
7240 // First, extend-load the i8 into a full integer register, then truncate to i1.
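// In node form, the lowering below is roughly:
//   NewLD  = (extload [BasePtr], i8)   (result in a pointer-sized integer)
//   Result = (i1 (truncate NewLD))
// so the only memory access is a plain byte load.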
7241 7242 SDLoc dl(Op); 7243 LoadSDNode *LD = cast<LoadSDNode>(Op); 7244 7245 SDValue Chain = LD->getChain(); 7246 SDValue BasePtr = LD->getBasePtr(); 7247 MachineMemOperand *MMO = LD->getMemOperand(); 7248 7249 SDValue NewLD = 7250 DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain, 7251 BasePtr, MVT::i8, MMO); 7252 SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD); 7253 7254 SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) }; 7255 return DAG.getMergeValues(Ops, dl); 7256 } 7257 7258 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { 7259 if (Op.getOperand(1).getValueType().isVector()) 7260 return LowerVectorStore(Op, DAG); 7261 7262 assert(Op.getOperand(1).getValueType() == MVT::i1 && 7263 "Custom lowering only for i1 stores"); 7264 7265 // First, zero extend to 32 bits, then use a truncating store to 8 bits. 7266 7267 SDLoc dl(Op); 7268 StoreSDNode *ST = cast<StoreSDNode>(Op); 7269 7270 SDValue Chain = ST->getChain(); 7271 SDValue BasePtr = ST->getBasePtr(); 7272 SDValue Value = ST->getValue(); 7273 MachineMemOperand *MMO = ST->getMemOperand(); 7274 7275 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()), 7276 Value); 7277 return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO); 7278 } 7279 7280 // FIXME: Remove this once the ANDI glue bug is fixed: 7281 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { 7282 assert(Op.getValueType() == MVT::i1 && 7283 "Custom lowering only for i1 results"); 7284 7285 SDLoc DL(Op); 7286 return DAG.getNode(PPCISD::ANDI_rec_1_GT_BIT, DL, MVT::i1, Op.getOperand(0)); 7287 } 7288 7289 SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op, 7290 SelectionDAG &DAG) const { 7291 7292 // Implements a vector truncate that fits in a vector register as a shuffle. 7293 // We want to legalize vector truncates down to where the source fits in 7294 // a vector register (and target is therefore smaller than vector register 7295 // size). At that point legalization will try to custom lower the sub-legal 7296 // result and get here - where we can contain the truncate as a single target 7297 // operation. 
7298
7299 // For example a trunc <2 x i16> to <2 x i8> could be visualized as follows:
7300 // <MSB1|LSB1, MSB2|LSB2> to <LSB1, LSB2>
7301 //
7302 // We will implement it for big-endian ordering as this (where u denotes
7303 // undefined):
7304 // < MSB1|LSB1, MSB2|LSB2, uu, uu, uu, uu, uu, uu> to
7305 // < LSB1, LSB2, u, u, u, u, u, u, u, u, u, u, u, u, u, u>
7306 //
7307 // The same operation in little-endian ordering will be:
7308 // <uu, uu, uu, uu, uu, uu, LSB2|MSB2, LSB1|MSB1> to
7309 // <u, u, u, u, u, u, u, u, u, u, u, u, u, u, LSB2, LSB1>
7310
7311 EVT TrgVT = Op.getValueType();
7312 assert(TrgVT.isVector() && "Vector type expected.");
7313 unsigned TrgNumElts = TrgVT.getVectorNumElements();
7314 EVT EltVT = TrgVT.getVectorElementType();
7315 if (!isOperationCustom(Op.getOpcode(), TrgVT) ||
7316 TrgVT.getSizeInBits() > 128 || !isPowerOf2_32(TrgNumElts) ||
7317 !isPowerOf2_32(EltVT.getSizeInBits()))
7318 return SDValue();
7319
7320 SDValue N1 = Op.getOperand(0);
7321 EVT SrcVT = N1.getValueType();
7322 unsigned SrcSize = SrcVT.getSizeInBits();
7323 if (SrcSize > 256 ||
7324 !isPowerOf2_32(SrcVT.getVectorNumElements()) ||
7325 !isPowerOf2_32(SrcVT.getVectorElementType().getSizeInBits()))
7326 return SDValue();
7327 if (SrcSize == 256 && SrcVT.getVectorNumElements() < 2)
7328 return SDValue();
7329
7330 unsigned WideNumElts = 128 / EltVT.getSizeInBits();
7331 EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
7332
7333 SDLoc DL(Op);
7334 SDValue Op1, Op2;
7335 if (SrcSize == 256) {
7336 EVT VecIdxTy = getVectorIdxTy(DAG.getDataLayout());
7337 EVT SplitVT =
7338 N1.getValueType().getHalfNumVectorElementsVT(*DAG.getContext());
7339 unsigned SplitNumElts = SplitVT.getVectorNumElements();
7340 Op1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, N1,
7341 DAG.getConstant(0, DL, VecIdxTy));
7342 Op2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, N1,
7343 DAG.getConstant(SplitNumElts, DL, VecIdxTy));
7344 } else {
7346 Op1 = SrcSize == 128 ? N1 : widenVec(DAG, N1, DL);
7347 Op2 = DAG.getUNDEF(WideVT);
7348 }
7349
7350 // First list the elements we want to keep.
7351 unsigned SizeMult = SrcSize / TrgVT.getSizeInBits();
7352 SmallVector<int, 16> ShuffV;
7353 if (Subtarget.isLittleEndian())
7354 for (unsigned i = 0; i < TrgNumElts; ++i)
7355 ShuffV.push_back(i * SizeMult);
7356 else
7357 for (unsigned i = 1; i <= TrgNumElts; ++i)
7358 ShuffV.push_back(i * SizeMult - 1);
7359
7360 // Populate the remaining elements with undefs.
7361 for (unsigned i = TrgNumElts; i < WideNumElts; ++i)
7362 ShuffV.push_back(i + WideNumElts);
7364
7365 Op1 = DAG.getNode(ISD::BITCAST, DL, WideVT, Op1);
7366 Op2 = DAG.getNode(ISD::BITCAST, DL, WideVT, Op2);
7367 return DAG.getVectorShuffle(WideVT, DL, Op1, Op2, ShuffV);
7368 }
7369
7370 /// LowerSELECT_CC - Lower floating point select_cc's into an fsel instruction
7371 /// when possible.
7372 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
7373 // Not FP, or using SPE? Not an fsel.
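// For reference: fsel FRT, FRA, FRC, FRB computes
//   FRT = (FRA >= 0.0) ? FRC : FRB
// (a NaN in FRA selects FRB), so each comparison below is recast as a sign
// test on LHS, on LHS - RHS, or on RHS - LHS. This is also why the transform
// is only safe when NaNs and infinities can be disregarded.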
if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
7375 !Op.getOperand(2).getValueType().isFloatingPoint() || Subtarget.hasSPE())
7376 return Op;
7377
7378 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
7379
7380 EVT ResVT = Op.getValueType();
7381 EVT CmpVT = Op.getOperand(0).getValueType();
7382 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
7383 SDValue TV = Op.getOperand(2), FV = Op.getOperand(3);
7384 SDLoc dl(Op);
7385 SDNodeFlags Flags = Op.getNode()->getFlags();
7386
7387 // With ISA 3.0 we have xsmaxcdp/xsmincdp, which are OK to emit even in the
7388 // presence of infinities.
7389 if (Subtarget.hasP9Vector() && LHS == TV && RHS == FV) {
7390 switch (CC) {
7391 default:
7392 break;
7393 case ISD::SETOGT:
7394 case ISD::SETGT:
7395 return DAG.getNode(PPCISD::XSMAXCDP, dl, Op.getValueType(), LHS, RHS);
7396 case ISD::SETOLT:
7397 case ISD::SETLT:
7398 return DAG.getNode(PPCISD::XSMINCDP, dl, Op.getValueType(), LHS, RHS);
7399 }
7400 }
7401
7402 // We might be able to do better than this under some circumstances, but in
7403 // general, fsel-based lowering of select is a finite-math-only optimization.
7404 // For more information, see section F.3 of the 2.06 ISA specification.
7406 if ((!DAG.getTarget().Options.NoInfsFPMath && !Flags.hasNoInfs()) ||
7407 (!DAG.getTarget().Options.NoNaNsFPMath && !Flags.hasNoNaNs()))
7408 return Op;
7409
7410 // If the RHS of the comparison is a 0.0, we don't need to do the
7411 // subtraction at all.
7412 SDValue Sel1;
7413 if (isFloatingPointZero(RHS))
7414 switch (CC) {
7415 default: break; // SETUO etc aren't handled by fsel.
7416 case ISD::SETNE:
7417 std::swap(TV, FV);
7418 LLVM_FALLTHROUGH;
7419 case ISD::SETEQ:
7420 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
7421 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7422 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
7423 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits
7424 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
7425 return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7426 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
7427 case ISD::SETULT:
7428 case ISD::SETLT:
7429 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
7430 LLVM_FALLTHROUGH;
7431 case ISD::SETOGE:
7432 case ISD::SETGE:
7433 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
7434 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7435 return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
7436 case ISD::SETUGT:
7437 case ISD::SETGT:
7438 std::swap(TV, FV); // fsel is natively setge, swap operands for setgt
7439 LLVM_FALLTHROUGH;
7440 case ISD::SETOLE:
7441 case ISD::SETLE:
7442 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
7443 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7444 return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7445 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
7446 }
7447
7448 SDValue Cmp;
7449 switch (CC) {
7450 default: break; // SETUO etc aren't handled by fsel.
7451 case ISD::SETNE: 7452 std::swap(TV, FV); 7453 LLVM_FALLTHROUGH; 7454 case ISD::SETEQ: 7455 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags); 7456 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 7457 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 7458 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 7459 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 7460 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 7461 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 7462 DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV); 7463 case ISD::SETULT: 7464 case ISD::SETLT: 7465 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags); 7466 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 7467 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 7468 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 7469 case ISD::SETOGE: 7470 case ISD::SETGE: 7471 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags); 7472 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 7473 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 7474 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 7475 case ISD::SETUGT: 7476 case ISD::SETGT: 7477 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags); 7478 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 7479 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 7480 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 7481 case ISD::SETOLE: 7482 case ISD::SETLE: 7483 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags); 7484 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 7485 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 7486 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 7487 } 7488 return Op; 7489 } 7490 7491 static unsigned getPPCStrictOpcode(unsigned Opc) { 7492 switch (Opc) { 7493 default: 7494 llvm_unreachable("No strict version of this opcode!"); 7495 case PPCISD::FCTIDZ: 7496 return PPCISD::STRICT_FCTIDZ; 7497 case PPCISD::FCTIWZ: 7498 return PPCISD::STRICT_FCTIWZ; 7499 case PPCISD::FCTIDUZ: 7500 return PPCISD::STRICT_FCTIDUZ; 7501 case PPCISD::FCTIWUZ: 7502 return PPCISD::STRICT_FCTIWUZ; 7503 case PPCISD::FCFID: 7504 return PPCISD::STRICT_FCFID; 7505 case PPCISD::FCFIDU: 7506 return PPCISD::STRICT_FCFIDU; 7507 case PPCISD::FCFIDS: 7508 return PPCISD::STRICT_FCFIDS; 7509 case PPCISD::FCFIDUS: 7510 return PPCISD::STRICT_FCFIDUS; 7511 } 7512 } 7513 7514 static SDValue convertFPToInt(SDValue Op, SelectionDAG &DAG, 7515 const PPCSubtarget &Subtarget) { 7516 SDLoc dl(Op); 7517 bool IsStrict = Op->isStrictFPOpcode(); 7518 bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT || 7519 Op.getOpcode() == ISD::STRICT_FP_TO_SINT; 7520 7521 // TODO: Any other flags to propagate? 7522 SDNodeFlags Flags; 7523 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept()); 7524 7525 // For strict nodes, source is the second operand. 7526 SDValue Src = Op.getOperand(IsStrict ? 1 : 0); 7527 SDValue Chain = IsStrict ? 
Op.getOperand(0) : SDValue(); 7528 assert(Src.getValueType().isFloatingPoint()); 7529 if (Src.getValueType() == MVT::f32) { 7530 if (IsStrict) { 7531 Src = 7532 DAG.getNode(ISD::STRICT_FP_EXTEND, dl, 7533 DAG.getVTList(MVT::f64, MVT::Other), {Chain, Src}, Flags); 7534 Chain = Src.getValue(1); 7535 } else 7536 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 7537 } 7538 SDValue Conv; 7539 unsigned Opc = ISD::DELETED_NODE; 7540 switch (Op.getSimpleValueType().SimpleTy) { 7541 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 7542 case MVT::i32: 7543 Opc = IsSigned ? PPCISD::FCTIWZ 7544 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ); 7545 break; 7546 case MVT::i64: 7547 assert((IsSigned || Subtarget.hasFPCVT()) && 7548 "i64 FP_TO_UINT is supported only with FPCVT"); 7549 Opc = IsSigned ? PPCISD::FCTIDZ : PPCISD::FCTIDUZ; 7550 } 7551 if (IsStrict) { 7552 Opc = getPPCStrictOpcode(Opc); 7553 Conv = DAG.getNode(Opc, dl, DAG.getVTList(MVT::f64, MVT::Other), 7554 {Chain, Src}, Flags); 7555 } else { 7556 Conv = DAG.getNode(Opc, dl, MVT::f64, Src); 7557 } 7558 return Conv; 7559 } 7560 7561 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI, 7562 SelectionDAG &DAG, 7563 const SDLoc &dl) const { 7564 SDValue Tmp = convertFPToInt(Op, DAG, Subtarget); 7565 bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT || 7566 Op.getOpcode() == ISD::STRICT_FP_TO_SINT; 7567 bool IsStrict = Op->isStrictFPOpcode(); 7568 7569 // Convert the FP value to an int value through memory. 7570 bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() && 7571 (IsSigned || Subtarget.hasFPCVT()); 7572 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64); 7573 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex(); 7574 MachinePointerInfo MPI = 7575 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI); 7576 7577 // Emit a store to the stack slot. 7578 SDValue Chain = IsStrict ? Tmp.getValue(1) : DAG.getEntryNode(); 7579 Align Alignment(DAG.getEVTAlign(Tmp.getValueType())); 7580 if (i32Stack) { 7581 MachineFunction &MF = DAG.getMachineFunction(); 7582 Alignment = Align(4); 7583 MachineMemOperand *MMO = 7584 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Alignment); 7585 SDValue Ops[] = { Chain, Tmp, FIPtr }; 7586 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 7587 DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO); 7588 } else 7589 Chain = DAG.getStore(Chain, dl, Tmp, FIPtr, MPI, Alignment); 7590 7591 // Result is a load from the stack slot. If loading 4 bytes, make sure to 7592 // add in a bias on big endian. 7593 if (Op.getValueType() == MVT::i32 && !i32Stack) { 7594 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 7595 DAG.getConstant(4, dl, FIPtr.getValueType())); 7596 MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4); 7597 } 7598 7599 RLI.Chain = Chain; 7600 RLI.Ptr = FIPtr; 7601 RLI.MPI = MPI; 7602 RLI.Alignment = Alignment; 7603 } 7604 7605 /// Custom lowers floating point to integer conversions to use 7606 /// the direct move instructions available in ISA 2.07 to avoid the 7607 /// need for load/store combinations. 
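/// For example, on a subtarget with direct moves an (i64 (fp_to_sint f64:$S))
/// can be emitted as an fctidz followed by a VSR-to-GPR move (the
/// PPCISD::MFVSR below), instead of an fctidz, a store, and an integer reload.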
7608 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op, 7609 SelectionDAG &DAG, 7610 const SDLoc &dl) const { 7611 SDValue Conv = convertFPToInt(Op, DAG, Subtarget); 7612 SDValue Mov = DAG.getNode(PPCISD::MFVSR, dl, Op.getValueType(), Conv); 7613 if (Op->isStrictFPOpcode()) 7614 return DAG.getMergeValues({Mov, Conv.getValue(1)}, dl); 7615 else 7616 return Mov; 7617 } 7618 7619 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 7620 const SDLoc &dl) const { 7621 bool IsStrict = Op->isStrictFPOpcode(); 7622 bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT || 7623 Op.getOpcode() == ISD::STRICT_FP_TO_SINT; 7624 SDValue Src = Op.getOperand(IsStrict ? 1 : 0); 7625 EVT SrcVT = Src.getValueType(); 7626 EVT DstVT = Op.getValueType(); 7627 7628 // FP to INT conversions are legal for f128. 7629 if (SrcVT == MVT::f128) 7630 return Subtarget.hasP9Vector() ? Op : SDValue(); 7631 7632 // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on 7633 // PPC (the libcall is not available). 7634 if (SrcVT == MVT::ppcf128) { 7635 if (DstVT == MVT::i32) { 7636 // TODO: Conservatively pass only nofpexcept flag here. Need to check and 7637 // set other fast-math flags to FP operations in both strict and 7638 // non-strict cases. (FP_TO_SINT, FSUB) 7639 SDNodeFlags Flags; 7640 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept()); 7641 7642 if (IsSigned) { 7643 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Src, 7644 DAG.getIntPtrConstant(0, dl)); 7645 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Src, 7646 DAG.getIntPtrConstant(1, dl)); 7647 7648 // Add the two halves of the long double in round-to-zero mode, and use 7649 // a smaller FP_TO_SINT. 7650 if (IsStrict) { 7651 SDValue Res = DAG.getNode(PPCISD::STRICT_FADDRTZ, dl, 7652 DAG.getVTList(MVT::f64, MVT::Other), 7653 {Op.getOperand(0), Lo, Hi}, Flags); 7654 return DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, 7655 DAG.getVTList(MVT::i32, MVT::Other), 7656 {Res.getValue(1), Res}, Flags); 7657 } else { 7658 SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi); 7659 return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res); 7660 } 7661 } else { 7662 const uint64_t TwoE31[] = {0x41e0000000000000LL, 0}; 7663 APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31)); 7664 SDValue Cst = DAG.getConstantFP(APF, dl, SrcVT); 7665 SDValue SignMask = DAG.getConstant(0x80000000, dl, DstVT); 7666 if (IsStrict) { 7667 // Sel = Src < 0x80000000 7668 // FltOfs = select Sel, 0.0, 0x80000000 7669 // IntOfs = select Sel, 0, 0x80000000 7670 // Result = fp_to_sint(Src - FltOfs) ^ IntOfs 7671 SDValue Chain = Op.getOperand(0); 7672 EVT SetCCVT = 7673 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), SrcVT); 7674 EVT DstSetCCVT = 7675 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), DstVT); 7676 SDValue Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT, 7677 Chain, true); 7678 Chain = Sel.getValue(1); 7679 7680 SDValue FltOfs = DAG.getSelect( 7681 dl, SrcVT, Sel, DAG.getConstantFP(0.0, dl, SrcVT), Cst); 7682 Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT); 7683 7684 SDValue Val = DAG.getNode(ISD::STRICT_FSUB, dl, 7685 DAG.getVTList(SrcVT, MVT::Other), 7686 {Chain, Src, FltOfs}, Flags); 7687 Chain = Val.getValue(1); 7688 SDValue SInt = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, 7689 DAG.getVTList(DstVT, MVT::Other), 7690 {Chain, Val}, Flags); 7691 Chain = SInt.getValue(1); 7692 SDValue IntOfs = DAG.getSelect( 7693 dl, DstVT, Sel, DAG.getConstant(0, dl, DstVT), 
SignMask);
7694 SDValue Result = DAG.getNode(ISD::XOR, dl, DstVT, SInt, IntOfs);
7695 return DAG.getMergeValues({Result, Chain}, dl);
7696 } else {
7697 // X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X
7698 // FIXME: generated code sucks.
7699 SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128, Src, Cst);
7700 True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True);
7701 True = DAG.getNode(ISD::ADD, dl, MVT::i32, True, SignMask);
7702 SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
7703 return DAG.getSelectCC(dl, Src, Cst, True, False, ISD::SETGE);
7704 }
7705 }
7706 }
7707
7708 return SDValue();
7709 }
7710
7711 if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
7712 return LowerFP_TO_INTDirectMove(Op, DAG, dl);
7713
7714 ReuseLoadInfo RLI;
7715 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
7716
7717 return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI,
7718 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
7719 }
7720
7721 // We're trying to insert a regular store, S, and then a load, L. If the
7722 // incoming value, O, is a load, we might just be able to have our load use the
7723 // address used by O. However, we don't know if anything else will store to
7724 // that address before we can load from it. To prevent this situation, we need
7725 // to insert our load, L, into the chain as a peer of O. To do this, we give L
7726 // the same chain operand as O, we create a token factor from the chain results
7727 // of O and L, and we replace all uses of O's chain result with that token
7728 // factor (see spliceIntoChain below for this last part).
7729 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
7730 ReuseLoadInfo &RLI,
7731 SelectionDAG &DAG,
7732 ISD::LoadExtType ET) const {
7733 // Conservatively skip reusing for constrained FP nodes.
7734 if (Op->isStrictFPOpcode())
7735 return false;
7736
7737 SDLoc dl(Op);
7738 bool ValidFPToUint = Op.getOpcode() == ISD::FP_TO_UINT &&
7739 (Subtarget.hasFPCVT() || Op.getValueType() == MVT::i32);
7740 if (ET == ISD::NON_EXTLOAD &&
7741 (ValidFPToUint || Op.getOpcode() == ISD::FP_TO_SINT) &&
7742 isOperationLegalOrCustom(Op.getOpcode(),
7743 Op.getOperand(0).getValueType())) {
7744
7745 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
7746 return true;
7747 }
7748
7749 LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
7750 if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
7751 LD->isNonTemporal())
7752 return false;
7753 if (LD->getMemoryVT() != MemVT)
7754 return false;
7755
7756 // If the result of the load is an illegal type, then we can't build a
7757 // valid chain for reuse since the legalised loads and the token factor node
7758 // that ties the legalised loads together use a different output chain than
7759 // the illegal load.
7760 if (!isTypeLegal(LD->getValueType(0)))
7761 return false;
7762
7763 RLI.Ptr = LD->getBasePtr();
7764 if (LD->isIndexed() && !LD->getOffset().isUndef()) {
7765 assert(LD->getAddressingMode() == ISD::PRE_INC &&
7766 "Non-pre-inc AM on PPC?");
7767 RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
7768 LD->getOffset());
7769 }
7770
7771 RLI.Chain = LD->getChain();
7772 RLI.MPI = LD->getPointerInfo();
7773 RLI.IsDereferenceable = LD->isDereferenceable();
7774 RLI.IsInvariant = LD->isInvariant();
7775 RLI.Alignment = LD->getAlign();
7776 RLI.AAInfo = LD->getAAInfo();
7777 RLI.Ranges = LD->getRanges();
7778
7779 RLI.ResChain = SDValue(LD, LD->isIndexed() ?
2 : 1);
7780 return true;
7781 }
7782
7783 // Given the head of the old chain, ResChain, insert a token factor containing
7784 // it and NewResChain, and make users of ResChain now be users of that token
7785 // factor.
7786 // TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead.
7787 void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
7788 SDValue NewResChain,
7789 SelectionDAG &DAG) const {
7790 if (!ResChain)
7791 return;
7792
7793 SDLoc dl(NewResChain);
7794
7795 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
7796 NewResChain, DAG.getUNDEF(MVT::Other));
7797 assert(TF.getNode() != NewResChain.getNode() &&
7798 "A new TF really is required here");
7799
7800 DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
7801 DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
7802 }
7803
7804 /// Analyze the profitability of a direct move: prefer a plain float load over
7805 /// an integer load plus direct move when the loaded integer value has no
7806 /// integer uses.
7807 bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
7808 SDNode *Origin = Op.getOperand(0).getNode();
7809 if (Origin->getOpcode() != ISD::LOAD)
7810 return true;
7811
7812 // On subtargets without LXSIBZX/LXSIHZX (e.g. Power8), prefer the direct
7813 // move if the memory access is only 1 or 2 bytes.
7814 MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand();
7815 if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2)
7816 return true;
7817
7818 for (SDNode::use_iterator UI = Origin->use_begin(),
7819 UE = Origin->use_end();
7820 UI != UE; ++UI) {
7821
7822 // Only look at the users of the loaded value.
7823 if (UI.getUse().get().getResNo() != 0)
7824 continue;
7825
7826 if (UI->getOpcode() != ISD::SINT_TO_FP &&
7827 UI->getOpcode() != ISD::UINT_TO_FP &&
7828 UI->getOpcode() != ISD::STRICT_SINT_TO_FP &&
7829 UI->getOpcode() != ISD::STRICT_UINT_TO_FP)
7830 return true;
7831 }
7832
7833 return false;
7834 }
7835
7836 static SDValue convertIntToFP(SDValue Op, SDValue Src, SelectionDAG &DAG,
7837 const PPCSubtarget &Subtarget,
7838 SDValue Chain = SDValue()) {
7839 bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP ||
7840 Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
7841 SDLoc dl(Op);
7842
7843 // TODO: Any other flags to propagate?
7844 SDNodeFlags Flags;
7845 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
7846
7847 // If we have FCFIDS, then use it when converting to single-precision.
7848 // Otherwise, convert to double-precision and then round.
7849 bool IsSingle = Op.getValueType() == MVT::f32 && Subtarget.hasFPCVT();
7850 unsigned ConvOpc = IsSingle ? (IsSigned ? PPCISD::FCFIDS : PPCISD::FCFIDUS)
7851 : (IsSigned ? PPCISD::FCFID : PPCISD::FCFIDU);
7852 EVT ConvTy = IsSingle ? MVT::f32 : MVT::f64;
7853 if (Op->isStrictFPOpcode()) {
7854 if (!Chain)
7855 Chain = Op.getOperand(0);
7856 return DAG.getNode(getPPCStrictOpcode(ConvOpc), dl,
7857 DAG.getVTList(ConvTy, MVT::Other), {Chain, Src}, Flags);
7858 } else
7859 return DAG.getNode(ConvOpc, dl, ConvTy, Src);
7860 }
7861
7862 /// Custom lowers integer to floating point conversions to use
7863 /// the direct move instructions available in ISA 2.07 to avoid the
7864 /// need for load/store combinations.
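/// For example, with direct moves an (f64 (sint_to_fp i64:$S)) can be emitted
/// as a GPR-to-VSR move (the PPCISD::MTVSRA below) feeding an fcfid, rather
/// than a store and an f64 reload feeding the fcfid.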
7865 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op, 7866 SelectionDAG &DAG, 7867 const SDLoc &dl) const { 7868 assert((Op.getValueType() == MVT::f32 || 7869 Op.getValueType() == MVT::f64) && 7870 "Invalid floating point type as target of conversion"); 7871 assert(Subtarget.hasFPCVT() && 7872 "Int to FP conversions with direct moves require FPCVT"); 7873 SDValue Src = Op.getOperand(Op->isStrictFPOpcode() ? 1 : 0); 7874 bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32; 7875 bool Signed = Op.getOpcode() == ISD::SINT_TO_FP || 7876 Op.getOpcode() == ISD::STRICT_SINT_TO_FP; 7877 unsigned MovOpc = (WordInt && !Signed) ? PPCISD::MTVSRZ : PPCISD::MTVSRA; 7878 SDValue Mov = DAG.getNode(MovOpc, dl, MVT::f64, Src); 7879 return convertIntToFP(Op, Mov, DAG, Subtarget); 7880 } 7881 7882 static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl) { 7883 7884 EVT VecVT = Vec.getValueType(); 7885 assert(VecVT.isVector() && "Expected a vector type."); 7886 assert(VecVT.getSizeInBits() < 128 && "Vector is already full width."); 7887 7888 EVT EltVT = VecVT.getVectorElementType(); 7889 unsigned WideNumElts = 128 / EltVT.getSizeInBits(); 7890 EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts); 7891 7892 unsigned NumConcat = WideNumElts / VecVT.getVectorNumElements(); 7893 SmallVector<SDValue, 16> Ops(NumConcat); 7894 Ops[0] = Vec; 7895 SDValue UndefVec = DAG.getUNDEF(VecVT); 7896 for (unsigned i = 1; i < NumConcat; ++i) 7897 Ops[i] = UndefVec; 7898 7899 return DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Ops); 7900 } 7901 7902 SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG, 7903 const SDLoc &dl) const { 7904 bool IsStrict = Op->isStrictFPOpcode(); 7905 unsigned Opc = Op.getOpcode(); 7906 SDValue Src = Op.getOperand(IsStrict ? 1 : 0); 7907 assert((Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP || 7908 Opc == ISD::STRICT_UINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP) && 7909 "Unexpected conversion type"); 7910 assert((Op.getValueType() == MVT::v2f64 || Op.getValueType() == MVT::v4f32) && 7911 "Supports conversions to v2f64/v4f32 only."); 7912 7913 // TODO: Any other flags to propagate? 7914 SDNodeFlags Flags; 7915 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept()); 7916 7917 bool SignedConv = Opc == ISD::SINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP; 7918 bool FourEltRes = Op.getValueType() == MVT::v4f32; 7919 7920 SDValue Wide = widenVec(DAG, Src, dl); 7921 EVT WideVT = Wide.getValueType(); 7922 unsigned WideNumElts = WideVT.getVectorNumElements(); 7923 MVT IntermediateVT = FourEltRes ? MVT::v4i32 : MVT::v2i64; 7924 7925 SmallVector<int, 16> ShuffV; 7926 for (unsigned i = 0; i < WideNumElts; ++i) 7927 ShuffV.push_back(i + WideNumElts); 7928 7929 int Stride = FourEltRes ? WideNumElts / 4 : WideNumElts / 2; 7930 int SaveElts = FourEltRes ? 4 : 2; 7931 if (Subtarget.isLittleEndian()) 7932 for (int i = 0; i < SaveElts; i++) 7933 ShuffV[i * Stride] = i; 7934 else 7935 for (int i = 1; i <= SaveElts; i++) 7936 ShuffV[i * Stride - 1] = i - 1; 7937 7938 SDValue ShuffleSrc2 = 7939 SignedConv ? 
DAG.getUNDEF(WideVT) : DAG.getConstant(0, dl, WideVT);
7940 SDValue Arrange = DAG.getVectorShuffle(WideVT, dl, Wide, ShuffleSrc2, ShuffV);
7941
7942 SDValue Extend;
7943 if (SignedConv) {
7944 Arrange = DAG.getBitcast(IntermediateVT, Arrange);
7945 EVT ExtVT = Src.getValueType();
7946 if (Subtarget.hasP9Altivec())
7947 ExtVT = EVT::getVectorVT(*DAG.getContext(), WideVT.getVectorElementType(),
7948 IntermediateVT.getVectorNumElements());
7949
7950 Extend = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, IntermediateVT, Arrange,
7951 DAG.getValueType(ExtVT));
7952 } else
7953 Extend = DAG.getNode(ISD::BITCAST, dl, IntermediateVT, Arrange);
7954
7955 if (IsStrict)
7956 return DAG.getNode(Opc, dl, DAG.getVTList(Op.getValueType(), MVT::Other),
7957 {Op.getOperand(0), Extend}, Flags);
7958
7959 return DAG.getNode(Opc, dl, Op.getValueType(), Extend);
7960 }
7961
7962 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
7963 SelectionDAG &DAG) const {
7964 SDLoc dl(Op);
7965 bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP ||
7966 Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
7967 bool IsStrict = Op->isStrictFPOpcode();
7968 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
7969 SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
7970
7971 // TODO: Any other flags to propagate?
7972 SDNodeFlags Flags;
7973 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
7974
7975 EVT InVT = Src.getValueType();
7976 EVT OutVT = Op.getValueType();
7977 if (OutVT.isVector() && OutVT.isFloatingPoint() &&
7978 isOperationCustom(Op.getOpcode(), InVT))
7979 return LowerINT_TO_FPVector(Op, DAG, dl);
7980
7981 // Conversions to f128 are legal.
7982 if (Op.getValueType() == MVT::f128)
7983 return Subtarget.hasP9Vector() ? Op : SDValue();
7984
7985 // Don't handle ppc_fp128 here; let it be lowered to a libcall.
7986 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
7987 return SDValue();
7988
7989 if (Src.getValueType() == MVT::i1) {
7990 SDValue Sel = DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Src,
7991 DAG.getConstantFP(1.0, dl, Op.getValueType()),
7992 DAG.getConstantFP(0.0, dl, Op.getValueType()));
7993 if (IsStrict)
7994 return DAG.getMergeValues({Sel, Chain}, dl);
7995 else
7996 return Sel;
7997 }
7998
7999 // If we have direct moves, we can do all of the conversion and skip the
8000 // store/load; however, without FPCVT we can't do most conversions.
8001 if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
8002 Subtarget.isPPC64() && Subtarget.hasFPCVT())
8003 return LowerINT_TO_FPDirectMove(Op, DAG, dl);
8004
8005 assert((IsSigned || Subtarget.hasFPCVT()) &&
8006 "UINT_TO_FP is supported only with FPCVT");
8007
8008 if (Src.getValueType() == MVT::i64) {
8009 SDValue SINT = Src;
8010 // When converting to single-precision, we actually need to convert
8011 // to double-precision first and then round to single-precision.
8012 // To avoid double-rounding effects during that operation, we have
8013 // to prepare the input operand. Bits that might be truncated when
8014 // converting to double-precision are replaced by a bit that won't
8015 // be lost at this stage, but is below the single-precision rounding
8016 // position.
8017 //
8018 // However, if -enable-unsafe-fp-math is in effect, accept double
8019 // rounding to avoid the extra overhead.
8020 if (Op.getValueType() == MVT::f32 &&
8021 !Subtarget.hasFPCVT() &&
8022 !DAG.getTarget().Options.UnsafeFPMath) {
8023
8024 // Twiddle input to make sure the low 11 bits are zero.
(If this 8025 // is the case, we are guaranteed the value will fit into the 53 bit 8026 // mantissa of an IEEE double-precision value without rounding.) 8027 // If any of those low 11 bits were not zero originally, make sure 8028 // bit 12 (value 2048) is set instead, so that the final rounding 8029 // to single-precision gets the correct result. 8030 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64, 8031 SINT, DAG.getConstant(2047, dl, MVT::i64)); 8032 Round = DAG.getNode(ISD::ADD, dl, MVT::i64, 8033 Round, DAG.getConstant(2047, dl, MVT::i64)); 8034 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT); 8035 Round = DAG.getNode(ISD::AND, dl, MVT::i64, 8036 Round, DAG.getConstant(-2048, dl, MVT::i64)); 8037 8038 // However, we cannot use that value unconditionally: if the magnitude 8039 // of the input value is small, the bit-twiddling we did above might 8040 // end up visibly changing the output. Fortunately, in that case, we 8041 // don't need to twiddle bits since the original input will convert 8042 // exactly to double-precision floating-point already. Therefore, 8043 // construct a conditional to use the original value if the top 11 8044 // bits are all sign-bit copies, and use the rounded value computed 8045 // above otherwise. 8046 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64, 8047 SINT, DAG.getConstant(53, dl, MVT::i32)); 8048 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64, 8049 Cond, DAG.getConstant(1, dl, MVT::i64)); 8050 Cond = DAG.getSetCC( 8051 dl, 8052 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64), 8053 Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT); 8054 8055 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT); 8056 } 8057 8058 ReuseLoadInfo RLI; 8059 SDValue Bits; 8060 8061 MachineFunction &MF = DAG.getMachineFunction(); 8062 if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) { 8063 Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI, 8064 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges); 8065 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 8066 } else if (Subtarget.hasLFIWAX() && 8067 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) { 8068 MachineMemOperand *MMO = 8069 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 8070 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 8071 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 8072 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl, 8073 DAG.getVTList(MVT::f64, MVT::Other), 8074 Ops, MVT::i32, MMO); 8075 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 8076 } else if (Subtarget.hasFPCVT() && 8077 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) { 8078 MachineMemOperand *MMO = 8079 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 8080 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 8081 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 8082 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl, 8083 DAG.getVTList(MVT::f64, MVT::Other), 8084 Ops, MVT::i32, MMO); 8085 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 8086 } else if (((Subtarget.hasLFIWAX() && 8087 SINT.getOpcode() == ISD::SIGN_EXTEND) || 8088 (Subtarget.hasFPCVT() && 8089 SINT.getOpcode() == ISD::ZERO_EXTEND)) && 8090 SINT.getOperand(0).getValueType() == MVT::i32) { 8091 MachineFrameInfo &MFI = MF.getFrameInfo(); 8092 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 8093 8094 int FrameIdx = MFI.CreateStackObject(4, Align(4), false); 8095 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 8096 8097 SDValue Store = DAG.getStore(Chain, dl, SINT.getOperand(0), FIdx, 8098 
MachinePointerInfo::getFixedStack( 8099 DAG.getMachineFunction(), FrameIdx)); 8100 Chain = Store; 8101 8102 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 8103 "Expected an i32 store"); 8104 8105 RLI.Ptr = FIdx; 8106 RLI.Chain = Chain; 8107 RLI.MPI = 8108 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 8109 RLI.Alignment = Align(4); 8110 8111 MachineMemOperand *MMO = 8112 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 8113 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 8114 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 8115 Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ? 8116 PPCISD::LFIWZX : PPCISD::LFIWAX, 8117 dl, DAG.getVTList(MVT::f64, MVT::Other), 8118 Ops, MVT::i32, MMO); 8119 Chain = Bits.getValue(1); 8120 } else 8121 Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT); 8122 8123 SDValue FP = convertIntToFP(Op, Bits, DAG, Subtarget, Chain); 8124 if (IsStrict) 8125 Chain = FP.getValue(1); 8126 8127 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) { 8128 if (IsStrict) 8129 FP = DAG.getNode(ISD::STRICT_FP_ROUND, dl, 8130 DAG.getVTList(MVT::f32, MVT::Other), 8131 {Chain, FP, DAG.getIntPtrConstant(0, dl)}, Flags); 8132 else 8133 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP, 8134 DAG.getIntPtrConstant(0, dl)); 8135 } 8136 return FP; 8137 } 8138 8139 assert(Src.getValueType() == MVT::i32 && 8140 "Unhandled INT_TO_FP type in custom expander!"); 8141 // Since we only generate this in 64-bit mode, we can take advantage of 8142 // 64-bit registers. In particular, sign extend the input value into the 8143 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 8144 // then lfd it and fcfid it. 8145 MachineFunction &MF = DAG.getMachineFunction(); 8146 MachineFrameInfo &MFI = MF.getFrameInfo(); 8147 EVT PtrVT = getPointerTy(MF.getDataLayout()); 8148 8149 SDValue Ld; 8150 if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) { 8151 ReuseLoadInfo RLI; 8152 bool ReusingLoad; 8153 if (!(ReusingLoad = canReuseLoadAddress(Src, MVT::i32, RLI, DAG))) { 8154 int FrameIdx = MFI.CreateStackObject(4, Align(4), false); 8155 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 8156 8157 SDValue Store = DAG.getStore(Chain, dl, Src, FIdx, 8158 MachinePointerInfo::getFixedStack( 8159 DAG.getMachineFunction(), FrameIdx)); 8160 Chain = Store; 8161 8162 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 8163 "Expected an i32 store"); 8164 8165 RLI.Ptr = FIdx; 8166 RLI.Chain = Chain; 8167 RLI.MPI = 8168 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 8169 RLI.Alignment = Align(4); 8170 } 8171 8172 MachineMemOperand *MMO = 8173 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 8174 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 8175 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 8176 Ld = DAG.getMemIntrinsicNode(IsSigned ? PPCISD::LFIWAX : PPCISD::LFIWZX, dl, 8177 DAG.getVTList(MVT::f64, MVT::Other), Ops, 8178 MVT::i32, MMO); 8179 Chain = Ld.getValue(1); 8180 if (ReusingLoad) 8181 spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG); 8182 } else { 8183 assert(Subtarget.isPPC64() && 8184 "i32->FP without LFIWAX supported only on PPC64"); 8185 8186 int FrameIdx = MFI.CreateStackObject(8, Align(8), false); 8187 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 8188 8189 SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, Src); 8190 8191 // STD the extended value into the stack slot. 
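// (Storing the full doubleword lets the reload below be a plain lfd; this
// branch is only taken when neither LFIWAX nor FPCVT is available.)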
8192 SDValue Store = DAG.getStore(
8193 Chain, dl, Ext64, FIdx,
8194 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8195 Chain = Store;
8196
8197 // Load the value as a double.
8198 Ld = DAG.getLoad(
8199 MVT::f64, dl, Chain, FIdx,
8200 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8201 Chain = Ld.getValue(1);
8202 }
8203
8204 // FCFID it and return it.
8205 SDValue FP = convertIntToFP(Op, Ld, DAG, Subtarget, Chain);
8206 if (IsStrict)
8207 Chain = FP.getValue(1);
8208 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
8209 if (IsStrict)
8210 FP = DAG.getNode(ISD::STRICT_FP_ROUND, dl,
8211 DAG.getVTList(MVT::f32, MVT::Other),
8212 {Chain, FP, DAG.getIntPtrConstant(0, dl)}, Flags);
8213 else
8214 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
8215 DAG.getIntPtrConstant(0, dl));
8216 }
8217 return FP;
8218 }
8219
8220 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
8221 SelectionDAG &DAG) const {
8222 SDLoc dl(Op);
8223 /*
8224 The rounding mode is in bits 30:31 of FPSCR, and has the following
8225 settings:
8226 00 Round to nearest
8227 01 Round to 0
8228 10 Round to +inf
8229 11 Round to -inf
8230
8231 FLT_ROUNDS, on the other hand, expects the following:
8232 -1 Undefined
8233 0 Round to 0
8234 1 Round to nearest
8235 2 Round to +inf
8236 3 Round to -inf
8237
8238 To perform the conversion, we do:
8239 ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
For example, 0b00 gives 0 ^ (3 >> 1) = 1 (nearest) and 0b10 gives
2 ^ (1 >> 1) = 2 (+inf), matching the tables above.
8240 */
8241
8242 MachineFunction &MF = DAG.getMachineFunction();
8243 EVT VT = Op.getValueType();
8244 EVT PtrVT = getPointerTy(MF.getDataLayout());
8245
8246 // Save FP Control Word to register
8247 SDValue Chain = Op.getOperand(0);
8248 SDValue MFFS = DAG.getNode(PPCISD::MFFS, dl, {MVT::f64, MVT::Other}, Chain);
8249 Chain = MFFS.getValue(1);
8250
8251 SDValue CWD;
8252 if (isTypeLegal(MVT::i64)) {
8253 CWD = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
8254 DAG.getNode(ISD::BITCAST, dl, MVT::i64, MFFS));
8255 } else {
8256 // Save FP register to stack slot
8257 int SSFI = MF.getFrameInfo().CreateStackObject(8, Align(8), false);
8258 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
8259 Chain = DAG.getStore(Chain, dl, MFFS, StackSlot, MachinePointerInfo());
8260
8261 // Load FP Control Word from low 32 bits of stack slot.
8262 assert(hasBigEndianPartOrdering(MVT::i64, MF.getDataLayout()) &&
8263 "Stack slot adjustment is valid only on big endian subtargets!");
8264 SDValue Four = DAG.getConstant(4, dl, PtrVT);
8265 SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
8266 CWD = DAG.getLoad(MVT::i32, dl, Chain, Addr, MachinePointerInfo());
8267 Chain = CWD.getValue(1);
8268 }
8269
8270 // Transform as necessary
8271 SDValue CWD1 =
8272 DAG.getNode(ISD::AND, dl, MVT::i32,
8273 CWD, DAG.getConstant(3, dl, MVT::i32));
8274 SDValue CWD2 =
8275 DAG.getNode(ISD::SRL, dl, MVT::i32,
8276 DAG.getNode(ISD::AND, dl, MVT::i32,
8277 DAG.getNode(ISD::XOR, dl, MVT::i32,
8278 CWD, DAG.getConstant(3, dl, MVT::i32)),
8279 DAG.getConstant(3, dl, MVT::i32)),
8280 DAG.getConstant(1, dl, MVT::i32));
8281
8282 SDValue RetVal =
8283 DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
8284
8285 RetVal =
8286 DAG.getNode((VT.getSizeInBits() < 16 ?
ISD::TRUNCATE : ISD::ZERO_EXTEND), 8287 dl, VT, RetVal); 8288 8289 return DAG.getMergeValues({RetVal, Chain}, dl); 8290 } 8291 8292 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const { 8293 EVT VT = Op.getValueType(); 8294 unsigned BitWidth = VT.getSizeInBits(); 8295 SDLoc dl(Op); 8296 assert(Op.getNumOperands() == 3 && 8297 VT == Op.getOperand(1).getValueType() && 8298 "Unexpected SHL!"); 8299 8300 // Expand into a bunch of logical ops. Note that these ops 8301 // depend on the PPC behavior for oversized shift amounts. 8302 SDValue Lo = Op.getOperand(0); 8303 SDValue Hi = Op.getOperand(1); 8304 SDValue Amt = Op.getOperand(2); 8305 EVT AmtVT = Amt.getValueType(); 8306 8307 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 8308 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 8309 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt); 8310 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1); 8311 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3); 8312 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 8313 DAG.getConstant(-BitWidth, dl, AmtVT)); 8314 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5); 8315 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 8316 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt); 8317 SDValue OutOps[] = { OutLo, OutHi }; 8318 return DAG.getMergeValues(OutOps, dl); 8319 } 8320 8321 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { 8322 EVT VT = Op.getValueType(); 8323 SDLoc dl(Op); 8324 unsigned BitWidth = VT.getSizeInBits(); 8325 assert(Op.getNumOperands() == 3 && 8326 VT == Op.getOperand(1).getValueType() && 8327 "Unexpected SRL!"); 8328 8329 // Expand into a bunch of logical ops. Note that these ops 8330 // depend on the PPC behavior for oversized shift amounts. 8331 SDValue Lo = Op.getOperand(0); 8332 SDValue Hi = Op.getOperand(1); 8333 SDValue Amt = Op.getOperand(2); 8334 EVT AmtVT = Amt.getValueType(); 8335 8336 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 8337 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 8338 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 8339 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 8340 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 8341 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 8342 DAG.getConstant(-BitWidth, dl, AmtVT)); 8343 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5); 8344 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 8345 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt); 8346 SDValue OutOps[] = { OutLo, OutHi }; 8347 return DAG.getMergeValues(OutOps, dl); 8348 } 8349 8350 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { 8351 SDLoc dl(Op); 8352 EVT VT = Op.getValueType(); 8353 unsigned BitWidth = VT.getSizeInBits(); 8354 assert(Op.getNumOperands() == 3 && 8355 VT == Op.getOperand(1).getValueType() && 8356 "Unexpected SRA!"); 8357 8358 // Expand into a bunch of logical ops, followed by a select_cc. 
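// (The final select_cc covers shift amounts of BitWidth or more: once
// Tmp5 = Amt - BitWidth is positive, the low word must come from
// SRA(Hi, Amt - BitWidth) rather than from the OR of the two partial
// shifts.)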
8359 SDValue Lo = Op.getOperand(0); 8360 SDValue Hi = Op.getOperand(1); 8361 SDValue Amt = Op.getOperand(2); 8362 EVT AmtVT = Amt.getValueType(); 8363 8364 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 8365 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 8366 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 8367 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 8368 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 8369 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 8370 DAG.getConstant(-BitWidth, dl, AmtVT)); 8371 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5); 8372 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt); 8373 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT), 8374 Tmp4, Tmp6, ISD::SETLE); 8375 SDValue OutOps[] = { OutLo, OutHi }; 8376 return DAG.getMergeValues(OutOps, dl); 8377 } 8378 8379 SDValue PPCTargetLowering::LowerFunnelShift(SDValue Op, 8380 SelectionDAG &DAG) const { 8381 SDLoc dl(Op); 8382 EVT VT = Op.getValueType(); 8383 unsigned BitWidth = VT.getSizeInBits(); 8384 8385 bool IsFSHL = Op.getOpcode() == ISD::FSHL; 8386 SDValue X = Op.getOperand(0); 8387 SDValue Y = Op.getOperand(1); 8388 SDValue Z = Op.getOperand(2); 8389 EVT AmtVT = Z.getValueType(); 8390 8391 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW))) 8392 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW)) 8393 // This is simpler than TargetLowering::expandFunnelShift because we can rely 8394 // on PowerPC shift by BW being well defined. 8395 Z = DAG.getNode(ISD::AND, dl, AmtVT, Z, 8396 DAG.getConstant(BitWidth - 1, dl, AmtVT)); 8397 SDValue SubZ = 8398 DAG.getNode(ISD::SUB, dl, AmtVT, DAG.getConstant(BitWidth, dl, AmtVT), Z); 8399 X = DAG.getNode(PPCISD::SHL, dl, VT, X, IsFSHL ? Z : SubZ); 8400 Y = DAG.getNode(PPCISD::SRL, dl, VT, Y, IsFSHL ? SubZ : Z); 8401 return DAG.getNode(ISD::OR, dl, VT, X, Y); 8402 } 8403 8404 //===----------------------------------------------------------------------===// 8405 // Vector related lowering. 8406 // 8407 8408 /// getCanonicalConstSplat - Build a canonical splat immediate of Val with an 8409 /// element size of SplatSize. Cast the result to VT. 8410 static SDValue getCanonicalConstSplat(uint64_t Val, unsigned SplatSize, EVT VT, 8411 SelectionDAG &DAG, const SDLoc &dl) { 8412 static const MVT VTys[] = { // canonical VT to use for each size. 8413 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 8414 }; 8415 8416 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 8417 8418 // For a splat with all ones, turn it to vspltisb 0xFF to canonicalize. 8419 if (Val == ((1LLU << (SplatSize * 8)) - 1)) { 8420 SplatSize = 1; 8421 Val = 0xFF; 8422 } 8423 8424 EVT CanonicalVT = VTys[SplatSize-1]; 8425 8426 // Build a canonical splat for this value. 8427 return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT)); 8428 } 8429 8430 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the 8431 /// specified intrinsic ID. 8432 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG, 8433 const SDLoc &dl, EVT DestVT = MVT::Other) { 8434 if (DestVT == MVT::Other) DestVT = Op.getValueType(); 8435 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 8436 DAG.getConstant(IID, dl, MVT::i32), Op); 8437 } 8438 8439 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the 8440 /// specified intrinsic ID. 
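/// For example (illustrative only), calling this overload with
/// Intrinsic::ppc_altivec_vmaxsh and two v8i16 operands builds
/// (v8i16 (INTRINSIC_WO_CHAIN vmaxsh, LHS, RHS)).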
8441 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, 8442 SelectionDAG &DAG, const SDLoc &dl, 8443 EVT DestVT = MVT::Other) { 8444 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 8445 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 8446 DAG.getConstant(IID, dl, MVT::i32), LHS, RHS); 8447 } 8448 8449 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 8450 /// specified intrinsic ID. 8451 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, 8452 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl, 8453 EVT DestVT = MVT::Other) { 8454 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 8455 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 8456 DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2); 8457 } 8458 8459 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 8460 /// amount. The result has the specified value type. 8461 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT, 8462 SelectionDAG &DAG, const SDLoc &dl) { 8463 // Force LHS/RHS to be the right type. 8464 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS); 8465 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS); 8466 8467 int Ops[16]; 8468 for (unsigned i = 0; i != 16; ++i) 8469 Ops[i] = i + Amt; 8470 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops); 8471 return DAG.getNode(ISD::BITCAST, dl, VT, T); 8472 } 8473 8474 /// Do we have an efficient pattern in a .td file for this node? 8475 /// 8476 /// \param V - pointer to the BuildVectorSDNode being matched 8477 /// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves? 8478 /// 8479 /// There are some patterns where it is beneficial to keep a BUILD_VECTOR 8480 /// node as a BUILD_VECTOR node rather than expanding it. The patterns where 8481 /// the opposite is true (expansion is beneficial) are: 8482 /// - The node builds a vector out of integers that are not 32 or 64-bits 8483 /// - The node builds a vector out of constants 8484 /// - The node is a "load-and-splat" 8485 /// In all other cases, we will choose to keep the BUILD_VECTOR. 8486 static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V, 8487 bool HasDirectMove, 8488 bool HasP8Vector) { 8489 EVT VecVT = V->getValueType(0); 8490 bool RightType = VecVT == MVT::v2f64 || 8491 (HasP8Vector && VecVT == MVT::v4f32) || 8492 (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32)); 8493 if (!RightType) 8494 return false; 8495 8496 bool IsSplat = true; 8497 bool IsLoad = false; 8498 SDValue Op0 = V->getOperand(0); 8499 8500 // This function is called in a block that confirms the node is not a constant 8501 // splat. So a constant BUILD_VECTOR here means the vector is built out of 8502 // different constants. 8503 if (V->isConstant()) 8504 return false; 8505 for (int i = 0, e = V->getNumOperands(); i < e; ++i) { 8506 if (V->getOperand(i).isUndef()) 8507 return false; 8508 // We want to expand nodes that represent load-and-splat even if the 8509 // loaded value is a floating point truncation or conversion to int. 
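// (For instance, a v4f32 node whose four operands are all the same
// (fp_round (load X)) still counts as a load-and-splat here.)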
8510 if (V->getOperand(i).getOpcode() == ISD::LOAD || 8511 (V->getOperand(i).getOpcode() == ISD::FP_ROUND && 8512 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) || 8513 (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT && 8514 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) || 8515 (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT && 8516 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD)) 8517 IsLoad = true; 8518 // If the operands are different or the input is not a load and has more 8519 // uses than just this BV node, then it isn't a splat. 8520 if (V->getOperand(i) != Op0 || 8521 (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode()))) 8522 IsSplat = false; 8523 } 8524 return !(IsSplat && IsLoad); 8525 } 8526 8527 // Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128. 8528 SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const { 8529 8530 SDLoc dl(Op); 8531 SDValue Op0 = Op->getOperand(0); 8532 8533 if ((Op.getValueType() != MVT::f128) || 8534 (Op0.getOpcode() != ISD::BUILD_PAIR) || 8535 (Op0.getOperand(0).getValueType() != MVT::i64) || 8536 (Op0.getOperand(1).getValueType() != MVT::i64)) 8537 return SDValue(); 8538 8539 return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0), 8540 Op0.getOperand(1)); 8541 } 8542 8543 static const SDValue *getNormalLoadInput(const SDValue &Op, bool &IsPermuted) { 8544 const SDValue *InputLoad = &Op; 8545 if (InputLoad->getOpcode() == ISD::BITCAST) 8546 InputLoad = &InputLoad->getOperand(0); 8547 if (InputLoad->getOpcode() == ISD::SCALAR_TO_VECTOR || 8548 InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED) { 8549 IsPermuted = InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED; 8550 InputLoad = &InputLoad->getOperand(0); 8551 } 8552 if (InputLoad->getOpcode() != ISD::LOAD) 8553 return nullptr; 8554 LoadSDNode *LD = cast<LoadSDNode>(*InputLoad); 8555 return ISD::isNormalLoad(LD) ? InputLoad : nullptr; 8556 } 8557 8558 // Convert the argument APFloat to a single precision APFloat if there is no 8559 // loss in information during the conversion to single precision APFloat and the 8560 // resulting number is not a denormal number. Return true if successful. 8561 bool llvm::convertToNonDenormSingle(APFloat &ArgAPFloat) { 8562 APFloat APFloatToConvert = ArgAPFloat; 8563 bool LosesInfo = true; 8564 APFloatToConvert.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven, 8565 &LosesInfo); 8566 bool Success = (!LosesInfo && !APFloatToConvert.isDenormal()); 8567 if (Success) 8568 ArgAPFloat = APFloatToConvert; 8569 return Success; 8570 } 8571 8572 // Bitcast the argument APInt to a double and convert it to a single precision 8573 // APFloat, bitcast the APFloat to an APInt and assign it to the original 8574 // argument if there is no loss in information during the conversion from 8575 // double to single precision APFloat and the resulting number is not a denormal 8576 // number. Return true if successful. 8577 bool llvm::convertToNonDenormSingle(APInt &ArgAPInt) { 8578 double DpValue = ArgAPInt.bitsToDouble(); 8579 APFloat APFloatDp(DpValue); 8580 bool Success = convertToNonDenormSingle(APFloatDp); 8581 if (Success) 8582 ArgAPInt = APFloatDp.bitcastToAPInt(); 8583 return Success; 8584 } 8585 8586 // If this is a case we can't handle, return null and let the default 8587 // expansion code take care of it. If we CAN select this case, and if it 8588 // selects to a single instruction, return Op. 
Otherwise, if we can codegen
8589 // this case more efficiently than a constant pool load, lower it to the
8590 // sequence of ops that should be used.
8591 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
8592 SelectionDAG &DAG) const {
8593 SDLoc dl(Op);
8594 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
8595 assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
8596
8597 // Check if this is a splat of a constant value.
8598 APInt APSplatBits, APSplatUndef;
8599 unsigned SplatBitSize;
8600 bool HasAnyUndefs;
8601 bool BVNIsConstantSplat =
8602 BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
8603 HasAnyUndefs, 0, !Subtarget.isLittleEndian());
8604
8605 // If it is a splat of a double, check if we can shrink it to a 32 bit
8606 // non-denormal float which when converted back to double gives us the same
8607 // double. This is to exploit the XXSPLTIDP instruction. If we lose precision, we use XXSPLTI32DX instead.
8608 if (BVNIsConstantSplat && (SplatBitSize == 64) &&
8609 Subtarget.hasPrefixInstrs()) {
8610 if (convertToNonDenormSingle(APSplatBits) &&
8611 (Op->getValueType(0) == MVT::v2f64)) {
8612 SDValue SplatNode = DAG.getNode(
8613 PPCISD::XXSPLTI_SP_TO_DP, dl, MVT::v2f64,
8614 DAG.getTargetConstant(APSplatBits.getZExtValue(), dl, MVT::i32));
8615 return DAG.getBitcast(Op.getValueType(), SplatNode);
8616 } else if (APSplatBits.getBitWidth() == 64) {
8617 // We may lose precision, so we have to use XXSPLTI32DX.
8618
8619 uint32_t Hi =
8620 (uint32_t)((APSplatBits.getZExtValue() & 0xFFFFFFFF00000000LL) >> 32);
8621 uint32_t Lo =
8622 (uint32_t)(APSplatBits.getZExtValue() & 0xFFFFFFFF);
8623 SDValue SplatNode = DAG.getUNDEF(MVT::v2i64);
8624
8625 if (!Hi || !Lo)
8626 // If either 32-bit half is 0, generate XXLXOR to set it to 0 first.
8627 SplatNode = DAG.getTargetConstant(0, dl, MVT::v2i64);
8628
8629 if (Hi)
8630 SplatNode = DAG.getNode(
8631 PPCISD::XXSPLTI32DX, dl, MVT::v2i64, SplatNode,
8632 DAG.getTargetConstant(0, dl, MVT::i32),
8633 DAG.getTargetConstant(Hi, dl, MVT::i32));
8634
8635 if (Lo)
8636 SplatNode =
8637 DAG.getNode(PPCISD::XXSPLTI32DX, dl, MVT::v2i64, SplatNode,
8638 DAG.getTargetConstant(1, dl, MVT::i32),
8639 DAG.getTargetConstant(Lo, dl, MVT::i32));
8640
8641 return DAG.getBitcast(Op.getValueType(), SplatNode);
8642 }
8643 }
8644
8645 if (!BVNIsConstantSplat || SplatBitSize > 32) {
8646
8647 bool IsPermutedLoad = false;
8648 const SDValue *InputLoad =
8649 getNormalLoadInput(Op.getOperand(0), IsPermutedLoad);
8650 // Handle load-and-splat patterns as we have instructions that will do this
8651 // in one go.
8652 if (InputLoad && DAG.isSplatValue(Op, true)) {
8653 LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
8654
8655 // We have handling for 4 and 8 byte elements.
8656 unsigned ElementSize = LD->getMemoryVT().getScalarSizeInBits();
8657
8658 // To check for a single use of this load, we have to check for vector
8659 // width (128 bits) / ElementSize uses (since each operand of the
8660 // BUILD_VECTOR is a separate use of the value).
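// A worked example of the use count below: for
// (v4i32 (build_vector (load p), (load p), (load p), (load p))), the load
// must have exactly 128 / 32 == 4 uses for this to be a pure load-and-splat,
// and each undef BUILD_VECTOR operand lowers the expected use count by one.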
8661 unsigned NumUsesOfInputLD = 128 / ElementSize;
8662 for (SDValue BVInOp : Op->ops())
8663 if (BVInOp.isUndef())
8664 NumUsesOfInputLD--;
8665 assert(NumUsesOfInputLD > 0 && "No uses of input LD of a build_vector?");
8666 if (InputLoad->getNode()->hasNUsesOfValue(NumUsesOfInputLD, 0) &&
8667 ((Subtarget.hasVSX() && ElementSize == 64) ||
8668 (Subtarget.hasP9Vector() && ElementSize == 32))) {
8669 SDValue Ops[] = {
8670 LD->getChain(), // Chain
8671 LD->getBasePtr(), // Ptr
8672 DAG.getValueType(Op.getValueType()) // VT
8673 };
8674 SDValue LdSplt = DAG.getMemIntrinsicNode(
8675 PPCISD::LD_SPLAT, dl, DAG.getVTList(Op.getValueType(), MVT::Other),
8676 Ops, LD->getMemoryVT(), LD->getMemOperand());
8677 // Replace all uses of the output chain of the original load with the
8678 // output chain of the new load.
8679 DAG.ReplaceAllUsesOfValueWith(InputLoad->getValue(1),
8680 LdSplt.getValue(1));
8681 return LdSplt;
8682 }
8683 }
8684
8685 // In 64-bit mode, BUILD_VECTOR nodes that are not constant splats of up to
8686 // 32 bits can be lowered to VSX instructions under certain conditions.
8687 // Without VSX, there is no pattern more efficient than expanding the node.
8688 if (Subtarget.hasVSX() && Subtarget.isPPC64() &&
8689 haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
8690 Subtarget.hasP8Vector()))
8691 return Op;
8692 return SDValue();
8693 }
8694
8695 uint64_t SplatBits = APSplatBits.getZExtValue();
8696 uint64_t SplatUndef = APSplatUndef.getZExtValue();
8697 unsigned SplatSize = SplatBitSize / 8;
8698
8699 // First, handle single instruction cases.
8700
8701 // All zeros?
8702 if (SplatBits == 0) {
8703 // Canonicalize all zero vectors to be v4i32.
8704 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
8705 SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
8706 Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
8707 }
8708 return Op;
8709 }
8710
8711 // We have XXSPLTIW for constant splats four bytes wide.
8712 // Since the vector length is a multiple of 4 bytes, 2-byte splats can be
8713 // replaced with 4-byte splats. We replicate the SplatBits in case of a
8714 // 2-byte splat to make a 4-byte splat element. For example: a 2-byte splat
8715 // of 0xABAB can be turned into a 4-byte splat of 0xABABABAB.
8716 if (Subtarget.hasPrefixInstrs() && SplatSize == 2)
8717 return getCanonicalConstSplat(SplatBits | (SplatBits << 16), SplatSize * 2,
8718 Op.getValueType(), DAG, dl);
8719
8720 if (Subtarget.hasPrefixInstrs() && SplatSize == 4)
8721 return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
8722 dl);
8723
8724 // We have XXSPLTIB for constant splats one byte wide.
8725 if (Subtarget.hasP9Vector() && SplatSize == 1)
8726 return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
8727 dl);
8728
8729 // If the sign-extended value is in the range [-16,15], use VSPLTI[bhw].
8730 int32_t SextVal = (int32_t(SplatBits << (32-SplatBitSize)) >>
8731 (32-SplatBitSize));
8732 if (SextVal >= -16 && SextVal <= 15)
8733 return getCanonicalConstSplat(SextVal, SplatSize, Op.getValueType(), DAG,
8734 dl);
8735
8736 // Two instruction sequences.
8737
8738 // If this value is in the range [-32,30] and is even, use:
8739 // VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
8740 // If this value is in the range [17,31] and is odd, use:
8741 // VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
8742 // If this value is in the range [-31,-17] and is odd, use:
8743 // VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
8744 // Note the last two are three-instruction sequences.
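// A worked example for the halfword case (SplatSize == 2), assuming the
// pseudo below is later expanded with vspltish plus vadduhm/vsubuhm:
// SextVal == 24 (even) can be built as t = vspltish(12); vadduhm t, t, and
// SextVal == 27 (odd, in [17,31]) as t1 = vspltish(11); t2 = vspltish(-16);
// vsubuhm t1, t2, since 11 - (-16) == 27.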
8745 if (SextVal >= -32 && SextVal <= 31) {
8746 // To avoid having these optimizations undone by constant folding,
8747 // we convert to a pseudo that will be expanded later into one of
8748 // the above forms.
8749 SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
8750 EVT VT = (SplatSize == 1 ? MVT::v16i8 :
8751 (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
8752 SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
8753 SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
8754 if (VT == Op.getValueType())
8755 return RetVal;
8756 else
8757 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
8758 }
8759
8760 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is
8761 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important
8762 // for fneg/fabs.
8763 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF & ~SplatUndef)) {
8764 // Make a -1 splat with vspltisw -1:
8765 SDValue OnesV = getCanonicalConstSplat(-1, 4, MVT::v4i32, DAG, dl);
8766
8767 // Make the VSLW intrinsic, computing 0x8000_0000.
8768 SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
8769 OnesV, DAG, dl);
8770
8771 // xor by OnesV to invert it.
8772 Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
8773 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8774 }
8775
8776 // Check to see if this is a wide variety of vsplti*, binop self cases.
8777 static const signed char SplatCsts[] = {
8778 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
8779 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
8780 };
8781
8782 for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
8783 // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
8784 // cases which are ambiguous (e.g. formation of 0x8000_0000).
8785 int i = SplatCsts[idx];
8786
8787 // Figure out what shift amount Altivec will actually use when shifting by
8788 // a splat of i at this element size (the shift instructions only use the low bits of the shift operand).
8789 unsigned TypeShiftAmt = i & (SplatBitSize-1);
8790
8791 // vsplti + shl self.
8792 if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
8793 SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
8794 static const unsigned IIDs[] = { // Intrinsic to use for each size.
8795 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
8796 Intrinsic::ppc_altivec_vslw
8797 };
8798 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
8799 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8800 }
8801
8802 // vsplti + srl self.
8803 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
8804 SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
8805 static const unsigned IIDs[] = { // Intrinsic to use for each size.
8806 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
8807 Intrinsic::ppc_altivec_vsrw
8808 };
8809 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
8810 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8811 }
8812
8813 // vsplti + rol self.
8814 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
8815 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
8816 SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
8817 static const unsigned IIDs[] = { // Intrinsic to use for each size.
8818 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0, 8819 Intrinsic::ppc_altivec_vrlw 8820 }; 8821 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 8822 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 8823 } 8824 8825 // t = vsplti c, result = vsldoi t, t, 1 8826 if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) { 8827 SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl); 8828 unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1; 8829 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 8830 } 8831 // t = vsplti c, result = vsldoi t, t, 2 8832 if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) { 8833 SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl); 8834 unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2; 8835 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 8836 } 8837 // t = vsplti c, result = vsldoi t, t, 3 8838 if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) { 8839 SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl); 8840 unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3; 8841 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 8842 } 8843 } 8844 8845 return SDValue(); 8846 } 8847 8848 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 8849 /// the specified operations to build the shuffle. 8850 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 8851 SDValue RHS, SelectionDAG &DAG, 8852 const SDLoc &dl) { 8853 unsigned OpNum = (PFEntry >> 26) & 0x0F; 8854 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 8855 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 8856 8857 enum { 8858 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 8859 OP_VMRGHW, 8860 OP_VMRGLW, 8861 OP_VSPLTISW0, 8862 OP_VSPLTISW1, 8863 OP_VSPLTISW2, 8864 OP_VSPLTISW3, 8865 OP_VSLDOI4, 8866 OP_VSLDOI8, 8867 OP_VSLDOI12 8868 }; 8869 8870 if (OpNum == OP_COPY) { 8871 if (LHSID == (1*9+2)*9+3) return LHS; 8872 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 8873 return RHS; 8874 } 8875 8876 SDValue OpLHS, OpRHS; 8877 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 8878 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 8879 8880 int ShufIdxs[16]; 8881 switch (OpNum) { 8882 default: llvm_unreachable("Unknown i32 permute!"); 8883 case OP_VMRGHW: 8884 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 8885 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 8886 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 8887 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 8888 break; 8889 case OP_VMRGLW: 8890 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 8891 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 8892 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 8893 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 8894 break; 8895 case OP_VSPLTISW0: 8896 for (unsigned i = 0; i != 16; ++i) 8897 ShufIdxs[i] = (i&3)+0; 8898 break; 8899 case OP_VSPLTISW1: 8900 for (unsigned i = 0; i != 16; ++i) 8901 ShufIdxs[i] = (i&3)+4; 8902 break; 8903 case OP_VSPLTISW2: 8904 for (unsigned i = 0; i != 16; ++i) 8905 ShufIdxs[i] = (i&3)+8; 8906 break; 8907 case OP_VSPLTISW3: 8908 for (unsigned i = 0; i != 16; ++i) 8909 ShufIdxs[i] = (i&3)+12; 8910 break; 
8911 case OP_VSLDOI4:
8912 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
8913 case OP_VSLDOI8:
8914 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
8915 case OP_VSLDOI12:
8916 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
8917 }
8918 EVT VT = OpLHS.getValueType();
8919 OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
8920 OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
8921 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
8922 return DAG.getNode(ISD::BITCAST, dl, VT, T);
8923 }
8924
8925 /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
8926 /// by the VINSERTB instruction introduced in ISA 3.0, else just return a
8927 /// default SDValue.
8928 SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
8929 SelectionDAG &DAG) const {
8930 const unsigned BytesInVector = 16;
8931 bool IsLE = Subtarget.isLittleEndian();
8932 SDLoc dl(N);
8933 SDValue V1 = N->getOperand(0);
8934 SDValue V2 = N->getOperand(1);
8935 unsigned ShiftElts = 0, InsertAtByte = 0;
8936 bool Swap = false;
8937
8938 // Shifts required to get the byte we want at element 7.
8939 unsigned LittleEndianShifts[] = {8, 7, 6, 5, 4, 3, 2, 1,
8940 0, 15, 14, 13, 12, 11, 10, 9};
8941 unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
8942 1, 2, 3, 4, 5, 6, 7, 8};
8943
8944 ArrayRef<int> Mask = N->getMask();
8945 int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
8946
8947 // For each mask element, find out if we're just inserting something
8948 // from V2 into V1 or vice versa.
8949 // Possible permutations inserting an element from V2 into V1:
8950 // X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
8951 // 0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
8952 // ...
8953 // 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
8954 // Inserting from V1 into V2 will be similar, except mask range will be
8955 // [16,31].
8956
8957 bool FoundCandidate = false;
8958 // If both vector operands for the shuffle are the same vector, the mask
8959 // will contain only elements from the first one and the second one will be
8960 // undef.
8961 unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
8962 // Go through the mask of bytes to find an element that's being moved
8963 // from one vector to the other.
8964 for (unsigned i = 0; i < BytesInVector; ++i) {
8965 unsigned CurrentElement = Mask[i];
8966 // If the 2nd operand is undefined, we should only look for the VINSERTB
8967 // source element (7 on big endian, 8 on little endian) in the Mask.
8968 if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
8969 continue;
8970
8971 bool OtherElementsInOrder = true;
8972 // Examine the other elements in the Mask to see if they're in original
8973 // order.
8974 for (unsigned j = 0; j < BytesInVector; ++j) {
8975 if (j == i)
8976 continue;
8977 // If CurrentElement is from V1 [0,15], then we expect the rest of the
8978 // Mask to be from V2 [16,31] and vice versa, unless the 2nd operand is
8979 // undefined, in which case we assume we're always picking from the 1st operand.
8980 int MaskOffset =
8981 (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
8982 if (Mask[j] != OriginalOrder[j] + MaskOffset) {
8983 OtherElementsInOrder = false;
8984 break;
8985 }
8986 }
8987 // If other elements are in original order, we record the number of shifts
8988 // we need to get the element we want into element 7. Also record which byte
8989 // in the vector we should insert into.
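// A concrete big-endian example, assuming V2 is a defined vector: the mask
// <0,1,2,3,4,5,6,23,8,9,10,11,12,13,14,15> keeps V1 in original order except
// at i == 7, where CurrentElement == 23 picks a byte of V2. The other
// elements match OriginalOrder with MaskOffset == 0, so this is a candidate
// with ShiftElts == BigEndianShifts[7] == 0, Swap == false and
// InsertAtByte == 7.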
8990 if (OtherElementsInOrder) {
8991 // If 2nd operand is undefined, we assume no shifts and no swapping.
8992 if (V2.isUndef()) {
8993 ShiftElts = 0;
8994 Swap = false;
8995 } else {
8996 // We only need the low 4 bits of CurrentElement for the shift tables, because the operands will be swapped if CurrentElement is >= 2^4.
8997 ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
8998 : BigEndianShifts[CurrentElement & 0xF];
8999 Swap = CurrentElement < BytesInVector;
9000 }
9001 InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
9002 FoundCandidate = true;
9003 break;
9004 }
9005 }
9006
9007 if (!FoundCandidate)
9008 return SDValue();
9009
9010 // Candidate found, construct the proper SDAG sequence with VINSERTB,
9011 // optionally with VECSHL if shift is required.
9012 if (Swap)
9013 std::swap(V1, V2);
9014 if (V2.isUndef())
9015 V2 = V1;
9016 if (ShiftElts) {
9017 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9018 DAG.getConstant(ShiftElts, dl, MVT::i32));
9019 return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl,
9020 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9021 }
9022 return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2,
9023 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9024 }
9025
9026 /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled
9027 /// by the VINSERTH instruction introduced in ISA 3.0, else just return a
9028 /// default SDValue.
9029 SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
9030 SelectionDAG &DAG) const {
9031 const unsigned NumHalfWords = 8;
9032 const unsigned BytesInVector = NumHalfWords * 2;
9033 // Check that the shuffle is on half-words.
9034 if (!isNByteElemShuffleMask(N, 2, 1))
9035 return SDValue();
9036
9037 bool IsLE = Subtarget.isLittleEndian();
9038 SDLoc dl(N);
9039 SDValue V1 = N->getOperand(0);
9040 SDValue V2 = N->getOperand(1);
9041 unsigned ShiftElts = 0, InsertAtByte = 0;
9042 bool Swap = false;
9043
9044 // Shifts required to get the half-word we want at element 3.
9045 unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
9046 unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};
9047
9048 uint32_t Mask = 0;
9049 uint32_t OriginalOrderLow = 0x1234567;
9050 uint32_t OriginalOrderHigh = 0x89ABCDEF;
9051 // Now we look at mask elements 0,2,4,6,8,10,12,14. Pack the mask into a
9052 // 32-bit space; we only need a 4-bit nibble per element.
9053 for (unsigned i = 0; i < NumHalfWords; ++i) {
9054 unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9055 Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift);
9056 }
9057
9058 // For each mask element, find out if we're just inserting something
9059 // from V2 into V1 or vice versa. Possible permutations inserting an element
9060 // from V2 into V1:
9061 // X, 1, 2, 3, 4, 5, 6, 7
9062 // 0, X, 2, 3, 4, 5, 6, 7
9063 // 0, 1, X, 3, 4, 5, 6, 7
9064 // 0, 1, 2, X, 4, 5, 6, 7
9065 // 0, 1, 2, 3, X, 5, 6, 7
9066 // 0, 1, 2, 3, 4, X, 6, 7
9067 // 0, 1, 2, 3, 4, 5, X, 7
9068 // 0, 1, 2, 3, 4, 5, 6, X
9069 // Inserting from V1 into V2 will be similar, except mask range will be [8,15].
9070
9071 bool FoundCandidate = false;
9072 // Go through the mask of half-words to find an element that's being moved
9073 // from one vector to the other.
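// For example, the identity shuffle (half-word i comes from half-word i, so
// N->getMaskElt(i * 2) == i * 2) packs to Mask == 0x01234567, which is
// exactly OriginalOrderLow; a shuffle taking every half-word from V2
// (elements 8..15) packs to 0x89ABCDEF == OriginalOrderHigh.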
9074 for (unsigned i = 0; i < NumHalfWords; ++i) {
9075 unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9076 uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF;
9077 uint32_t MaskOtherElts = ~(0xF << MaskShift);
9078 uint32_t TargetOrder = 0x0;
9079
9080 // If both vector operands for the shuffle are the same vector, the mask
9081 // will contain only elements from the first one and the second one will be
9082 // undef.
9083 if (V2.isUndef()) {
9084 ShiftElts = 0;
9085 unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
9086 TargetOrder = OriginalOrderLow;
9087 Swap = false;
9088 // Skip if this is not the correct element or if the mask of the other
9089 // elements doesn't match our expected order.
9090 if (MaskOneElt == VINSERTHSrcElem &&
9091 (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9092 InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9093 FoundCandidate = true;
9094 break;
9095 }
9096 } else { // If both operands are defined.
9097 // Target order is [8,15] if the current mask element is in [0,7].
9098 TargetOrder =
9099 (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
9100 // Skip if the mask of the other elements doesn't match our expected order.
9101 if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9102 // We only need the last 3 bits for the number of shifts.
9103 ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
9104 : BigEndianShifts[MaskOneElt & 0x7];
9105 InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9106 Swap = MaskOneElt < NumHalfWords;
9107 FoundCandidate = true;
9108 break;
9109 }
9110 }
9111 }
9112
9113 if (!FoundCandidate)
9114 return SDValue();
9115
9116 // Candidate found, construct the proper SDAG sequence with VINSERTH,
9117 // optionally with VECSHL if shift is required.
9118 if (Swap)
9119 std::swap(V1, V2);
9120 if (V2.isUndef())
9121 V2 = V1;
9122 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
9123 if (ShiftElts) {
9124 // Double ShiftElts because we're left shifting on v16i8 type.
9125 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9126 DAG.getConstant(2 * ShiftElts, dl, MVT::i32));
9127 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl);
9128 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9129 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9130 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9131 }
9132 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
9133 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9134 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9135 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9136 }
9137
9138 /// lowerToXXSPLTI32DX - Return the SDValue if this VECTOR_SHUFFLE can be
9139 /// handled by the XXSPLTI32DX instruction introduced in ISA 3.1, otherwise
9140 /// return the default SDValue.
9141 SDValue PPCTargetLowering::lowerToXXSPLTI32DX(ShuffleVectorSDNode *SVN,
9142 SelectionDAG &DAG) const {
9143 // The LHS and RHS may be bitcasts to v16i8 as we canonicalize shuffles
9144 // to v16i8. Peek through the bitcasts to get the actual operands.
9145 SDValue LHS = peekThroughBitcasts(SVN->getOperand(0));
9146 SDValue RHS = peekThroughBitcasts(SVN->getOperand(1));
9147
9148 auto ShuffleMask = SVN->getMask();
9149 SDValue VecShuffle(SVN, 0);
9150 SDLoc DL(SVN);
9151
9152 // Check that we have a four-byte shuffle.
9153 if (!isNByteElemShuffleMask(SVN, 4, 1))
9154 return SDValue();
9155
9156 // Canonicalize the RHS being a BUILD_VECTOR when lowering to xxsplti32dx.
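// An illustrative sketch: with LHS a live vector and RHS a constant splat,
// the word-level shuffle <0, C, 2, C> (byte mask <0..3, 16..19, 8..11,
// 16..19>) keeps words 0 and 2 of LHS and writes the splatted constant into
// words 1 and 3, which matches a single xxsplti32dx.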
9157 if (RHS->getOpcode() != ISD::BUILD_VECTOR) {
9158 std::swap(LHS, RHS);
9159 VecShuffle = DAG.getCommutedVectorShuffle(*SVN);
9160 ShuffleMask = cast<ShuffleVectorSDNode>(VecShuffle)->getMask();
9161 }
9162
9163 // Ensure that the RHS is a vector of constants.
9164 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
9165 if (!BVN)
9166 return SDValue();
9167
9168 // Check if RHS is a splat of 4 bytes (or smaller).
9169 APInt APSplatValue, APSplatUndef;
9170 unsigned SplatBitSize;
9171 bool HasAnyUndefs;
9172 if (!BVN->isConstantSplat(APSplatValue, APSplatUndef, SplatBitSize,
9173 HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
9174 SplatBitSize > 32)
9175 return SDValue();
9176
9177 // Check that the shuffle mask matches the semantics of XXSPLTI32DX.
9178 // The instruction splats a constant C into two words of the source vector
9179 // producing { C, Unchanged, C, Unchanged } or { Unchanged, C, Unchanged, C },
9180 // so we check that the shuffle mask is the equivalent of
9181 // <0, [4-7], 2, [4-7]> or <[4-7], 1, [4-7], 3> respectively.
9182 // Note: the check above of isNByteElemShuffleMask() ensures that the bytes
9183 // within each word are consecutive, so we only need to check the first byte.
9184 SDValue Index;
9185 bool IsLE = Subtarget.isLittleEndian();
9186 if ((ShuffleMask[0] == 0 && ShuffleMask[8] == 8) &&
9187 (ShuffleMask[4] % 4 == 0 && ShuffleMask[12] % 4 == 0 &&
9188 ShuffleMask[4] > 15 && ShuffleMask[12] > 15))
9189 Index = DAG.getTargetConstant(IsLE ? 0 : 1, DL, MVT::i32);
9190 else if ((ShuffleMask[4] == 4 && ShuffleMask[12] == 12) &&
9191 (ShuffleMask[0] % 4 == 0 && ShuffleMask[8] % 4 == 0 &&
9192 ShuffleMask[0] > 15 && ShuffleMask[8] > 15))
9193 Index = DAG.getTargetConstant(IsLE ? 1 : 0, DL, MVT::i32);
9194 else
9195 return SDValue();
9196
9197 // If the splat is narrower than 32 bits, we need to replicate it to get the
9198 // 32-bit immediate for XXSPLTI32DX.
9199 unsigned SplatVal = APSplatValue.getZExtValue();
9200 for (; SplatBitSize < 32; SplatBitSize <<= 1)
9201 SplatVal |= (SplatVal << SplatBitSize);
9202
9203 SDValue SplatNode = DAG.getNode(
9204 PPCISD::XXSPLTI32DX, DL, MVT::v2i64, DAG.getBitcast(MVT::v2i64, LHS),
9205 Index, DAG.getTargetConstant(SplatVal, DL, MVT::i32));
9206 return DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, SplatNode);
9207 }
9208
9209 /// LowerROTL - Custom lowering for ROTL(v1i128) to vector_shuffle(v16i8).
9210 /// We lower ROTL(v1i128) to vector_shuffle(v16i8) only if the shift amount
9211 /// is a multiple of 8. Otherwise we convert it to a scalar rotation on i128,
9212 /// i.e. (or (shl x, C1), (srl x, 128-C1)).
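/// For example, ROTL(v1i128 x, 16) is a rotate by two whole bytes, so it
/// becomes the byte shuffle <2,3,4,...,15,0,1> on the v16i8 bitcast of x,
/// while ROTL(v1i128 x, 5) falls back to (or (shl x, 5), (srl x, 123)).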
9213 SDValue PPCTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const { 9214 assert(Op.getOpcode() == ISD::ROTL && "Should only be called for ISD::ROTL"); 9215 assert(Op.getValueType() == MVT::v1i128 && 9216 "Only set v1i128 as custom, other type shouldn't reach here!"); 9217 SDLoc dl(Op); 9218 SDValue N0 = peekThroughBitcasts(Op.getOperand(0)); 9219 SDValue N1 = peekThroughBitcasts(Op.getOperand(1)); 9220 unsigned SHLAmt = N1.getConstantOperandVal(0); 9221 if (SHLAmt % 8 == 0) { 9222 SmallVector<int, 16> Mask(16, 0); 9223 std::iota(Mask.begin(), Mask.end(), 0); 9224 std::rotate(Mask.begin(), Mask.begin() + SHLAmt / 8, Mask.end()); 9225 if (SDValue Shuffle = 9226 DAG.getVectorShuffle(MVT::v16i8, dl, DAG.getBitcast(MVT::v16i8, N0), 9227 DAG.getUNDEF(MVT::v16i8), Mask)) 9228 return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, Shuffle); 9229 } 9230 SDValue ArgVal = DAG.getBitcast(MVT::i128, N0); 9231 SDValue SHLOp = DAG.getNode(ISD::SHL, dl, MVT::i128, ArgVal, 9232 DAG.getConstant(SHLAmt, dl, MVT::i32)); 9233 SDValue SRLOp = DAG.getNode(ISD::SRL, dl, MVT::i128, ArgVal, 9234 DAG.getConstant(128 - SHLAmt, dl, MVT::i32)); 9235 SDValue OROp = DAG.getNode(ISD::OR, dl, MVT::i128, SHLOp, SRLOp); 9236 return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, OROp); 9237 } 9238 9239 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this 9240 /// is a shuffle we can handle in a single instruction, return it. Otherwise, 9241 /// return the code it can be lowered into. Worst case, it can always be 9242 /// lowered into a vperm. 9243 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 9244 SelectionDAG &DAG) const { 9245 SDLoc dl(Op); 9246 SDValue V1 = Op.getOperand(0); 9247 SDValue V2 = Op.getOperand(1); 9248 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 9249 9250 // Any nodes that were combined in the target-independent combiner prior 9251 // to vector legalization will not be sent to the target combine. Try to 9252 // combine it here. 9253 if (SDValue NewShuffle = combineVectorShuffle(SVOp, DAG)) { 9254 if (!isa<ShuffleVectorSDNode>(NewShuffle)) 9255 return NewShuffle; 9256 Op = NewShuffle; 9257 SVOp = cast<ShuffleVectorSDNode>(Op); 9258 V1 = Op.getOperand(0); 9259 V2 = Op.getOperand(1); 9260 } 9261 EVT VT = Op.getValueType(); 9262 bool isLittleEndian = Subtarget.isLittleEndian(); 9263 9264 unsigned ShiftElts, InsertAtByte; 9265 bool Swap = false; 9266 9267 // If this is a load-and-splat, we can do that with a single instruction 9268 // in some cases. However if the load has multiple uses, we don't want to 9269 // combine it because that will just produce multiple loads. 9270 bool IsPermutedLoad = false; 9271 const SDValue *InputLoad = getNormalLoadInput(V1, IsPermutedLoad); 9272 if (InputLoad && Subtarget.hasVSX() && V2.isUndef() && 9273 (PPC::isSplatShuffleMask(SVOp, 4) || PPC::isSplatShuffleMask(SVOp, 8)) && 9274 InputLoad->hasOneUse()) { 9275 bool IsFourByte = PPC::isSplatShuffleMask(SVOp, 4); 9276 int SplatIdx = 9277 PPC::getSplatIdxForPPCMnemonics(SVOp, IsFourByte ? 4 : 8, DAG); 9278 9279 // The splat index for permuted loads will be in the left half of the vector 9280 // which is strictly wider than the loaded value by 8 bytes. So we need to 9281 // adjust the splat index to point to the correct address in memory. 9282 if (IsPermutedLoad) { 9283 assert(isLittleEndian && "Unexpected permuted load on big endian target"); 9284 SplatIdx += IsFourByte ? 2 : 1; 9285 assert((SplatIdx < (IsFourByte ? 
4 : 2)) && 9286 "Splat of a value outside of the loaded memory"); 9287 } 9288 9289 LoadSDNode *LD = cast<LoadSDNode>(*InputLoad); 9290 // For 4-byte load-and-splat, we need Power9. 9291 if ((IsFourByte && Subtarget.hasP9Vector()) || !IsFourByte) { 9292 uint64_t Offset = 0; 9293 if (IsFourByte) 9294 Offset = isLittleEndian ? (3 - SplatIdx) * 4 : SplatIdx * 4; 9295 else 9296 Offset = isLittleEndian ? (1 - SplatIdx) * 8 : SplatIdx * 8; 9297 9298 SDValue BasePtr = LD->getBasePtr(); 9299 if (Offset != 0) 9300 BasePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()), 9301 BasePtr, DAG.getIntPtrConstant(Offset, dl)); 9302 SDValue Ops[] = { 9303 LD->getChain(), // Chain 9304 BasePtr, // BasePtr 9305 DAG.getValueType(Op.getValueType()) // VT 9306 }; 9307 SDVTList VTL = 9308 DAG.getVTList(IsFourByte ? MVT::v4i32 : MVT::v2i64, MVT::Other); 9309 SDValue LdSplt = 9310 DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl, VTL, 9311 Ops, LD->getMemoryVT(), LD->getMemOperand()); 9312 DAG.ReplaceAllUsesOfValueWith(InputLoad->getValue(1), LdSplt.getValue(1)); 9313 if (LdSplt.getValueType() != SVOp->getValueType(0)) 9314 LdSplt = DAG.getBitcast(SVOp->getValueType(0), LdSplt); 9315 return LdSplt; 9316 } 9317 } 9318 if (Subtarget.hasP9Vector() && 9319 PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap, 9320 isLittleEndian)) { 9321 if (Swap) 9322 std::swap(V1, V2); 9323 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 9324 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2); 9325 if (ShiftElts) { 9326 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2, 9327 DAG.getConstant(ShiftElts, dl, MVT::i32)); 9328 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl, 9329 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 9330 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 9331 } 9332 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2, 9333 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 9334 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 9335 } 9336 9337 if (Subtarget.hasPrefixInstrs()) { 9338 SDValue SplatInsertNode; 9339 if ((SplatInsertNode = lowerToXXSPLTI32DX(SVOp, DAG))) 9340 return SplatInsertNode; 9341 } 9342 9343 if (Subtarget.hasP9Altivec()) { 9344 SDValue NewISDNode; 9345 if ((NewISDNode = lowerToVINSERTH(SVOp, DAG))) 9346 return NewISDNode; 9347 9348 if ((NewISDNode = lowerToVINSERTB(SVOp, DAG))) 9349 return NewISDNode; 9350 } 9351 9352 if (Subtarget.hasVSX() && 9353 PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) { 9354 if (Swap) 9355 std::swap(V1, V2); 9356 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 9357 SDValue Conv2 = 9358 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2); 9359 9360 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2, 9361 DAG.getConstant(ShiftElts, dl, MVT::i32)); 9362 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl); 9363 } 9364 9365 if (Subtarget.hasVSX() && 9366 PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) { 9367 if (Swap) 9368 std::swap(V1, V2); 9369 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1); 9370 SDValue Conv2 = 9371 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? 
V1 : V2); 9372 9373 SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2, 9374 DAG.getConstant(ShiftElts, dl, MVT::i32)); 9375 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI); 9376 } 9377 9378 if (Subtarget.hasP9Vector()) { 9379 if (PPC::isXXBRHShuffleMask(SVOp)) { 9380 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 9381 SDValue ReveHWord = DAG.getNode(ISD::BSWAP, dl, MVT::v8i16, Conv); 9382 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord); 9383 } else if (PPC::isXXBRWShuffleMask(SVOp)) { 9384 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 9385 SDValue ReveWord = DAG.getNode(ISD::BSWAP, dl, MVT::v4i32, Conv); 9386 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord); 9387 } else if (PPC::isXXBRDShuffleMask(SVOp)) { 9388 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1); 9389 SDValue ReveDWord = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Conv); 9390 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord); 9391 } else if (PPC::isXXBRQShuffleMask(SVOp)) { 9392 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1); 9393 SDValue ReveQWord = DAG.getNode(ISD::BSWAP, dl, MVT::v1i128, Conv); 9394 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord); 9395 } 9396 } 9397 9398 if (Subtarget.hasVSX()) { 9399 if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) { 9400 int SplatIdx = PPC::getSplatIdxForPPCMnemonics(SVOp, 4, DAG); 9401 9402 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 9403 SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv, 9404 DAG.getConstant(SplatIdx, dl, MVT::i32)); 9405 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat); 9406 } 9407 9408 // Left shifts of 8 bytes are actually swaps. Convert accordingly. 9409 if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) { 9410 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1); 9411 SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv); 9412 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap); 9413 } 9414 } 9415 9416 // Cases that are handled by instructions that take permute immediates 9417 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 9418 // selected by the instruction selector. 9419 if (V2.isUndef()) { 9420 if (PPC::isSplatShuffleMask(SVOp, 1) || 9421 PPC::isSplatShuffleMask(SVOp, 2) || 9422 PPC::isSplatShuffleMask(SVOp, 4) || 9423 PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) || 9424 PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) || 9425 PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 || 9426 PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) || 9427 PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) || 9428 PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) || 9429 PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) || 9430 PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) || 9431 PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) || 9432 (Subtarget.hasP8Altivec() && ( 9433 PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) || 9434 PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) || 9435 PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) { 9436 return Op; 9437 } 9438 } 9439 9440 // Altivec has a variety of "shuffle immediates" that take two vector inputs 9441 // and produce a fixed permutation. If any of these match, do not lower to 9442 // VPERM. 9443 unsigned int ShuffleKind = isLittleEndian ? 
2 : 0; 9444 if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) || 9445 PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) || 9446 PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 || 9447 PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) || 9448 PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) || 9449 PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) || 9450 PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) || 9451 PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) || 9452 PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) || 9453 (Subtarget.hasP8Altivec() && ( 9454 PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) || 9455 PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) || 9456 PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG)))) 9457 return Op; 9458 9459 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 9460 // perfect shuffle table to emit an optimal matching sequence. 9461 ArrayRef<int> PermMask = SVOp->getMask(); 9462 9463 unsigned PFIndexes[4]; 9464 bool isFourElementShuffle = true; 9465 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number 9466 unsigned EltNo = 8; // Start out undef. 9467 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 9468 if (PermMask[i*4+j] < 0) 9469 continue; // Undef, ignore it. 9470 9471 unsigned ByteSource = PermMask[i*4+j]; 9472 if ((ByteSource & 3) != j) { 9473 isFourElementShuffle = false; 9474 break; 9475 } 9476 9477 if (EltNo == 8) { 9478 EltNo = ByteSource/4; 9479 } else if (EltNo != ByteSource/4) { 9480 isFourElementShuffle = false; 9481 break; 9482 } 9483 } 9484 PFIndexes[i] = EltNo; 9485 } 9486 9487 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 9488 // perfect shuffle vector to determine if it is cost effective to do this as 9489 // discrete instructions, or whether we should use a vperm. 9490 // For now, we skip this for little endian until such time as we have a 9491 // little-endian perfect shuffle table. 9492 if (isFourElementShuffle && !isLittleEndian) { 9493 // Compute the index in the perfect shuffle table. 9494 unsigned PFTableIndex = 9495 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 9496 9497 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 9498 unsigned Cost = (PFEntry >> 30); 9499 9500 // Determining when to avoid vperm is tricky. Many things affect the cost 9501 // of vperm, particularly how many times the perm mask needs to be computed. 9502 // For example, if the perm mask can be hoisted out of a loop or is already 9503 // used (perhaps because there are multiple permutes with the same shuffle 9504 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of 9505 // the loop requires an extra register. 9506 // 9507 // As a compromise, we only emit discrete instructions if the shuffle can be 9508 // generated in 3 or fewer operations. When we have loop information 9509 // available, if this block is within a loop, we should avoid using vperm 9510 // for 3-operation perms and use a constant pool load instead. 9511 if (Cost < 3) 9512 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 9513 } 9514 9515 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant 9516 // vector that will get spilled to the constant pool. 9517 if (V2.isUndef()) V2 = V1; 9518 9519 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except 9520 // that it is in input element units, not in bytes. Convert now. 
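// For example, for a v4i32 shuffle, mask element 2 expands to the byte
// indices 8,9,10,11 on big endian; on little endian they are complemented to
// 31 - 8 .. 31 - 11, i.e. 23,22,21,20, to match vperm's big-endian byte
// numbering.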
9521
9522 // For little endian, the order of the input vectors is reversed, and
9523 // the permutation mask is complemented with respect to 31. This is
9524 // necessary to produce proper semantics with the big-endian-biased vperm
9525 // instruction.
9526 EVT EltVT = V1.getValueType().getVectorElementType();
9527 unsigned BytesPerElement = EltVT.getSizeInBits()/8;
9528
9529 SmallVector<SDValue, 16> ResultMask;
9530 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
9531 unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
9532
9533 for (unsigned j = 0; j != BytesPerElement; ++j)
9534 if (isLittleEndian)
9535 ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
9536 dl, MVT::i32));
9537 else
9538 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
9539 MVT::i32));
9540 }
9541
9542 ShufflesHandledWithVPERM++;
9543 SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
9544 LLVM_DEBUG(dbgs() << "Emitting a VPERM for the following shuffle:\n");
9545 LLVM_DEBUG(SVOp->dump());
9546 LLVM_DEBUG(dbgs() << "With the following permute control vector:\n");
9547 LLVM_DEBUG(VPermMask.dump());
9548
9549 if (isLittleEndian)
9550 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
9551 V2, V1, VPermMask);
9552 else
9553 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
9554 V1, V2, VPermMask);
9555 }
9556
9557 /// getVectorCompareInfo - Given an intrinsic, return false if it is not a
9558 /// vector comparison. If it is, return true and fill in CompareOpc/isDot with
9559 /// information about the intrinsic.
9560 static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
9561 bool &isDot, const PPCSubtarget &Subtarget) {
9562 unsigned IntrinsicID =
9563 cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
9564 CompareOpc = -1;
9565 isDot = false;
9566 switch (IntrinsicID) {
9567 default:
9568 return false;
9569 // Comparison predicates.
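// Note: the CompareOpc values below appear to be the extended-opcode fields
// of the corresponding vector compare instructions (e.g. 966 for vcmpbfp);
// isDot selects the record ('.') form, which also sets CR6.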
9570 case Intrinsic::ppc_altivec_vcmpbfp_p: 9571 CompareOpc = 966; 9572 isDot = true; 9573 break; 9574 case Intrinsic::ppc_altivec_vcmpeqfp_p: 9575 CompareOpc = 198; 9576 isDot = true; 9577 break; 9578 case Intrinsic::ppc_altivec_vcmpequb_p: 9579 CompareOpc = 6; 9580 isDot = true; 9581 break; 9582 case Intrinsic::ppc_altivec_vcmpequh_p: 9583 CompareOpc = 70; 9584 isDot = true; 9585 break; 9586 case Intrinsic::ppc_altivec_vcmpequw_p: 9587 CompareOpc = 134; 9588 isDot = true; 9589 break; 9590 case Intrinsic::ppc_altivec_vcmpequd_p: 9591 if (Subtarget.hasP8Altivec()) { 9592 CompareOpc = 199; 9593 isDot = true; 9594 } else 9595 return false; 9596 break; 9597 case Intrinsic::ppc_altivec_vcmpneb_p: 9598 case Intrinsic::ppc_altivec_vcmpneh_p: 9599 case Intrinsic::ppc_altivec_vcmpnew_p: 9600 case Intrinsic::ppc_altivec_vcmpnezb_p: 9601 case Intrinsic::ppc_altivec_vcmpnezh_p: 9602 case Intrinsic::ppc_altivec_vcmpnezw_p: 9603 if (Subtarget.hasP9Altivec()) { 9604 switch (IntrinsicID) { 9605 default: 9606 llvm_unreachable("Unknown comparison intrinsic."); 9607 case Intrinsic::ppc_altivec_vcmpneb_p: 9608 CompareOpc = 7; 9609 break; 9610 case Intrinsic::ppc_altivec_vcmpneh_p: 9611 CompareOpc = 71; 9612 break; 9613 case Intrinsic::ppc_altivec_vcmpnew_p: 9614 CompareOpc = 135; 9615 break; 9616 case Intrinsic::ppc_altivec_vcmpnezb_p: 9617 CompareOpc = 263; 9618 break; 9619 case Intrinsic::ppc_altivec_vcmpnezh_p: 9620 CompareOpc = 327; 9621 break; 9622 case Intrinsic::ppc_altivec_vcmpnezw_p: 9623 CompareOpc = 391; 9624 break; 9625 } 9626 isDot = true; 9627 } else 9628 return false; 9629 break; 9630 case Intrinsic::ppc_altivec_vcmpgefp_p: 9631 CompareOpc = 454; 9632 isDot = true; 9633 break; 9634 case Intrinsic::ppc_altivec_vcmpgtfp_p: 9635 CompareOpc = 710; 9636 isDot = true; 9637 break; 9638 case Intrinsic::ppc_altivec_vcmpgtsb_p: 9639 CompareOpc = 774; 9640 isDot = true; 9641 break; 9642 case Intrinsic::ppc_altivec_vcmpgtsh_p: 9643 CompareOpc = 838; 9644 isDot = true; 9645 break; 9646 case Intrinsic::ppc_altivec_vcmpgtsw_p: 9647 CompareOpc = 902; 9648 isDot = true; 9649 break; 9650 case Intrinsic::ppc_altivec_vcmpgtsd_p: 9651 if (Subtarget.hasP8Altivec()) { 9652 CompareOpc = 967; 9653 isDot = true; 9654 } else 9655 return false; 9656 break; 9657 case Intrinsic::ppc_altivec_vcmpgtub_p: 9658 CompareOpc = 518; 9659 isDot = true; 9660 break; 9661 case Intrinsic::ppc_altivec_vcmpgtuh_p: 9662 CompareOpc = 582; 9663 isDot = true; 9664 break; 9665 case Intrinsic::ppc_altivec_vcmpgtuw_p: 9666 CompareOpc = 646; 9667 isDot = true; 9668 break; 9669 case Intrinsic::ppc_altivec_vcmpgtud_p: 9670 if (Subtarget.hasP8Altivec()) { 9671 CompareOpc = 711; 9672 isDot = true; 9673 } else 9674 return false; 9675 break; 9676 9677 case Intrinsic::ppc_altivec_vcmpequq: 9678 case Intrinsic::ppc_altivec_vcmpgtsq: 9679 case Intrinsic::ppc_altivec_vcmpgtuq: 9680 if (!Subtarget.isISA3_1()) 9681 return false; 9682 switch (IntrinsicID) { 9683 default: 9684 llvm_unreachable("Unknown comparison intrinsic."); 9685 case Intrinsic::ppc_altivec_vcmpequq: 9686 CompareOpc = 455; 9687 break; 9688 case Intrinsic::ppc_altivec_vcmpgtsq: 9689 CompareOpc = 903; 9690 break; 9691 case Intrinsic::ppc_altivec_vcmpgtuq: 9692 CompareOpc = 647; 9693 break; 9694 } 9695 break; 9696 9697 // VSX predicate comparisons use the same infrastructure 9698 case Intrinsic::ppc_vsx_xvcmpeqdp_p: 9699 case Intrinsic::ppc_vsx_xvcmpgedp_p: 9700 case Intrinsic::ppc_vsx_xvcmpgtdp_p: 9701 case Intrinsic::ppc_vsx_xvcmpeqsp_p: 9702 case Intrinsic::ppc_vsx_xvcmpgesp_p: 9703 case 
Intrinsic::ppc_vsx_xvcmpgtsp_p: 9704 if (Subtarget.hasVSX()) { 9705 switch (IntrinsicID) { 9706 case Intrinsic::ppc_vsx_xvcmpeqdp_p: 9707 CompareOpc = 99; 9708 break; 9709 case Intrinsic::ppc_vsx_xvcmpgedp_p: 9710 CompareOpc = 115; 9711 break; 9712 case Intrinsic::ppc_vsx_xvcmpgtdp_p: 9713 CompareOpc = 107; 9714 break; 9715 case Intrinsic::ppc_vsx_xvcmpeqsp_p: 9716 CompareOpc = 67; 9717 break; 9718 case Intrinsic::ppc_vsx_xvcmpgesp_p: 9719 CompareOpc = 83; 9720 break; 9721 case Intrinsic::ppc_vsx_xvcmpgtsp_p: 9722 CompareOpc = 75; 9723 break; 9724 } 9725 isDot = true; 9726 } else 9727 return false; 9728 break; 9729 9730 // Normal Comparisons. 9731 case Intrinsic::ppc_altivec_vcmpbfp: 9732 CompareOpc = 966; 9733 break; 9734 case Intrinsic::ppc_altivec_vcmpeqfp: 9735 CompareOpc = 198; 9736 break; 9737 case Intrinsic::ppc_altivec_vcmpequb: 9738 CompareOpc = 6; 9739 break; 9740 case Intrinsic::ppc_altivec_vcmpequh: 9741 CompareOpc = 70; 9742 break; 9743 case Intrinsic::ppc_altivec_vcmpequw: 9744 CompareOpc = 134; 9745 break; 9746 case Intrinsic::ppc_altivec_vcmpequd: 9747 if (Subtarget.hasP8Altivec()) 9748 CompareOpc = 199; 9749 else 9750 return false; 9751 break; 9752 case Intrinsic::ppc_altivec_vcmpneb: 9753 case Intrinsic::ppc_altivec_vcmpneh: 9754 case Intrinsic::ppc_altivec_vcmpnew: 9755 case Intrinsic::ppc_altivec_vcmpnezb: 9756 case Intrinsic::ppc_altivec_vcmpnezh: 9757 case Intrinsic::ppc_altivec_vcmpnezw: 9758 if (Subtarget.hasP9Altivec()) 9759 switch (IntrinsicID) { 9760 default: 9761 llvm_unreachable("Unknown comparison intrinsic."); 9762 case Intrinsic::ppc_altivec_vcmpneb: 9763 CompareOpc = 7; 9764 break; 9765 case Intrinsic::ppc_altivec_vcmpneh: 9766 CompareOpc = 71; 9767 break; 9768 case Intrinsic::ppc_altivec_vcmpnew: 9769 CompareOpc = 135; 9770 break; 9771 case Intrinsic::ppc_altivec_vcmpnezb: 9772 CompareOpc = 263; 9773 break; 9774 case Intrinsic::ppc_altivec_vcmpnezh: 9775 CompareOpc = 327; 9776 break; 9777 case Intrinsic::ppc_altivec_vcmpnezw: 9778 CompareOpc = 391; 9779 break; 9780 } 9781 else 9782 return false; 9783 break; 9784 case Intrinsic::ppc_altivec_vcmpgefp: 9785 CompareOpc = 454; 9786 break; 9787 case Intrinsic::ppc_altivec_vcmpgtfp: 9788 CompareOpc = 710; 9789 break; 9790 case Intrinsic::ppc_altivec_vcmpgtsb: 9791 CompareOpc = 774; 9792 break; 9793 case Intrinsic::ppc_altivec_vcmpgtsh: 9794 CompareOpc = 838; 9795 break; 9796 case Intrinsic::ppc_altivec_vcmpgtsw: 9797 CompareOpc = 902; 9798 break; 9799 case Intrinsic::ppc_altivec_vcmpgtsd: 9800 if (Subtarget.hasP8Altivec()) 9801 CompareOpc = 967; 9802 else 9803 return false; 9804 break; 9805 case Intrinsic::ppc_altivec_vcmpgtub: 9806 CompareOpc = 518; 9807 break; 9808 case Intrinsic::ppc_altivec_vcmpgtuh: 9809 CompareOpc = 582; 9810 break; 9811 case Intrinsic::ppc_altivec_vcmpgtuw: 9812 CompareOpc = 646; 9813 break; 9814 case Intrinsic::ppc_altivec_vcmpgtud: 9815 if (Subtarget.hasP8Altivec()) 9816 CompareOpc = 711; 9817 else 9818 return false; 9819 break; 9820 case Intrinsic::ppc_altivec_vcmpequq_p: 9821 case Intrinsic::ppc_altivec_vcmpgtsq_p: 9822 case Intrinsic::ppc_altivec_vcmpgtuq_p: 9823 if (!Subtarget.isISA3_1()) 9824 return false; 9825 switch (IntrinsicID) { 9826 default: 9827 llvm_unreachable("Unknown comparison intrinsic."); 9828 case Intrinsic::ppc_altivec_vcmpequq_p: 9829 CompareOpc = 455; 9830 break; 9831 case Intrinsic::ppc_altivec_vcmpgtsq_p: 9832 CompareOpc = 903; 9833 break; 9834 case Intrinsic::ppc_altivec_vcmpgtuq_p: 9835 CompareOpc = 647; 9836 break; 9837 } 9838 isDot = true; 9839 break; 9840 } 
9841 return true; 9842 } 9843 9844 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 9845 /// lower, do it, otherwise return null. 9846 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 9847 SelectionDAG &DAG) const { 9848 unsigned IntrinsicID = 9849 cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 9850 9851 SDLoc dl(Op); 9852 9853 switch (IntrinsicID) { 9854 case Intrinsic::thread_pointer: 9855 // Reads the thread pointer register, used for __builtin_thread_pointer. 9856 if (Subtarget.isPPC64()) 9857 return DAG.getRegister(PPC::X13, MVT::i64); 9858 return DAG.getRegister(PPC::R2, MVT::i32); 9859 9860 case Intrinsic::ppc_mma_disassemble_acc: 9861 case Intrinsic::ppc_vsx_disassemble_pair: { 9862 int NumVecs = 2; 9863 SDValue WideVec = Op.getOperand(1); 9864 if (IntrinsicID == Intrinsic::ppc_mma_disassemble_acc) { 9865 NumVecs = 4; 9866 WideVec = DAG.getNode(PPCISD::XXMFACC, dl, MVT::v512i1, WideVec); 9867 } 9868 SmallVector<SDValue, 4> RetOps; 9869 for (int VecNo = 0; VecNo < NumVecs; VecNo++) { 9870 SDValue Extract = DAG.getNode( 9871 PPCISD::EXTRACT_VSX_REG, dl, MVT::v16i8, WideVec, 9872 DAG.getConstant(Subtarget.isLittleEndian() ? NumVecs - 1 - VecNo 9873 : VecNo, 9874 dl, MVT::i64)); 9875 RetOps.push_back(Extract); 9876 } 9877 return DAG.getMergeValues(RetOps, dl); 9878 } 9879 } 9880 9881 // If this is a lowered altivec predicate compare, CompareOpc is set to the 9882 // opcode number of the comparison. 9883 int CompareOpc; 9884 bool isDot; 9885 if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget)) 9886 return SDValue(); // Don't custom lower most intrinsics. 9887 9888 // If this is a non-dot comparison, make the VCMP node and we are done. 9889 if (!isDot) { 9890 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(), 9891 Op.getOperand(1), Op.getOperand(2), 9892 DAG.getConstant(CompareOpc, dl, MVT::i32)); 9893 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp); 9894 } 9895 9896 // Create the PPCISD altivec 'dot' comparison node. 9897 SDValue Ops[] = { 9898 Op.getOperand(2), // LHS 9899 Op.getOperand(3), // RHS 9900 DAG.getConstant(CompareOpc, dl, MVT::i32) 9901 }; 9902 EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue }; 9903 SDValue CompNode = DAG.getNode(PPCISD::VCMP_rec, dl, VTs, Ops); 9904 9905 // Now that we have the comparison, emit a copy from the CR to a GPR. 9906 // This is flagged to the above dot comparison. 9907 SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32, 9908 DAG.getRegister(PPC::CR6, MVT::i32), 9909 CompNode.getValue(1)); 9910 9911 // Unpack the result based on how the target uses it. 9912 unsigned BitNo; // Bit # of CR6. 9913 bool InvertBit; // Invert result? 9914 switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) { 9915 default: // Can't happen, don't crash on invalid number though. 9916 case 0: // Return the value of the EQ bit of CR6. 9917 BitNo = 0; InvertBit = false; 9918 break; 9919 case 1: // Return the inverted value of the EQ bit of CR6. 9920 BitNo = 0; InvertBit = true; 9921 break; 9922 case 2: // Return the value of the LT bit of CR6. 9923 BitNo = 2; InvertBit = false; 9924 break; 9925 case 3: // Return the inverted value of the LT bit of CR6. 9926 BitNo = 2; InvertBit = true; 9927 break; 9928 } 9929 9930 // Shift the bit into the low position. 9931 Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags, 9932 DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32)); 9933 // Isolate the bit. 
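// (After the SRL above, the requested bit is in bit 0 of Flags: MFOCRF
// leaves CR6 in bits 7..4 of the GPR as LT, GT, EQ, SO, so e.g. BitNo == 0
// (EQ) shifts right by 8 - (3 - 0) == 5.)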
9934 Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags, 9935 DAG.getConstant(1, dl, MVT::i32)); 9936 9937 // If we are supposed to, toggle the bit. 9938 if (InvertBit) 9939 Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags, 9940 DAG.getConstant(1, dl, MVT::i32)); 9941 return Flags; 9942 } 9943 9944 SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op, 9945 SelectionDAG &DAG) const { 9946 // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to 9947 // the beginning of the argument list. 9948 int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1; 9949 SDLoc DL(Op); 9950 switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) { 9951 case Intrinsic::ppc_cfence: { 9952 assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument."); 9953 assert(Subtarget.isPPC64() && "Only 64-bit is supported for now."); 9954 return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other, 9955 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, 9956 Op.getOperand(ArgStart + 1)), 9957 Op.getOperand(0)), 9958 0); 9959 } 9960 default: 9961 break; 9962 } 9963 return SDValue(); 9964 } 9965 9966 // Lower scalar BSWAP64 to xxbrd. 9967 SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const { 9968 SDLoc dl(Op); 9969 // MTVSRDD 9970 Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0), 9971 Op.getOperand(0)); 9972 // XXBRD 9973 Op = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Op); 9974 // MFVSRD 9975 int VectorIndex = 0; 9976 if (Subtarget.isLittleEndian()) 9977 VectorIndex = 1; 9978 Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op, 9979 DAG.getTargetConstant(VectorIndex, dl, MVT::i32)); 9980 return Op; 9981 } 9982 9983 // ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be 9984 // compared to a value that is atomically loaded (atomic loads zero-extend). 9985 SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, 9986 SelectionDAG &DAG) const { 9987 assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP && 9988 "Expecting an atomic compare-and-swap here."); 9989 SDLoc dl(Op); 9990 auto *AtomicNode = cast<AtomicSDNode>(Op.getNode()); 9991 EVT MemVT = AtomicNode->getMemoryVT(); 9992 if (MemVT.getSizeInBits() >= 32) 9993 return Op; 9994 9995 SDValue CmpOp = Op.getOperand(2); 9996 // If this is already correctly zero-extended, leave it alone. 9997 auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits()); 9998 if (DAG.MaskedValueIsZero(CmpOp, HighBits)) 9999 return Op; 10000 10001 // Clear the high bits of the compare operand. 10002 unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1; 10003 SDValue NewCmpOp = 10004 DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp, 10005 DAG.getConstant(MaskVal, dl, MVT::i32)); 10006 10007 // Replace the existing compare operand with the properly zero-extended one. 10008 SmallVector<SDValue, 4> Ops; 10009 for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++) 10010 Ops.push_back(AtomicNode->getOperand(i)); 10011 Ops[2] = NewCmpOp; 10012 MachineMemOperand *MMO = AtomicNode->getMemOperand(); 10013 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other); 10014 auto NodeTy = 10015 (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16; 10016 return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO); 10017 } 10018 10019 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, 10020 SelectionDAG &DAG) const { 10021 SDLoc dl(Op); 10022 // Create a stack slot that is 16-byte aligned. 
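// SCALAR_TO_VECTOR is lowered through memory here: store the scalar into
// the slot, then reload the whole slot as the vector type. Only the lane
// holding the scalar is meaningful; the other lanes are unspecified, which
// is all SCALAR_TO_VECTOR guarantees.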
10023 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 10024 int FrameIdx = MFI.CreateStackObject(16, Align(16), false); 10025 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 10026 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 10027 10028 // Store the input value into Value#0 of the stack slot. 10029 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 10030 MachinePointerInfo()); 10031 // Load it out. 10032 return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo()); 10033 } 10034 10035 SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, 10036 SelectionDAG &DAG) const { 10037 assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT && 10038 "Should only be called for ISD::INSERT_VECTOR_ELT"); 10039 10040 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 10041 // We have legal lowering for constant indices but not for variable ones. 10042 if (!C) 10043 return SDValue(); 10044 10045 EVT VT = Op.getValueType(); 10046 SDLoc dl(Op); 10047 SDValue V1 = Op.getOperand(0); 10048 SDValue V2 = Op.getOperand(1); 10049 // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types. 10050 if (VT == MVT::v8i16 || VT == MVT::v16i8) { 10051 SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2); 10052 unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8; 10053 unsigned InsertAtElement = C->getZExtValue(); 10054 unsigned InsertAtByte = InsertAtElement * BytesInEachElement; 10055 if (Subtarget.isLittleEndian()) { 10056 InsertAtByte = (16 - BytesInEachElement) - InsertAtByte; 10057 } 10058 return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz, 10059 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 10060 } 10061 return Op; 10062 } 10063 10064 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op, 10065 SelectionDAG &DAG) const { 10066 SDLoc dl(Op); 10067 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode()); 10068 SDValue LoadChain = LN->getChain(); 10069 SDValue BasePtr = LN->getBasePtr(); 10070 EVT VT = Op.getValueType(); 10071 10072 if (VT != MVT::v256i1 && VT != MVT::v512i1) 10073 return Op; 10074 10075 // Type v256i1 is used for pairs and v512i1 is used for accumulators. 10076 // Here we create 2 or 4 v16i8 loads to load the pair or accumulator value in 10077 // 2 or 4 vsx registers. 10078 assert((VT != MVT::v512i1 || Subtarget.hasMMA()) && 10079 "Type unsupported without MMA"); 10080 assert((VT != MVT::v256i1 || Subtarget.pairedVectorMemops()) && 10081 "Type unsupported without paired vector support"); 10082 Align Alignment = LN->getAlign(); 10083 SmallVector<SDValue, 4> Loads; 10084 SmallVector<SDValue, 4> LoadChains; 10085 unsigned NumVecs = VT.getSizeInBits() / 128; 10086 for (unsigned Idx = 0; Idx < NumVecs; ++Idx) { 10087 SDValue Load = 10088 DAG.getLoad(MVT::v16i8, dl, LoadChain, BasePtr, 10089 LN->getPointerInfo().getWithOffset(Idx * 16), 10090 commonAlignment(Alignment, Idx * 16), 10091 LN->getMemOperand()->getFlags(), LN->getAAInfo()); 10092 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 10093 DAG.getConstant(16, dl, BasePtr.getValueType())); 10094 Loads.push_back(Load); 10095 LoadChains.push_back(Load.getValue(1)); 10096 } 10097 if (Subtarget.isLittleEndian()) { 10098 std::reverse(Loads.begin(), Loads.end()); 10099 std::reverse(LoadChains.begin(), LoadChains.end()); 10100 } 10101 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 10102 SDValue Value = 10103 DAG.getNode(VT == MVT::v512i1 ? 
PPCISD::ACC_BUILD : PPCISD::PAIR_BUILD, 10104 dl, VT, Loads); 10105 SDValue RetOps[] = {Value, TF}; 10106 return DAG.getMergeValues(RetOps, dl); 10107 } 10108 10109 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op, 10110 SelectionDAG &DAG) const { 10111 SDLoc dl(Op); 10112 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode()); 10113 SDValue StoreChain = SN->getChain(); 10114 SDValue BasePtr = SN->getBasePtr(); 10115 SDValue Value = SN->getValue(); 10116 EVT StoreVT = Value.getValueType(); 10117 10118 if (StoreVT != MVT::v256i1 && StoreVT != MVT::v512i1) 10119 return Op; 10120 10121 // Type v256i1 is used for pairs and v512i1 is used for accumulators. 10122 // Here we create 2 or 4 v16i8 stores to store the pair or accumulator 10123 // underlying registers individually. 10124 assert((StoreVT != MVT::v512i1 || Subtarget.hasMMA()) && 10125 "Type unsupported without MMA"); 10126 assert((StoreVT != MVT::v256i1 || Subtarget.pairedVectorMemops()) && 10127 "Type unsupported without paired vector support"); 10128 Align Alignment = SN->getAlign(); 10129 SmallVector<SDValue, 4> Stores; 10130 unsigned NumVecs = 2; 10131 if (StoreVT == MVT::v512i1) { 10132 Value = DAG.getNode(PPCISD::XXMFACC, dl, MVT::v512i1, Value); 10133 NumVecs = 4; 10134 } 10135 for (unsigned Idx = 0; Idx < NumVecs; ++Idx) { 10136 unsigned VecNum = Subtarget.isLittleEndian() ? NumVecs - 1 - Idx : Idx; 10137 SDValue Elt = DAG.getNode(PPCISD::EXTRACT_VSX_REG, dl, MVT::v16i8, Value, 10138 DAG.getConstant(VecNum, dl, MVT::i64)); 10139 SDValue Store = 10140 DAG.getStore(StoreChain, dl, Elt, BasePtr, 10141 SN->getPointerInfo().getWithOffset(Idx * 16), 10142 commonAlignment(Alignment, Idx * 16), 10143 SN->getMemOperand()->getFlags(), SN->getAAInfo()); 10144 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 10145 DAG.getConstant(16, dl, BasePtr.getValueType())); 10146 Stores.push_back(Store); 10147 } 10148 SDValue TF = DAG.getTokenFactor(dl, Stores); 10149 return TF; 10150 } 10151 10152 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { 10153 SDLoc dl(Op); 10154 if (Op.getValueType() == MVT::v4i32) { 10155 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 10156 10157 SDValue Zero = getCanonicalConstSplat(0, 1, MVT::v4i32, DAG, dl); 10158 // +16 as shift amt. 10159 SDValue Neg16 = getCanonicalConstSplat(-16, 4, MVT::v4i32, DAG, dl); 10160 SDValue RHSSwap = // = vrlw RHS, 16 10161 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl); 10162 10163 // Shrinkify inputs to v8i16. 10164 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS); 10165 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS); 10166 RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap); 10167 10168 // Low parts multiplied together, generating 32-bit results (we ignore the 10169 // top parts). 10170 SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh, 10171 LHS, RHS, DAG, dl, MVT::v4i32); 10172 10173 SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm, 10174 LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32); 10175 // Shift the high parts up 16 bits. 10176 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, 10177 Neg16, DAG, dl); 10178 return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd); 10179 } else if (Op.getValueType() == MVT::v16i8) { 10180 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 10181 bool isLittleEndian = Subtarget.isLittleEndian(); 10182 10183 // Multiply the even 8-bit parts, producing 16-bit sums. 
10184 SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub, 10185 LHS, RHS, DAG, dl, MVT::v8i16); 10186 EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts); 10187 10188 // Multiply the odd 8-bit parts, producing 16-bit sums. 10189 SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub, 10190 LHS, RHS, DAG, dl, MVT::v8i16); 10191 OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts); 10192 10193 // Merge the results together. Because vmuleub and vmuloub are 10194 // instructions with a big-endian bias, we must reverse the 10195 // element numbering and reverse the meaning of "odd" and "even" 10196 // when generating little endian code. 10197 int Ops[16]; 10198 for (unsigned i = 0; i != 8; ++i) { 10199 if (isLittleEndian) { 10200 Ops[i*2 ] = 2*i; 10201 Ops[i*2+1] = 2*i+16; 10202 } else { 10203 Ops[i*2 ] = 2*i+1; 10204 Ops[i*2+1] = 2*i+1+16; 10205 } 10206 } 10207 if (isLittleEndian) 10208 return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops); 10209 else 10210 return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops); 10211 } else { 10212 llvm_unreachable("Unknown mul to lower!"); 10213 } 10214 } 10215 10216 SDValue PPCTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { 10217 bool IsStrict = Op->isStrictFPOpcode(); 10218 if (Op.getOperand(IsStrict ? 1 : 0).getValueType() == MVT::f128 && 10219 !Subtarget.hasP9Vector()) 10220 return SDValue(); 10221 10222 return Op; 10223 } 10224 10225 // Custom lowering for fpext v2f32 to v2f64. 10226 SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const { 10227 10228 assert(Op.getOpcode() == ISD::FP_EXTEND && 10229 "Should only be called for ISD::FP_EXTEND"); 10230 10231 // FIXME: handle extends from half precision float vectors on P9. 10232 // We only want to custom lower an extend from v2f32 to v2f64. 10233 if (Op.getValueType() != MVT::v2f64 || 10234 Op.getOperand(0).getValueType() != MVT::v2f32) 10235 return SDValue(); 10236 10237 SDLoc dl(Op); 10238 SDValue Op0 = Op.getOperand(0); 10239 10240 switch (Op0.getOpcode()) { 10241 default: 10242 return SDValue(); 10243 case ISD::EXTRACT_SUBVECTOR: { 10244 assert(Op0.getNumOperands() == 2 && 10245 isa<ConstantSDNode>(Op0->getOperand(1)) && 10246 "Node should have 2 operands with second one being a constant!"); 10247 10248 if (Op0.getOperand(0).getValueType() != MVT::v4f32) 10249 return SDValue(); 10250 10251 // Custom lowering is only done for the high or low doubleword. 10252 int Idx = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue(); 10253 if (Idx % 2 != 0) 10254 return SDValue(); 10255 10256 // Since input is v4f32, at this point Idx is either 0 or 2. 10257 // Shift to get the doubleword position we want. 10258 int DWord = Idx >> 1; 10259 10260 // High and low word positions are different on little endian. 10261 if (Subtarget.isLittleEndian()) 10262 DWord ^= 0x1; 10263 10264 return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, 10265 Op0.getOperand(0), DAG.getConstant(DWord, dl, MVT::i32)); 10266 } 10267 case ISD::FADD: 10268 case ISD::FMUL: 10269 case ISD::FSUB: { 10270 SDValue NewLoad[2]; 10271 for (unsigned i = 0, ie = Op0.getNumOperands(); i != ie; ++i) { 10272 // Ensure both inputs are loads. 10273 SDValue LdOp = Op0.getOperand(i); 10274 if (LdOp.getOpcode() != ISD::LOAD) 10275 return SDValue(); 10276 // Generate new load node.
10277 LoadSDNode *LD = cast<LoadSDNode>(LdOp); 10278 SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()}; 10279 NewLoad[i] = DAG.getMemIntrinsicNode( 10280 PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps, 10281 LD->getMemoryVT(), LD->getMemOperand()); 10282 } 10283 SDValue NewOp = 10284 DAG.getNode(Op0.getOpcode(), SDLoc(Op0), MVT::v4f32, NewLoad[0], 10285 NewLoad[1], Op0.getNode()->getFlags()); 10286 return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewOp, 10287 DAG.getConstant(0, dl, MVT::i32)); 10288 } 10289 case ISD::LOAD: { 10290 LoadSDNode *LD = cast<LoadSDNode>(Op0); 10291 SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()}; 10292 SDValue NewLd = DAG.getMemIntrinsicNode( 10293 PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps, 10294 LD->getMemoryVT(), LD->getMemOperand()); 10295 return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewLd, 10296 DAG.getConstant(0, dl, MVT::i32)); 10297 } 10298 } 10299 llvm_unreachable("Should return for all cases within switch."); 10300 } 10301 10302 /// LowerOperation - Provide custom lowering hooks for some operations. 10303 /// 10304 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 10305 switch (Op.getOpcode()) { 10306 default: llvm_unreachable("Wasn't expecting to be able to lower this!"); 10307 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 10308 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 10309 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 10310 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 10311 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 10312 case ISD::SETCC: return LowerSETCC(Op, DAG); 10313 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); 10314 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); 10315 10316 // Variable argument lowering. 10317 case ISD::VASTART: return LowerVASTART(Op, DAG); 10318 case ISD::VAARG: return LowerVAARG(Op, DAG); 10319 case ISD::VACOPY: return LowerVACOPY(Op, DAG); 10320 10321 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG); 10322 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 10323 case ISD::GET_DYNAMIC_AREA_OFFSET: 10324 return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG); 10325 10326 // Exception handling lowering. 10327 case ISD::EH_DWARF_CFA: return LowerEH_DWARF_CFA(Op, DAG); 10328 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG); 10329 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG); 10330 10331 case ISD::LOAD: return LowerLOAD(Op, DAG); 10332 case ISD::STORE: return LowerSTORE(Op, DAG); 10333 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG); 10334 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 10335 case ISD::STRICT_FP_TO_UINT: 10336 case ISD::STRICT_FP_TO_SINT: 10337 case ISD::FP_TO_UINT: 10338 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, SDLoc(Op)); 10339 case ISD::STRICT_UINT_TO_FP: 10340 case ISD::STRICT_SINT_TO_FP: 10341 case ISD::UINT_TO_FP: 10342 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 10343 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 10344 10345 // Lower 64-bit shifts. 10346 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 10347 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 10348 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 10349 10350 case ISD::FSHL: return LowerFunnelShift(Op, DAG); 10351 case ISD::FSHR: return LowerFunnelShift(Op, DAG); 10352 10353 // Vector-related lowering.
10354 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 10355 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 10356 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 10357 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 10358 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 10359 case ISD::MUL: return LowerMUL(Op, DAG); 10360 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG); 10361 case ISD::STRICT_FP_ROUND: 10362 case ISD::FP_ROUND: 10363 return LowerFP_ROUND(Op, DAG); 10364 case ISD::ROTL: return LowerROTL(Op, DAG); 10365 10366 // For counter-based loop handling. 10367 case ISD::INTRINSIC_W_CHAIN: return SDValue(); 10368 10369 case ISD::BITCAST: return LowerBITCAST(Op, DAG); 10370 10371 // Frame & Return address. 10372 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 10373 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 10374 10375 case ISD::INTRINSIC_VOID: 10376 return LowerINTRINSIC_VOID(Op, DAG); 10377 case ISD::BSWAP: 10378 return LowerBSWAP(Op, DAG); 10379 case ISD::ATOMIC_CMP_SWAP: 10380 return LowerATOMIC_CMP_SWAP(Op, DAG); 10381 } 10382 } 10383 10384 void PPCTargetLowering::ReplaceNodeResults(SDNode *N, 10385 SmallVectorImpl<SDValue>&Results, 10386 SelectionDAG &DAG) const { 10387 SDLoc dl(N); 10388 switch (N->getOpcode()) { 10389 default: 10390 llvm_unreachable("Do not know how to custom type legalize this operation!"); 10391 case ISD::READCYCLECOUNTER: { 10392 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 10393 SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0)); 10394 10395 Results.push_back( 10396 DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, RTB, RTB.getValue(1))); 10397 Results.push_back(RTB.getValue(2)); 10398 break; 10399 } 10400 case ISD::INTRINSIC_W_CHAIN: { 10401 if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 10402 Intrinsic::loop_decrement) 10403 break; 10404 10405 assert(N->getValueType(0) == MVT::i1 && 10406 "Unexpected result type for CTR decrement intrinsic"); 10407 EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), 10408 N->getValueType(0)); 10409 SDVTList VTs = DAG.getVTList(SVT, MVT::Other); 10410 SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0), 10411 N->getOperand(1)); 10412 10413 Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt)); 10414 Results.push_back(NewInt.getValue(1)); 10415 break; 10416 } 10417 case ISD::VAARG: { 10418 if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64()) 10419 return; 10420 10421 EVT VT = N->getValueType(0); 10422 10423 if (VT == MVT::i64) { 10424 SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG); 10425 10426 Results.push_back(NewNode); 10427 Results.push_back(NewNode.getValue(1)); 10428 } 10429 return; 10430 } 10431 case ISD::STRICT_FP_TO_SINT: 10432 case ISD::STRICT_FP_TO_UINT: 10433 case ISD::FP_TO_SINT: 10434 case ISD::FP_TO_UINT: 10435 // LowerFP_TO_INT() can only handle f32 and f64. 10436 if (N->getOperand(N->isStrictFPOpcode() ? 1 : 0).getValueType() == 10437 MVT::ppcf128) 10438 return; 10439 Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl)); 10440 return; 10441 case ISD::TRUNCATE: { 10442 if (!N->getValueType(0).isVector()) 10443 return; 10444 SDValue Lowered = LowerTRUNCATEVector(SDValue(N, 0), DAG); 10445 if (Lowered) 10446 Results.push_back(Lowered); 10447 return; 10448 } 10449 case ISD::FSHL: 10450 case ISD::FSHR: 10451 // Don't handle funnel shifts here. 
10452 return; 10453 case ISD::BITCAST: 10454 // Don't handle bitcast here. 10455 return; 10456 case ISD::FP_EXTEND: 10457 SDValue Lowered = LowerFP_EXTEND(SDValue(N, 0), DAG); 10458 if (Lowered) 10459 Results.push_back(Lowered); 10460 return; 10461 } 10462 } 10463 10464 //===----------------------------------------------------------------------===// 10465 // Other Lowering Code 10466 //===----------------------------------------------------------------------===// 10467 10468 static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) { 10469 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 10470 Function *Func = Intrinsic::getDeclaration(M, Id); 10471 return Builder.CreateCall(Func, {}); 10472 } 10473 10474 // The mappings for emitLeading/TrailingFence are taken from 10475 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html 10476 Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder, 10477 Instruction *Inst, 10478 AtomicOrdering Ord) const { 10479 if (Ord == AtomicOrdering::SequentiallyConsistent) 10480 return callIntrinsic(Builder, Intrinsic::ppc_sync); 10481 if (isReleaseOrStronger(Ord)) 10482 return callIntrinsic(Builder, Intrinsic::ppc_lwsync); 10483 return nullptr; 10484 } 10485 10486 Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder, 10487 Instruction *Inst, 10488 AtomicOrdering Ord) const { 10489 if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) { 10490 // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and 10491 // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html 10492 // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification. 10493 if (isa<LoadInst>(Inst) && Subtarget.isPPC64()) 10494 return Builder.CreateCall( 10495 Intrinsic::getDeclaration( 10496 Builder.GetInsertBlock()->getParent()->getParent(), 10497 Intrinsic::ppc_cfence, {Inst->getType()}), 10498 {Inst}); 10499 // FIXME: Can use isync for rmw operations. 10500 return callIntrinsic(Builder, Intrinsic::ppc_lwsync); 10501 } 10502 return nullptr; 10503 } 10504 10505 MachineBasicBlock * 10506 PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB, 10507 unsigned AtomicSize, 10508 unsigned BinOpcode, 10509 unsigned CmpOpcode, 10510 unsigned CmpPred) const { 10511 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
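// AtomicSize is the width of the operation in bytes (1, 2, 4 or 8; the partword sizes require Subtarget.hasPartwordAtomics()). When CmpOpcode is nonzero, the min/max variant is emitted: the incoming value is first compared against the loaded value, and the branch predicated on CmpPred skips the store-conditional so the memory value is kept unchanged.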
10512 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 10513 10514 auto LoadMnemonic = PPC::LDARX; 10515 auto StoreMnemonic = PPC::STDCX; 10516 switch (AtomicSize) { 10517 default: 10518 llvm_unreachable("Unexpected size of atomic entity"); 10519 case 1: 10520 LoadMnemonic = PPC::LBARX; 10521 StoreMnemonic = PPC::STBCX; 10522 assert(Subtarget.hasPartwordAtomics() && "Partword atomics are required for sizes below 4"); 10523 break; 10524 case 2: 10525 LoadMnemonic = PPC::LHARX; 10526 StoreMnemonic = PPC::STHCX; 10527 assert(Subtarget.hasPartwordAtomics() && "Partword atomics are required for sizes below 4"); 10528 break; 10529 case 4: 10530 LoadMnemonic = PPC::LWARX; 10531 StoreMnemonic = PPC::STWCX; 10532 break; 10533 case 8: 10534 LoadMnemonic = PPC::LDARX; 10535 StoreMnemonic = PPC::STDCX; 10536 break; 10537 } 10538 10539 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 10540 MachineFunction *F = BB->getParent(); 10541 MachineFunction::iterator It = ++BB->getIterator(); 10542 10543 Register dest = MI.getOperand(0).getReg(); 10544 Register ptrA = MI.getOperand(1).getReg(); 10545 Register ptrB = MI.getOperand(2).getReg(); 10546 Register incr = MI.getOperand(3).getReg(); 10547 DebugLoc dl = MI.getDebugLoc(); 10548 10549 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 10550 MachineBasicBlock *loop2MBB = 10551 CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr; 10552 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 10553 F->insert(It, loopMBB); 10554 if (CmpOpcode) 10555 F->insert(It, loop2MBB); 10556 F->insert(It, exitMBB); 10557 exitMBB->splice(exitMBB->begin(), BB, 10558 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 10559 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 10560 10561 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10562 Register TmpReg = (!BinOpcode) ? incr : 10563 RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass 10564 : &PPC::GPRCRegClass); 10565 10566 // thisMBB: 10567 // ... 10568 // fallthrough --> loopMBB 10569 BB->addSuccessor(loopMBB); 10570 10571 // loopMBB: 10572 // l[wd]arx dest, ptr 10573 // add r0, dest, incr 10574 // st[wd]cx. r0, ptr 10575 // bne- loopMBB 10576 // fallthrough --> exitMBB 10577 10578 // For max/min... 10579 // loopMBB: 10580 // l[wd]arx dest, ptr 10581 // cmpl?[wd] incr, dest 10582 // bgt exitMBB 10583 // loop2MBB: 10584 // st[wd]cx. dest, ptr 10585 // bne- loopMBB 10586 // fallthrough --> exitMBB 10587 10588 BB = loopMBB; 10589 BuildMI(BB, dl, TII->get(LoadMnemonic), dest) 10590 .addReg(ptrA).addReg(ptrB); 10591 if (BinOpcode) 10592 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest); 10593 if (CmpOpcode) { 10594 // Signed comparisons of byte or halfword values must be sign-extended. 10595 if (CmpOpcode == PPC::CMPW && AtomicSize < 4) { 10596 Register ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 10597 BuildMI(BB, dl, TII->get(AtomicSize == 1 ?
PPC::EXTSB : PPC::EXTSH), 10598 ExtReg).addReg(dest); 10599 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0) 10600 .addReg(incr).addReg(ExtReg); 10601 } else 10602 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0) 10603 .addReg(incr).addReg(dest); 10604 10605 BuildMI(BB, dl, TII->get(PPC::BCC)) 10606 .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB); 10607 BB->addSuccessor(loop2MBB); 10608 BB->addSuccessor(exitMBB); 10609 BB = loop2MBB; 10610 } 10611 BuildMI(BB, dl, TII->get(StoreMnemonic)) 10612 .addReg(TmpReg).addReg(ptrA).addReg(ptrB); 10613 BuildMI(BB, dl, TII->get(PPC::BCC)) 10614 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 10615 BB->addSuccessor(loopMBB); 10616 BB->addSuccessor(exitMBB); 10617 10618 // exitMBB: 10619 // ... 10620 BB = exitMBB; 10621 return BB; 10622 } 10623 10624 static bool isSignExtended(MachineInstr &MI, const PPCInstrInfo *TII) { 10625 switch(MI.getOpcode()) { 10626 default: 10627 return false; 10628 case PPC::COPY: 10629 return TII->isSignExtended(MI); 10630 case PPC::LHA: 10631 case PPC::LHA8: 10632 case PPC::LHAU: 10633 case PPC::LHAU8: 10634 case PPC::LHAUX: 10635 case PPC::LHAUX8: 10636 case PPC::LHAX: 10637 case PPC::LHAX8: 10638 case PPC::LWA: 10639 case PPC::LWAUX: 10640 case PPC::LWAX: 10641 case PPC::LWAX_32: 10642 case PPC::LWA_32: 10643 case PPC::PLHA: 10644 case PPC::PLHA8: 10645 case PPC::PLHA8pc: 10646 case PPC::PLHApc: 10647 case PPC::PLWA: 10648 case PPC::PLWA8: 10649 case PPC::PLWA8pc: 10650 case PPC::PLWApc: 10651 case PPC::EXTSB: 10652 case PPC::EXTSB8: 10653 case PPC::EXTSB8_32_64: 10654 case PPC::EXTSB8_rec: 10655 case PPC::EXTSB_rec: 10656 case PPC::EXTSH: 10657 case PPC::EXTSH8: 10658 case PPC::EXTSH8_32_64: 10659 case PPC::EXTSH8_rec: 10660 case PPC::EXTSH_rec: 10661 case PPC::EXTSW: 10662 case PPC::EXTSWSLI: 10663 case PPC::EXTSWSLI_32_64: 10664 case PPC::EXTSWSLI_32_64_rec: 10665 case PPC::EXTSWSLI_rec: 10666 case PPC::EXTSW_32: 10667 case PPC::EXTSW_32_64: 10668 case PPC::EXTSW_32_64_rec: 10669 case PPC::EXTSW_rec: 10670 case PPC::SRAW: 10671 case PPC::SRAWI: 10672 case PPC::SRAWI_rec: 10673 case PPC::SRAW_rec: 10674 return true; 10675 } 10676 return false; 10677 } 10678 10679 MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary( 10680 MachineInstr &MI, MachineBasicBlock *BB, 10681 bool is8bit, // operation 10682 unsigned BinOpcode, unsigned CmpOpcode, unsigned CmpPred) const { 10683 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 10684 const PPCInstrInfo *TII = Subtarget.getInstrInfo(); 10685 10686 // If this is a signed comparison and the value being compared is not known 10687 // to be sign extended, sign extend it here. 10688 DebugLoc dl = MI.getDebugLoc(); 10689 MachineFunction *F = BB->getParent(); 10690 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10691 Register incr = MI.getOperand(3).getReg(); 10692 bool IsSignExtended = Register::isVirtualRegister(incr) && 10693 isSignExtended(*RegInfo.getVRegDef(incr), TII); 10694 10695 if (CmpOpcode == PPC::CMPW && !IsSignExtended) { 10696 Register ValueReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 10697 BuildMI(*BB, MI, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueReg) 10698 .addReg(MI.getOperand(3).getReg()); 10699 MI.getOperand(3).setReg(ValueReg); 10700 } 10701 // If we support part-word atomic mnemonics, just use them 10702 if (Subtarget.hasPartwordAtomics()) 10703 return EmitAtomicBinary(MI, BB, is8bit ? 
1 : 2, BinOpcode, CmpOpcode, 10704 CmpPred); 10705 10706 // In 64-bit mode we have to use 64 bits for addresses, even though the 10707 // lwarx/stwcx. accesses are 32 bits wide. With the 32-bit atomics we can use 10708 // address registers without caring whether they're 32 or 64, but here we're 10709 // doing actual arithmetic on the addresses. 10710 bool is64bit = Subtarget.isPPC64(); 10711 bool isLittleEndian = Subtarget.isLittleEndian(); 10712 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 10713 10714 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 10715 MachineFunction::iterator It = ++BB->getIterator(); 10716 10717 Register dest = MI.getOperand(0).getReg(); 10718 Register ptrA = MI.getOperand(1).getReg(); 10719 Register ptrB = MI.getOperand(2).getReg(); 10720 10721 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 10722 MachineBasicBlock *loop2MBB = 10723 CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr; 10724 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 10725 F->insert(It, loopMBB); 10726 if (CmpOpcode) 10727 F->insert(It, loop2MBB); 10728 F->insert(It, exitMBB); 10729 exitMBB->splice(exitMBB->begin(), BB, 10730 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 10731 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 10732 10733 const TargetRegisterClass *RC = 10734 is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; 10735 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; 10736 10737 Register PtrReg = RegInfo.createVirtualRegister(RC); 10738 Register Shift1Reg = RegInfo.createVirtualRegister(GPRC); 10739 Register ShiftReg = 10740 isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC); 10741 Register Incr2Reg = RegInfo.createVirtualRegister(GPRC); 10742 Register MaskReg = RegInfo.createVirtualRegister(GPRC); 10743 Register Mask2Reg = RegInfo.createVirtualRegister(GPRC); 10744 Register Mask3Reg = RegInfo.createVirtualRegister(GPRC); 10745 Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC); 10746 Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC); 10747 Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC); 10748 Register TmpDestReg = RegInfo.createVirtualRegister(GPRC); 10749 Register Ptr1Reg; 10750 Register TmpReg = 10751 (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC); 10752 10753 // thisMBB: 10754 // ... 10755 // fallthrough --> loopMBB 10756 BB->addSuccessor(loopMBB); 10757 10758 // The 4-byte load must be aligned, while a char or short may be 10759 // anywhere in the word. Hence all this nasty bookkeeping code. 10760 // add ptr1, ptrA, ptrB [copy if ptrA==0] 10761 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 10762 // xori shift, shift1, 24 [16] 10763 // rlwinm ptr, ptr1, 0, 0, 29 10764 // slw incr2, incr, shift 10765 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 10766 // slw mask, mask2, shift 10767 // loopMBB: 10768 // lwarx tmpDest, ptr 10769 // add tmp, tmpDest, incr2 10770 // andc tmp2, tmpDest, mask 10771 // and tmp3, tmp, mask 10772 // or tmp4, tmp3, tmp2 10773 // stwcx. tmp4, ptr 10774 // bne- loopMBB 10775 // fallthrough --> exitMBB 10776 // srw dest, tmpDest, shift 10777 if (ptrA != ZeroReg) { 10778 Ptr1Reg = RegInfo.createVirtualRegister(RC); 10779 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 10780 .addReg(ptrA) 10781 .addReg(ptrB); 10782 } else { 10783 Ptr1Reg = ptrB; 10784 } 10785 // We need to use a 32-bit subregister to avoid a register class mismatch in 10786 // 64-bit mode. 10787 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg) 10788 .addReg(Ptr1Reg, 0, is64bit ?
PPC::sub_32 : 0) 10789 .addImm(3) 10790 .addImm(27) 10791 .addImm(is8bit ? 28 : 27); 10792 if (!isLittleEndian) 10793 BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg) 10794 .addReg(Shift1Reg) 10795 .addImm(is8bit ? 24 : 16); 10796 if (is64bit) 10797 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 10798 .addReg(Ptr1Reg) 10799 .addImm(0) 10800 .addImm(61); 10801 else 10802 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 10803 .addReg(Ptr1Reg) 10804 .addImm(0) 10805 .addImm(0) 10806 .addImm(29); 10807 BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg).addReg(incr).addReg(ShiftReg); 10808 if (is8bit) 10809 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 10810 else { 10811 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 10812 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg) 10813 .addReg(Mask3Reg) 10814 .addImm(65535); 10815 } 10816 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 10817 .addReg(Mask2Reg) 10818 .addReg(ShiftReg); 10819 10820 BB = loopMBB; 10821 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 10822 .addReg(ZeroReg) 10823 .addReg(PtrReg); 10824 if (BinOpcode) 10825 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg) 10826 .addReg(Incr2Reg) 10827 .addReg(TmpDestReg); 10828 BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg) 10829 .addReg(TmpDestReg) 10830 .addReg(MaskReg); 10831 BuildMI(BB, dl, TII->get(PPC::AND), Tmp3Reg).addReg(TmpReg).addReg(MaskReg); 10832 if (CmpOpcode) { 10833 // For unsigned comparisons, we can directly compare the shifted values. 10834 // For signed comparisons we shift and sign extend. 10835 Register SReg = RegInfo.createVirtualRegister(GPRC); 10836 BuildMI(BB, dl, TII->get(PPC::AND), SReg) 10837 .addReg(TmpDestReg) 10838 .addReg(MaskReg); 10839 unsigned ValueReg = SReg; 10840 unsigned CmpReg = Incr2Reg; 10841 if (CmpOpcode == PPC::CMPW) { 10842 ValueReg = RegInfo.createVirtualRegister(GPRC); 10843 BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg) 10844 .addReg(SReg) 10845 .addReg(ShiftReg); 10846 Register ValueSReg = RegInfo.createVirtualRegister(GPRC); 10847 BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg) 10848 .addReg(ValueReg); 10849 ValueReg = ValueSReg; 10850 CmpReg = incr; 10851 } 10852 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0) 10853 .addReg(CmpReg) 10854 .addReg(ValueReg); 10855 BuildMI(BB, dl, TII->get(PPC::BCC)) 10856 .addImm(CmpPred) 10857 .addReg(PPC::CR0) 10858 .addMBB(exitMBB); 10859 BB->addSuccessor(loop2MBB); 10860 BB->addSuccessor(exitMBB); 10861 BB = loop2MBB; 10862 } 10863 BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg).addReg(Tmp3Reg).addReg(Tmp2Reg); 10864 BuildMI(BB, dl, TII->get(PPC::STWCX)) 10865 .addReg(Tmp4Reg) 10866 .addReg(ZeroReg) 10867 .addReg(PtrReg); 10868 BuildMI(BB, dl, TII->get(PPC::BCC)) 10869 .addImm(PPC::PRED_NE) 10870 .addReg(PPC::CR0) 10871 .addMBB(loopMBB); 10872 BB->addSuccessor(loopMBB); 10873 BB->addSuccessor(exitMBB); 10874 10875 // exitMBB: 10876 // ... 
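// The update loop leaves the original word in TmpDestReg with the partword still in its shifted lane; move it down to bit 0 to produce the result.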
10877 BB = exitMBB; 10878 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest) 10879 .addReg(TmpDestReg) 10880 .addReg(ShiftReg); 10881 return BB; 10882 } 10883 10884 llvm::MachineBasicBlock * 10885 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI, 10886 MachineBasicBlock *MBB) const { 10887 DebugLoc DL = MI.getDebugLoc(); 10888 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 10889 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 10890 10891 MachineFunction *MF = MBB->getParent(); 10892 MachineRegisterInfo &MRI = MF->getRegInfo(); 10893 10894 const BasicBlock *BB = MBB->getBasicBlock(); 10895 MachineFunction::iterator I = ++MBB->getIterator(); 10896 10897 Register DstReg = MI.getOperand(0).getReg(); 10898 const TargetRegisterClass *RC = MRI.getRegClass(DstReg); 10899 assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!"); 10900 Register mainDstReg = MRI.createVirtualRegister(RC); 10901 Register restoreDstReg = MRI.createVirtualRegister(RC); 10902 10903 MVT PVT = getPointerTy(MF->getDataLayout()); 10904 assert((PVT == MVT::i64 || PVT == MVT::i32) && 10905 "Invalid Pointer Size!"); 10906 // For v = setjmp(buf), we generate 10907 // 10908 // thisMBB: 10909 // SjLjSetup mainMBB 10910 // bl mainMBB 10911 // v_restore = 1 10912 // b sinkMBB 10913 // 10914 // mainMBB: 10915 // buf[LabelOffset] = LR 10916 // v_main = 0 10917 // 10918 // sinkMBB: 10919 // v = phi(main, restore) 10920 // 10921 10922 MachineBasicBlock *thisMBB = MBB; 10923 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 10924 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 10925 MF->insert(I, mainMBB); 10926 MF->insert(I, sinkMBB); 10927 10928 MachineInstrBuilder MIB; 10929 10930 // Transfer the remainder of BB and its successor edges to sinkMBB. 10931 sinkMBB->splice(sinkMBB->begin(), MBB, 10932 std::next(MachineBasicBlock::iterator(MI)), MBB->end()); 10933 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 10934 10935 // Note that the structure of the jmp_buf used here is not compatible 10936 // with that used by libc, and is not designed to be. Specifically, it 10937 // stores only those 'reserved' registers that LLVM does not otherwise 10938 // understand how to spill. Also, by convention, by the time this 10939 // intrinsic is called, Clang has already stored the frame address in the 10940 // first slot of the buffer and stack address in the third. Following the 10941 // X86 target code, we'll store the jump address in the second slot. We also 10942 // need to save the TOC pointer (R2) to handle jumps between shared 10943 // libraries, and that will be stored in the fourth slot. The thread 10944 // identifier (R13) is not affected. 10945 10946 // thisMBB: 10947 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 10948 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 10949 const int64_t BPOffset = 4 * PVT.getStoreSize(); 10950 10951 // Prepare the IP in a register. 10952 const TargetRegisterClass *PtrRC = getRegClassFor(PVT); 10953 Register LabelReg = MRI.createVirtualRegister(PtrRC); 10954 Register BufReg = MI.getOperand(1).getReg(); 10955 10956 if (Subtarget.is64BitELFABI()) { 10957 setUsesTOCBasePtr(*MBB->getParent()); 10958 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD)) 10959 .addReg(PPC::X2) 10960 .addImm(TOCOffset) 10961 .addReg(BufReg) 10962 .cloneMemRefs(MI); 10963 } 10964 10965 // Naked functions never have a base pointer, and so we use r1. For all 10966 // other functions, this decision must be delayed until PEI.
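// To recap the jmp_buf layout used by this lowering (in pointer-size slots): slot 0 holds the frame address and slot 2 the stack address (both stored by Clang); slot 1 gets the jump address below, slot 3 the TOC pointer, and slot 4 the base pointer.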
10967 unsigned BaseReg; 10968 if (MF->getFunction().hasFnAttribute(Attribute::Naked)) 10969 BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1; 10970 else 10971 BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP; 10972 10973 MIB = BuildMI(*thisMBB, MI, DL, 10974 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW)) 10975 .addReg(BaseReg) 10976 .addImm(BPOffset) 10977 .addReg(BufReg) 10978 .cloneMemRefs(MI); 10979 10980 // Setup 10981 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB); 10982 MIB.addRegMask(TRI->getNoPreservedMask()); 10983 10984 BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1); 10985 10986 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup)) 10987 .addMBB(mainMBB); 10988 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB); 10989 10990 thisMBB->addSuccessor(mainMBB, BranchProbability::getZero()); 10991 thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne()); 10992 10993 // mainMBB: 10994 // mainDstReg = 0 10995 MIB = 10996 BuildMI(mainMBB, DL, 10997 TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg); 10998 10999 // Store IP 11000 if (Subtarget.isPPC64()) { 11001 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD)) 11002 .addReg(LabelReg) 11003 .addImm(LabelOffset) 11004 .addReg(BufReg); 11005 } else { 11006 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW)) 11007 .addReg(LabelReg) 11008 .addImm(LabelOffset) 11009 .addReg(BufReg); 11010 } 11011 MIB.cloneMemRefs(MI); 11012 11013 BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0); 11014 mainMBB->addSuccessor(sinkMBB); 11015 11016 // sinkMBB: 11017 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 11018 TII->get(PPC::PHI), DstReg) 11019 .addReg(mainDstReg).addMBB(mainMBB) 11020 .addReg(restoreDstReg).addMBB(thisMBB); 11021 11022 MI.eraseFromParent(); 11023 return sinkMBB; 11024 } 11025 11026 MachineBasicBlock * 11027 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI, 11028 MachineBasicBlock *MBB) const { 11029 DebugLoc DL = MI.getDebugLoc(); 11030 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 11031 11032 MachineFunction *MF = MBB->getParent(); 11033 MachineRegisterInfo &MRI = MF->getRegInfo(); 11034 11035 MVT PVT = getPointerTy(MF->getDataLayout()); 11036 assert((PVT == MVT::i64 || PVT == MVT::i32) && 11037 "Invalid Pointer Size!"); 11038 11039 const TargetRegisterClass *RC = 11040 (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; 11041 Register Tmp = MRI.createVirtualRegister(RC); 11042 // Since FP is only updated here but NOT referenced, it's treated as GPR. 11043 unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31; 11044 unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1; 11045 unsigned BP = 11046 (PVT == MVT::i64) 11047 ? PPC::X30 11048 : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29 11049 : PPC::R30); 11050 11051 MachineInstrBuilder MIB; 11052 11053 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 11054 const int64_t SPOffset = 2 * PVT.getStoreSize(); 11055 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 11056 const int64_t BPOffset = 4 * PVT.getStoreSize(); 11057 11058 Register BufReg = MI.getOperand(0).getReg(); 11059 11060 // Reload FP (the jumped-to function may not have had a 11061 // frame pointer, and if so, then its r31 will be restored 11062 // as necessary). 
11063 if (PVT == MVT::i64) { 11064 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP) 11065 .addImm(0) 11066 .addReg(BufReg); 11067 } else { 11068 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP) 11069 .addImm(0) 11070 .addReg(BufReg); 11071 } 11072 MIB.cloneMemRefs(MI); 11073 11074 // Reload IP 11075 if (PVT == MVT::i64) { 11076 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp) 11077 .addImm(LabelOffset) 11078 .addReg(BufReg); 11079 } else { 11080 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp) 11081 .addImm(LabelOffset) 11082 .addReg(BufReg); 11083 } 11084 MIB.cloneMemRefs(MI); 11085 11086 // Reload SP 11087 if (PVT == MVT::i64) { 11088 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP) 11089 .addImm(SPOffset) 11090 .addReg(BufReg); 11091 } else { 11092 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP) 11093 .addImm(SPOffset) 11094 .addReg(BufReg); 11095 } 11096 MIB.cloneMemRefs(MI); 11097 11098 // Reload BP 11099 if (PVT == MVT::i64) { 11100 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP) 11101 .addImm(BPOffset) 11102 .addReg(BufReg); 11103 } else { 11104 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP) 11105 .addImm(BPOffset) 11106 .addReg(BufReg); 11107 } 11108 MIB.cloneMemRefs(MI); 11109 11110 // Reload TOC 11111 if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) { 11112 setUsesTOCBasePtr(*MBB->getParent()); 11113 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2) 11114 .addImm(TOCOffset) 11115 .addReg(BufReg) 11116 .cloneMemRefs(MI); 11117 } 11118 11119 // Jump 11120 BuildMI(*MBB, MI, DL, 11121 TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp); 11122 BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR)); 11123 11124 MI.eraseFromParent(); 11125 return MBB; 11126 } 11127 11128 bool PPCTargetLowering::hasInlineStackProbe(MachineFunction &MF) const { 11129 // If the function specifically requests inline stack probes, emit them. 11130 if (MF.getFunction().hasFnAttribute("probe-stack")) 11131 return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() == 11132 "inline-asm"; 11133 return false; 11134 } 11135 11136 unsigned PPCTargetLowering::getStackProbeSize(MachineFunction &MF) const { 11137 const TargetFrameLowering *TFI = Subtarget.getFrameLowering(); 11138 unsigned StackAlign = TFI->getStackAlignment(); 11139 assert(StackAlign >= 1 && isPowerOf2_32(StackAlign) && 11140 "Unexpected stack alignment"); 11141 // The default stack probe size is 4096 if the function has no 11142 // stack-probe-size attribute. 11143 unsigned StackProbeSize = 4096; 11144 const Function &Fn = MF.getFunction(); 11145 if (Fn.hasFnAttribute("stack-probe-size")) 11146 Fn.getFnAttribute("stack-probe-size") 11147 .getValueAsString() 11148 .getAsInteger(0, StackProbeSize); 11149 // Round down to the stack alignment. 11150 StackProbeSize &= ~(StackAlign - 1); 11151 return StackProbeSize ? StackProbeSize : StackAlign; 11152 } 11153 11154 // Lower dynamic stack allocation with probing. `emitProbedAlloca` is split 11155 // into three phases. In the first phase, it uses the pseudo instruction 11156 // PREPARE_PROBED_ALLOCA to get the future results of the actual FramePointer 11157 // and FinalStackPtr. In the second phase, it generates a loop for probing 11158 // blocks. Finally, it uses the pseudo instruction DYNAREAOFFSET to get the 11159 // future result of MaxCallFrameSize so that it can calculate the correct data area pointer.
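// As a rough sketch (ignoring the realignment handled by the pseudos), for an
// allocation of Size bytes with probe size P the emitted code behaves like:
//   sp += -(Size % P);            // stdux/stwux touches the residual part
//   while (sp != FinalStackPtr)
//     sp += -P;                   // stdux/stwux touches one block per step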
11160 MachineBasicBlock * 11161 PPCTargetLowering::emitProbedAlloca(MachineInstr &MI, 11162 MachineBasicBlock *MBB) const { 11163 const bool isPPC64 = Subtarget.isPPC64(); 11164 MachineFunction *MF = MBB->getParent(); 11165 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 11166 DebugLoc DL = MI.getDebugLoc(); 11167 const unsigned ProbeSize = getStackProbeSize(*MF); 11168 const BasicBlock *ProbedBB = MBB->getBasicBlock(); 11169 MachineRegisterInfo &MRI = MF->getRegInfo(); 11170 // The CFG for probing the stack looks like: 11171 // +-----+ 11172 // | MBB | 11173 // +--+--+ 11174 // | 11175 // +----v----+ 11176 // +--->+ TestMBB +---+ 11177 // | +----+----+ | 11178 // | | | 11179 // | +-----v----+ | 11180 // +---+ BlockMBB | | 11181 // +----------+ | 11182 // | 11183 // +---------+ | 11184 // | TailMBB +<--+ 11185 // +---------+ 11186 // In MBB, calculate the previous frame pointer and the final stack pointer. 11187 // In TestMBB, test whether sp equals the final stack pointer; if so, jump to 11188 // TailMBB. In BlockMBB, update the sp atomically and jump back to TestMBB. 11189 // TailMBB is spliced via \p MI. 11190 MachineBasicBlock *TestMBB = MF->CreateMachineBasicBlock(ProbedBB); 11191 MachineBasicBlock *TailMBB = MF->CreateMachineBasicBlock(ProbedBB); 11192 MachineBasicBlock *BlockMBB = MF->CreateMachineBasicBlock(ProbedBB); 11193 11194 MachineFunction::iterator MBBIter = ++MBB->getIterator(); 11195 MF->insert(MBBIter, TestMBB); 11196 MF->insert(MBBIter, BlockMBB); 11197 MF->insert(MBBIter, TailMBB); 11198 11199 const TargetRegisterClass *G8RC = &PPC::G8RCRegClass; 11200 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; 11201 11202 Register DstReg = MI.getOperand(0).getReg(); 11203 Register NegSizeReg = MI.getOperand(1).getReg(); 11204 Register SPReg = isPPC64 ? PPC::X1 : PPC::R1; 11205 Register FinalStackPtr = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC); 11206 Register FramePointer = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC); 11207 Register ActualNegSizeReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC); 11208 11209 // Since the value of NegSizeReg might be realigned during prologue/epilogue 11210 // insertion, insert a PREPARE_PROBED_ALLOCA pseudo instruction to get the 11211 // actual FramePointer and NegSize. 11212 unsigned ProbeOpc; 11213 if (!MRI.hasOneNonDBGUse(NegSizeReg)) 11214 ProbeOpc = 11215 isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_64 : PPC::PREPARE_PROBED_ALLOCA_32; 11216 else 11217 // By using PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG, ActualNegSizeReg 11218 // and NegSizeReg will be allocated in the same physical register to avoid 11219 // a redundant copy when NegSizeReg has only one use, which is the current 11220 // MI and will be replaced by PREPARE_PROBED_ALLOCA. 11221 ProbeOpc = isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_64 11222 : PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_32; 11223 BuildMI(*MBB, {MI}, DL, TII->get(ProbeOpc), FramePointer) 11224 .addDef(ActualNegSizeReg) 11225 .addReg(NegSizeReg) 11226 .add(MI.getOperand(2)) 11227 .add(MI.getOperand(3)); 11228 11229 // Calculate the final stack pointer, which equals SP + ActualNegSize. 11230 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4), 11231 FinalStackPtr) 11232 .addReg(SPReg) 11233 .addReg(ActualNegSizeReg); 11234 11235 // Materialize a scratch register for update. 11236 int64_t NegProbeSize = -(int64_t)ProbeSize; 11237 assert(isInt<32>(NegProbeSize) && "Unhandled probe size!"); 11238 Register ScratchReg = MRI.createVirtualRegister(isPPC64 ?
G8RC : GPRC); 11239 if (!isInt<16>(NegProbeSize)) { 11240 Register TempReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC); 11241 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LIS8 : PPC::LIS), TempReg) 11242 .addImm(NegProbeSize >> 16); 11243 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ORI8 : PPC::ORI), 11244 ScratchReg) 11245 .addReg(TempReg) 11246 .addImm(NegProbeSize & 0xFFFF); 11247 } else 11248 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LI8 : PPC::LI), ScratchReg) 11249 .addImm(NegProbeSize); 11250 11251 { 11252 // Probe the leading residual part. 11253 Register Div = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC); 11254 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::DIVD : PPC::DIVW), Div) 11255 .addReg(ActualNegSizeReg) 11256 .addReg(ScratchReg); 11257 Register Mul = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC); 11258 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::MULLD : PPC::MULLW), Mul) 11259 .addReg(Div) 11260 .addReg(ScratchReg); 11261 Register NegMod = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC); 11262 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::SUBF8 : PPC::SUBF), NegMod) 11263 .addReg(Mul) 11264 .addReg(ActualNegSizeReg); 11265 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg) 11266 .addReg(FramePointer) 11267 .addReg(SPReg) 11268 .addReg(NegMod); 11269 } 11270 11271 { 11272 // The remaining part should be a multiple of ProbeSize. 11273 Register CmpResult = MRI.createVirtualRegister(&PPC::CRRCRegClass); 11274 BuildMI(TestMBB, DL, TII->get(isPPC64 ? PPC::CMPD : PPC::CMPW), CmpResult) 11275 .addReg(SPReg) 11276 .addReg(FinalStackPtr); 11277 BuildMI(TestMBB, DL, TII->get(PPC::BCC)) 11278 .addImm(PPC::PRED_EQ) 11279 .addReg(CmpResult) 11280 .addMBB(TailMBB); 11281 TestMBB->addSuccessor(BlockMBB); 11282 TestMBB->addSuccessor(TailMBB); 11283 } 11284 11285 { 11286 // Touch the block. 11287 // |P...|P...|P... 11288 BuildMI(BlockMBB, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg) 11289 .addReg(FramePointer) 11290 .addReg(SPReg) 11291 .addReg(ScratchReg); 11292 BuildMI(BlockMBB, DL, TII->get(PPC::B)).addMBB(TestMBB); 11293 BlockMBB->addSuccessor(TestMBB); 11294 } 11295 11296 // Calculation of MaxCallFrameSize is deferred to prologue/epilogue insertion; 11297 // use the DYNAREAOFFSET pseudo instruction to get the future result. 11298 Register MaxCallFrameSizeReg = 11299 MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC); 11300 BuildMI(TailMBB, DL, 11301 TII->get(isPPC64 ? PPC::DYNAREAOFFSET8 : PPC::DYNAREAOFFSET), 11302 MaxCallFrameSizeReg) 11303 .add(MI.getOperand(2)) 11304 .add(MI.getOperand(3)); 11305 BuildMI(TailMBB, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4), DstReg) 11306 .addReg(SPReg) 11307 .addReg(MaxCallFrameSizeReg); 11308 11309 // Splice instructions after MI to TailMBB. 11310 TailMBB->splice(TailMBB->end(), MBB, 11311 std::next(MachineBasicBlock::iterator(MI)), MBB->end()); 11312 TailMBB->transferSuccessorsAndUpdatePHIs(MBB); 11313 MBB->addSuccessor(TestMBB); 11314 11315 // Delete the pseudo instruction.
11316 MI.eraseFromParent(); 11317 11318 ++NumDynamicAllocaProbed; 11319 return TailMBB; 11320 } 11321 11322 MachineBasicBlock * 11323 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, 11324 MachineBasicBlock *BB) const { 11325 if (MI.getOpcode() == TargetOpcode::STACKMAP || 11326 MI.getOpcode() == TargetOpcode::PATCHPOINT) { 11327 if (Subtarget.is64BitELFABI() && 11328 MI.getOpcode() == TargetOpcode::PATCHPOINT && 11329 !Subtarget.isUsingPCRelativeCalls()) { 11330 // Call lowering should have added an r2 operand to indicate a dependence 11331 // on the TOC base pointer value. It can't however, because there is no 11332 // way to mark the dependence as implicit there, and so the stackmap code 11333 // will confuse it with a regular operand. Instead, add the dependence 11334 // here. 11335 MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true)); 11336 } 11337 11338 return emitPatchPoint(MI, BB); 11339 } 11340 11341 if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 || 11342 MI.getOpcode() == PPC::EH_SjLj_SetJmp64) { 11343 return emitEHSjLjSetJmp(MI, BB); 11344 } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 || 11345 MI.getOpcode() == PPC::EH_SjLj_LongJmp64) { 11346 return emitEHSjLjLongJmp(MI, BB); 11347 } 11348 11349 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 11350 11351 // To "insert" these instructions we actually have to insert their 11352 // control-flow patterns. 11353 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 11354 MachineFunction::iterator It = ++BB->getIterator(); 11355 11356 MachineFunction *F = BB->getParent(); 11357 11358 if (MI.getOpcode() == PPC::SELECT_CC_I4 || 11359 MI.getOpcode() == PPC::SELECT_CC_I8 || MI.getOpcode() == PPC::SELECT_I4 || 11360 MI.getOpcode() == PPC::SELECT_I8) { 11361 SmallVector<MachineOperand, 2> Cond; 11362 if (MI.getOpcode() == PPC::SELECT_CC_I4 || 11363 MI.getOpcode() == PPC::SELECT_CC_I8) 11364 Cond.push_back(MI.getOperand(4)); 11365 else 11366 Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET)); 11367 Cond.push_back(MI.getOperand(1)); 11368 11369 DebugLoc dl = MI.getDebugLoc(); 11370 TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond, 11371 MI.getOperand(2).getReg(), MI.getOperand(3).getReg()); 11372 } else if (MI.getOpcode() == PPC::SELECT_CC_F4 || 11373 MI.getOpcode() == PPC::SELECT_CC_F8 || 11374 MI.getOpcode() == PPC::SELECT_CC_F16 || 11375 MI.getOpcode() == PPC::SELECT_CC_VRRC || 11376 MI.getOpcode() == PPC::SELECT_CC_VSFRC || 11377 MI.getOpcode() == PPC::SELECT_CC_VSSRC || 11378 MI.getOpcode() == PPC::SELECT_CC_VSRC || 11379 MI.getOpcode() == PPC::SELECT_CC_SPE4 || 11380 MI.getOpcode() == PPC::SELECT_CC_SPE || 11381 MI.getOpcode() == PPC::SELECT_F4 || 11382 MI.getOpcode() == PPC::SELECT_F8 || 11383 MI.getOpcode() == PPC::SELECT_F16 || 11384 MI.getOpcode() == PPC::SELECT_SPE || 11385 MI.getOpcode() == PPC::SELECT_SPE4 || 11386 MI.getOpcode() == PPC::SELECT_VRRC || 11387 MI.getOpcode() == PPC::SELECT_VSFRC || 11388 MI.getOpcode() == PPC::SELECT_VSSRC || 11389 MI.getOpcode() == PPC::SELECT_VSRC) { 11390 // The incoming instruction knows the destination vreg to set, the 11391 // condition code register to branch on, the true/false values to 11392 // select between, and a branch opcode to use. 11393 11394 // thisMBB: 11395 // ... 11396 // TrueVal = ... 
11397 // cmpTY ccX, r1, r2 11398 // bCC copy1MBB 11399 // fallthrough --> copy0MBB 11400 MachineBasicBlock *thisMBB = BB; 11401 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 11402 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 11403 DebugLoc dl = MI.getDebugLoc(); 11404 F->insert(It, copy0MBB); 11405 F->insert(It, sinkMBB); 11406 11407 // Transfer the remainder of BB and its successor edges to sinkMBB. 11408 sinkMBB->splice(sinkMBB->begin(), BB, 11409 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 11410 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 11411 11412 // Next, add the true and fallthrough blocks as its successors. 11413 BB->addSuccessor(copy0MBB); 11414 BB->addSuccessor(sinkMBB); 11415 11416 if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 || 11417 MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 || 11418 MI.getOpcode() == PPC::SELECT_F16 || 11419 MI.getOpcode() == PPC::SELECT_SPE4 || 11420 MI.getOpcode() == PPC::SELECT_SPE || 11421 MI.getOpcode() == PPC::SELECT_VRRC || 11422 MI.getOpcode() == PPC::SELECT_VSFRC || 11423 MI.getOpcode() == PPC::SELECT_VSSRC || 11424 MI.getOpcode() == PPC::SELECT_VSRC) { 11425 BuildMI(BB, dl, TII->get(PPC::BC)) 11426 .addReg(MI.getOperand(1).getReg()) 11427 .addMBB(sinkMBB); 11428 } else { 11429 unsigned SelectPred = MI.getOperand(4).getImm(); 11430 BuildMI(BB, dl, TII->get(PPC::BCC)) 11431 .addImm(SelectPred) 11432 .addReg(MI.getOperand(1).getReg()) 11433 .addMBB(sinkMBB); 11434 } 11435 11436 // copy0MBB: 11437 // %FalseValue = ... 11438 // # fallthrough to sinkMBB 11439 BB = copy0MBB; 11440 11441 // Update machine-CFG edges 11442 BB->addSuccessor(sinkMBB); 11443 11444 // sinkMBB: 11445 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 11446 // ... 11447 BB = sinkMBB; 11448 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg()) 11449 .addReg(MI.getOperand(3).getReg()) 11450 .addMBB(copy0MBB) 11451 .addReg(MI.getOperand(2).getReg()) 11452 .addMBB(thisMBB); 11453 } else if (MI.getOpcode() == PPC::ReadTB) { 11454 // To read the 64-bit time-base register on a 32-bit target, we read the 11455 // two halves. Should the counter have wrapped while it was being read, we 11456 // need to try again. 11457 // ... 11458 // readLoop: 11459 // mfspr Rx,TBU # load from TBU 11460 // mfspr Ry,TB # load from TB 11461 // mfspr Rz,TBU # load from TBU 11462 // cmpw crX,Rx,Rz # check if 'old'='new' 11463 // bne readLoop # branch if they're not equal 11464 // ... 11465 11466 MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB); 11467 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 11468 DebugLoc dl = MI.getDebugLoc(); 11469 F->insert(It, readMBB); 11470 F->insert(It, sinkMBB); 11471 11472 // Transfer the remainder of BB and its successor edges to sinkMBB. 
    sinkMBB->splice(sinkMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

    BB->addSuccessor(readMBB);
    BB = readMBB;

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    Register ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
    Register LoReg = MI.getOperand(0).getReg();
    Register HiReg = MI.getOperand(1).getReg();

    BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
    BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
    BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269);

    Register CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);

    BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
        .addReg(HiReg)
        .addReg(ReadAgainReg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(PPC::PRED_NE)
        .addReg(CmpReg)
        .addMBB(readMBB);

    BB->addSuccessor(readMBB);
    BB->addSuccessor(sinkMBB);
  } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::AND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::OR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE);

  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
           MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
           (Subtarget.hasPartwordAtomics() &&
            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
           (Subtarget.hasPartwordAtomics() &&
            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
    bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;

    auto LoadMnemonic = PPC::LDARX;
    auto StoreMnemonic = PPC::STDCX;
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable("Compare and swap of unknown size");
    case PPC::ATOMIC_CMP_SWAP_I8:
      LoadMnemonic = PPC::LBARX;
      StoreMnemonic = PPC::STBCX;
      assert(Subtarget.hasPartwordAtomics() &&
             "Partword atomics unsupported.");
      break;
    case PPC::ATOMIC_CMP_SWAP_I16:
      LoadMnemonic = PPC::LHARX;
      StoreMnemonic = PPC::STHCX;
      assert(Subtarget.hasPartwordAtomics() &&
             "Partword atomics unsupported.");
      break;
    case PPC::ATOMIC_CMP_SWAP_I32:
      LoadMnemonic = PPC::LWARX;
      StoreMnemonic = PPC::STWCX;
      break;
    case PPC::ATOMIC_CMP_SWAP_I64:
      LoadMnemonic = PPC::LDARX;
      StoreMnemonic = PPC::STDCX;
      break;
    }
    Register dest = MI.getOperand(0).getReg();
    Register ptrA = MI.getOperand(1).getReg();
    Register ptrB = MI.getOperand(2).getReg();
    Register oldval = MI.getOperand(3).getReg();
    Register newval = MI.getOperand(4).getReg();
    DebugLoc dl = MI.getDebugLoc();

    MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
    F->insert(It, loop1MBB);
    F->insert(It, loop2MBB);
    F->insert(It, midMBB);
    F->insert(It, exitMBB);
    exitMBB->splice(exitMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    exitMBB->transferSuccessorsAndUpdatePHIs(BB);

    // thisMBB:
    //   ...
    //   fallthrough --> loop1MBB
    BB->addSuccessor(loop1MBB);

    // loop1MBB:
    //   l[bhwd]arx dest, ptr
    //   cmp[wd] dest, oldval
    //   bne- midMBB
    // loop2MBB:
    //   st[bhwd]cx. newval, ptr
    //   bne- loop1MBB
    //   b exitMBB
    // midMBB:
    //   st[bhwd]cx. dest, ptr
    // exitMBB:
    BB = loop1MBB;
    BuildMI(BB, dl, TII->get(LoadMnemonic), dest).addReg(ptrA).addReg(ptrB);
    BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
        .addReg(oldval)
        .addReg(dest);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(PPC::PRED_NE)
        .addReg(PPC::CR0)
        .addMBB(midMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(midMBB);

    BB = loop2MBB;
    BuildMI(BB, dl, TII->get(StoreMnemonic))
        .addReg(newval)
        .addReg(ptrA)
        .addReg(ptrB);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(PPC::PRED_NE)
        .addReg(PPC::CR0)
        .addMBB(loop1MBB);
    BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
    BB->addSuccessor(loop1MBB);
    BB->addSuccessor(exitMBB);

    BB = midMBB;
    BuildMI(BB, dl, TII->get(StoreMnemonic))
        .addReg(dest)
        .addReg(ptrA)
        .addReg(ptrB);
    BB->addSuccessor(exitMBB);

    // exitMBB:
    //   ...
    BB = exitMBB;
  } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
    // We must use 64-bit registers for addresses when targeting 64-bit,
    // since we're actually doing arithmetic on them. Other registers
    // can be 32-bit.
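    //
    // A hedged illustration of the masking scheme below (the values are
    // examples, not code that is emitted verbatim): for a byte at address A,
    // the enclosing aligned word is A & ~3 and the shift amount is derived
    // from A & 3. With A & 3 == 1 in the 8-bit case, the little-endian
    // shift is (A & 3) * 8 = 8, while the big-endian shift is
    // 24 - (A & 3) * 8 = 16, computed below as xori shift1, 24 since
    // shift1 is always a multiple of 8 in the range [0, 24].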
    bool is64bit = Subtarget.isPPC64();
    bool isLittleEndian = Subtarget.isLittleEndian();
    bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;

    Register dest = MI.getOperand(0).getReg();
    Register ptrA = MI.getOperand(1).getReg();
    Register ptrB = MI.getOperand(2).getReg();
    Register oldval = MI.getOperand(3).getReg();
    Register newval = MI.getOperand(4).getReg();
    DebugLoc dl = MI.getDebugLoc();

    MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
    F->insert(It, loop1MBB);
    F->insert(It, loop2MBB);
    F->insert(It, midMBB);
    F->insert(It, exitMBB);
    exitMBB->splice(exitMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    exitMBB->transferSuccessorsAndUpdatePHIs(BB);

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    const TargetRegisterClass *RC =
        is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
    const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;

    Register PtrReg = RegInfo.createVirtualRegister(RC);
    Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
    Register ShiftReg =
        isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
    Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
    Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
    Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
    Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
    Register MaskReg = RegInfo.createVirtualRegister(GPRC);
    Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
    Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
    Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
    Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
    Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
    Register Ptr1Reg;
    Register TmpReg = RegInfo.createVirtualRegister(GPRC);
    Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
    // thisMBB:
    //   ...
    //   fallthrough --> loop1MBB
    BB->addSuccessor(loop1MBB);

    // The 4-byte load must be aligned, while a char or short may be
    // anywhere in the word. Hence all this nasty bookkeeping code:
    //   add ptr1, ptrA, ptrB [copy if ptrA == 0]
    //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
    //   xori shift, shift1, 24 [16]
    //   rlwinm ptr, ptr1, 0, 0, 29
    //   slw newval2, newval, shift
    //   slw oldval2, oldval, shift
    //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
    //   slw mask, mask2, shift
    //   and newval3, newval2, mask
    //   and oldval3, oldval2, mask
    // loop1MBB:
    //   lwarx tmpDest, ptr
    //   and tmp, tmpDest, mask
    //   cmpw tmp, oldval3
    //   bne- midMBB
    // loop2MBB:
    //   andc tmp2, tmpDest, mask
    //   or tmp4, tmp2, newval3
    //   stwcx. tmp4, ptr
    //   bne- loop1MBB
    //   b exitMBB
    // midMBB:
    //   stwcx. tmpDest, ptr
    // exitMBB:
    //   srw dest, tmpDest, shift
    if (ptrA != ZeroReg) {
      Ptr1Reg = RegInfo.createVirtualRegister(RC);
      BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
          .addReg(ptrA)
          .addReg(ptrB);
    } else {
      Ptr1Reg = ptrB;
    }

    // We need to use a 32-bit subregister here to avoid a register class
    // mismatch in 64-bit mode.
    BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
        .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
        .addImm(3)
        .addImm(27)
        .addImm(is8bit ? 28 : 27);
    if (!isLittleEndian)
      BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
          .addReg(Shift1Reg)
          .addImm(is8bit ? 24 : 16);
    if (is64bit)
      BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
          .addReg(Ptr1Reg)
          .addImm(0)
          .addImm(61);
    else
      BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
          .addReg(Ptr1Reg)
          .addImm(0)
          .addImm(0)
          .addImm(29);
    BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
        .addReg(newval)
        .addReg(ShiftReg);
    BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
        .addReg(oldval)
        .addReg(ShiftReg);
    if (is8bit)
      BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
    else {
      BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
      BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
          .addReg(Mask3Reg)
          .addImm(65535);
    }
    BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
        .addReg(Mask2Reg)
        .addReg(ShiftReg);
    BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
        .addReg(NewVal2Reg)
        .addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
        .addReg(OldVal2Reg)
        .addReg(MaskReg);

    BB = loop1MBB;
    BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
        .addReg(ZeroReg)
        .addReg(PtrReg);
    BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
        .addReg(TmpDestReg)
        .addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
        .addReg(TmpReg)
        .addReg(OldVal3Reg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(PPC::PRED_NE)
        .addReg(PPC::CR0)
        .addMBB(midMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(midMBB);

    BB = loop2MBB;
    BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
        .addReg(TmpDestReg)
        .addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
        .addReg(Tmp2Reg)
        .addReg(NewVal3Reg);
    BuildMI(BB, dl, TII->get(PPC::STWCX))
        .addReg(Tmp4Reg)
        .addReg(ZeroReg)
        .addReg(PtrReg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(PPC::PRED_NE)
        .addReg(PPC::CR0)
        .addMBB(loop1MBB);
    BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
    BB->addSuccessor(loop1MBB);
    BB->addSuccessor(exitMBB);

    BB = midMBB;
    BuildMI(BB, dl, TII->get(PPC::STWCX))
        .addReg(TmpDestReg)
        .addReg(ZeroReg)
        .addReg(PtrReg);
    BB->addSuccessor(exitMBB);

    // exitMBB:
    //   ...
    BB = exitMBB;
    BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
        .addReg(TmpReg)
        .addReg(ShiftReg);
  } else if (MI.getOpcode() == PPC::FADDrtz) {
    // This pseudo performs an FADD with rounding mode temporarily forced
    // to round-to-zero. We emit this via custom inserter since the FPSCR
    // is not modeled at the SelectionDAG level.
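    //
    // The emitted sequence is roughly the following (a sketch; the register
    // names are illustrative only):
    //   mffs   f_save           ; save the FPSCR
    //   mtfsb1 31               ; FPSCR bit 63 = 1
    //   mtfsb0 30               ; FPSCR bit 62 = 0 -> RN = 0b01 (round to 0)
    //   fadd   dest, src1, src2
    //   mtfsf  1, f_save        ; restore the rounding-control field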
    Register Dest = MI.getOperand(0).getReg();
    Register Src1 = MI.getOperand(1).getReg();
    Register Src2 = MI.getOperand(2).getReg();
    DebugLoc dl = MI.getDebugLoc();

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    Register MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);

    // Save FPSCR value.
    BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);

    // Set rounding mode to round-to-zero.
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1))
        .addImm(31)
        .addReg(PPC::RM, RegState::ImplicitDefine);

    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0))
        .addImm(30)
        .addReg(PPC::RM, RegState::ImplicitDefine);

    // Perform addition.
    auto MIB = BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest)
                   .addReg(Src1)
                   .addReg(Src2);
    if (MI.getFlag(MachineInstr::NoFPExcept))
      MIB.setMIFlag(MachineInstr::NoFPExcept);

    // Restore FPSCR value.
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
  } else if (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
             MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT ||
             MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
             MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) {
    unsigned Opcode = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
                       MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8)
                          ? PPC::ANDI8_rec
                          : PPC::ANDI_rec;
    bool IsEQ = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
                 MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8);

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    Register Dest = RegInfo.createVirtualRegister(
        Opcode == PPC::ANDI_rec ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);

    DebugLoc Dl = MI.getDebugLoc();
    BuildMI(*BB, MI, Dl, TII->get(Opcode), Dest)
        .addReg(MI.getOperand(1).getReg())
        .addImm(1);
    BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
            MI.getOperand(0).getReg())
        .addReg(IsEQ ? PPC::CR0EQ : PPC::CR0GT);
  } else if (MI.getOpcode() == PPC::TCHECK_RET) {
    DebugLoc Dl = MI.getDebugLoc();
    MachineRegisterInfo &RegInfo = F->getRegInfo();
    Register CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
    BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
    BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
            MI.getOperand(0).getReg())
        .addReg(CRReg);
  } else if (MI.getOpcode() == PPC::TBEGIN_RET) {
    DebugLoc Dl = MI.getDebugLoc();
    unsigned Imm = MI.getOperand(1).getImm();
    BuildMI(*BB, MI, Dl, TII->get(PPC::TBEGIN)).addImm(Imm);
    BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
            MI.getOperand(0).getReg())
        .addReg(PPC::CR0EQ);
  } else if (MI.getOpcode() == PPC::SETRNDi) {
    DebugLoc dl = MI.getDebugLoc();
    Register OldFPSCRReg = MI.getOperand(0).getReg();

    // Save FPSCR value.
    BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);

    // The floating point rounding mode is in bits 62:63 of FPSCR, and has
    // the following settings:
    //   00 Round to nearest
    //   01 Round to zero
    //   10 Round to +inf
    //   11 Round to -inf

    // When the operand is an immediate, use its two least significant bits
    // to set bits 62:63 of FPSCR.
    unsigned Mode = MI.getOperand(1).getImm();
    BuildMI(*BB, MI, dl, TII->get((Mode & 1) ? PPC::MTFSB1 : PPC::MTFSB0))
        .addImm(31)
        .addReg(PPC::RM, RegState::ImplicitDefine);

    BuildMI(*BB, MI, dl, TII->get((Mode & 2) ? PPC::MTFSB1 : PPC::MTFSB0))
        .addImm(30)
        .addReg(PPC::RM, RegState::ImplicitDefine);
  } else if (MI.getOpcode() == PPC::SETRND) {
    DebugLoc dl = MI.getDebugLoc();

    // Copy a register from F8RCRegClass::SrcReg to G8RCRegClass::DestReg,
    // or from G8RCRegClass::SrcReg to F8RCRegClass::DestReg.
    // If the target doesn't have DirectMove, we should go through the stack
    // to do the conversion, because the target doesn't have instructions
    // like mtvsrd or mfvsrd to do it directly.
    auto copyRegFromG8RCOrF8RC = [&](unsigned DestReg, unsigned SrcReg) {
      if (Subtarget.hasDirectMove()) {
        BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), DestReg)
            .addReg(SrcReg);
      } else {
        // Use the stack to do the register copy.
        unsigned StoreOp = PPC::STD, LoadOp = PPC::LFD;
        MachineRegisterInfo &RegInfo = F->getRegInfo();
        const TargetRegisterClass *RC = RegInfo.getRegClass(SrcReg);
        if (RC == &PPC::F8RCRegClass) {
          // Copy a register from F8RCRegClass to G8RCRegClass.
          assert((RegInfo.getRegClass(DestReg) == &PPC::G8RCRegClass) &&
                 "Unsupported RegClass.");

          StoreOp = PPC::STFD;
          LoadOp = PPC::LD;
        } else {
          // Copy a register from G8RCRegClass to F8RCRegClass.
          assert((RegInfo.getRegClass(SrcReg) == &PPC::G8RCRegClass) &&
                 (RegInfo.getRegClass(DestReg) == &PPC::F8RCRegClass) &&
                 "Unsupported RegClass.");
        }

        MachineFrameInfo &MFI = F->getFrameInfo();
        int FrameIdx = MFI.CreateStackObject(8, Align(8), false);

        MachineMemOperand *MMOStore = F->getMachineMemOperand(
            MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
            MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx),
            MFI.getObjectAlign(FrameIdx));

        // Store the SrcReg into the stack.
        BuildMI(*BB, MI, dl, TII->get(StoreOp))
            .addReg(SrcReg)
            .addImm(0)
            .addFrameIndex(FrameIdx)
            .addMemOperand(MMOStore);

        MachineMemOperand *MMOLoad = F->getMachineMemOperand(
            MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
            MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
            MFI.getObjectAlign(FrameIdx));

        // Load from the stack where SrcReg is stored, and save to DestReg,
        // so we have done the RegClass conversion from RegClass::SrcReg to
        // RegClass::DestReg.
        BuildMI(*BB, MI, dl, TII->get(LoadOp), DestReg)
            .addImm(0)
            .addFrameIndex(FrameIdx)
            .addMemOperand(MMOLoad);
      }
    };

    Register OldFPSCRReg = MI.getOperand(0).getReg();

    // Save FPSCR value.
    BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);

    // When the operand is a gprc register, use its two least significant
    // bits and the mtfsf instruction to set bits 62:63 of FPSCR:
    //
    //   copy OldFPSCRTmpReg, OldFPSCRReg
    //   (INSERT_SUBREG ExtSrcReg, (IMPLICIT_DEF ImDefReg), SrcOp, 1)
    //   rldimi NewFPSCRTmpReg, ExtSrcReg, OldFPSCRReg, 0, 62
    //   copy NewFPSCRReg, NewFPSCRTmpReg
    //   mtfsf 255, NewFPSCRReg
    MachineOperand SrcOp = MI.getOperand(1);
    MachineRegisterInfo &RegInfo = F->getRegInfo();
    Register OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);

    copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg);

    Register ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
    Register ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);

    // The first operand of INSERT_SUBREG should be a register which has
    // subregisters; we only care about its RegClass, so we should use an
    // IMPLICIT_DEF register.
    BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), ImDefReg);
    BuildMI(*BB, MI, dl, TII->get(PPC::INSERT_SUBREG), ExtSrcReg)
        .addReg(ImDefReg)
        .add(SrcOp)
        .addImm(1);

    Register NewFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
    BuildMI(*BB, MI, dl, TII->get(PPC::RLDIMI), NewFPSCRTmpReg)
        .addReg(OldFPSCRTmpReg)
        .addReg(ExtSrcReg)
        .addImm(0)
        .addImm(62);

    Register NewFPSCRReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
    copyRegFromG8RCOrF8RC(NewFPSCRReg, NewFPSCRTmpReg);

    // The mask 255 means that bits 32:63 of NewFPSCRReg are placed into
    // bits 32:63 of FPSCR.
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF))
        .addImm(255)
        .addReg(NewFPSCRReg)
        .addImm(0)
        .addImm(0);
  } else if (MI.getOpcode() == PPC::SETFLM) {
    DebugLoc Dl = MI.getDebugLoc();

    // The result of setflm is the previous FPSCR content, so we need to
    // save it first.
    Register OldFPSCRReg = MI.getOperand(0).getReg();
    BuildMI(*BB, MI, Dl, TII->get(PPC::MFFS), OldFPSCRReg);

    // Put bits 32:63 into FPSCR.
    Register NewFPSCRReg = MI.getOperand(1).getReg();
    BuildMI(*BB, MI, Dl, TII->get(PPC::MTFSF))
        .addImm(255)
        .addReg(NewFPSCRReg)
        .addImm(0)
        .addImm(0);
  } else if (MI.getOpcode() == PPC::PROBED_ALLOCA_32 ||
             MI.getOpcode() == PPC::PROBED_ALLOCA_64) {
    return emitProbedAlloca(MI, BB);
  } else {
    llvm_unreachable("Unexpected instr type to insert");
  }

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//

static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {
  // For the estimates, convergence is quadratic, so we essentially double the
  // number of digits correct after every iteration. For both FRE and FRSQRTE,
  // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(),
  // this is 2^-14. IEEE float has 23 digits and double has 52 digits.
  int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
  if (VT.getScalarType() == MVT::f64)
    RefinementSteps++;
  return RefinementSteps;
}

SDValue PPCTargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG,
                                            const DenormalMode &Mode) const {
  // We only have VSX Vector Test for software Square Root.
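  //
  // The node built below looks roughly like this (a sketch for intuition
  // only, not exact SelectionDAG dump syntax):
  //   t0: i32 = PPCISD::FTSQRT input
  //   t1: i1  = EXTRACT_SUBREG t0, sub_eq
  // where t1 is true exactly when the input is unsuitable for the estimate
  // sequence and the fallback square-root path must be taken.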
  EVT VT = Op.getValueType();
  if (!isTypeLegal(MVT::i1) ||
      (VT != MVT::f64 &&
       ((VT != MVT::v2f64 && VT != MVT::v4f32) || !Subtarget.hasVSX())))
    return TargetLowering::getSqrtInputTest(Op, DAG, Mode);

  SDLoc DL(Op);
  // The output register of FTSQRT is a CR field.
  SDValue FTSQRT = DAG.getNode(PPCISD::FTSQRT, DL, MVT::i32, Op);
  // ftsqrt BF,FRB
  // Let e_b be the unbiased exponent of the double-precision
  // floating-point operand in register FRB.
  // fe_flag is set to 1 if either of the following conditions occurs:
  //   - The double-precision floating-point operand in register FRB is a
  //     zero, a NaN, an infinity, or a negative value.
  //   - e_b is less than or equal to -970.
  // Otherwise fe_flag is set to 0.
  // Both the VSX and non-VSX versions set the EQ bit in the CR if the number
  // is not eligible for iteration (zero/negative/infinity/NaN, or the
  // unbiased exponent is less than or equal to -970).
  SDValue SRIdxVal = DAG.getTargetConstant(PPC::sub_eq, DL, MVT::i32);
  return SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, MVT::i1,
                                    FTSQRT, SRIdxVal),
                 0);
}

SDValue
PPCTargetLowering::getSqrtResultForDenormInput(SDValue Op,
                                               SelectionDAG &DAG) const {
  // We only have VSX Vector Square Root.
  EVT VT = Op.getValueType();
  if (VT != MVT::f64 &&
      ((VT != MVT::v2f64 && VT != MVT::v4f32) || !Subtarget.hasVSX()))
    return TargetLowering::getSqrtResultForDenormInput(Op, DAG);

  return DAG.getNode(PPCISD::FSQRT, SDLoc(Op), VT, Op);
}

SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
                                           int Enabled, int &RefinementSteps,
                                           bool &UseOneConstNR,
                                           bool Reciprocal) const {
  EVT VT = Operand.getValueType();
  if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
      (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
      (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
      (VT == MVT::v2f64 && Subtarget.hasVSX())) {
    if (RefinementSteps == ReciprocalEstimate::Unspecified)
      RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);

    // The Newton-Raphson computation with a single constant does not provide
    // enough accuracy on some CPUs.
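    //
    // For intuition, a worked sketch of the step counts chosen by
    // getEstimateRefinementSteps above: starting from a 2^-5 estimate, each
    // iteration roughly squares the relative error, 2^-5 -> 2^-10 -> 2^-20
    // -> 2^-40, so three steps cover f32's 24 significand bits and a fourth
    // covers f64's 53. With hasRecipPrec() the starting error is 2^-14, so
    // one step (2^-28) suffices for f32 and two (2^-56) for f64.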
    UseOneConstNR = !Subtarget.needsTwoConstNR();
    return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
  }
  return SDValue();
}

SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
                                            int Enabled,
                                            int &RefinementSteps) const {
  EVT VT = Operand.getValueType();
  if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
      (VT == MVT::f64 && Subtarget.hasFRE()) ||
      (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
      (VT == MVT::v2f64 && Subtarget.hasVSX())) {
    if (RefinementSteps == ReciprocalEstimate::Unspecified)
      RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
    return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
  }
  return SDValue();
}

unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
  // Note: This functionality is used only when unsafe-fp-math is enabled, and
  // on cores with reciprocal estimates (which are used when unsafe-fp-math is
  // enabled for division), this functionality is redundant with the default
  // combiner logic (once the division -> reciprocal/multiply transformation
  // has taken place). As a result, this matters more for older cores than for
  // newer ones.

  // Combine multiple FDIVs with the same divisor into multiple FMULs by the
  // reciprocal if there are two or more FDIVs (for embedded cores with only
  // one FP pipeline) or three or more FDIVs (for generic OOO cores).
  switch (Subtarget.getCPUDirective()) {
  default:
    return 3;
  case PPC::DIR_440:
  case PPC::DIR_A2:
  case PPC::DIR_E500:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
    return 2;
  }
}

// isConsecutiveLSLoc needs to work even if all adds have not yet been
// collapsed, and so we need to look through chains of them.
static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
                                      int64_t &Offset, SelectionDAG &DAG) {
  if (DAG.isBaseWithConstantOffset(Loc)) {
    Base = Loc.getOperand(0);
    Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();

    // The base might itself be a base plus an offset, and if so, accumulate
    // that as well.
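    // For example (illustrative): for Loc = (add (add x, 8), 4), the first
    // pass records Base = (add x, 8) and Offset = 4, and the recursive call
    // below collapses this to Base = x and Offset = 12.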
    getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG);
  }
}

static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
                               unsigned Bytes, int Dist,
                               SelectionDAG &DAG) {
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;

  SDValue BaseLoc = Base->getBasePtr();
  if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
    const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    int FS = MFI.getObjectSize(FI);
    int BFS = MFI.getObjectSize(BFI);
    if (FS != BFS || FS != (int)Bytes)
      return false;
    return MFI.getObjectOffset(FI) ==
           (MFI.getObjectOffset(BFI) + Dist * Bytes);
  }

  SDValue Base1 = Loc, Base2 = BaseLoc;
  int64_t Offset1 = 0, Offset2 = 0;
  getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);
  getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);
  if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
    return true;

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const GlobalValue *GV1 = nullptr;
  const GlobalValue *GV2 = nullptr;
  Offset1 = 0;
  Offset2 = 0;
  bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
  bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
  if (isGA1 && isGA2 && GV1 == GV2)
    return Offset1 == (Offset2 + Dist * Bytes);
  return false;
}

// Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
// not enforce equality of the chain operands.
static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
                            unsigned Bytes, int Dist,
                            SelectionDAG &DAG) {
  if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
    EVT VT = LS->getMemoryVT();
    SDValue Loc = LS->getBasePtr();
    return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
  }

  if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
    EVT VT;
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    default:
      return false;
    case Intrinsic::ppc_altivec_lvx:
    case Intrinsic::ppc_altivec_lvxl:
    case Intrinsic::ppc_vsx_lxvw4x:
    case Intrinsic::ppc_vsx_lxvw4x_be:
      VT = MVT::v4i32;
      break;
    case Intrinsic::ppc_vsx_lxvd2x:
    case Intrinsic::ppc_vsx_lxvd2x_be:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_altivec_lvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_lvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_lvewx:
      VT = MVT::i32;
      break;
    }

    return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
  }

  if (N->getOpcode() == ISD::INTRINSIC_VOID) {
    EVT VT;
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    default:
      return false;
    case Intrinsic::ppc_altivec_stvx:
    case Intrinsic::ppc_altivec_stvxl:
    case Intrinsic::ppc_vsx_stxvw4x:
      VT = MVT::v4i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_vsx_stxvw4x_be:
      VT = MVT::v4i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x_be:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_altivec_stvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_stvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_stvewx:
      VT = MVT::i32;
      break;
    }

    return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
  }

  return false;
}

// Return true if there is a nearby consecutive load to the one provided
// (regardless of alignment). We search up and down the chain, looking through
// token factors and other loads (but nothing else). As a result, a true
// result indicates that it is safe to create a new consecutive load adjacent
// to the load provided.
static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
  SDValue Chain = LD->getChain();
  EVT VT = LD->getMemoryVT();

  SmallSet<SDNode *, 16> LoadRoots;
  SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
  SmallSet<SDNode *, 16> Visited;

  // First, search up the chain, branching to follow all token-factor
  // operands. If we find a consecutive load, then we're done, otherwise,
  // record all nodes just above the top-level loads and token factors.
  while (!Queue.empty()) {
    SDNode *ChainNext = Queue.pop_back_val();
    if (!Visited.insert(ChainNext).second)
      continue;

    if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
      if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
        return true;

      if (!Visited.count(ChainLD->getChain().getNode()))
        Queue.push_back(ChainLD->getChain().getNode());
    } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
      for (const SDUse &O : ChainNext->ops())
        if (!Visited.count(O.getNode()))
          Queue.push_back(O.getNode());
    } else
      LoadRoots.insert(ChainNext);
  }

  // Second, search down the chain, starting from the top-level nodes recorded
  // in the first phase. These top-level nodes are the nodes just above all
  // loads and token factors. Starting with their uses, recursively look
  // through all loads (just the chain uses) and token factors to find a
  // consecutive load.
  Visited.clear();
  Queue.clear();

  for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
                                        IE = LoadRoots.end();
       I != IE; ++I) {
    Queue.push_back(*I);

    while (!Queue.empty()) {
      SDNode *LoadRoot = Queue.pop_back_val();
      if (!Visited.insert(LoadRoot).second)
        continue;

      if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
        if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
          return true;

      for (SDNode::use_iterator UI = LoadRoot->use_begin(),
                                UE = LoadRoot->use_end();
           UI != UE; ++UI)
        if (((isa<MemSDNode>(*UI) &&
              cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
             UI->getOpcode() == ISD::TokenFactor) &&
            !Visited.count(*UI))
          Queue.push_back(*UI);
    }
  }

  return false;
}

/// This function is called when we have proved that a SETCC node can be
/// replaced by subtraction (and other supporting instructions) so that the
/// result of the comparison is kept in a GPR instead of a CR. This function
/// is purely for codegen purposes and has some flags to guide the codegen
/// process.
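///
/// For example (a sketch): to compute SETULT on two i32 values x and y, both
/// are zero-extended to i64 and subtracted; the subtraction cannot wrap, so
/// bit 63 of the difference is set exactly when x <u y:
///   s = zext(x) - zext(y); result = trunc(srl(s, 63))
/// SETUGE is the complement of this (an xor with 1), and SETUGT/SETULE use
/// the same scheme with the operands swapped.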
static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement,
                                     bool Swap, SDLoc &DL, SelectionDAG &DAG) {
  assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");

  // Zero extend the operands to the largest legal integer. Originally, they
  // must be of a strictly smaller size.
  auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0),
                         DAG.getConstant(Size, DL, MVT::i32));
  auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1),
                         DAG.getConstant(Size, DL, MVT::i32));

  // Swap if needed. Depends on the condition code.
  if (Swap)
    std::swap(Op0, Op1);

  // Subtract extended integers.
  auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1);

  // Move the sign bit to the least significant position and zero out the
  // rest. Now the least significant bit carries the result of the original
  // comparison.
  auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode,
                             DAG.getConstant(Size - 1, DL, MVT::i32));
  auto Final = Shifted;

  // Complement the result if needed. Based on the condition code.
  if (Complement)
    Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted,
                        DAG.getConstant(1, DL, MVT::i64));

  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final);
}

SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  // The size of the integers being compared plays a critical role in the
  // following analysis, so we prefer to do this when all types are legal.
  if (!DCI.isAfterLegalizeDAG())
    return SDValue();

  // If all users of the SETCC extend its value to a legal integer type,
  // then we replace the SETCC with a subtraction.
  for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
       UI != UE; ++UI) {
    if (UI->getOpcode() != ISD::ZERO_EXTEND)
      return SDValue();
  }

  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
  auto OpSize = N->getOperand(0).getValueSizeInBits();

  unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();

  if (OpSize < Size) {
    switch (CC) {
    default: break;
    case ISD::SETULT:
      return generateEquivalentSub(N, Size, false, false, DL, DAG);
    case ISD::SETULE:
      return generateEquivalentSub(N, Size, true, true, DL, DAG);
    case ISD::SETUGT:
      return generateEquivalentSub(N, Size, false, true, DL, DAG);
    case ISD::SETUGE:
      return generateEquivalentSub(N, Size, true, false, DL, DAG);
    }
  }

  return SDValue();
}

SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);

  assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
  // If we're tracking CR bits, we need to be careful that we don't have:
  //   trunc(binary-ops(zext(x), zext(y)))
  // or
  //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...))
  // such that we're unnecessarily moving things into GPRs when it would be
  // better to keep them in CR bits.

  // Note that trunc here can be an actual i1 trunc, or can be the effective
  // truncation that comes from a setcc or select_cc.
  if (N->getOpcode() == ISD::TRUNCATE &&
      N->getValueType(0) != MVT::i1)
    return SDValue();

  if (N->getOperand(0).getValueType() != MVT::i32 &&
      N->getOperand(0).getValueType() != MVT::i64)
    return SDValue();

  if (N->getOpcode() == ISD::SETCC ||
      N->getOpcode() == ISD::SELECT_CC) {
    // If we're looking at a comparison, then we need to make sure that the
    // high bits (all except for the first) don't affect the result.
    ISD::CondCode CC =
        cast<CondCodeSDNode>(
            N->getOperand(N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
    unsigned OpBits = N->getOperand(0).getValueSizeInBits();

    if (ISD::isSignedIntSetCC(CC)) {
      if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
          DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
        return SDValue();
    } else if (ISD::isUnsignedIntSetCC(CC)) {
      if (!DAG.MaskedValueIsZero(N->getOperand(0),
                                 APInt::getHighBitsSet(OpBits, OpBits - 1)) ||
          !DAG.MaskedValueIsZero(N->getOperand(1),
                                 APInt::getHighBitsSet(OpBits, OpBits - 1)))
        return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
                                             : SDValue());
    } else {
      // This is neither a signed nor an unsigned comparison, just make sure
      // that the high bits are equal.
      KnownBits Op1Known = DAG.computeKnownBits(N->getOperand(0));
      KnownBits Op2Known = DAG.computeKnownBits(N->getOperand(1));

      // We don't really care about what is known about the first bit (if
      // anything), so pretend that it is known zero for both to ensure they
      // can be compared as constants.
      Op1Known.Zero.setBit(0); Op1Known.One.clearBit(0);
      Op2Known.Zero.setBit(0); Op2Known.One.clearBit(0);

      if (!Op1Known.isConstant() || !Op2Known.isConstant() ||
          Op1Known.getConstant() != Op2Known.getConstant())
        return SDValue();
    }
  }

  // We now know that the higher-order bits are irrelevant, we just need to
  // make sure that all of the intermediate operations are bit operations,
  // and all inputs are extensions.
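  //
  // A typical pattern this targets (illustrative):
  //   trunc (and (zext i1 %a to i32), (zext i1 %b to i32)) to i1
  // which can be performed directly on the i1 (CR bit) values as
  //   and i1 %a, %b
  // once the extensions and the truncation are peeled away.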
  if (N->getOperand(0).getOpcode() != ISD::AND &&
      N->getOperand(0).getOpcode() != ISD::OR &&
      N->getOperand(0).getOpcode() != ISD::XOR &&
      N->getOperand(0).getOpcode() != ISD::SELECT &&
      N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
      N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
      N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
      N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
      N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
    return SDValue();

  if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
      N->getOperand(1).getOpcode() != ISD::AND &&
      N->getOperand(1).getOpcode() != ISD::OR &&
      N->getOperand(1).getOpcode() != ISD::XOR &&
      N->getOperand(1).getOpcode() != ISD::SELECT &&
      N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
      N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
      N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
      N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
      N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
    return SDValue();

  SmallVector<SDValue, 4> Inputs;
  SmallVector<SDValue, 8> BinOps, PromOps;
  SmallPtrSet<SDNode *, 16> Visited;

  for (unsigned i = 0; i < 2; ++i) {
    if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
          N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
          N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
         N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
        isa<ConstantSDNode>(N->getOperand(i)))
      Inputs.push_back(N->getOperand(i));
    else
      BinOps.push_back(N->getOperand(i));

    if (N->getOpcode() == ISD::TRUNCATE)
      break;
  }

  // Visit all inputs, collect all binary operations (and, or, xor and
  // select) that are all fed by extensions.
  while (!BinOps.empty()) {
    SDValue BinOp = BinOps.pop_back_val();

    if (!Visited.insert(BinOp.getNode()).second)
      continue;

    PromOps.push_back(BinOp);

    for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
      // The condition of the select is not promoted.
      if (BinOp.getOpcode() == ISD::SELECT && i == 0)
        continue;
      if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
        continue;

      if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
            BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
            BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
           BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
          isa<ConstantSDNode>(BinOp.getOperand(i))) {
        Inputs.push_back(BinOp.getOperand(i));
      } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
                 BinOp.getOperand(i).getOpcode() == ISD::OR ||
                 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
                 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
                 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
                 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
                 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
        BinOps.push_back(BinOp.getOperand(i));
      } else {
        // We have an input that is not an extension or another binary
        // operation; we'll abort this transformation.
        return SDValue();
      }
    }
  }

  // Make sure that this is a self-contained cluster of operations (which
  // is not quite the same thing as saying that everything has only one
  // use).
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
                              UE = Inputs[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // Make sure that we're not going to promote the non-output-value
      // operand(s) of a SELECT or SELECT_CC.
      // FIXME: Although we could sometimes handle this, and it does occur in
      // practice that one of the condition inputs to the select is also one
      // of the outputs, we currently can't deal with this.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == Inputs[i])
          return SDValue();
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == Inputs[i] ||
            User->getOperand(1) == Inputs[i])
          return SDValue();
      }
    }
  }

  for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
    for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
                              UE = PromOps[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // Make sure that we're not going to promote the non-output-value
      // operand(s) of a SELECT or SELECT_CC.
      // FIXME: Although we could sometimes handle this, and it does occur in
      // practice that one of the condition inputs to the select is also one
      // of the outputs, we currently can't deal with this.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == PromOps[i])
          return SDValue();
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == PromOps[i] ||
            User->getOperand(1) == PromOps[i])
          return SDValue();
      }
    }
  }

  // Replace all inputs with the extension operand.
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    // Constants may have users outside the cluster of to-be-promoted nodes,
    // and so we need to replace those as we do the promotions.
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;
    else
      DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
  }

  std::list<HandleSDNode> PromOpHandles;
  for (auto &PromOp : PromOps)
    PromOpHandles.emplace_back(PromOp);

  // Replace all operations (these are all the same, but have a different
  // (i1) return type). DAG.getNode will validate that the types of
  // a binary operator match, so go through the list in reverse so that
  // we've likely promoted both operands first. Any intermediate truncations
  // or extensions disappear.
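  //
  // A sketch of the retry mechanism used below: if, say, (and x, y) is
  // popped from the list before x has been rewritten to i1, its operand
  // types will not yet match; the node is then moved to the front of the
  // list and retried once its operands have been replaced.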
  while (!PromOpHandles.empty()) {
    SDValue PromOp = PromOpHandles.back().getValue();
    PromOpHandles.pop_back();

    if (PromOp.getOpcode() == ISD::TRUNCATE ||
        PromOp.getOpcode() == ISD::SIGN_EXTEND ||
        PromOp.getOpcode() == ISD::ZERO_EXTEND ||
        PromOp.getOpcode() == ISD::ANY_EXTEND) {
      if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
          PromOp.getOperand(0).getValueType() != MVT::i1) {
        // The operand is not yet ready (see comment below).
        PromOpHandles.emplace_front(PromOp);
        continue;
      }

      SDValue RepValue = PromOp.getOperand(0);
      if (isa<ConstantSDNode>(RepValue))
        RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);

      DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
      continue;
    }

    unsigned C;
    switch (PromOp.getOpcode()) {
    default: C = 0; break;
    case ISD::SELECT: C = 1; break;
    case ISD::SELECT_CC: C = 2; break;
    }

    if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
         PromOp.getOperand(C).getValueType() != MVT::i1) ||
        (!isa<ConstantSDNode>(PromOp.getOperand(C + 1)) &&
         PromOp.getOperand(C + 1).getValueType() != MVT::i1)) {
      // The to-be-promoted operands of this node have not yet been
      // promoted (this should be rare because we're going through the
      // list backward, but if one of the operands has several users in
      // this cluster of to-be-promoted nodes, it is possible).
      PromOpHandles.emplace_front(PromOp);
      continue;
    }

    SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
                                PromOp.getNode()->op_end());

    // If there are any constant inputs, make sure they're replaced now.
    for (unsigned i = 0; i < 2; ++i)
      if (isa<ConstantSDNode>(Ops[C + i]))
        Ops[C + i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C + i]);

    DAG.ReplaceAllUsesOfValueWith(PromOp,
        DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
  }

  // Now we're left with the initial truncation itself.
  if (N->getOpcode() == ISD::TRUNCATE)
    return N->getOperand(0);

  // Otherwise, this is a comparison. The operands to be compared have just
  // changed type (to i1), but everything else is the same.
  return SDValue(N, 0);
}

SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);

  // If we're tracking CR bits, we need to be careful that we don't have:
  //   zext(binary-ops(trunc(x), trunc(y)))
  // or
  //   zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...))
  // such that we're unnecessarily moving things into CR bits that can more
  // efficiently stay in GPRs. Note that if we're not certain that the high
  // bits are set as required by the final extension, we still may need to do
  // some masking to get the proper behavior.

  // This same functionality is important on PPC64 when dealing with
  // 32-to-64-bit extensions; these occur often when 32-bit values are used
  // as the return values of functions. Because it is so similar, it is
  // handled here as well.
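  //
  // For example (a sketch of the 32-to-64-bit case):
  //   zext (xor (trunc i64 %x to i32), (trunc i64 %y to i32)) to i64
  // can instead XOR the original i64 values directly, with any required
  // masking or shifting of the high bits applied once at the end.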

  if (N->getValueType(0) != MVT::i32 &&
      N->getValueType(0) != MVT::i64)
    return SDValue();

  if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
        (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
    return SDValue();

  if (N->getOperand(0).getOpcode() != ISD::AND &&
      N->getOperand(0).getOpcode() != ISD::OR &&
      N->getOperand(0).getOpcode() != ISD::XOR &&
      N->getOperand(0).getOpcode() != ISD::SELECT &&
      N->getOperand(0).getOpcode() != ISD::SELECT_CC)
    return SDValue();

  SmallVector<SDValue, 4> Inputs;
  SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
  SmallPtrSet<SDNode *, 16> Visited;

  // Visit all inputs, collect all binary operations (and, or, xor and
  // select) that are all fed by truncations.
  while (!BinOps.empty()) {
    SDValue BinOp = BinOps.pop_back_val();

    if (!Visited.insert(BinOp.getNode()).second)
      continue;

    PromOps.push_back(BinOp);

    for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
      // The condition of the select is not promoted.
      if (BinOp.getOpcode() == ISD::SELECT && i == 0)
        continue;
      if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
        continue;

      if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
          isa<ConstantSDNode>(BinOp.getOperand(i))) {
        Inputs.push_back(BinOp.getOperand(i));
      } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
                 BinOp.getOperand(i).getOpcode() == ISD::OR ||
                 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
        BinOps.push_back(BinOp.getOperand(i));
      } else {
        // We have an input that is not a truncation or another binary
        // operation; we'll abort this transformation.
        return SDValue();
      }
    }
  }

  // The operands of a select that must be truncated when the select is
  // promoted because the operand is actually part of the to-be-promoted set.
  DenseMap<SDNode *, EVT> SelectTruncOp[2];

  // Make sure that this is a self-contained cluster of operations (which
  // is not quite the same thing as saying that everything has only one
  // use).
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
                              UE = Inputs[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // If we're going to promote the non-output-value operand(s) of a
      // SELECT or SELECT_CC, record them for truncation.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == Inputs[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                  User->getOperand(0).getValueType()));
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == Inputs[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                  User->getOperand(0).getValueType()));
        if (User->getOperand(1) == Inputs[i])
          SelectTruncOp[1].insert(std::make_pair(User,
                                  User->getOperand(1).getValueType()));
      }
    }
  }

  for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
    for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
                              UE = PromOps[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // If we're going to promote the non-output-value operand(s) of a
      // SELECT or SELECT_CC, record them for truncation.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == PromOps[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                  User->getOperand(0).getValueType()));
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == PromOps[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                  User->getOperand(0).getValueType()));
        if (User->getOperand(1) == PromOps[i])
          SelectTruncOp[1].insert(std::make_pair(User,
                                  User->getOperand(1).getValueType()));
      }
    }
  }

  unsigned PromBits = N->getOperand(0).getValueSizeInBits();
  bool ReallyNeedsExt = false;
  if (N->getOpcode() != ISD::ANY_EXTEND) {
    // If all of the inputs are not already sign/zero extended, then
    // we'll still need to do that at the end.
    for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
      if (isa<ConstantSDNode>(Inputs[i]))
        continue;

      unsigned OpBits = Inputs[i].getOperand(0).getValueSizeInBits();
      assert(PromBits < OpBits && "Truncation not to a smaller bit count?");

      if ((N->getOpcode() == ISD::ZERO_EXTEND &&
           !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
                                  APInt::getHighBitsSet(OpBits,
                                                        OpBits - PromBits))) ||
          (N->getOpcode() == ISD::SIGN_EXTEND &&
           DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
               (OpBits - (PromBits - 1)))) {
        ReallyNeedsExt = true;
        break;
      }
    }
  }

  // Replace all inputs, either with the truncation operand, or a
  // truncation or extension to the final output type.
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    // Constant inputs need to be replaced with the to-be-promoted nodes that
    // use them because they might have users outside of the cluster of
    // promoted nodes.
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    SDValue InSrc = Inputs[i].getOperand(0);
    if (Inputs[i].getValueType() == N->getValueType(0))
      DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
    else if (N->getOpcode() == ISD::SIGN_EXTEND)
      DAG.ReplaceAllUsesOfValueWith(Inputs[i],
          DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
    else if (N->getOpcode() == ISD::ZERO_EXTEND)
      DAG.ReplaceAllUsesOfValueWith(Inputs[i],
          DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
    else
      DAG.ReplaceAllUsesOfValueWith(Inputs[i],
          DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
  }

  std::list<HandleSDNode> PromOpHandles;
  for (auto &PromOp : PromOps)
    PromOpHandles.emplace_back(PromOp);

  // Replace all operations (these are all the same, but have a different
  // (promoted) return type). DAG.getNode will validate that the types of
  // a binary operator match, so go through the list in reverse so that
  // we've likely promoted both operands first.
  while (!PromOpHandles.empty()) {
    SDValue PromOp = PromOpHandles.back().getValue();
    PromOpHandles.pop_back();

    unsigned C;
    switch (PromOp.getOpcode()) {
    default: C = 0; break;
    case ISD::SELECT: C = 1; break;
    case ISD::SELECT_CC: C = 2; break;
    }

    if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
         PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
        (!isa<ConstantSDNode>(PromOp.getOperand(C + 1)) &&
         PromOp.getOperand(C + 1).getValueType() != N->getValueType(0))) {
      // The to-be-promoted operands of this node have not yet been
      // promoted (this should be rare because we're going through the
      // list backward, but if one of the operands has several users in
      // this cluster of to-be-promoted nodes, it is possible).
      PromOpHandles.emplace_front(PromOp);
      continue;
    }

    // For SELECT and SELECT_CC nodes, we do a similar check for any
    // to-be-promoted comparison inputs.
    if (PromOp.getOpcode() == ISD::SELECT ||
        PromOp.getOpcode() == ISD::SELECT_CC) {
      if ((SelectTruncOp[0].count(PromOp.getNode()) &&
           PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
          (SelectTruncOp[1].count(PromOp.getNode()) &&
           PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
        PromOpHandles.emplace_front(PromOp);
        continue;
      }
    }

    SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
                                PromOp.getNode()->op_end());

    // If this node has constant inputs, then they'll need to be promoted
    // here.
    for (unsigned i = 0; i < 2; ++i) {
      if (!isa<ConstantSDNode>(Ops[C + i]))
        continue;
      if (Ops[C + i].getValueType() == N->getValueType(0))
        continue;

      if (N->getOpcode() == ISD::SIGN_EXTEND)
        Ops[C + i] = DAG.getSExtOrTrunc(Ops[C + i], dl, N->getValueType(0));
      else if (N->getOpcode() == ISD::ZERO_EXTEND)
        Ops[C + i] = DAG.getZExtOrTrunc(Ops[C + i], dl, N->getValueType(0));
      else
        Ops[C + i] = DAG.getAnyExtOrTrunc(Ops[C + i], dl, N->getValueType(0));
    }

    // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
    // truncate them again to the original value type.
    if (PromOp.getOpcode() == ISD::SELECT ||
        PromOp.getOpcode() == ISD::SELECT_CC) {
      auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
      if (SI0 != SelectTruncOp[0].end())
        Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
      auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
      if (SI1 != SelectTruncOp[1].end())
        Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
    }

    DAG.ReplaceAllUsesOfValueWith(PromOp,
      DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
  }

  // Now we're left with the initial extension itself.
  if (!ReallyNeedsExt)
    return N->getOperand(0);

  // To zero extend, just mask off everything except for the first bit (in the
  // i1 case).
  if (N->getOpcode() == ISD::ZERO_EXTEND)
    return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
                       DAG.getConstant(APInt::getLowBitsSet(
                                         N->getValueSizeInBits(0), PromBits),
                                       dl, N->getValueType(0)));

  assert(N->getOpcode() == ISD::SIGN_EXTEND &&
         "Invalid extension type");
  EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
  SDValue ShiftCst =
      DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
  return DAG.getNode(
      ISD::SRA, dl, N->getValueType(0),
      DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
      ShiftCst);
}

SDValue PPCTargetLowering::combineSetCC(SDNode *N,
                                        DAGCombinerInfo &DCI) const {
  assert(N->getOpcode() == ISD::SETCC &&
         "Should be called with a SETCC node");

  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
  if (CC == ISD::SETNE || CC == ISD::SETEQ) {
    SDValue LHS = N->getOperand(0);
    SDValue RHS = N->getOperand(1);

    // If there is a '0 - y' pattern, canonicalize the pattern to the RHS.
    if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
        LHS.hasOneUse())
      std::swap(LHS, RHS);

    // x == 0-y --> x+y == 0
    // x != 0-y --> x+y != 0
    if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
        RHS.hasOneUse()) {
      SDLoc DL(N);
      SelectionDAG &DAG = DCI.DAG;
      EVT VT = N->getValueType(0);
      EVT OpVT = LHS.getValueType();
      SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
      return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
    }
  }

  return DAGCombineTruncBoolExt(N, DCI);
}

// Is this an extending load from an f32 to an f64?
static bool isFPExtLoad(SDValue Op) {
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()))
    return LD->getExtensionType() == ISD::EXTLOAD &&
           Op.getValueType() == MVT::f64;
  return false;
}

/// Reduces the number of fp-to-int conversions when building a vector.
///
/// If this vector is built out of floating to integer conversions,
/// transform it to a vector built out of floating point values followed by a
/// single floating to integer conversion of the vector.
/// Namely  (build_vector (fptosi $A), (fptosi $B), ...)
/// becomes (fptosi (build_vector ($A, $B, ...)))
SDValue PPCTargetLowering::
combineElementTruncationToVectorTruncation(SDNode *N,
                                           DAGCombinerInfo &DCI) const {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         "Should be called with a BUILD_VECTOR node");

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);

  SDValue FirstInput = N->getOperand(0);
  assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
         "The input operand must be an fp-to-int conversion.");

  // This combine happens after legalization so the fp_to_[su]i nodes are
  // already converted to PPCISD nodes.
  unsigned FirstConversion = FirstInput.getOperand(0).getOpcode();
  if (FirstConversion == PPCISD::FCTIDZ ||
      FirstConversion == PPCISD::FCTIDUZ ||
      FirstConversion == PPCISD::FCTIWZ ||
      FirstConversion == PPCISD::FCTIWUZ) {
    bool IsSplat = true;
    bool Is32Bit = FirstConversion == PPCISD::FCTIWZ ||
      FirstConversion == PPCISD::FCTIWUZ;
    EVT SrcVT = FirstInput.getOperand(0).getValueType();
    SmallVector<SDValue, 4> Ops;
    EVT TargetVT = N->getValueType(0);
    for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
      SDValue NextOp = N->getOperand(i);
      if (NextOp.getOpcode() != PPCISD::MFVSR)
        return SDValue();
      unsigned NextConversion = NextOp.getOperand(0).getOpcode();
      if (NextConversion != FirstConversion)
        return SDValue();
      // If we are converting to 32-bit integers, we need to add an FP_ROUND.
      // This is not valid if the input was originally double precision. It is
      // also not profitable to do unless this is an extending load in which
      // case doing this combine will allow us to combine consecutive loads.
      if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0)))
        return SDValue();
      if (N->getOperand(i) != FirstInput)
        IsSplat = false;
    }

    // If this is a splat, we leave it as-is since there will be only a single
    // fp-to-int conversion followed by a splat of the integer. This is better
    // for 32-bit and smaller ints and neutral for 64-bit ints.
    if (IsSplat)
      return SDValue();

    // Now that we know we have the right type of node, get its operands.
    for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
      SDValue In = N->getOperand(i).getOperand(0);
      if (Is32Bit) {
        // For 32-bit values, we need to add an FP_ROUND node (if we made it
        // here, we know that all inputs are extending loads so this is safe).
        if (In.isUndef())
          Ops.push_back(DAG.getUNDEF(SrcVT));
        else {
          SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl,
                                      MVT::f32, In.getOperand(0),
                                      DAG.getIntPtrConstant(1, dl));
          Ops.push_back(Trunc);
        }
      } else
        Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
    }

    unsigned Opcode;
    if (FirstConversion == PPCISD::FCTIDZ ||
        FirstConversion == PPCISD::FCTIWZ)
      Opcode = ISD::FP_TO_SINT;
    else
      Opcode = ISD::FP_TO_UINT;

    EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;
    SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
    return DAG.getNode(Opcode, dl, TargetVT, BV);
  }
  return SDValue();
}

/// Reduce the number of loads when building a vector.
///
/// Building a vector out of multiple loads can be converted to a load
/// of the vector type if the loads are consecutive.
/// If the loads are consecutive but in descending order, a shuffle is added
/// at the end to reorder the vector.
static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         "Should be called with a BUILD_VECTOR node");

  SDLoc dl(N);

  // Return early for non-byte-sized types, as they can't be consecutive.
  if (!N->getValueType(0).getVectorElementType().isByteSized())
    return SDValue();

  bool InputsAreConsecutiveLoads = true;
  bool InputsAreReverseConsecutive = true;
  unsigned ElemSize = N->getValueType(0).getScalarType().getStoreSize();
  SDValue FirstInput = N->getOperand(0);
  bool IsRoundOfExtLoad = false;

  if (FirstInput.getOpcode() == ISD::FP_ROUND &&
      FirstInput.getOperand(0).getOpcode() == ISD::LOAD) {
    LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0));
    IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD;
  }
  // Not a build vector of (possibly fp_rounded) loads.
  if ((!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) ||
      N->getNumOperands() == 1)
    return SDValue();

  for (int i = 1, e = N->getNumOperands(); i < e; ++i) {
    // If any inputs are fp_round(extload), they all must be.
    if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND)
      return SDValue();

    SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) :
      N->getOperand(i);
    if (NextInput.getOpcode() != ISD::LOAD)
      return SDValue();

    SDValue PreviousInput =
      IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1);
    LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput);
    LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput);

    // If any inputs are fp_round(extload), they all must be.
    if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD)
      return SDValue();

    if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG))
      InputsAreConsecutiveLoads = false;
    if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG))
      InputsAreReverseConsecutive = false;

    // Exit early if the loads are neither consecutive nor reverse consecutive.
    if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
      return SDValue();
  }

  assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
         "The loads cannot be both consecutive and reverse consecutive.");

  SDValue FirstLoadOp =
    IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput;
  SDValue LastLoadOp =
    IsRoundOfExtLoad ?
      N->getOperand(N->getNumOperands()-1).getOperand(0) :
      N->getOperand(N->getNumOperands()-1);

  LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
  LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
  if (InputsAreConsecutiveLoads) {
    assert(LD1 && "Input needs to be a LoadSDNode.");
    return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
                       LD1->getBasePtr(), LD1->getPointerInfo(),
                       LD1->getAlignment());
  }
  if (InputsAreReverseConsecutive) {
    assert(LDL && "Input needs to be a LoadSDNode.");
    SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
                               LDL->getBasePtr(), LDL->getPointerInfo(),
                               LDL->getAlignment());
    SmallVector<int, 16> Ops;
    for (int i = N->getNumOperands() - 1; i >= 0; i--)
      Ops.push_back(i);

    return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
                                DAG.getUNDEF(N->getValueType(0)), Ops);
  }
  return SDValue();
}

// This function adds the required vector_shuffle needed to get
// the elements of the vector extract in the correct position
// as specified by the CorrectElems encoding.
static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
                                      SDValue Input, uint64_t Elems,
                                      uint64_t CorrectElems) {
  SDLoc dl(N);

  unsigned NumElems = Input.getValueType().getVectorNumElements();
  SmallVector<int, 16> ShuffleMask(NumElems, -1);

  // Knowing the element indices being extracted from the original
  // vector and the order in which they're being inserted, just put
  // them at element indices required for the instruction.
  for (unsigned i = 0; i < N->getNumOperands(); i++) {
    if (DAG.getDataLayout().isLittleEndian())
      ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
    else
      ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
    CorrectElems = CorrectElems >> 8;
    Elems = Elems >> 8;
  }

  SDValue Shuffle =
      DAG.getVectorShuffle(Input.getValueType(), dl, Input,
                           DAG.getUNDEF(Input.getValueType()), ShuffleMask);

  EVT VT = N->getValueType(0);
  SDValue Conv = DAG.getBitcast(VT, Shuffle);

  EVT ExtVT = EVT::getVectorVT(*DAG.getContext(),
                               Input.getValueType().getVectorElementType(),
                               VT.getVectorNumElements());
  return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Conv,
                     DAG.getValueType(ExtVT));
}

// Look for build vector patterns where input operands come from sign
// extended vector_extract elements of specific indices. If the correct indices
// aren't used, add a vector shuffle to fix up the indices and create a
// SIGN_EXTEND_INREG node which selects the vector sign extend instructions
// during instruction selection.
static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
  // This array encodes the indices that the vector sign extend instructions
  // extract from when extending from one type to another for both BE and LE.
  // The right nibble of each byte corresponds to the LE indices,
  // and the left nibble of each byte corresponds to the BE indices.
  // For example: 0x3074B8FC  byte->word
  // For LE: the allowed indices are: 0x0,0x4,0x8,0xC
  // For BE: the allowed indices are: 0x3,0x7,0xB,0xF
  // For example: 0x000070F8  byte->double word
  // For LE: the allowed indices are: 0x0,0x8
  // For BE: the allowed indices are: 0x7,0xF
  uint64_t TargetElems[] = {
      0x3074B8FC, // b->w
      0x000070F8, // b->d
      0x10325476, // h->w
      0x00003074, // h->d
      0x00001032, // w->d
  };

  uint64_t Elems = 0;
  int Index;
  SDValue Input;

  auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
    if (!Op)
      return false;
    if (Op.getOpcode() != ISD::SIGN_EXTEND &&
        Op.getOpcode() != ISD::SIGN_EXTEND_INREG)
      return false;

    // A SIGN_EXTEND_INREG might be fed by an ANY_EXTEND to produce a value
    // of the right width.
    SDValue Extract = Op.getOperand(0);
    if (Extract.getOpcode() == ISD::ANY_EXTEND)
      Extract = Extract.getOperand(0);
    if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return false;

    ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
    if (!ExtOp)
      return false;

    Index = ExtOp->getZExtValue();
    if (Input && Input != Extract.getOperand(0))
      return false;

    if (!Input)
      Input = Extract.getOperand(0);

    Elems = Elems << 8;
    Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
    Elems |= Index;

    return true;
  };

  // If the build vector operands aren't sign-extended vector extracts
  // of the same input vector, then return.
  for (unsigned i = 0; i < N->getNumOperands(); i++) {
    if (!isSExtOfVecExtract(N->getOperand(i))) {
      return SDValue();
    }
  }

  // If the vector extract indices are not correct, add the appropriate
  // vector_shuffle.
  int TgtElemArrayIdx;
  int InputSize = Input.getValueType().getScalarSizeInBits();
  int OutputSize = N->getValueType(0).getScalarSizeInBits();
  if (InputSize + OutputSize == 40)
    TgtElemArrayIdx = 0;
  else if (InputSize + OutputSize == 72)
    TgtElemArrayIdx = 1;
  else if (InputSize + OutputSize == 48)
    TgtElemArrayIdx = 2;
  else if (InputSize + OutputSize == 80)
    TgtElemArrayIdx = 3;
  else if (InputSize + OutputSize == 96)
    TgtElemArrayIdx = 4;
  else
    return SDValue();

  uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
  CorrectElems = DAG.getDataLayout().isLittleEndian()
                     ? CorrectElems & 0x0F0F0F0F0F0F0F0F
                     : CorrectElems & 0xF0F0F0F0F0F0F0F0;
  if (Elems != CorrectElems) {
    return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
  }

  // Regular lowering will catch cases where a shuffle is not needed.
  return SDValue();
}

// Look for the pattern of a load from a narrow width to i128, feeding
// into a BUILD_VECTOR of v1i128. Replace this sequence with a PPCISD node
// (LXVRZX). This node represents a zero extending load that will be matched
// to the Load VSX Vector Rightmost instructions.
static SDValue combineBVZEXTLOAD(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);

  // This combine is only eligible for a BUILD_VECTOR of v1i128.
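  // A sketch of the pattern being matched (node numbering and types are
  // illustrative only):
  //   t1: i128,ch = load<(zextload (s64))> t0, t2, undef:i64
  //   t3: v1i128 = BUILD_VECTOR t1
  // becomes a single PPCISD::LXVRZX memory intrinsic node that produces
  // { v1i128, ch } directly.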
  if (N->getValueType(0) != MVT::v1i128)
    return SDValue();

  SDValue Operand = N->getOperand(0);
  // Proceed with the transformation if the operand to the BUILD_VECTOR
  // is a load instruction.
  if (Operand.getOpcode() != ISD::LOAD)
    return SDValue();

  LoadSDNode *LD = dyn_cast<LoadSDNode>(Operand);
  EVT MemoryType = LD->getMemoryVT();

  // This transformation is only valid if we are loading either a byte,
  // halfword, word, or doubleword.
  bool ValidLDType = MemoryType == MVT::i8 || MemoryType == MVT::i16 ||
                     MemoryType == MVT::i32 || MemoryType == MVT::i64;

  // Ensure that the load from the narrow width is being zero extended to i128.
  if (!ValidLDType ||
      (LD->getExtensionType() != ISD::ZEXTLOAD &&
       LD->getExtensionType() != ISD::EXTLOAD))
    return SDValue();

  SDValue LoadOps[] = {
      LD->getChain(), LD->getBasePtr(),
      DAG.getIntPtrConstant(MemoryType.getScalarSizeInBits(), DL)};

  return DAG.getMemIntrinsicNode(PPCISD::LXVRZX, DL,
                                 DAG.getVTList(MVT::v1i128, MVT::Other),
                                 LoadOps, MemoryType, LD->getMemOperand());
}

SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         "Should be called with a BUILD_VECTOR node");

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);

  if (!Subtarget.hasVSX())
    return SDValue();

  // The target independent DAG combiner will leave a build_vector of
  // float-to-int conversions intact. We can generate MUCH better code for
  // a float-to-int conversion of a vector of floats.
  SDValue FirstInput = N->getOperand(0);
  if (FirstInput.getOpcode() == PPCISD::MFVSR) {
    SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
    if (Reduced)
      return Reduced;
  }

  // If we're building a vector out of consecutive loads, just load that
  // vector type.
  SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG);
  if (Reduced)
    return Reduced;

  // If we're building a vector out of extended elements from another vector
  // we have P9 vector integer extend instructions. The code assumes legal
  // input types (i.e. it can't handle things like v4i16) so do not run before
  // legalization.
  if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) {
    Reduced = combineBVOfVecSExt(N, DAG);
    if (Reduced)
      return Reduced;
  }

  // On Power10, the Load VSX Vector Rightmost instructions can be utilized
  // if this is a BUILD_VECTOR of v1i128, and if the operand to the BUILD_VECTOR
  // is a load from <valid narrow width> to i128.
  if (Subtarget.isISA3_1()) {
    SDValue BVOfZLoad = combineBVZEXTLOAD(N, DAG);
    if (BVOfZLoad)
      return BVOfZLoad;
  }

  if (N->getValueType(0) != MVT::v2f64)
    return SDValue();

  // Looking for:
  // (build_vector ([su]int_to_fp (extractelt 0)), ([su]int_to_fp (extractelt 1)))
  if (FirstInput.getOpcode() != ISD::SINT_TO_FP &&
      FirstInput.getOpcode() != ISD::UINT_TO_FP)
    return SDValue();
  if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
      N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
    return SDValue();
  if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
    return SDValue();

  SDValue Ext1 = FirstInput.getOperand(0);
  SDValue Ext2 = N->getOperand(1).getOperand(0);
  if (Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    return SDValue();

  ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
  ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
  if (!Ext1Op || !Ext2Op)
    return SDValue();
  if (Ext1.getOperand(0).getValueType() != MVT::v4i32 ||
      Ext1.getOperand(0) != Ext2.getOperand(0))
    return SDValue();

  int FirstElem = Ext1Op->getZExtValue();
  int SecondElem = Ext2Op->getZExtValue();
  int SubvecIdx;
  if (FirstElem == 0 && SecondElem == 1)
    SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
  else if (FirstElem == 2 && SecondElem == 3)
    SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
  else
    return SDValue();

  SDValue SrcVec = Ext1.getOperand(0);
  auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
    PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
  return DAG.getNode(NodeType, dl, MVT::v2f64,
                     SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
}

SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::SINT_TO_FP ||
          N->getOpcode() == ISD::UINT_TO_FP) &&
         "Need an int -> FP conversion node here");

  if (useSoftFloat() || !Subtarget.has64BitSupport())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Op(N, 0);

  // Don't handle ppc_fp128 here or conversions that are out-of-range capable
  // from the hardware.
  if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
    return SDValue();
  if (!Op.getOperand(0).getValueType().isSimple())
    return SDValue();
  if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
      Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
    return SDValue();

  SDValue FirstOperand(Op.getOperand(0));
  bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
    (FirstOperand.getValueType() == MVT::i8 ||
     FirstOperand.getValueType() == MVT::i16);
  if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
    bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
    bool DstDouble = Op.getValueType() == MVT::f64;
    unsigned ConvOp = Signed ?
      (DstDouble ? PPCISD::FCFID  : PPCISD::FCFIDS) :
      (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
    SDValue WidthConst =
      DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ?
                              1 : 2, dl, false);
    LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
    SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
    SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
                                         DAG.getVTList(MVT::f64, MVT::Other),
                                         Ops, MVT::i8, LDN->getMemOperand());

    // For signed conversion, we need to sign-extend the value in the VSR.
    if (Signed) {
      SDValue ExtOps[] = { Ld, WidthConst };
      SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
      return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
    } else
      return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
  }

  // For i32 intermediate values, unfortunately, the conversion functions
  // leave the upper 32 bits of the value undefined. Within the set of
  // scalar instructions, we have no method for zero- or sign-extending the
  // value. Thus, we cannot handle i32 intermediate values here.
  if (Op.getOperand(0).getValueType() == MVT::i32)
    return SDValue();

  assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
         "UINT_TO_FP is supported only with FPCVT");

  // If we have FCFIDS, then use it when converting to single-precision.
  // Otherwise, convert to double-precision and then round.
  unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                       ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
                                                            : PPCISD::FCFIDS)
                       : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
                                                            : PPCISD::FCFID);
  MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                  ? MVT::f32
                  : MVT::f64;

  // If we're converting from a float to an int, and back to a float again,
  // then we don't need the store/load pair at all.
  if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
       Subtarget.hasFPCVT()) ||
      (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
    SDValue Src = Op.getOperand(0).getOperand(0);
    if (Src.getValueType() == MVT::f32) {
      Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
      DCI.AddToWorklist(Src.getNode());
    } else if (Src.getValueType() != MVT::f64) {
      // Make sure that we don't pick up a ppc_fp128 source value.
      return SDValue();
    }

    unsigned FCTOp =
      Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
                                                        PPCISD::FCTIDUZ;

    SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
    SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);

    if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
      FP = DAG.getNode(ISD::FP_ROUND, dl,
                       MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
      DCI.AddToWorklist(FP.getNode());
    }

    return FP;
  }

  return SDValue();
}

// expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
// builtins) into loads with swaps.
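// A sketch of the rewrite this performs (node numbering is illustrative
// only), for a v4i32 load on a little endian subtarget:
//   t2: v4i32,ch = load t0, t3, undef:i64
// becomes:
//   t4: v2f64,ch = PPCISD::LXVD2X t0, t3
//   t5: v2f64,ch = PPCISD::XXSWAPD t4:1, t4
//   t6: v4i32 = bitcast t5
// (packaged with a MERGE_VALUES so the result shape matches the original
// load). The PPCVSXSwapRemoval pass later removes swaps it can prove
// unnecessary.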
SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Chain;
  SDValue Base;
  MachineMemOperand *MMO;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode for little endian VSX load");
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(N);
    Chain = LD->getChain();
    Base = LD->getBasePtr();
    MMO = LD->getMemOperand();
    // If the MMO suggests this isn't a load of a full vector, leave
    // things alone. For a built-in, we have to make the change for
    // correctness, so if there is a size problem that will be a bug.
    if (MMO->getSize() < 16)
      return SDValue();
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
    Chain = Intrin->getChain();
    // Similarly to the store case below, Intrin->getBasePtr() doesn't get
    // us what we want. Get operand 2 instead.
    Base = Intrin->getOperand(2);
    MMO = Intrin->getMemOperand();
    break;
  }
  }

  MVT VecTy = N->getValueType(0).getSimpleVT();

  // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is
  // aligned and the type is a vector with elements up to 4 bytes.
  if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
      VecTy.getScalarSizeInBits() <= 32) {
    return SDValue();
  }

  SDValue LoadOps[] = { Chain, Base };
  SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
                                         DAG.getVTList(MVT::v2f64, MVT::Other),
                                         LoadOps, MVT::v2f64, MMO);

  DCI.AddToWorklist(Load.getNode());
  Chain = Load.getValue(1);
  SDValue Swap = DAG.getNode(
      PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load);
  DCI.AddToWorklist(Swap.getNode());

  // Add a bitcast if the resulting load type doesn't match v2f64.
  if (VecTy != MVT::v2f64) {
    SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap);
    DCI.AddToWorklist(N.getNode());
    // Package {bitcast value, swap's chain} to match Load's shape.
    return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other),
                       N, Swap.getValue(1));
  }

  return Swap;
}

// expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
// builtins) into stores with swaps.
SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Chain;
  SDValue Base;
  unsigned SrcOpnd;
  MachineMemOperand *MMO;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode for little endian VSX store");
  case ISD::STORE: {
    StoreSDNode *ST = cast<StoreSDNode>(N);
    Chain = ST->getChain();
    Base = ST->getBasePtr();
    MMO = ST->getMemOperand();
    SrcOpnd = 1;
    // If the MMO suggests this isn't a store of a full vector, leave
    // things alone. For a built-in, we have to make the change for
    // correctness, so if there is a size problem that will be a bug.
    if (MMO->getSize() < 16)
      return SDValue();
    break;
  }
  case ISD::INTRINSIC_VOID: {
    MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
    Chain = Intrin->getChain();
    // Intrin->getBasePtr() oddly does not get what we want.
    Base = Intrin->getOperand(3);
    MMO = Intrin->getMemOperand();
    SrcOpnd = 2;
    break;
  }
  }

  SDValue Src = N->getOperand(SrcOpnd);
  MVT VecTy = Src.getValueType().getSimpleVT();

  // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the store is
  // aligned and the type is a vector with elements up to 4 bytes.
  if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
      VecTy.getScalarSizeInBits() <= 32) {
    return SDValue();
  }

  // All stores are done as v2f64 and a possible bitcast.
  if (VecTy != MVT::v2f64) {
    Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
    DCI.AddToWorklist(Src.getNode());
  }

  SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
                             DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
  DCI.AddToWorklist(Swap.getNode());
  Chain = Swap.getValue(1);
  SDValue StoreOps[] = { Chain, Swap, Base };
  SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
                                          DAG.getVTList(MVT::Other),
                                          StoreOps, VecTy, MMO);
  DCI.AddToWorklist(Store.getNode());
  return Store;
}

// Handle DAG combine for STORE (FP_TO_INT F).
SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  unsigned Opcode = N->getOperand(1).getOpcode();

  assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT)
         && "Not a FP_TO_INT Instruction!");

  SDValue Val = N->getOperand(1).getOperand(0);
  EVT Op1VT = N->getOperand(1).getValueType();
  EVT ResVT = Val.getValueType();

  if (!isTypeLegal(ResVT))
    return SDValue();

  // Only perform combine for conversion to i64/i32 or power9 i16/i8.
  bool ValidTypeForStoreFltAsInt =
        (Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
         (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));

  if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Vector() ||
      cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
    return SDValue();

  // Extend f32 values to f64.
  if (ResVT.getScalarSizeInBits() == 32) {
    Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
    DCI.AddToWorklist(Val.getNode());
  }

  // Set signed or unsigned conversion opcode.
  unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT) ?
    PPCISD::FP_TO_SINT_IN_VSR :
    PPCISD::FP_TO_UINT_IN_VSR;

  Val = DAG.getNode(ConvOpcode,
                    dl, ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val);
  DCI.AddToWorklist(Val.getNode());

  // Set number of bytes being converted.
  unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8;
  SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2),
                    DAG.getIntPtrConstant(ByteSize, dl, false),
                    DAG.getValueType(Op1VT) };

  Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl,
          DAG.getVTList(MVT::Other), Ops,
          cast<StoreSDNode>(N)->getMemoryVT(),
          cast<StoreSDNode>(N)->getMemOperand());

  DCI.AddToWorklist(Val.getNode());
  return Val;
}

static bool isAlternatingShuffMask(const ArrayRef<int> &Mask, int NumElts) {
  // Check that the source of the element keeps flipping
  // (i.e. Mask[i] < NumElts -> Mask[i+1] >= NumElts).
  bool PrevElemFromFirstVec = Mask[0] < NumElts;
  for (int i = 1, e = Mask.size(); i < e; i++) {
    if (PrevElemFromFirstVec && Mask[i] < NumElts)
      return false;
    if (!PrevElemFromFirstVec && Mask[i] >= NumElts)
      return false;
    PrevElemFromFirstVec = !PrevElemFromFirstVec;
  }
  return true;
}

static bool isSplatBV(SDValue Op) {
  if (Op.getOpcode() != ISD::BUILD_VECTOR)
    return false;
  SDValue FirstOp;

  // Find first non-undef input.
  for (int i = 0, e = Op.getNumOperands(); i < e; i++) {
    FirstOp = Op.getOperand(i);
    if (!FirstOp.isUndef())
      break;
  }

  // All inputs are undef or the same as the first non-undef input.
  for (int i = 1, e = Op.getNumOperands(); i < e; i++)
    if (Op.getOperand(i) != FirstOp && !Op.getOperand(i).isUndef())
      return false;
  return true;
}

static SDValue isScalarToVec(SDValue Op) {
  if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
    return Op;
  if (Op.getOpcode() != ISD::BITCAST)
    return SDValue();
  Op = Op.getOperand(0);
  if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
    return Op;
  return SDValue();
}

static void fixupShuffleMaskForPermutedSToV(SmallVectorImpl<int> &ShuffV,
                                            int LHSMaxIdx, int RHSMinIdx,
                                            int RHSMaxIdx, int HalfVec) {
  for (int i = 0, e = ShuffV.size(); i < e; i++) {
    int Idx = ShuffV[i];
    if ((Idx >= 0 && Idx < LHSMaxIdx) || (Idx >= RHSMinIdx && Idx < RHSMaxIdx))
      ShuffV[i] += HalfVec;
  }
}

// Replace a SCALAR_TO_VECTOR with a SCALAR_TO_VECTOR_PERMUTED except if
// the original is:
// (<n x Ty> (scalar_to_vector (Ty (extract_elt <n x Ty> %a, C))))
// In such a case, just change the shuffle mask to extract the element
// from the permuted index.
static SDValue getSToVPermuted(SDValue OrigSToV, SelectionDAG &DAG) {
  SDLoc dl(OrigSToV);
  EVT VT = OrigSToV.getValueType();
  assert(OrigSToV.getOpcode() == ISD::SCALAR_TO_VECTOR &&
         "Expecting a SCALAR_TO_VECTOR here");
  SDValue Input = OrigSToV.getOperand(0);

  if (Input.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
    ConstantSDNode *Idx = dyn_cast<ConstantSDNode>(Input.getOperand(1));
    SDValue OrigVector = Input.getOperand(0);

    // Can't handle non-const element indices or different vector types
    // for the input to the extract and the output of the scalar_to_vector.
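    // Illustrative case (types assumed): for
    //   (v2i64 (scalar_to_vector (i64 (extract_elt v2i64:%a, 0))))
    // the value already lives somewhere in %a, so rather than emitting a
    // SCALAR_TO_VECTOR_PERMUTED we shuffle %a with itself, placing element 0
    // at position NumElts / 2 (index 1 for v2i64).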
    if (Idx && VT == OrigVector.getValueType()) {
      SmallVector<int, 16> NewMask(VT.getVectorNumElements(), -1);
      NewMask[VT.getVectorNumElements() / 2] = Idx->getZExtValue();
      return DAG.getVectorShuffle(VT, dl, OrigVector, OrigVector, NewMask);
    }
  }
  return DAG.getNode(PPCISD::SCALAR_TO_VECTOR_PERMUTED, dl, VT,
                     OrigSToV.getOperand(0));
}

// On little endian subtargets, combine shuffles such as:
// vector_shuffle<16,1,17,3,18,5,19,7,20,9,21,11,22,13,23,15>, <zero>, %b
// into:
// vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7>, <zero>, %b
// because the latter can be matched to a single instruction merge.
// Furthermore, SCALAR_TO_VECTOR on little endian always involves a permute
// to put the value into element zero. Adjust the shuffle mask so that the
// vector can remain in permuted form (to prevent a swap prior to a shuffle).
SDValue PPCTargetLowering::combineVectorShuffle(ShuffleVectorSDNode *SVN,
                                                SelectionDAG &DAG) const {
  SDValue LHS = SVN->getOperand(0);
  SDValue RHS = SVN->getOperand(1);
  auto Mask = SVN->getMask();
  int NumElts = LHS.getValueType().getVectorNumElements();
  SDValue Res(SVN, 0);
  SDLoc dl(SVN);

  // None of these combines are useful on big endian systems since the ISA
  // already has a big endian bias.
  if (!Subtarget.isLittleEndian() || !Subtarget.hasVSX())
    return Res;

  // If this is not a shuffle of a shuffle and the first element comes from
  // the second vector, canonicalize to the commuted form. This will make it
  // more likely to match one of the single instruction patterns.
  if (Mask[0] >= NumElts && LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
      RHS.getOpcode() != ISD::VECTOR_SHUFFLE) {
    std::swap(LHS, RHS);
    Res = DAG.getCommutedVectorShuffle(*SVN);
    Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
  }

  // Adjust the shuffle mask if either input vector comes from a
  // SCALAR_TO_VECTOR and keep the respective input vector in permuted
  // form (to prevent the need for a swap).
  SmallVector<int, 16> ShuffV(Mask.begin(), Mask.end());
  SDValue SToVLHS = isScalarToVec(LHS);
  SDValue SToVRHS = isScalarToVec(RHS);
  if (SToVLHS || SToVRHS) {
    int NumEltsIn = SToVLHS ? SToVLHS.getValueType().getVectorNumElements()
                            : SToVRHS.getValueType().getVectorNumElements();
    int NumEltsOut = ShuffV.size();

    // Initially assume that neither input is permuted. These will be adjusted
    // accordingly if either input is.
    int LHSMaxIdx = -1;
    int RHSMinIdx = -1;
    int RHSMaxIdx = -1;
    int HalfVec = LHS.getValueType().getVectorNumElements() / 2;

    // Get the permuted scalar to vector nodes for the source(s) that come from
    // ISD::SCALAR_TO_VECTOR.
    if (SToVLHS) {
      // Set up the values for the shuffle vector fixup.
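      // E.g. (types assumed) for a v16i8 shuffle whose LHS is a bitcast of a
      // v4i32 scalar_to_vector: NumEltsIn == 4 and NumEltsOut == 16, so the
      // mask entries below LHSMaxIdx == 4 (the bytes of element zero) are the
      // ones that will be remapped to the permuted element's position.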
      LHSMaxIdx = NumEltsOut / NumEltsIn;
      SToVLHS = getSToVPermuted(SToVLHS, DAG);
      if (SToVLHS.getValueType() != LHS.getValueType())
        SToVLHS = DAG.getBitcast(LHS.getValueType(), SToVLHS);
      LHS = SToVLHS;
    }
    if (SToVRHS) {
      RHSMinIdx = NumEltsOut;
      RHSMaxIdx = NumEltsOut / NumEltsIn + RHSMinIdx;
      SToVRHS = getSToVPermuted(SToVRHS, DAG);
      if (SToVRHS.getValueType() != RHS.getValueType())
        SToVRHS = DAG.getBitcast(RHS.getValueType(), SToVRHS);
      RHS = SToVRHS;
    }

    // Fix up the shuffle mask to reflect where the desired element actually is.
    // The minimum and maximum indices that correspond to element zero for both
    // the LHS and RHS are computed and will control which shuffle mask entries
    // are to be changed. For example, if the RHS is permuted, any shuffle mask
    // entries in the range [RHSMinIdx,RHSMaxIdx) will be incremented by
    // HalfVec to refer to the corresponding element in the permuted vector.
    fixupShuffleMaskForPermutedSToV(ShuffV, LHSMaxIdx, RHSMinIdx, RHSMaxIdx,
                                    HalfVec);
    Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);

    // We may have simplified away the shuffle. We won't be able to do anything
    // further with it here.
    if (!isa<ShuffleVectorSDNode>(Res))
      return Res;
    Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
  }

  // The common case after we commuted the shuffle is that the RHS is a splat
  // and we have elements coming in from the splat at indices that are not
  // conducive to using a merge.
  // Example:
  // vector_shuffle<0,17,1,19,2,21,3,23,4,25,5,27,6,29,7,31> t1, <zero>
  if (!isSplatBV(RHS))
    return Res;

  // We are looking for a mask such that all even elements are from
  // one vector and all odd elements from the other.
  if (!isAlternatingShuffMask(Mask, NumElts))
    return Res;

  // Adjust the mask so we are pulling in the same index from the splat
  // as the index from the interesting vector in consecutive elements.
  // Example (even elements from first vector):
  // vector_shuffle<0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23> t1, <zero>
  if (Mask[0] < NumElts)
    for (int i = 1, e = Mask.size(); i < e; i += 2)
      ShuffV[i] = (ShuffV[i - 1] + NumElts);
  // Example (odd elements from first vector):
  // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7> t1, <zero>
  else
    for (int i = 0, e = Mask.size(); i < e; i += 2)
      ShuffV[i] = (ShuffV[i + 1] + NumElts);

  // If the RHS has undefs, we need to remove them since we may have created
  // a shuffle that adds those instead of the splat value.
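  // E.g. (v4i32 (build_vector %c, undef, %c, undef)) is rebuilt below as
  // (v4i32 (build_vector %c, %c, %c, %c)) so that mask entries which now
  // point into the RHS read the splat value rather than undef.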
  SDValue SplatVal = cast<BuildVectorSDNode>(RHS.getNode())->getSplatValue();
  RHS = DAG.getSplatBuildVector(RHS.getValueType(), dl, SplatVal);

  Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
  return Res;
}

SDValue PPCTargetLowering::combineVReverseMemOP(ShuffleVectorSDNode *SVN,
                                                LSBaseSDNode *LSBase,
                                                DAGCombinerInfo &DCI) const {
  assert((ISD::isNormalLoad(LSBase) || ISD::isNormalStore(LSBase)) &&
         "Not a reverse memop pattern!");

  auto IsElementReverse = [](const ShuffleVectorSDNode *SVN) -> bool {
    auto Mask = SVN->getMask();
    int i = 0;
    auto I = Mask.rbegin();
    auto E = Mask.rend();

    for (; I != E; ++I) {
      if (*I != i)
        return false;
      i++;
    }
    return true;
  };

  SelectionDAG &DAG = DCI.DAG;
  EVT VT = SVN->getValueType(0);

  if (!isTypeLegal(VT) || !Subtarget.isLittleEndian() || !Subtarget.hasVSX())
    return SDValue();

  // Before P9, we have the PPCVSXSwapRemoval pass to hack the element order.
  // This combine conflicts with that optimization, so we don't do it here.
  // See the comment in PPCVSXSwapRemoval.cpp.
  if (!Subtarget.hasP9Vector())
    return SDValue();

  if (!IsElementReverse(SVN))
    return SDValue();

  if (LSBase->getOpcode() == ISD::LOAD) {
    SDLoc dl(SVN);
    SDValue LoadOps[] = {LSBase->getChain(), LSBase->getBasePtr()};
    return DAG.getMemIntrinsicNode(
        PPCISD::LOAD_VEC_BE, dl, DAG.getVTList(VT, MVT::Other), LoadOps,
        LSBase->getMemoryVT(), LSBase->getMemOperand());
  }

  if (LSBase->getOpcode() == ISD::STORE) {
    SDLoc dl(LSBase);
    SDValue StoreOps[] = {LSBase->getChain(), SVN->getOperand(0),
                          LSBase->getBasePtr()};
    return DAG.getMemIntrinsicNode(
        PPCISD::STORE_VEC_BE, dl, DAG.getVTList(MVT::Other), StoreOps,
        LSBase->getMemoryVT(), LSBase->getMemOperand());
  }

  llvm_unreachable("Expected a load or store node here");
}

SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default: break;
  case ISD::ADD:
    return combineADD(N, DCI);
  case ISD::SHL:
    return combineSHL(N, DCI);
  case ISD::SRA:
    return combineSRA(N, DCI);
  case ISD::SRL:
    return combineSRL(N, DCI);
  case ISD::MUL:
    return combineMUL(N, DCI);
  case ISD::FMA:
  case PPCISD::FNMSUB:
    return combineFMALike(N, DCI);
  case PPCISD::SHL:
    if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
      return N->getOperand(0);
    break;
  case PPCISD::SRL:
    if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
      return N->getOperand(0);
    break;
  case PPCISD::SRA:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->isNullValue() ||   //  0 >>s V -> 0.
          C->isAllOnesValue())  // -1 >>s V -> -1.
        return N->getOperand(0);
    }
    break;
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
    return DAGCombineExtBoolTrunc(N, DCI);
  case ISD::TRUNCATE:
    return combineTRUNCATE(N, DCI);
  case ISD::SETCC:
    if (SDValue CSCC = combineSetCC(N, DCI))
      return CSCC;
    LLVM_FALLTHROUGH;
  case ISD::SELECT_CC:
    return DAGCombineTruncBoolExt(N, DCI);
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    return combineFPToIntToFP(N, DCI);
  case ISD::VECTOR_SHUFFLE:
    if (ISD::isNormalLoad(N->getOperand(0).getNode())) {
      LSBaseSDNode* LSBase = cast<LSBaseSDNode>(N->getOperand(0));
      return combineVReverseMemOP(cast<ShuffleVectorSDNode>(N), LSBase, DCI);
    }
    return combineVectorShuffle(cast<ShuffleVectorSDNode>(N), DCI.DAG);
  case ISD::STORE: {
    EVT Op1VT = N->getOperand(1).getValueType();
    unsigned Opcode = N->getOperand(1).getOpcode();

    if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) {
      SDValue Val = combineStoreFPToInt(N, DCI);
      if (Val)
        return Val;
    }

    if (Opcode == ISD::VECTOR_SHUFFLE && ISD::isNormalStore(N)) {
      ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N->getOperand(1));
      SDValue Val = combineVReverseMemOP(SVN, cast<LSBaseSDNode>(N), DCI);
      if (Val)
        return Val;
    }

    // Turn STORE (BSWAP) -> sthbrx/stwbrx.
    if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&
        N->getOperand(1).getNode()->hasOneUse() &&
        (Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
         (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {

      // STBRX can only handle simple types and it makes no sense to store less
      // than two bytes in byte-reversed order.
      EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
      if (mVT.isExtended() || mVT.getSizeInBits() < 16)
        break;

      SDValue BSwapOp = N->getOperand(1).getOperand(0);
      // Do an any-extend to 32-bits if this is a half-word input.
      if (BSwapOp.getValueType() == MVT::i16)
        BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);

      // If the type of the BSWAP operand is wider than the stored memory
      // width, it needs to be shifted to the right side before STBRX.
      if (Op1VT.bitsGT(mVT)) {
        int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
        BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
                              DAG.getConstant(Shift, dl, MVT::i32));
        // Need to truncate if this is a bswap of i64 stored as i32/i16.
        if (Op1VT == MVT::i64)
          BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
      }

      SDValue Ops[] = {
        N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
      };
      return
        DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
                                Ops, cast<StoreSDNode>(N)->getMemoryVT(),
                                cast<StoreSDNode>(N)->getMemOperand());
    }

    // STORE Constant:i32<0> -> STORE<trunc to i32> Constant:i64<0>
    // so it can increase the chance of CSE constant construction.
    if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
        isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
      // We need to sign-extend to 64 bits to handle negative values.
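      // E.g. for a stored i32 constant -1, Val64 becomes
      // 0xFFFFFFFFFFFFFFFF and the store is marked truncating, so only the
      // low 32 bits reach memory while the i64 materialization can be CSE'd
      // with other uses of the same 64-bit constant.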
      EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
      uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
                                    MemVT.getSizeInBits());
      SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);

      // DAG.getTruncStore() can't be used here because it doesn't accept
      // the general (base + offset) addressing mode.
      // So we use UpdateNodeOperands and setTruncatingStore instead.
      DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
                             N->getOperand(3));
      cast<StoreSDNode>(N)->setTruncatingStore(true);
      return SDValue(N, 0);
    }

    // For little endian, VSX stores require generating xxswapd/stxvd2x.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
    if (Op1VT.isSimple()) {
      MVT StoreVT = Op1VT.getSimpleVT();
      if (Subtarget.needsSwapsForVSXMemOps() &&
          (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
           StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
        return expandVSXStoreForLE(N, DCI);
    }
    break;
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(N);
    EVT VT = LD->getValueType(0);

    // For little endian, VSX loads require generating lxvd2x/xxswapd.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
    if (VT.isSimple()) {
      MVT LoadVT = VT.getSimpleVT();
      if (Subtarget.needsSwapsForVSXMemOps() &&
          (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
           LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
        return expandVSXLoadForLE(N, DCI);
    }

    // We sometimes end up with a 64-bit integer load, from which we extract
    // two single-precision floating-point numbers. This happens with
    // std::complex<float>, and other similar structures, because of the way we
    // canonicalize structure copies. However, if we lack direct moves,
    // then the final bitcasts from the extracted integer values to the
    // floating-point numbers turn into store/load pairs. Even with direct moves,
    // just loading the two floating-point numbers is likely better.
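    // When the pattern below matches, the i64 load is replaced by two f32
    // loads from the same base address (at offsets 0 and 4), chained one
    // after the other, so no store/load pair or direct move is needed.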
    auto ReplaceTwoFloatLoad = [&]() {
      if (VT != MVT::i64)
        return false;

      if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
          LD->isVolatile())
        return false;

      // We're looking for a sequence like this:
      // t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64
      //   t16: i64 = srl t13, Constant:i32<32>
      // t17: i32 = truncate t16
      // t18: f32 = bitcast t17
      // t19: i32 = truncate t13
      // t20: f32 = bitcast t19

      if (!LD->hasNUsesOfValue(2, 0))
        return false;

      auto UI = LD->use_begin();
      while (UI.getUse().getResNo() != 0) ++UI;
      SDNode *Trunc = *UI++;
      while (UI.getUse().getResNo() != 0) ++UI;
      SDNode *RightShift = *UI;
      if (Trunc->getOpcode() != ISD::TRUNCATE)
        std::swap(Trunc, RightShift);

      if (Trunc->getOpcode() != ISD::TRUNCATE ||
          Trunc->getValueType(0) != MVT::i32 ||
          !Trunc->hasOneUse())
        return false;
      if (RightShift->getOpcode() != ISD::SRL ||
          !isa<ConstantSDNode>(RightShift->getOperand(1)) ||
          RightShift->getConstantOperandVal(1) != 32 ||
          !RightShift->hasOneUse())
        return false;

      SDNode *Trunc2 = *RightShift->use_begin();
      if (Trunc2->getOpcode() != ISD::TRUNCATE ||
          Trunc2->getValueType(0) != MVT::i32 ||
          !Trunc2->hasOneUse())
        return false;

      SDNode *Bitcast = *Trunc->use_begin();
      SDNode *Bitcast2 = *Trunc2->use_begin();

      if (Bitcast->getOpcode() != ISD::BITCAST ||
          Bitcast->getValueType(0) != MVT::f32)
        return false;
      if (Bitcast2->getOpcode() != ISD::BITCAST ||
          Bitcast2->getValueType(0) != MVT::f32)
        return false;

      if (Subtarget.isLittleEndian())
        std::swap(Bitcast, Bitcast2);

      // Bitcast has the second float (in memory-layout order) and Bitcast2
      // has the first one.

      SDValue BasePtr = LD->getBasePtr();
      if (LD->isIndexed()) {
        assert(LD->getAddressingMode() == ISD::PRE_INC &&
               "Non-pre-inc AM on PPC?");
        BasePtr =
          DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
                      LD->getOffset());
      }

      auto MMOFlags =
          LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
      SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
                                      LD->getPointerInfo(), LD->getAlignment(),
                                      MMOFlags, LD->getAAInfo());
      SDValue AddPtr =
        DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
                    BasePtr, DAG.getIntPtrConstant(4, dl));
      SDValue FloatLoad2 = DAG.getLoad(
          MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
          LD->getPointerInfo().getWithOffset(4),
          MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo());

      if (LD->isIndexed()) {
        // Note that DAGCombine should re-form any pre-increment load(s) from
        // what is produced here if that makes sense.
        DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr);
      }

      DCI.CombineTo(Bitcast2, FloatLoad);
      DCI.CombineTo(Bitcast, FloatLoad2);

      DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ?
                                                2 : 1),
                                    SDValue(FloatLoad2.getNode(), 1));
      return true;
    };

    if (ReplaceTwoFloatLoad())
      return SDValue(N, 0);

    EVT MemVT = LD->getMemoryVT();
    Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
    Align ABIAlignment = DAG.getDataLayout().getABITypeAlign(Ty);
    if (LD->isUnindexed() && VT.isVector() &&
        ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
          // P8 and later hardware should just use LOAD.
          !Subtarget.hasP8Vector() &&
          (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
           VT == MVT::v4f32))) &&
        LD->getAlign() < ABIAlignment) {
      // This is a type-legal unaligned Altivec load.
      SDValue Chain = LD->getChain();
      SDValue Ptr = LD->getBasePtr();
      bool isLittleEndian = Subtarget.isLittleEndian();

      // This implements the loading of unaligned vectors as described in
      // the venerable Apple Velocity Engine overview. Specifically:
      // https://developer.apple.com/hardwaredrivers/ve/alignment.html
      // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
      //
      // The general idea is to expand a sequence of one or more unaligned
      // loads into an alignment-based permutation-control instruction (lvsl
      // or lvsr), a series of regular vector loads (which always truncate
      // their input address to an aligned address), and a series of
      // permutations. The results of these permutations are the requested
      // loaded values. The trick is that the last "extra" load is not taken
      // from the address you might suspect (sizeof(vector) bytes after the
      // last requested load), but rather sizeof(vector) - 1 bytes after the
      // last requested vector. The point of this is to avoid a page fault if
      // the base address happened to be aligned. This works because if the
      // base address is aligned, then adding less than a full vector length
      // will cause the last vector in the sequence to be (re)loaded.
      // Otherwise, the next vector will be fetched as you might suspect was
      // necessary.

      // We might be able to reuse the permutation generation from
      // a different base address offset from this one by an aligned amount.
      // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
      // optimization later.
      Intrinsic::ID Intr, IntrLD, IntrPerm;
      MVT PermCntlTy, PermTy, LDTy;
      Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr
                            : Intrinsic::ppc_altivec_lvsl;
      IntrLD = Intrinsic::ppc_altivec_lvx;
      IntrPerm = Intrinsic::ppc_altivec_vperm;
      PermCntlTy = MVT::v16i8;
      PermTy = MVT::v4i32;
      LDTy = MVT::v4i32;

      SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);

      // Create the new MMO for the new base load. It is like the original MMO,
      // but represents an area in memory almost twice the vector size centered
      // on the original address. If the address is unaligned, we might start
      // reading up to (sizeof(vector)-1) bytes below the address of the
      // original unaligned load.
      MachineFunction &MF = DAG.getMachineFunction();
      MachineMemOperand *BaseMMO =
        MF.getMachineMemOperand(LD->getMemOperand(),
                                -(long)MemVT.getStoreSize()+1,
                                2*MemVT.getStoreSize()-1);

      // Create the new base load.
      SDValue LDXIntID =
          DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout()));
      SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
      SDValue BaseLoad =
        DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
                                DAG.getVTList(PermTy, MVT::Other),
                                BaseLoadOps, LDTy, BaseMMO);

      // Note that the value of IncOffset (which is provided to the next
      // load's pointer info offset value, and thus used to calculate the
      // alignment), and the value of IncValue (which is actually used to
      // increment the pointer value) are different! This is because we
      // require the next load to appear to be aligned, even though it
      // is actually offset from the base pointer by a lesser amount.
      int IncOffset = VT.getSizeInBits() / 8;
      int IncValue = IncOffset;

      // Walk (both up and down) the chain looking for another load at the real
      // (aligned) offset (the alignment of the other load does not matter in
      // this case). If found, then do not use the offset reduction trick, as
      // that will prevent the loads from being later combined (as they would
      // otherwise be duplicates).
      if (!findConsecutiveLoad(LD, DAG))
        --IncValue;

      SDValue Increment =
          DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout()));
      Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);

      MachineMemOperand *ExtraMMO =
        MF.getMachineMemOperand(LD->getMemOperand(),
                                1, 2*MemVT.getStoreSize()-1);
      SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
      SDValue ExtraLoad =
        DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
                                DAG.getVTList(PermTy, MVT::Other),
                                ExtraLoadOps, LDTy, ExtraMMO);

      SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                               BaseLoad.getValue(1), ExtraLoad.getValue(1));

      // Because vperm has a big-endian bias, we must reverse the order
      // of the input vectors and complement the permute control vector
      // when generating little endian code. We have already handled the
      // latter by using lvsr instead of lvsl, so just reverse BaseLoad
      // and ExtraLoad here.
      SDValue Perm;
      if (isLittleEndian)
        Perm = BuildIntrinsicOp(IntrPerm,
                                ExtraLoad, BaseLoad, PermCntl, DAG, dl);
      else
        Perm = BuildIntrinsicOp(IntrPerm,
                                BaseLoad, ExtraLoad, PermCntl, DAG, dl);

      if (VT != PermTy)
        Perm = Subtarget.hasAltivec()
                   ? DAG.getNode(ISD::BITCAST, dl, VT, Perm)
                   : DAG.getNode(ISD::FP_ROUND, dl, VT, Perm,
                                 DAG.getTargetConstant(1, dl, MVT::i64));
                                 // second argument is 1 because this rounding
                                 // is always exact.

      // The output of the permutation is our loaded result, the TokenFactor is
      // our new chain.
      DCI.CombineTo(N, Perm, TF);
      return SDValue(N, 0);
    }
  }
    break;
  case ISD::INTRINSIC_WO_CHAIN: {
    bool isLittleEndian = Subtarget.isLittleEndian();
    unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    Intrinsic::ID Intr = (isLittleEndian ?
  case ISD::INTRINSIC_WO_CHAIN: {
    bool isLittleEndian = Subtarget.isLittleEndian();
    unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
                                         : Intrinsic::ppc_altivec_lvsl);
    if (IID == Intr && N->getOperand(1)->getOpcode() == ISD::ADD) {
      SDValue Add = N->getOperand(1);

      int Bits = 4 /* 16 byte alignment */;

      if (DAG.MaskedValueIsZero(Add->getOperand(1),
                                APInt::getAllOnesValue(Bits /* alignment */)
                                    .zext(Add.getScalarValueSizeInBits()))) {
        SDNode *BasePtr = Add->getOperand(0).getNode();
        for (SDNode::use_iterator UI = BasePtr->use_begin(),
                                  UE = BasePtr->use_end();
             UI != UE; ++UI) {
          if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
              cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) {
            // We've found another LVSL/LVSR, and this address is an aligned
            // multiple of that one. The results will be the same, so use the
            // one we've just found instead.

            return SDValue(*UI, 0);
          }
        }
      }

      if (isa<ConstantSDNode>(Add->getOperand(1))) {
        SDNode *BasePtr = Add->getOperand(0).getNode();
        for (SDNode::use_iterator UI = BasePtr->use_begin(),
             UE = BasePtr->use_end(); UI != UE; ++UI) {
          if (UI->getOpcode() == ISD::ADD &&
              isa<ConstantSDNode>(UI->getOperand(1)) &&
              (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
               cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
              (1ULL << Bits) == 0) {
            SDNode *OtherAdd = *UI;
            for (SDNode::use_iterator VI = OtherAdd->use_begin(),
                 VE = OtherAdd->use_end(); VI != VE; ++VI) {
              if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
                  cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() ==
                      IID) {
                return SDValue(*VI, 0);
              }
            }
          }
        }
      }
    }

    // Combine vmaxsw/h/b(a, a's negation) to abs(a)
    // Expose the vabsduw/h/b opportunity for downstream combines.
    if (!DCI.isAfterLegalizeDAG() && Subtarget.hasP9Altivec() &&
        (IID == Intrinsic::ppc_altivec_vmaxsw ||
         IID == Intrinsic::ppc_altivec_vmaxsh ||
         IID == Intrinsic::ppc_altivec_vmaxsb)) {
      SDValue V1 = N->getOperand(1);
      SDValue V2 = N->getOperand(2);
      if ((V1.getSimpleValueType() == MVT::v4i32 ||
           V1.getSimpleValueType() == MVT::v8i16 ||
           V1.getSimpleValueType() == MVT::v16i8) &&
          V1.getSimpleValueType() == V2.getSimpleValueType()) {
        // (0-a, a)
        if (V1.getOpcode() == ISD::SUB &&
            ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
            V1.getOperand(1) == V2) {
          return DAG.getNode(ISD::ABS, dl, V2.getValueType(), V2);
        }
        // (a, 0-a)
        if (V2.getOpcode() == ISD::SUB &&
            ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
            V2.getOperand(1) == V1) {
          return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
        }
        // (x-y, y-x)
        if (V1.getOpcode() == ISD::SUB && V2.getOpcode() == ISD::SUB &&
            V1.getOperand(0) == V2.getOperand(1) &&
            V1.getOperand(1) == V2.getOperand(0)) {
          return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
        }
      }
    }
  }

  break;
  case ISD::INTRINSIC_W_CHAIN:
    // For little endian, VSX loads require generating lxvd2x/xxswapd.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
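    // A sketch of what this expansion produces on a little-endian P8
    // (illustrative only):
    //   lxvd2x  vs0, 0, r3    ; doublewords load in big-endian element order
    //   xxswapd vs0, vs0      ; swap them into little-endian element order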
    if (Subtarget.needsSwapsForVSXMemOps()) {
      switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
      default:
        break;
      case Intrinsic::ppc_vsx_lxvw4x:
      case Intrinsic::ppc_vsx_lxvd2x:
        return expandVSXLoadForLE(N, DCI);
      }
    }
    break;
  case ISD::INTRINSIC_VOID:
    // For little endian, VSX stores require generating xxswapd/stxvd2x.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
    if (Subtarget.needsSwapsForVSXMemOps()) {
      switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
      default:
        break;
      case Intrinsic::ppc_vsx_stxvw4x:
      case Intrinsic::ppc_vsx_stxvd2x:
        return expandVSXStoreForLE(N, DCI);
      }
    }
    break;
  case ISD::BSWAP:
    // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
    if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
        N->getOperand(0).hasOneUse() &&
        (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
         (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
          N->getValueType(0) == MVT::i64))) {
      SDValue Load = N->getOperand(0);
      LoadSDNode *LD = cast<LoadSDNode>(Load);
      // Create the byte-swapping load.
      SDValue Ops[] = {
        LD->getChain(),    // Chain
        LD->getBasePtr(),  // Ptr
        DAG.getValueType(N->getValueType(0)) // VT
      };
      SDValue BSLoad =
          DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
                                  DAG.getVTList(N->getValueType(0) == MVT::i64 ?
                                                MVT::i64 : MVT::i32, MVT::Other),
                                  Ops, LD->getMemoryVT(), LD->getMemOperand());

      // If this is an i16 load, insert the truncate.
      SDValue ResVal = BSLoad;
      if (N->getValueType(0) == MVT::i16)
        ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);

      // First, combine the bswap away. This makes the value produced by the
      // load dead.
      DCI.CombineTo(N, ResVal);

      // Next, combine the load away; we give it a bogus result value but a
      // real chain result. The result value is dead because the bswap is dead.
      DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));

      // Return N so it doesn't get rechecked!
      return SDValue(N, 0);
    }
    break;
  case PPCISD::VCMP:
    // If a VCMP_rec node already exists with exactly the same operands as this
    // node, use its result instead of this node (VCMP_rec computes both a CR6
    // and a normal output).
    if (!N->getOperand(0).hasOneUse() &&
        !N->getOperand(1).hasOneUse() &&
        !N->getOperand(2).hasOneUse()) {

      // Scan all of the users of the LHS, looking for VCMP_rec's that match.
      SDNode *VCMPrecNode = nullptr;

      SDNode *LHSN = N->getOperand(0).getNode();
      for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
           UI != E; ++UI)
        if (UI->getOpcode() == PPCISD::VCMP_rec &&
            UI->getOperand(1) == N->getOperand(1) &&
            UI->getOperand(2) == N->getOperand(2) &&
            UI->getOperand(0) == N->getOperand(0)) {
          VCMPrecNode = *UI;
          break;
        }

      // If there is no VCMP_rec node, or if the flag value has a single use,
      // don't transform this.
      if (!VCMPrecNode || VCMPrecNode->hasNUsesOfValue(0, 1))
        break;

      // Look at the (necessarily single) use of the flag value. If it has a
      // chain, this transformation is more complex. Note that multiple things
      // could use the value result, which we should ignore.
      SDNode *FlagUser = nullptr;
      for (SDNode::use_iterator UI = VCMPrecNode->use_begin();
           FlagUser == nullptr; ++UI) {
        assert(UI != VCMPrecNode->use_end() && "Didn't find user!");
        SDNode *User = *UI;
        for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
          if (User->getOperand(i) == SDValue(VCMPrecNode, 1)) {
            FlagUser = User;
            break;
          }
        }
      }

      // If the user is a MFOCRF instruction, we know this is safe.
      // Otherwise we give up for right now.
      if (FlagUser->getOpcode() == PPCISD::MFOCRF)
        return SDValue(VCMPrecNode, 0);
    }
    break;
  case ISD::BRCOND: {
    SDValue Cond = N->getOperand(1);
    SDValue Target = N->getOperand(2);

    if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
            Intrinsic::loop_decrement) {

      // We now need to make the intrinsic dead (it cannot be instruction
      // selected).
      DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0));
      assert(Cond.getNode()->hasOneUse() &&
             "Counter decrement has more than one use");

      return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other,
                         N->getOperand(0), Target);
    }
  }
  break;
  case ISD::BR_CC: {
    // If this is a branch on an altivec predicate comparison, lower this so
    // that we don't have to do a MFOCRF: instead, branch directly on CR6. This
    // lowering is done pre-legalize, because the legalizer lowers the predicate
    // compare down to code that is difficult to reassemble.
    ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
    SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);

    // Sometimes the promoted value of the intrinsic is ANDed by some non-zero
    // value. If so, pass-through the AND to get to the intrinsic.
    if (LHS.getOpcode() == ISD::AND &&
        LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() ==
            Intrinsic::loop_decrement &&
        isa<ConstantSDNode>(LHS.getOperand(1)) &&
        !isNullConstant(LHS.getOperand(1)))
      LHS = LHS.getOperand(0);

    if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
            Intrinsic::loop_decrement &&
        isa<ConstantSDNode>(RHS)) {
      assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
             "Counter decrement comparison is not EQ or NE");

      unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
      bool isBDNZ = (CC == ISD::SETEQ && Val) ||
                    (CC == ISD::SETNE && !Val);

      // We now need to make the intrinsic dead (it cannot be instruction
      // selected).
      DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
      assert(LHS.getNode()->hasOneUse() &&
             "Counter decrement has more than one use");

      return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
                         N->getOperand(0), N->getOperand(4));
    }

    int CompareOpc;
    bool isDot;

    if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
        isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
        getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
      assert(isDot && "Can't compare against a vector result!");

      // If this is a comparison against something other than 0/1, then we know
      // that the condition is never/always true.
      unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
      if (Val != 0 && Val != 1) {
        if (CC == ISD::SETEQ)      // Cond never true, remove branch.
          return N->getOperand(0);
        // Always !=, turn it into an unconditional branch.
        return DAG.getNode(ISD::BR, dl, MVT::Other,
                           N->getOperand(0), N->getOperand(4));
      }

      bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);

      // Create the PPCISD altivec 'dot' comparison node.
      SDValue Ops[] = {
        LHS.getOperand(2),  // LHS of compare
        LHS.getOperand(3),  // RHS of compare
        DAG.getConstant(CompareOpc, dl, MVT::i32)
      };
      EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
      SDValue CompNode = DAG.getNode(PPCISD::VCMP_rec, dl, VTs, Ops);

      // Unpack the result based on how the target uses it.
      PPC::Predicate CompOpc;
      switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
      default:  // Can't happen, don't crash on invalid number though.
      case 0:   // Branch on the value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
        break;
      case 1:   // Branch on the inverted value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
        break;
      case 2:   // Branch on the value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
        break;
      case 3:   // Branch on the inverted value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
        break;
      }

      return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
                         DAG.getConstant(CompOpc, dl, MVT::i32),
                         DAG.getRegister(PPC::CR6, MVT::i32),
                         N->getOperand(4), CompNode.getValue(1));
    }
    break;
  }
  case ISD::BUILD_VECTOR:
    return DAGCombineBuildVector(N, DCI);
  case ISD::ABS:
    return combineABS(N, DCI);
  case ISD::VSELECT:
    return combineVSelect(N, DCI);
  }

  return SDValue();
}

SDValue
PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                 SelectionDAG &DAG,
                                 SmallVectorImpl<SDNode *> &Created) const {
  // fold (sdiv X, pow2)
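  // A minimal sketch of the expansion, assuming VT == MVT::i32 and
  // Divisor == 8 (the PPCISD::SRA_ADDZE node built below selects to the
  // classic srawi/addze pair; register names are illustrative):
  //   srawi r4, r3, 3   ; shift right algebraic; CA records the round bit
  //   addze r4, r4      ; add CA so negative values round towards zero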
  EVT VT = N->getValueType(0);
  if (VT == MVT::i64 && !Subtarget.isPPC64())
    return SDValue();
  if ((VT != MVT::i32 && VT != MVT::i64) ||
      !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
    return SDValue();

  SDLoc DL(N);
  SDValue N0 = N->getOperand(0);

  bool IsNegPow2 = (-Divisor).isPowerOf2();
  unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
  SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);

  SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
  Created.push_back(Op.getNode());

  if (IsNegPow2) {
    Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
    Created.push_back(Op.getNode());
  }

  return Op;
}

//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//

void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      KnownBits &Known,
                                                      const APInt &DemandedElts,
                                                      const SelectionDAG &DAG,
                                                      unsigned Depth) const {
  Known.resetAll();
  switch (Op.getOpcode()) {
  default: break;
  case PPCISD::LBRX: {
    // lhbrx is known to have the top bits cleared out.
    if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
      Known.Zero = 0xFFFF0000;
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
    default: break;
    case Intrinsic::ppc_altivec_vcmpbfp_p:
    case Intrinsic::ppc_altivec_vcmpeqfp_p:
    case Intrinsic::ppc_altivec_vcmpequb_p:
    case Intrinsic::ppc_altivec_vcmpequh_p:
    case Intrinsic::ppc_altivec_vcmpequw_p:
    case Intrinsic::ppc_altivec_vcmpequd_p:
    case Intrinsic::ppc_altivec_vcmpequq_p:
    case Intrinsic::ppc_altivec_vcmpgefp_p:
    case Intrinsic::ppc_altivec_vcmpgtfp_p:
    case Intrinsic::ppc_altivec_vcmpgtsb_p:
    case Intrinsic::ppc_altivec_vcmpgtsh_p:
    case Intrinsic::ppc_altivec_vcmpgtsw_p:
    case Intrinsic::ppc_altivec_vcmpgtsd_p:
    case Intrinsic::ppc_altivec_vcmpgtsq_p:
    case Intrinsic::ppc_altivec_vcmpgtub_p:
    case Intrinsic::ppc_altivec_vcmpgtuh_p:
    case Intrinsic::ppc_altivec_vcmpgtuw_p:
    case Intrinsic::ppc_altivec_vcmpgtud_p:
    case Intrinsic::ppc_altivec_vcmpgtuq_p:
      Known.Zero = ~1U;  // All bits but the low one are known to be zero.
      break;
    }
  }
  }
}

Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
  switch (Subtarget.getCPUDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9:
  case PPC::DIR_PWR10:
  case PPC::DIR_PWR_FUTURE: {
    if (!ML)
      break;

    if (!DisableInnermostLoopAlign32) {
      // If the nested loop is an innermost loop, prefer a 32-byte alignment
      // so that we can decrease cache misses and branch-prediction misses.
      // Actual alignment of the loop will depend on the hotness check and
      // other logic in alignBlocks.
      if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
        return Align(32);
    }

    const PPCInstrInfo *TII = Subtarget.getInstrInfo();

    // For small loops (between 5 and 8 instructions), align to a 32-byte
    // boundary so that the entire loop fits in one instruction-cache line.
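    // (With the common 4-byte PPC instruction encoding, 5 to 8 instructions
    // is 20 to 32 bytes, which matches the 16 < LoopSize <= 32 check below.)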
    uint64_t LoopSize = 0;
    for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
      for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
        LoopSize += TII->getInstSizeInBytes(*J);
        if (LoopSize > 32)
          break;
      }

    if (LoopSize > 16 && LoopSize <= 32)
      return Align(32);

    break;
  }
  }

  return TargetLowering::getPrefLoopAlignment(ML);
}

/// getConstraintType - Given a constraint, return the type of
/// constraint it is for this target.
PPCTargetLowering::ConstraintType
PPCTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'b':
    case 'r':
    case 'f':
    case 'd':
    case 'v':
    case 'y':
      return C_RegisterClass;
    case 'Z':
      // FIXME: While Z does indicate a memory constraint, it specifically
      // indicates an r+r address (used in conjunction with the 'y' modifier
      // in the replacement string). Currently, we're forcing the base
      // register to be r0 in the asm printer (which is interpreted as zero)
      // and forming the complete address in the second register. This is
      // suboptimal.
      return C_Memory;
    }
  } else if (Constraint == "wc") { // individual CR bits.
    return C_RegisterClass;
  } else if (Constraint == "wa" || Constraint == "wd" ||
             Constraint == "wf" || Constraint == "ws" ||
             Constraint == "wi" || Constraint == "ww") {
    return C_RegisterClass; // VSX registers.
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
PPCTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();

  // Look at the constraint type.
  if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
    return CW_Register; // an individual CR bit.
  else if ((StringRef(constraint) == "wa" ||
            StringRef(constraint) == "wd" ||
            StringRef(constraint) == "wf") &&
           type->isVectorTy())
    return CW_Register;
  else if (StringRef(constraint) == "wi" && type->isIntegerTy(64))
    return CW_Register; // registers that just hold 64-bit integer data.
  else if (StringRef(constraint) == "ws" && type->isDoubleTy())
    return CW_Register;
  else if (StringRef(constraint) == "ww" && type->isFloatTy())
    return CW_Register;

  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'b':
    if (type->isIntegerTy())
      weight = CW_Register;
    break;
  case 'f':
    if (type->isFloatTy())
      weight = CW_Register;
    break;
  case 'd':
    if (type->isDoubleTy())
      weight = CW_Register;
    break;
  case 'v':
    if (type->isVectorTy())
      weight = CW_Register;
    break;
  case 'y':
    weight = CW_Register;
    break;
  case 'Z':
    weight = CW_Memory;
    break;
  }
  return weight;
}

std::pair<unsigned, const TargetRegisterClass *>
PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC RS6000 Constraint Letters
    switch (Constraint[0]) {
    case 'b':   // R1-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
      return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
    case 'r':   // R0-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RCRegClass);
      return std::make_pair(0U, &PPC::GPRCRegClass);
    // 'd' and 'f' constraints are both defined to be "the floating point
    // registers", where one is for 32-bit and the other for 64-bit. We don't
    // really care overly much here so just give them all the same reg classes.
    case 'd':
    case 'f':
      if (Subtarget.hasSPE()) {
        if (VT == MVT::f32 || VT == MVT::i32)
          return std::make_pair(0U, &PPC::GPRCRegClass);
        if (VT == MVT::f64 || VT == MVT::i64)
          return std::make_pair(0U, &PPC::SPERCRegClass);
      } else {
        if (VT == MVT::f32 || VT == MVT::i32)
          return std::make_pair(0U, &PPC::F4RCRegClass);
        if (VT == MVT::f64 || VT == MVT::i64)
          return std::make_pair(0U, &PPC::F8RCRegClass);
      }
      break;
    case 'v':
      if (Subtarget.hasAltivec())
        return std::make_pair(0U, &PPC::VRRCRegClass);
      break;
    case 'y':   // crrc
      return std::make_pair(0U, &PPC::CRRCRegClass);
    }
  } else if (Constraint == "wc" && Subtarget.useCRBits()) {
    // An individual CR bit.
    return std::make_pair(0U, &PPC::CRBITRCRegClass);
  } else if ((Constraint == "wa" || Constraint == "wd" ||
              Constraint == "wf" || Constraint == "wi") &&
             Subtarget.hasVSX()) {
    return std::make_pair(0U, &PPC::VSRCRegClass);
  } else if ((Constraint == "ws" || Constraint == "ww") && Subtarget.hasVSX()) {
    if (VT == MVT::f32 && Subtarget.hasP8Vector())
      return std::make_pair(0U, &PPC::VSSRCRegClass);
    else
      return std::make_pair(0U, &PPC::VSFRCRegClass);
  }

  // If we name a VSX register, we can't defer to the base class because it
  // will not recognize the correct register (their names will be VSL{0-31}
  // and V{0-31} so they won't match). So we match them here.
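  // For example (an illustrative mapping): an asm constraint of "{vs4}"
  // resolves to VSL4, while "{vs35}" resolves to V3 (35 - 32), both in
  // VSRCRegClass.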
  if (Constraint.size() > 3 && Constraint[1] == 'v' && Constraint[2] == 's') {
    int VSNum = atoi(Constraint.data() + 3);
    assert(VSNum >= 0 && VSNum <= 63 &&
           "Attempted to access a vsr out of range");
    if (VSNum < 32)
      return std::make_pair(PPC::VSL0 + VSNum, &PPC::VSRCRegClass);
    return std::make_pair(PPC::V0 + VSNum - 32, &PPC::VSRCRegClass);
  }
  std::pair<unsigned, const TargetRegisterClass *> R =
      TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);

  // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
  // (which we call X[0-9]+). If a 64-bit value has been requested, and a
  // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent
  // register.
  // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
  // the AsmName field from *RegisterInfo.td, then this would not be necessary.
  if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
      PPC::GPRCRegClass.contains(R.first))
    return std::make_pair(TRI->getMatchingSuperReg(R.first,
                            PPC::sub_32, &PPC::G8RCRegClass),
                          &PPC::G8RCRegClass);

  // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
  if (!R.second && StringRef("{cc}").equals_lower(Constraint)) {
    R.first = PPC::CR0;
    R.second = &PPC::CRRCRegClass;
  }

  return R;
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;

  // Only support length 1 constraints.
  if (Constraint.length() > 1) return;

  char Letter = Constraint[0];
  switch (Letter) {
  default: break;
  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P': {
    ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
    if (!CST) return; // Must be an immediate to match.
    SDLoc dl(Op);
    int64_t Value = CST->getSExtValue();
    EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
                         // numbers are printed as such.
    switch (Letter) {
    default: llvm_unreachable("Unknown constraint letter!");
    case 'I': // "I" is a signed 16-bit constant.
      if (isInt<16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'J': // "J" is a constant with only the high-order 16 bits nonzero.
      if (isShiftedUInt<16, 16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'L': // "L" is a signed 16-bit constant shifted left 16 bits.
      if (isShiftedInt<16, 16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'K': // "K" is a constant with only the low-order 16 bits nonzero.
      if (isUInt<16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'M': // "M" is a constant that is greater than 31.
      if (Value > 31)
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'N': // "N" is a positive constant that is an exact power of two.
      if (Value > 0 && isPowerOf2_64(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'O': // "O" is the constant zero.
      if (Value == 0)
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'P': // "P" is a constant whose negation is a signed 16-bit constant.
      if (isInt<16>(-Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    }
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  // Handle standard constraint letters.
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS,
                                              Instruction *I) const {
  // The vector-type r+i form has been supported since power9 as the DQ form.
  // We don't check the offset against the DQ-form requirement (off % 16 == 0)
  // because, on PowerPC, the imm form is preferred and the offset can be
  // adjusted to use the imm form later, in the PPCLoopInstrFormPrep pass.
  // Also, in LSR each LSRUse checks the legal addressing mode using only its
  // min and max offsets, so we should be a little aggressive here and
  // accommodate the other offsets of that LSRUse.
  if (Ty->isVectorTy() && AM.BaseOffs != 0 && !Subtarget.hasP9Vector())
    return false;

  // PPC allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // PPC only supports r+r:
  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    // No other scales are supported.
    return false;
  }

  return true;
}

SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // Make sure the function does not optimize away the store of the RA to
  // the stack.
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setLRStoreRequired();
  bool isPPC64 = Subtarget.isPPC64();
  auto PtrVT = getPointerTy(MF.getDataLayout());

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset =
        DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
                        isPPC64 ? MVT::i64 : MVT::i32);
    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
                       MachinePointerInfo());
  }
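  // For reference (ABI background, not derived from this code):
  // getReturnSaveOffset() above is the ABI-defined link register save slot,
  // e.g. 16 bytes past the back-chain pointer on 64-bit ELF and 4 bytes on
  // 32-bit SVR4.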
  // Just load the return address off the stack.
  SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
  return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
                     MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;

  // Naked functions never have a frame pointer, and so we use r1. For all
  // other functions, this decision must be delayed until during PEI.
  unsigned FrameReg;
  if (MF.getFunction().hasFnAttribute(Attribute::Naked))
    FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
  else
    FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;

  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
                                         PtrVT);
  while (Depth--)
    FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
                            FrameAddr, MachinePointerInfo());
  return FrameAddr;
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
Register PPCTargetLowering::getRegisterByName(const char* RegName, LLT VT,
                                              const MachineFunction &MF) const {
  bool isPPC64 = Subtarget.isPPC64();

  bool is64Bit = isPPC64 && VT == LLT::scalar(64);
  if (!is64Bit && VT != LLT::scalar(32))
    report_fatal_error("Invalid register global variable type");

  Register Reg = StringSwitch<Register>(RegName)
                     .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
                     .Case("r2", isPPC64 ? Register() : PPC::R2)
                     .Case("r13", (is64Bit ? PPC::X13 : PPC::R13))
                     .Default(Register());

  if (Reg)
    return Reg;
  report_fatal_error("Invalid register name global variable");
}

bool PPCTargetLowering::isAccessedAsGotIndirect(SDValue GA) const {
  // The 32-bit SVR4 ABI accesses everything as got-indirect.
  if (Subtarget.is32BitELFABI())
    return true;

  // AIX accesses everything indirectly through the TOC, which is similar to
  // the GOT.
  if (Subtarget.isAIXABI())
    return true;

  CodeModel::Model CModel = getTargetMachine().getCodeModel();
  // If it is small or large code model, module locals are accessed
  // indirectly by loading their address from .toc/.got.
  if (CModel == CodeModel::Small || CModel == CodeModel::Large)
    return true;

  // JumpTable and BlockAddress are accessed as got-indirect.
  if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA))
    return true;

  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA))
    return Subtarget.isGVIndirectSymbol(G->getGlobal());

  return false;
}

bool
PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The PowerPC target isn't yet aware of offsets.
  return false;
}

bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           MachineFunction &MF,
                                           unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::ppc_altivec_lvebx:
  case Intrinsic::ppc_altivec_lvehx:
  case Intrinsic::ppc_altivec_lvewx:
  case Intrinsic::ppc_vsx_lxvd2x:
  case Intrinsic::ppc_vsx_lxvw4x:
  case Intrinsic::ppc_vsx_lxvd2x_be:
  case Intrinsic::ppc_vsx_lxvw4x_be:
  case Intrinsic::ppc_vsx_lxvl:
  case Intrinsic::ppc_vsx_lxvll: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_lvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_lvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_lvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_lxvd2x:
    case Intrinsic::ppc_vsx_lxvd2x_be:
      VT = MVT::v2f64;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = -VT.getStoreSize()+1;
    Info.size = 2*VT.getStoreSize()-1;
    Info.align = Align(1);
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
  case Intrinsic::ppc_altivec_stvebx:
  case Intrinsic::ppc_altivec_stvehx:
  case Intrinsic::ppc_altivec_stvewx:
  case Intrinsic::ppc_vsx_stxvd2x:
  case Intrinsic::ppc_vsx_stxvw4x:
  case Intrinsic::ppc_vsx_stxvd2x_be:
  case Intrinsic::ppc_vsx_stxvw4x_be:
  case Intrinsic::ppc_vsx_stxvl:
  case Intrinsic::ppc_vsx_stxvll: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_stvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_stvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_stvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x:
    case Intrinsic::ppc_vsx_stxvd2x_be:
      VT = MVT::v2f64;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = -VT.getStoreSize()+1;
    Info.size = 2*VT.getStoreSize()-1;
    Info.align = Align(1);
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  default:
    break;
  }

  return false;
}

/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT PPCTargetLowering::getOptimalMemOpType(
    const MemOp &Op, const AttributeList &FuncAttributes) const {
  if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
    // We should use Altivec/VSX loads and stores when available. For unaligned
    // addresses, unaligned VSX loads are only fast starting with the P8.
    if (Subtarget.hasAltivec() && Op.size() >= 16 &&
        (Op.isAligned(Align(16)) ||
         ((Op.isMemset() && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
      return MVT::v4i32;
  }

  if (Subtarget.isPPC64()) {
    return MVT::i64;
  }

  return MVT::i32;
}

/// Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  return !(BitSize == 0 || BitSize > 64);
}

bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Generally speaking, zexts are not free, but they are free when they can be
  // folded with other operations.
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  // FIXME: Add other cases...
  //  - 32-bit shifts with a zext to i64
  //  - zext after ctlz, bswap, etc.
  //  - zext after and by a constant mask

  return TargetLowering::isZExtFree(Val, VT2);
}

bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
  assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
         "invalid fpext types");
  // Extending to float128 is not free.
  if (DestVT == MVT::f128)
    return false;
  return true;
}

bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                       unsigned,
                                                       unsigned,
                                                       MachineMemOperand::Flags,
                                                       bool *Fast) const {
  if (DisablePPCUnaligned)
    return false;

  // PowerPC supports unaligned memory access for simple non-vector types.
  // Although accessing unaligned addresses is not as efficient as accessing
  // aligned addresses, it is generally more efficient than manual expansion,
  // and it generally only traps to software emulation when crossing page
  // boundaries.

  if (!VT.isSimple())
    return false;

  if (VT.isFloatingPoint() && !VT.isVector() &&
      !Subtarget.allowsUnalignedFPAccess())
    return false;

  if (VT.getSimpleVT().isVector()) {
    if (Subtarget.hasVSX()) {
      if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
          VT != MVT::v4f32 && VT != MVT::v4i32)
        return false;
    } else {
      return false;
    }
  }

  if (VT == MVT::ppcf128)
    return false;

  if (Fast)
    *Fast = true;

  return true;
}

bool PPCTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
                                               SDValue C) const {
  // Check integral scalar types.
  if (!VT.isScalarInteger())
    return false;
  if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
    if (!ConstNode->getAPIntValue().isSignedIntN(64))
      return false;
    // This transformation will generate >= 2 operations. But the following
    // cases will generate <= 2 instructions during ISEL, so exclude them:
    // 1. If the constant multiplier fits 16 bits, it can be handled by one
    //    HW instruction, i.e. MULLI.
    // 2. If the multiplier fits 16 bits after shifting, one more instruction
    //    than case 1 is needed, i.e. MULLI and RLDICR.
    int64_t Imm = ConstNode->getSExtValue();
    unsigned Shift = countTrailingZeros<uint64_t>(Imm);
    Imm >>= Shift;
    if (isInt<16>(Imm))
      return false;
    uint64_t UImm = static_cast<uint64_t>(Imm);
    if (isPowerOf2_64(UImm + 1) || isPowerOf2_64(UImm - 1) ||
        isPowerOf2_64(1 - UImm) || isPowerOf2_64(-1 - UImm))
      return true;
  }
  return false;
}

bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                                   EVT VT) const {
  return isFMAFasterThanFMulAndFAdd(
      MF.getFunction(), VT.getTypeForEVT(MF.getFunction().getContext()));
}

bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const Function &F,
                                                   Type *Ty) const {
  switch (Ty->getScalarType()->getTypeID()) {
  case Type::FloatTyID:
  case Type::DoubleTyID:
    return true;
  case Type::FP128TyID:
    return Subtarget.hasP9Vector();
  default:
    return false;
  }
}

// FIXME: add more patterns which are not profitable to hoist.
bool PPCTargetLowering::isProfitableToHoist(Instruction *I) const {
  if (!I->hasOneUse())
    return true;

  Instruction *User = I->user_back();
  assert(User && "A single use instruction with no uses.");

  switch (I->getOpcode()) {
  case Instruction::FMul: {
    // Don't break FMA; PowerPC prefers FMA.
    if (User->getOpcode() != Instruction::FSub &&
        User->getOpcode() != Instruction::FAdd)
      return true;

    const TargetOptions &Options = getTargetMachine().Options;
    const Function *F = I->getFunction();
    const DataLayout &DL = F->getParent()->getDataLayout();
    Type *Ty = User->getOperand(0)->getType();

    return !(
        isFMAFasterThanFMulAndFAdd(*F, Ty) &&
        isOperationLegalOrCustom(ISD::FMA, getValueType(DL, Ty)) &&
        (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath));
  }
  case Instruction::Load: {
    // Don't break the "store (load float*)" pattern; it will be combined
    // to "store (load int32)" by a later InstCombine pass (see function
    // combineLoadToOperationType). On PowerPC, loading a floating-point
    // value takes more cycles than loading a 32-bit integer.
    LoadInst *LI = cast<LoadInst>(I);
    // For the loads that combineLoadToOperationType does nothing with, like
    // an ordered load, it should be profitable to hoist them.
    // A swifterror load can only be used for a pointer-to-pointer type, so
    // the later type check should get rid of this case.
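    // An illustrative IR instance of the "store (load float*)" pattern
    // discussed above (the value names are hypothetical):
    //   %f = load float, float* %p
    //   store float %f, float* %q
    // Hoisting the load away from the store would block that rewrite.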
    if (!LI->isUnordered())
      return true;

    if (User->getOpcode() != Instruction::Store)
      return true;

    if (I->getType()->getTypeID() != Type::FloatTyID)
      return true;

    return false;
  }
  default:
    return true;
  }
  return true;
}

const MCPhysReg *
PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
  // LR is a callee-save register, but we must treat it as clobbered by any
  // call site. Hence we include LR in the scratch registers, which are in
  // turn added as implicit-defs for stackmaps and patchpoints. The same
  // reasoning applies to CTR, which is used by any indirect call.
  static const MCPhysReg ScratchRegs[] = {
    PPC::X12, PPC::LR8, PPC::CTR8, 0
  };

  return ScratchRegs;
}

Register PPCTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
}

Register PPCTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
}

bool
PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
    EVT VT, unsigned DefinedValues) const {
  if (VT == MVT::v2i64)
    return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves

  if (Subtarget.hasVSX())
    return true;

  return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
}

Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
  if (DisableILPPref || Subtarget.enableMachineScheduler())
    return TargetLowering::getSchedulingPreference(N);

  return Sched::ILP;
}

// Create a fast isel object.
FastISel *
PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
                                  const TargetLibraryInfo *LibInfo) const {
  return PPC::createFastISel(FuncInfo, LibInfo);
}

// 'Inverted' means the FMA opcode after negating one multiplicand.
// For example, (fma -a b c) = (fnmsub a b c)
static unsigned invertFMAOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Invalid FMA opcode for PowerPC!");
  case ISD::FMA:
    return PPCISD::FNMSUB;
  case PPCISD::FNMSUB:
    return ISD::FMA;
  }
}

SDValue PPCTargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
                                                bool LegalOps, bool OptForSize,
                                                NegatibleCost &Cost,
                                                unsigned Depth) const {
  if (Depth > SelectionDAG::MaxRecursionDepth)
    return SDValue();

  unsigned Opc = Op.getOpcode();
  EVT VT = Op.getValueType();
  SDNodeFlags Flags = Op.getNode()->getFlags();

  switch (Opc) {
  case PPCISD::FNMSUB:
    if (!Op.hasOneUse() || !isTypeLegal(VT))
      break;

    const TargetOptions &Options = getTargetMachine().Options;
    SDValue N0 = Op.getOperand(0);
    SDValue N1 = Op.getOperand(1);
    SDValue N2 = Op.getOperand(2);
    SDLoc Loc(Op);

    NegatibleCost N2Cost = NegatibleCost::Expensive;
    SDValue NegN2 =
        getNegatedExpression(N2, DAG, LegalOps, OptForSize, N2Cost, Depth + 1);

    if (!NegN2)
      return SDValue();

    // (fneg (fnmsub a b c)) => (fnmsub (fneg a) b (fneg c))
    // (fneg (fnmsub a b c)) => (fnmsub a (fneg b) (fneg c))
    // These transformations may change the sign of zeroes. For example,
    // -(-ab-(-c))=-0 while -(-(ab-c))=+0 when a=b=c=1.
    if (Flags.hasNoSignedZeros() || Options.NoSignedZerosFPMath) {
      // Try and choose the cheaper one to negate.
      NegatibleCost N0Cost = NegatibleCost::Expensive;
      SDValue NegN0 = getNegatedExpression(N0, DAG, LegalOps, OptForSize,
                                           N0Cost, Depth + 1);

      NegatibleCost N1Cost = NegatibleCost::Expensive;
      SDValue NegN1 = getNegatedExpression(N1, DAG, LegalOps, OptForSize,
                                           N1Cost, Depth + 1);

      if (NegN0 && N0Cost <= N1Cost) {
        Cost = std::min(N0Cost, N2Cost);
        return DAG.getNode(Opc, Loc, VT, NegN0, N1, NegN2, Flags);
      } else if (NegN1) {
        Cost = std::min(N1Cost, N2Cost);
        return DAG.getNode(Opc, Loc, VT, N0, NegN1, NegN2, Flags);
      }
    }

    // (fneg (fnmsub a b c)) => (fma a b (fneg c))
    if (isOperationLegal(ISD::FMA, VT)) {
      Cost = N2Cost;
      return DAG.getNode(ISD::FMA, Loc, VT, N0, N1, NegN2, Flags);
    }

    break;
  }

  return TargetLowering::getNegatedExpression(Op, DAG, LegalOps, OptForSize,
                                              Cost, Depth);
}

// Override to enable LOAD_STACK_GUARD lowering on Linux.
bool PPCTargetLowering::useLoadStackGuardNode() const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::useLoadStackGuardNode();
  return true;
}

// Override to disable global variable loading on Linux.
void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::insertSSPDeclarations(M);
}

bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                     bool ForCodeSize) const {
  if (!VT.isSimple() || !Subtarget.hasVSX())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  default:
    // For FP types that are currently not supported by the PPC backend, return
    // false. Examples: f16, f80.
    return false;
  case MVT::f32:
  case MVT::f64:
    if (Subtarget.hasPrefixInstrs()) {
      // With prefixed instructions, we can materialize anything that can be
      // represented with a 32-bit immediate, not just positive zero.
      APFloat APFloatOfImm = Imm;
      return convertToNonDenormSingle(APFloatOfImm);
    }
    LLVM_FALLTHROUGH;
  case MVT::ppcf128:
    return Imm.isPosZero();
  }
}

// For vector shift operation op, fold
// (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
                                  SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();
  unsigned OpSizeInBits = VT.getScalarSizeInBits();
  unsigned Opcode = N->getOpcode();
  unsigned TargetOpcode;

  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected shift operation");
  case ISD::SHL:
    TargetOpcode = PPCISD::SHL;
    break;
  case ISD::SRL:
    TargetOpcode = PPCISD::SRL;
    break;
  case ISD::SRA:
    TargetOpcode = PPCISD::SRA;
    break;
  }

  if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
      N1->getOpcode() == ISD::AND)
    if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
      if (Mask->getZExtValue() == OpSizeInBits - 1)
        return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));

  return SDValue();
}

SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  SDValue N0 = N->getOperand(0);
  ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!Subtarget.isISA3_0() || !Subtarget.isPPC64() ||
      N0.getOpcode() != ISD::SIGN_EXTEND ||
      N0.getOperand(0).getValueType() != MVT::i32 || CN1 == nullptr ||
      N->getValueType(0) != MVT::i64)
    return SDValue();
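  // What the rest of this combine matches, sketched with illustrative
  // operand names:
  //   (shl (sign_extend i32:%x to i64), %c) -> (PPCISD::EXTSWSLI %x, %c)
  // which selects to a single ISA 3.0 extswsli (extend sign word and shift
  // left immediate) instead of separate extend and shift instructions.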
  // We can't save an operation here if the value is already extended, and
  // the existing shift is easier to combine.
  SDValue ExtsSrc = N0.getOperand(0);
  if (ExtsSrc.getOpcode() == ISD::TRUNCATE &&
      ExtsSrc.getOperand(0).getOpcode() == ISD::AssertSext)
    return SDValue();

  SDLoc DL(N0);
  SDValue ShiftBy = SDValue(CN1, 0);
  // We want the shift amount to be i32 on the extswsli, but the shift could
  // have an i64.
  if (ShiftBy.getValueType() == MVT::i64)
    ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32);

  return DCI.DAG.getNode(PPCISD::EXTSWSLI, DL, MVT::i64, N0->getOperand(0),
                         ShiftBy);
}

SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}

SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}

// Transform (add X, (zext(setne Z, C))) -> (addze X, (addic (addi Z, -C), -1))
// Transform (add X, (zext(sete  Z, C))) -> (addze X, (subfic (addi Z, -C), 0))
// When C is zero, the equation (addi Z, -C) can be simplified to Z.
// Requirement: -C in [-32768, 32767], X and Z are MVT::i64 types
static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG,
                                 const PPCSubtarget &Subtarget) {
  if (!Subtarget.isPPC64())
    return SDValue();

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  auto isZextOfCompareWithConstant = [](SDValue Op) {
    if (Op.getOpcode() != ISD::ZERO_EXTEND || !Op.hasOneUse() ||
        Op.getValueType() != MVT::i64)
      return false;

    SDValue Cmp = Op.getOperand(0);
    if (Cmp.getOpcode() != ISD::SETCC || !Cmp.hasOneUse() ||
        Cmp.getOperand(0).getValueType() != MVT::i64)
      return false;

    if (auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1))) {
      int64_t NegConstant = 0 - Constant->getSExtValue();
      // Due to the limitations of the addi instruction,
      // -C is required to be [-32768, 32767].
      return isInt<16>(NegConstant);
    }

    return false;
  };

  bool LHSHasPattern = isZextOfCompareWithConstant(LHS);
  bool RHSHasPattern = isZextOfCompareWithConstant(RHS);

  // If there is a pattern, canonicalize a zext operand to the RHS.
  if (LHSHasPattern && !RHSHasPattern)
    std::swap(LHS, RHS);
  else if (!LHSHasPattern && !RHSHasPattern)
    return SDValue();

  SDLoc DL(N);
  SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Glue);
  SDValue Cmp = RHS.getOperand(0);
  SDValue Z = Cmp.getOperand(0);
  auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1));

  assert(Constant && "Constant should not be a null pointer.");
  int64_t NegConstant = 0 - Constant->getSExtValue();

  switch (cast<CondCodeSDNode>(Cmp.getOperand(2))->get()) {
  default: break;
  case ISD::SETNE: {
    //                                 when C == 0
    //                             --> addze X, (addic Z, -1).carry
    //                            /
    // add X, (zext(setne Z, C))--
    //                            \    when -32768 <= -C <= 32767 && C != 0
    //                             --> addze X, (addic (addi Z, -C), -1).carry
    SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
                              DAG.getConstant(NegConstant, DL, MVT::i64));
    SDValue AddOrZ = NegConstant != 0 ? Add : Z;
    SDValue Addc = DAG.getNode(ISD::ADDC, DL,
                               DAG.getVTList(MVT::i64, MVT::Glue),
                               AddOrZ, DAG.getConstant(-1ULL, DL, MVT::i64));
    return DAG.getNode(ISD::ADDE, DL, VTs, LHS,
                       DAG.getConstant(0, DL, MVT::i64),
                       SDValue(Addc.getNode(), 1));
  }
  case ISD::SETEQ: {
    //                                 when C == 0
    //                             --> addze X, (subfic Z, 0).carry
    //                            /
    // add X, (zext(sete Z, C))--
    //                            \    when -32768 <= -C <= 32767 && C != 0
    //                             --> addze X, (subfic (addi Z, -C), 0).carry
    SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
                              DAG.getConstant(NegConstant, DL, MVT::i64));
    SDValue AddOrZ = NegConstant != 0 ? Add : Z;
    SDValue Subc = DAG.getNode(ISD::SUBC, DL,
                               DAG.getVTList(MVT::i64, MVT::Glue),
                               DAG.getConstant(0, DL, MVT::i64), AddOrZ);
    return DAG.getNode(ISD::ADDE, DL, VTs, LHS,
                       DAG.getConstant(0, DL, MVT::i64),
                       SDValue(Subc.getNode(), 1));
  }
  }

  return SDValue();
}

// Transform
// (add C1, (MAT_PCREL_ADDR GlobalAddr+C2)) to
// (MAT_PCREL_ADDR GlobalAddr+(C1+C2))
// In this case both C1 and C2 must be known constants.
// C1+C2 must fit into a 34-bit signed integer.
static SDValue combineADDToMAT_PCREL_ADDR(SDNode *N, SelectionDAG &DAG,
                                          const PPCSubtarget &Subtarget) {
  if (!Subtarget.isUsingPCRelativeCalls())
    return SDValue();

  // Check both Operand 0 and Operand 1 of the ADD node for the PCRel node.
  // If we find that node try to cast the Global Address and the Constant.
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
    std::swap(LHS, RHS);

  if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
    return SDValue();

  // Operand zero of PPCISD::MAT_PCREL_ADDR is the GA node.
  GlobalAddressSDNode *GSDN = dyn_cast<GlobalAddressSDNode>(LHS.getOperand(0));
  ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(RHS);

  // Check that both casts succeeded.
  if (!GSDN || !ConstNode)
    return SDValue();

  int64_t NewOffset = GSDN->getOffset() + ConstNode->getSExtValue();
  SDLoc DL(GSDN);

  // The signed int offset needs to fit in 34 bits.
  if (!isInt<34>(NewOffset))
    return SDValue();

  // The new global address is a copy of the old global address except
  // that it has the updated Offset.
  SDValue GA =
      DAG.getTargetGlobalAddress(GSDN->getGlobal(), DL, GSDN->getValueType(0),
                                 NewOffset, GSDN->getTargetFlags());
  SDValue MatPCRel =
      DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, GSDN->getValueType(0), GA);
  return MatPCRel;
}

SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = combineADDToADDZE(N, DCI.DAG, Subtarget))
    return Value;

  if (auto Value = combineADDToMAT_PCREL_ADDR(N, DCI.DAG, Subtarget))
    return Value;

  return SDValue();
}

// Detect TRUNCATE operations on bitcasts of float128 values.
// What we are looking for here is the situation where we extract a subset
// of bits from a 128-bit float.
// This can be of two forms:
// 1) BITCAST of f128 feeding TRUNCATE
// 2) BITCAST of f128 feeding SRL (a shift) feeding TRUNCATE
// The reason this is required is because we do not have a legal i128 type
// and so we want to prevent having to store the f128 and then reload part
// of it.
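// As an illustrative sketch of form 2 on a big-endian target:
//   (i64 (trunc (srl (i128 (bitcast f128:%x)), 64)))
// becomes
//   (extract_vector_elt (v2i64 (bitcast %x)), 0)
// i.e. the SRL flips which doubleword the endianness-based default below
// would have extracted.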
SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = combineADDToADDZE(N, DCI.DAG, Subtarget))
    return Value;

  if (auto Value = combineADDToMAT_PCREL_ADDR(N, DCI.DAG, Subtarget))
    return Value;

  return SDValue();
}

// Detect TRUNCATE operations on bitcasts of float128 values.
// What we are looking for here is the situation where we extract a subset
// of bits from a 128-bit float.
// This can be of two forms:
// 1) BITCAST of f128 feeding TRUNCATE
// 2) BITCAST of f128 feeding SRL (a shift) feeding TRUNCATE
// This is required because we do not have a legal i128 type, so we want to
// prevent having to store the f128 and then reload part of it.
SDValue PPCTargetLowering::combineTRUNCATE(SDNode *N,
                                           DAGCombinerInfo &DCI) const {
  // If we are using CRBits then try that first.
  if (Subtarget.useCRBits()) {
    // Check if CRBits did anything and return that if it did.
    if (SDValue CRTruncValue = DAGCombineTruncBoolExt(N, DCI))
      return CRTruncValue;
  }

  SDLoc dl(N);
  SDValue Op0 = N->getOperand(0);

  // Fold (truncate (abs (sub (zext a), (zext b)))) -> (vabsd a, b).
  if (Subtarget.hasP9Altivec() && Op0.getOpcode() == ISD::ABS) {
    EVT VT = N->getValueType(0);
    if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
      return SDValue();
    SDValue Sub = Op0.getOperand(0);
    if (Sub.getOpcode() == ISD::SUB) {
      SDValue SubOp0 = Sub.getOperand(0);
      SDValue SubOp1 = Sub.getOperand(1);
      if ((SubOp0.getOpcode() == ISD::ZERO_EXTEND) &&
          (SubOp1.getOpcode() == ISD::ZERO_EXTEND)) {
        return DCI.DAG.getNode(PPCISD::VABSD, dl, VT, SubOp0.getOperand(0),
                               SubOp1.getOperand(0),
                               DCI.DAG.getTargetConstant(0, dl, MVT::i32));
      }
    }
  }

  // Looking for a truncate of i128 to i64.
  if (Op0.getValueType() != MVT::i128 || N->getValueType(0) != MVT::i64)
    return SDValue();

  int EltToExtract = DCI.DAG.getDataLayout().isBigEndian() ? 1 : 0;

  // SRL feeding TRUNCATE.
  if (Op0.getOpcode() == ISD::SRL) {
    ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
    // The right shift has to be by 64 bits.
    if (!ConstNode || ConstNode->getZExtValue() != 64)
      return SDValue();

    // Switch the element number to extract.
    EltToExtract = EltToExtract ? 0 : 1;
    // Update Op0 past the SRL.
    Op0 = Op0.getOperand(0);
  }

  // BITCAST feeding a TRUNCATE possibly via SRL.
  if (Op0.getOpcode() == ISD::BITCAST && Op0.getValueType() == MVT::i128 &&
      Op0.getOperand(0).getValueType() == MVT::f128) {
    SDValue Bitcast = DCI.DAG.getBitcast(MVT::v2i64, Op0.getOperand(0));
    return DCI.DAG.getNode(
        ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Bitcast,
        DCI.DAG.getTargetConstant(EltToExtract, dl, MVT::i32));
  }
  return SDValue();
}
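// Illustrative example (assumed, for illustration): on a little-endian
// subtarget,
//   (i64 (truncate (srl (i128 (bitcast f128:x)), 64)))
// becomes (extract_vector_elt (v2i64 (bitcast x)), 1), reading the high
// doubleword of x directly from a vector register rather than storing the
// f128 and reloading half of it.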
SDValue PPCTargetLowering::combineMUL(SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  ConstantSDNode *ConstOpOrElement = isConstOrConstSplat(N->getOperand(1));
  if (!ConstOpOrElement)
    return SDValue();

  // An imul is usually smaller than the alternative sequence for a legal
  // type.
  if (DAG.getMachineFunction().getFunction().hasMinSize() &&
      isOperationLegal(ISD::MUL, N->getValueType(0)))
    return SDValue();

  auto IsProfitable = [this](bool IsNeg, bool IsAddOne, EVT VT) -> bool {
    switch (this->Subtarget.getCPUDirective()) {
    default:
      // TODO: enhance the condition for subtargets before pwr8
      return false;
    case PPC::DIR_PWR8:
      //  type        mul     add    shl
      // scalar        4       1      1
      // vector        7       2      2
      return true;
    case PPC::DIR_PWR9:
    case PPC::DIR_PWR10:
    case PPC::DIR_PWR_FUTURE:
      //  type        mul     add    shl
      // scalar        5       2      2
      // vector        7       2      2
      //
      // The cycle counts of the related operations are shown in the table
      // above: mul costs 5 (scalar) / 7 (vector) cycles, while add/sub/shl
      // each cost 2 for both scalar and vector types. For the two-instruction
      // patterns, add/sub + shl costs 4 cycles, so the transform is always
      // profitable. But for the three-instruction pattern
      // (mul x, -(2^N + 1)) => -(add (shl x, N), x), sub + add + shl costs 6
      // cycles, so we should only do it for vector types.
      return IsAddOne && IsNeg ? VT.isVector() : true;
    }
  };

  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  const APInt &MulAmt = ConstOpOrElement->getAPIntValue();
  bool IsNeg = MulAmt.isNegative();
  APInt MulAmtAbs = MulAmt.abs();

  if ((MulAmtAbs - 1).isPowerOf2()) {
    // (mul x, 2^N + 1) => (add (shl x, N), x)
    // (mul x, -(2^N + 1)) => -(add (shl x, N), x)

    if (!IsProfitable(IsNeg, true, VT))
      return SDValue();

    SDValue Op0 = N->getOperand(0);
    SDValue Op1 =
        DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                    DAG.getConstant((MulAmtAbs - 1).logBase2(), DL, VT));
    SDValue Res = DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);

    if (!IsNeg)
      return Res;

    return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
  } else if ((MulAmtAbs + 1).isPowerOf2()) {
    // (mul x, 2^N - 1) => (sub (shl x, N), x)
    // (mul x, -(2^N - 1)) => (sub x, (shl x, N))

    if (!IsProfitable(IsNeg, false, VT))
      return SDValue();

    SDValue Op0 = N->getOperand(0);
    SDValue Op1 =
        DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                    DAG.getConstant((MulAmtAbs + 1).logBase2(), DL, VT));

    if (!IsNeg)
      return DAG.getNode(ISD::SUB, DL, VT, Op1, Op0);
    else
      return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);
  } else {
    return SDValue();
  }
}
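// Worked examples for the combine above (illustrative):
//   (mul x, 5):  |5| - 1 = 4 = 2^2,  so it becomes (add (shl x, 2), x).
//   (mul x, -3): |-3| + 1 = 4 = 2^2, so it becomes (sub x, (shl x, 2)).
// Each case trades one multiply for a shift plus an add/sub when
// IsProfitable approves.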
// Combine FMA-like ops (such as fnmsub) with fnegs into the appropriate op.
// Do this in the combiner since we need to check SD flags and other
// subtarget features.
SDValue PPCTargetLowering::combineFMALike(SDNode *N,
                                          DAGCombinerInfo &DCI) const {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue N2 = N->getOperand(2);
  SDNodeFlags Flags = N->getFlags();
  EVT VT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;
  const TargetOptions &Options = getTargetMachine().Options;
  unsigned Opc = N->getOpcode();
  bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
  bool LegalOps = !DCI.isBeforeLegalizeOps();
  SDLoc Loc(N);

  if (!isOperationLegal(ISD::FMA, VT))
    return SDValue();

  // Allowing the transformation to FNMSUB may change the sign of zero when
  // a*b-c == 0, since (fnmsub a b c) == -0 while c-a*b == +0.
  if (!Flags.hasNoSignedZeros() && !Options.NoSignedZerosFPMath)
    return SDValue();

  // (fma (fneg a) b c) => (fnmsub a b c)
  // (fnmsub (fneg a) b c) => (fma a b c)
  if (SDValue NegN0 = getCheaperNegatedExpression(N0, DAG, LegalOps, CodeSize))
    return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, NegN0, N1, N2, Flags);

  // (fma a (fneg b) c) => (fnmsub a b c)
  // (fnmsub a (fneg b) c) => (fma a b c)
  if (SDValue NegN1 = getCheaperNegatedExpression(N1, DAG, LegalOps, CodeSize))
    return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, N0, NegN1, N2, Flags);

  return SDValue();
}

bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  // Only duplicate to increase tail-calls for the 64-bit SysV ABIs.
  if (!Subtarget.is64BitELFABI())
    return false;

  // If not a tail call then no need to proceed.
  if (!CI->isTailCall())
    return false;

  // If sibling calls have been disabled and tail-calls aren't guaranteed
  // there is no reason to duplicate.
  auto &TM = getTargetMachine();
  if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
    return false;

  // Can't tail call a function called indirectly, or if it has variadic args.
  const Function *Callee = CI->getCalledFunction();
  if (!Callee || Callee->isVarArg())
    return false;

  // Make sure the callee and caller calling conventions are eligible for TCO.
  const Function *Caller = CI->getParent()->getParent();
  if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
                                           CI->getCallingConv()))
    return false;

  // If the function is local then we have a good chance at tail-calling it.
  return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
}

bool PPCTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
  if (!Subtarget.hasVSX())
    return false;
  if (Subtarget.hasP9Vector() && VT == MVT::f128)
    return true;
  return VT == MVT::f32 || VT == MVT::f64 ||
         VT == MVT::v4f32 || VT == MVT::v2f64;
}
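// Illustrative examples for the hook below (assumed encodings): a mask of
// 0xFFFF is encodable in andi., and 0xFFFF0000 in andis.; both are record
// forms that update CR0, so the subsequent compare against zero comes for
// free.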
bool PPCTargetLowering::
isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
  const Value *Mask = AndI.getOperand(1);
  // If the mask is suitable for andi. or andis. we should sink the and.
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
    // Can't handle constants wider than 64 bits.
    if (CI->getBitWidth() > 64)
      return false;
    int64_t ConstVal = CI->getZExtValue();
    return isUInt<16>(ConstVal) ||
           (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
  }

  // For non-constant masks, we can always use the record-form and.
  return true;
}

// Transform (abs (sub (zext a), (zext b))) to (vabsd a b 0)
// Transform (abs (sub (zext a), (zext_invec b))) to (vabsd a b 0)
// Transform (abs (sub (zext_invec a), (zext_invec b))) to (vabsd a b 0)
// Transform (abs (sub (zext_invec a), (zext b))) to (vabsd a b 0)
// Transform (abs (sub a, b)) to (vabsd a b 1) if a and b are of type v4i32
SDValue PPCTargetLowering::combineABS(SDNode *N, DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::ABS) && "Need ABS node here");
  assert(Subtarget.hasP9Altivec() &&
         "Only combine this when P9 Altivec is supported!");
  EVT VT = N->getValueType(0);
  if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  if (N->getOperand(0).getOpcode() == ISD::SUB) {
    // This is valid even for signed integers: the zero-extended inputs are
    // known to be non-negative when viewed as signed integers, so the
    // unsigned absolute difference gives the right result.
    unsigned SubOpcd0 = N->getOperand(0)->getOperand(0).getOpcode();
    unsigned SubOpcd1 = N->getOperand(0)->getOperand(1).getOpcode();
    if ((SubOpcd0 == ISD::ZERO_EXTEND ||
         SubOpcd0 == ISD::ZERO_EXTEND_VECTOR_INREG) &&
        (SubOpcd1 == ISD::ZERO_EXTEND ||
         SubOpcd1 == ISD::ZERO_EXTEND_VECTOR_INREG)) {
      return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
                         N->getOperand(0)->getOperand(0),
                         N->getOperand(0)->getOperand(1),
                         DAG.getTargetConstant(0, dl, MVT::i32));
    }

    // For type v4i32, it can be optimized with xvnegsp + vabsduw.
    if (N->getOperand(0).getValueType() == MVT::v4i32 &&
        N->getOperand(0).hasOneUse()) {
      return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
                         N->getOperand(0)->getOperand(0),
                         N->getOperand(0)->getOperand(1),
                         DAG.getTargetConstant(1, dl, MVT::i32));
    }
  }

  return SDValue();
}
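// Illustrative note (assumed final lowering; selection may differ): for the
// zero-extended case, (v4i32 (abs (sub (zext a), (zext b)))) becomes
// (VABSD a, b, 0) and can select to vabsduw; the generic v4i32 case uses
// (VABSD a, b, 1), i.e. xvnegsp on both inputs followed by vabsduw.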
// For type v4i32/v8i16/v16i8, transform
// from (vselect (setcc a, b, setugt), (sub a, b), (sub b, a)) to (vabsd a, b)
// from (vselect (setcc a, b, setuge), (sub a, b), (sub b, a)) to (vabsd a, b)
// from (vselect (setcc a, b, setult), (sub b, a), (sub a, b)) to (vabsd a, b)
// from (vselect (setcc a, b, setule), (sub b, a), (sub a, b)) to (vabsd a, b)
SDValue PPCTargetLowering::combineVSelect(SDNode *N,
                                          DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::VSELECT) && "Need VSELECT node here");
  assert(Subtarget.hasP9Altivec() &&
         "Only combine this when P9 Altivec is supported!");

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Cond = N->getOperand(0);
  SDValue TrueOpnd = N->getOperand(1);
  SDValue FalseOpnd = N->getOperand(2);
  EVT VT = N->getOperand(1).getValueType();

  if (Cond.getOpcode() != ISD::SETCC || TrueOpnd.getOpcode() != ISD::SUB ||
      FalseOpnd.getOpcode() != ISD::SUB)
    return SDValue();

  // ABSD is only available for type v4i32/v8i16/v16i8.
  if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
    return SDValue();

  // Require at least one operand to have a single use, so the combine saves
  // at least one dependent computation.
  if (!(Cond.hasOneUse() || TrueOpnd.hasOneUse() || FalseOpnd.hasOneUse()))
    return SDValue();

  ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

  // Can only handle unsigned comparisons here.
  switch (CC) {
  default:
    return SDValue();
  case ISD::SETUGT:
  case ISD::SETUGE:
    break;
  case ISD::SETULT:
  case ISD::SETULE:
    std::swap(TrueOpnd, FalseOpnd);
    break;
  }

  SDValue CmpOpnd1 = Cond.getOperand(0);
  SDValue CmpOpnd2 = Cond.getOperand(1);

  // SETCC CmpOpnd1 CmpOpnd2 cond
  // TrueOpnd = CmpOpnd1 - CmpOpnd2
  // FalseOpnd = CmpOpnd2 - CmpOpnd1
  if (TrueOpnd.getOperand(0) == CmpOpnd1 &&
      TrueOpnd.getOperand(1) == CmpOpnd2 &&
      FalseOpnd.getOperand(0) == CmpOpnd2 &&
      FalseOpnd.getOperand(1) == CmpOpnd1) {
    return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(1).getValueType(),
                       CmpOpnd1, CmpOpnd2,
                       DAG.getTargetConstant(0, dl, MVT::i32));
  }

  return SDValue();
}
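// Illustrative example (assumed final lowering; selection may differ): for
// v16i8 operands,
//   (vselect (setcc a, b, setugt), (sub a, b), (sub b, a))
// matches the pattern above and becomes (VABSD a, b, 0), which can select
// to vabsdub.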