//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCCCState.h"
#include "PPCCallingConv.h"
#include "PPCFrameLowering.h"
#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCRegisterInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSymbolXCOFF.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
"llvm/Target/TargetMachine.h" 93 #include "llvm/Target/TargetOptions.h" 94 #include <algorithm> 95 #include <cassert> 96 #include <cstdint> 97 #include <iterator> 98 #include <list> 99 #include <utility> 100 #include <vector> 101 102 using namespace llvm; 103 104 #define DEBUG_TYPE "ppc-lowering" 105 106 static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc", 107 cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden); 108 109 static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref", 110 cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden); 111 112 static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned", 113 cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden); 114 115 static cl::opt<bool> DisableSCO("disable-ppc-sco", 116 cl::desc("disable sibling call optimization on ppc"), cl::Hidden); 117 118 static cl::opt<bool> DisableInnermostLoopAlign32("disable-ppc-innermost-loop-align32", 119 cl::desc("don't always align innermost loop to 32 bytes on ppc"), cl::Hidden); 120 121 static cl::opt<bool> EnableQuadPrecision("enable-ppc-quad-precision", 122 cl::desc("enable quad precision float support on ppc"), cl::Hidden); 123 124 static cl::opt<bool> UseAbsoluteJumpTables("ppc-use-absolute-jumptables", 125 cl::desc("use absolute jump tables on ppc"), cl::Hidden); 126 127 STATISTIC(NumTailCalls, "Number of tail calls"); 128 STATISTIC(NumSiblingCalls, "Number of sibling calls"); 129 130 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int); 131 132 static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl); 133 134 // FIXME: Remove this once the bug has been fixed! 135 extern cl::opt<bool> ANDIGlueBug; 136 137 PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM, 138 const PPCSubtarget &STI) 139 : TargetLowering(TM), Subtarget(STI) { 140 // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all 141 // arguments are at least 4/8 bytes aligned. 142 bool isPPC64 = Subtarget.isPPC64(); 143 setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4)); 144 145 // Set up the register classes. 146 addRegisterClass(MVT::i32, &PPC::GPRCRegClass); 147 if (!useSoftFloat()) { 148 if (hasSPE()) { 149 addRegisterClass(MVT::f32, &PPC::GPRCRegClass); 150 addRegisterClass(MVT::f64, &PPC::SPERCRegClass); 151 } else { 152 addRegisterClass(MVT::f32, &PPC::F4RCRegClass); 153 addRegisterClass(MVT::f64, &PPC::F8RCRegClass); 154 } 155 } 156 157 // Match BITREVERSE to customized fast code sequence in the td file. 158 setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); 159 setOperationAction(ISD::BITREVERSE, MVT::i64, Legal); 160 161 // Sub-word ATOMIC_CMP_SWAP need to ensure that the input is zero-extended. 162 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom); 163 164 // PowerPC has an i16 but no i8 (or i1) SEXTLOAD. 165 for (MVT VT : MVT::integer_valuetypes()) { 166 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); 167 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand); 168 } 169 170 setTruncStoreAction(MVT::f64, MVT::f32, Expand); 171 172 // PowerPC has pre-inc load and store's. 
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  if (!Subtarget.hasSPE()) {
    setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
  }

  // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::SINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
  // PPC (the libcall is not available).
  setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions unless we are on P9.
  // On P9 we may use a hardware instruction to compute the remainder.
  // The instructions are not legalized directly because in the cases where the
  // result of both the remainder and the division is required it is more
  // efficient to compute the remainder from the result of the division rather
  // than use the remainder instruction.
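  // (ISA 3.0 provides the modsw/moduw/modsd/modud modulo instructions, so
  // the Custom lowering below can choose between them and the
  // divide-multiply-subtract sequence.)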
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::SREM, MVT::i32, Custom);
    setOperationAction(ISD::UREM, MVT::i32, Custom);
    setOperationAction(ISD::SREM, MVT::i64, Custom);
    setOperationAction(ISD::UREM, MVT::i64, Custom);
  } else {
    setOperationAction(ISD::SREM, MVT::i32, Expand);
    setOperationAction(ISD::UREM, MVT::i32, Expand);
    setOperationAction(ISD::SREM, MVT::i64, Expand);
    setOperationAction(ISD::UREM, MVT::i64, Expand);
  }

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  if (Subtarget.hasSPE()) {
    setOperationAction(ISD::FMA, MVT::f64, Expand);
    setOperationAction(ISD::FMA, MVT::f32, Expand);
  } else {
    setOperationAction(ISD::FMA, MVT::f64, Legal);
    setOperationAction(ISD::FMA, MVT::f32, Legal);
  }

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }
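  // (FPRND is the frin/friz/frip/frim family of round-to-integer
  // instructions, covering round/trunc/ceil/floor respectively.)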
  // PowerPC does not have BSWAP, but we can use the vector BSWAP instruction
  // xxbrd to speed up scalar BSWAP64.
  // CTPOP and CTTZ were introduced in P8 and P9, respectively.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  if (Subtarget.hasP9Vector())
    setOperationAction(ISD::BSWAP, MVT::i64, Custom);
  else
    setOperationAction(ISD::BSWAP, MVT::i64, Expand);
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::CTTZ, MVT::i32, Legal);
    setOperationAction(ISD::CTTZ, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTTZ, MVT::i32, Expand);
    setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  }

  if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have Select.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  if (Subtarget.hasSPE()) {
    // SPE has built-in conversions.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
  } else {
    // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

    // PowerPC does not have [U|S]INT_TO_FP.
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
  }

  if (Subtarget.hasDirectMove() && isPPC64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i64, Legal);
    setOperationAction(ISD::BITCAST, MVT::f64, Legal);
    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::LRINT, MVT::f64, Legal);
      setOperationAction(ISD::LRINT, MVT::f32, Legal);
      setOperationAction(ISD::LLRINT, MVT::f64, Legal);
      setOperationAction(ISD::LLRINT, MVT::f32, Legal);
      setOperationAction(ISD::LROUND, MVT::f64, Legal);
      setOperationAction(ISD::LROUND, MVT::f32, Legal);
      setOperationAction(ISD::LLROUND, MVT::f64, Legal);
      setOperationAction(ISD::LLROUND, MVT::f32, Legal);
    }
  } else {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
  }

  // We cannot sextinreg(i1). Expand to shifts.
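  // (The i1 sign_extend_inreg expansion is the usual shift pair, e.g. for
  // i32: (sext_inreg x, i1) -> (sra (shl x, 31), 31).)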
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling but a light-weight setjmp/longjmp replacement to
  // support continuations, user-level threading, and so on. As a result, no
  // other SjLj exception interfaces are implemented, so please don't build
  // your own exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  if (Subtarget.is64BitELFABI()) {
    // VAARG always uses double-word chunks, so promote anything smaller.
    setOperationAction(ISD::VAARG, MVT::i1, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i8, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i16, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i32, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
  } else if (Subtarget.is32BitELFABI()) {
    // VAARG is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VAARG, MVT::i64, Custom);
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  // VACOPY is custom lowered with the 32-bit SVR4 ABI.
  if (Subtarget.is32BitELFABI())
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);
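  // (Under the 32-bit SVR4 ABI, va_list is a structure rather than a plain
  // pointer, which is why VAARG and VACOPY need custom lowering there.)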
  // Use the default implementation.
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  // Comparisons that require checking two conditions.
  if (Subtarget.hasSPE()) {
    setCondCodeAction(ISD::SETO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::f64, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
  }
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget.has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    if (Subtarget.hasSPE())
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
    else
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
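  // (FPCVT, available on ISA 2.06 / POWER7 and later, adds the
  // single-precision and unsigned conversion forms, e.g. fcfids/fcfidus and
  // fctiwuz/fctiduz.)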
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget.hasVSX()) {
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
  }

  if (Subtarget.hasAltivec()) {
    for (MVT VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
      setOperationAction(ISD::SADDSAT, VT, Legal);
      setOperationAction(ISD::SSUBSAT, VT, Legal);
      setOperationAction(ISD::UADDSAT, VT, Legal);
      setOperationAction(ISD::USUBSAT, VT, Legal);
    }
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // For v2i64, these are only valid with P8Vector. This is corrected after
      // the loop.
      if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) {
        setOperationAction(ISD::SMAX, VT, Legal);
        setOperationAction(ISD::SMIN, VT, Legal);
        setOperationAction(ISD::UMAX, VT, Legal);
        setOperationAction(ISD::UMIN, VT, Legal);
      } else {
        setOperationAction(ISD::SMAX, VT, Expand);
        setOperationAction(ISD::SMIN, VT, Expand);
        setOperationAction(ISD::UMAX, VT, Expand);
        setOperationAction(ISD::UMIN, VT, Expand);
      }

      if (Subtarget.hasVSX()) {
        setOperationAction(ISD::FMAXNUM, VT, Legal);
        setOperationAction(ISD::FMINNUM, VT, Legal);
      }

      // Vector instructions introduced in P8.
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      } else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // Vector instructions introduced in P9.
      if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
        setOperationAction(ISD::CTTZ, VT, Legal);
      else
        setOperationAction(ISD::CTTZ, VT, Expand);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType(ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v4i32);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v4i32);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v4i32);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType(ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType(ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL, VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL, VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }
    if (!Subtarget.hasP8Vector()) {
      setOperationAction(ISD::SMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::SMIN, MVT::v2i64, Expand);
      setOperationAction(ISD::UMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::UMIN, MVT::v2i64, Expand);
    }

    for (auto VT : {MVT::v2i64, MVT::v4i32, MVT::v8i16, MVT::v16i8})
      setOperationAction(ISD::ABS, VT, Custom);

    // We can custom expand all VECTOR_SHUFFLEs to VPERM; the rest we can
    // handle with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    // Vector truncates to sub-word integers that fit in an Altivec/VSX
    // register are cheap, so handle them before they get expanded to scalar.
    setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);

    setOperationAction(ISD::AND, MVT::v4i32, Legal);
    setOperationAction(ISD::OR, MVT::v4i32, Legal);
    setOperationAction(ISD::XOR, MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    // Without hasP8Altivec set, v2i64 SMAX isn't available.
    // But ABS custom lowering requires SMAX support.
    if (!Subtarget.hasP8Altivec())
      setOperationAction(ISD::ABS, MVT::v2i64, Expand);

    // With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w).
    if (Subtarget.hasAltivec())
      for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})
        setOperationAction(ISD::ROTL, VT, Legal);
    // With hasP8Altivec set, we can lower ISD::ROTL to vrld.
    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::ROTL, MVT::v2i64, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
      }
      if (Subtarget.hasDirectMove() && isPPC64) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
      }
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      // The nearbyint variants are not allowed to raise the inexact
      // exception, so we can only code-gen them with unsafe math.
      if (TM.Options.UnsafeFPMath) {
        setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
        setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
      }

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::f64, Legal);

      setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        // 128-bit shifts can be accomplished via 3 instructions for SHL and
        // SRL, but not for SRA because of the instructions available:
        // VS{RL} and VS{RL}O. However, due to direct-move costs, it's not
        // worth doing.
        setOperationAction(ISD::SHL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRA, MVT::v1i128, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      } else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

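      // (The v2i64 <-> v2f64 conversions above map directly to the VSX
      // xvcvsxddp/xvcvuxddp and xvcvdpsxds/xvcvdpuxds instructions.)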
      // Custom handling for partial vectors of integers converted to
      // floating point. We already have optimal handling for v2i32 through
      // the DAG combine, so those aren't necessary.
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);

      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v2f64, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Legal);

      if (Subtarget.hasDirectMove())
        setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }

    if (Subtarget.hasP9Vector()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

      // 128-bit shifts can be accomplished via 3 instructions for SHL and
      // SRL, but not for SRA because of the instructions available:
      // VS{RL} and VS{RL}O.
      setOperationAction(ISD::SHL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRA, MVT::v1i128, Expand);

      if (EnableQuadPrecision) {
        addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
        setOperationAction(ISD::FADD, MVT::f128, Legal);
        setOperationAction(ISD::FSUB, MVT::f128, Legal);
        setOperationAction(ISD::FDIV, MVT::f128, Legal);
        setOperationAction(ISD::FMUL, MVT::f128, Legal);
        setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
        // No extending loads to f128 on PPC.
        for (MVT FPT : MVT::fp_valuetypes())
          setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
        setOperationAction(ISD::FMA, MVT::f128, Legal);
        setCondCodeAction(ISD::SETULT, MVT::f128, Expand);
        setCondCodeAction(ISD::SETUGT, MVT::f128, Expand);
        setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand);
        setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
        setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
        setCondCodeAction(ISD::SETONE, MVT::f128, Expand);

        setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
        setOperationAction(ISD::FRINT, MVT::f128, Legal);
        setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
        setOperationAction(ISD::FCEIL, MVT::f128, Legal);
        setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
        setOperationAction(ISD::FROUND, MVT::f128, Legal);

        setOperationAction(ISD::SELECT, MVT::f128, Expand);
        setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
        setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
        setTruncStoreAction(MVT::f128, MVT::f64, Expand);
        setTruncStoreAction(MVT::f128, MVT::f32, Expand);
        setOperationAction(ISD::BITCAST, MVT::i128, Custom);
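        // (ISA 3.0 implements these f128 operations directly in the vector
        // registers, e.g. xsaddqp/xsmulqp/xsdivqp, which is why they can be
        // marked Legal above.)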
        // No implementation for these ops for PowerPC.
        setOperationAction(ISD::FSIN, MVT::f128, Expand);
        setOperationAction(ISD::FCOS, MVT::f128, Expand);
        setOperationAction(ISD::FPOW, MVT::f128, Expand);
        setOperationAction(ISD::FPOWI, MVT::f128, Expand);
        setOperationAction(ISD::FREM, MVT::f128, Expand);
      }
      setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
      setOperationAction(ISD::BSWAP, MVT::v8i16, Legal);
      setOperationAction(ISD::BSWAP, MVT::v4i32, Legal);
      setOperationAction(ISD::BSWAP, MVT::v2i64, Legal);
      setOperationAction(ISD::BSWAP, MVT::v1i128, Legal);
    }

    if (Subtarget.hasP9Altivec()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);

      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
    }
  }

  if (Subtarget.hasQPX()) {
    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FREM, MVT::v4f64, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f64, Custom);
    setOperationAction(ISD::STORE, MVT::v4f64, Custom);

    setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f64, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f64, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f64, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f64, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f64, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f64, Expand);

    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);

    setOperationAction(ISD::FNEG, MVT::v4f64, Legal);
    setOperationAction(ISD::FABS, MVT::v4f64, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f64, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);

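    // (QPX is the Blue Gene/Q "Quad Processing eXtension": 4 x f64 vectors,
    // with v4f32 values held in the same registers in double format.)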
    addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FREM, MVT::v4f32, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f32, Custom);
    setOperationAction(ISD::STORE, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f32, Expand);

    setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
    setOperationAction(ISD::FABS, MVT::v4f32, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);

    setOperationAction(ISD::AND, MVT::v4i1, Legal);
    setOperationAction(ISD::OR, MVT::v4i1, Legal);
    setOperationAction(ISD::XOR, MVT::v4i1, Legal);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);

    setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
    setOperationAction(ISD::STORE, MVT::v4i1, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i1, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i1, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i1, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i1, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);

    addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);

    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);

    // These need to set FE_INEXACT, and so cannot be vectorized here.
    setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);

    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    } else {
      setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);

      setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    }
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasAltivec()) {
    // Altivec instructions set fields to all zeros or all ones.
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  }

  if (!isPPC64) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::BUILD_VECTOR);
  if (Subtarget.hasFPCVT())
    setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  if (Subtarget.useCRBits())
    setTargetDAGCombine(ISD::BRCOND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);

  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  setTargetDAGCombine(ISD::TRUNCATE);
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine(ISD::TRUNCATE);
    setTargetDAGCombine(ISD::SETCC);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  // Use reciprocal estimates.
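  // (Under unsafe FP math, the FDIV/FSQRT combines can replace divides and
  // square roots with Newton-Raphson refinements of the fre(s)/frsqrte(s)
  // estimate instructions.)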
  if (TM.Options.UnsafeFPMath) {
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::FSQRT);
  }

  if (Subtarget.hasP9Altivec()) {
    setTargetDAGCombine(ISD::ABS);
    setTargetDAGCombine(ISD::VSELECT);
  }

  if (EnableQuadPrecision) {
    setLibcallName(RTLIB::LOG_F128, "logf128");
    setLibcallName(RTLIB::LOG2_F128, "log2f128");
    setLibcallName(RTLIB::LOG10_F128, "log10f128");
    setLibcallName(RTLIB::EXP_F128, "expf128");
    setLibcallName(RTLIB::EXP2_F128, "exp2f128");
    setLibcallName(RTLIB::SIN_F128, "sinf128");
    setLibcallName(RTLIB::COS_F128, "cosf128");
    setLibcallName(RTLIB::POW_F128, "powf128");
    setLibcallName(RTLIB::FMIN_F128, "fminf128");
    setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
    setLibcallName(RTLIB::POWI_F128, "__powikf2");
    setLibcallName(RTLIB::REM_F128, "fmodf128");
  }

  // With 32 condition bits, we don't need to sink (and duplicate) compares
  // aggressively in CodeGenPrep.
  if (Subtarget.useCRBits()) {
    setHasMultipleConditionRegisters();
    setJumpIsExpensive();
  }

  setMinFunctionAlignment(Align(4));

  switch (Subtarget.getCPUDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_A2:
  case PPC::DIR_E500:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9:
  case PPC::DIR_PWR_FUTURE:
    setPrefLoopAlignment(Align(16));
    setPrefFunctionAlignment(Align(16));
    break;
  }

  if (Subtarget.enableMachineScheduler())
    setSchedulingPreference(Sched::Source);
  else
    setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties(STI.getRegisterInfo());

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getCPUDirective() == PPC::DIR_E500mc ||
      Subtarget.getCPUDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;
  } else if (Subtarget.getCPUDirective() == PPC::DIR_A2) {
    // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of a function call, even when warm, can be
    // over one hundred cycles.
    MaxStoresPerMemset = 128;
    MaxStoresPerMemcpy = 128;
    MaxStoresPerMemmove = 128;
    MaxLoadsPerMemcmp = 128;
  } else {
    MaxLoadsPerMemcmp = 8;
    MaxLoadsPerMemcmpOptSize = 4;
  }
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
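/// Walks the type recursively: vector members raise the alignment to 16 (or
/// 32 when MaxMaxAlign allows), and arrays/structs take the maximum over
/// their element types.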
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
                             unsigned MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
      MaxAlign = 32;
    else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      unsigned EltAlign = 0;
      getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {
  // 16-byte and wider vectors are passed on a 16-byte boundary. The rest are
  // passed on an 8-byte boundary on PPC64 and a 4-byte boundary on PPC32.
  unsigned Align = Subtarget.isPPC64() ? 8 : 4;
  if (Subtarget.hasAltivec() || Subtarget.hasQPX())
    getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ? 32 : 16);
  return Align;
}

bool PPCTargetLowering::useSoftFloat() const {
  return Subtarget.useSoftFloat();
}

bool PPCTargetLowering::hasSPE() const {
  return Subtarget.hasSPE();
}

bool PPCTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
  return VT.isScalarInteger();
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((PPCISD::NodeType)Opcode) {
  case PPCISD::FIRST_NUMBER: break;
  case PPCISD::FSEL: return "PPCISD::FSEL";
  case PPCISD::XSMAXCDP: return "PPCISD::XSMAXCDP";
  case PPCISD::XSMINCDP: return "PPCISD::XSMINCDP";
  case PPCISD::FCFID: return "PPCISD::FCFID";
  case PPCISD::FCFIDU: return "PPCISD::FCFIDU";
  case PPCISD::FCFIDS: return "PPCISD::FCFIDS";
  case PPCISD::FCFIDUS: return "PPCISD::FCFIDUS";
  case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
  case PPCISD::FCTIDUZ: return "PPCISD::FCTIDUZ";
  case PPCISD::FCTIWUZ: return "PPCISD::FCTIWUZ";
  case PPCISD::FP_TO_UINT_IN_VSR: return "PPCISD::FP_TO_UINT_IN_VSR";
  case PPCISD::FP_TO_SINT_IN_VSR: return "PPCISD::FP_TO_SINT_IN_VSR";
  case PPCISD::FRE: return "PPCISD::FRE";
  case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE";
  case PPCISD::STFIWX: return "PPCISD::STFIWX";
  case PPCISD::VMADDFP: return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM: return "PPCISD::VPERM";
  case PPCISD::XXSPLT: return "PPCISD::XXSPLT";
  case PPCISD::VECINSERT: return "PPCISD::VECINSERT";
  case PPCISD::XXPERMDI: return "PPCISD::XXPERMDI";
  case PPCISD::VECSHL: return "PPCISD::VECSHL";
  case PPCISD::CMPB: return "PPCISD::CMPB";
  case PPCISD::Hi: return "PPCISD::Hi";
  case PPCISD::Lo: return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY";
  case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
  case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
  case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC";
  case PPCISD::DYNAREAOFFSET: return "PPCISD::DYNAREAOFFSET";
  case PPCISD::DYNAREAOFFSET: return "PPCISD::DYNAREAOFFSET";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL: return "PPCISD::SRL";
  case PPCISD::SRA: return "PPCISD::SRA";
  case PPCISD::SHL: return "PPCISD::SHL";
  case PPCISD::SRA_ADDZE: return "PPCISD::SRA_ADDZE";
  case PPCISD::CALL: return "PPCISD::CALL";
  case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP";
  case PPCISD::MTCTR: return "PPCISD::MTCTR";
  case PPCISD::BCTRL: return "PPCISD::BCTRL";
  case PPCISD::BCTRL_LOAD_TOC: return "PPCISD::BCTRL_LOAD_TOC";
  case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG";
  case PPCISD::READ_TIME_BASE: return "PPCISD::READ_TIME_BASE";
  case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP";
  case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
  case PPCISD::MFOCRF: return "PPCISD::MFOCRF";
  case PPCISD::MFVSR: return "PPCISD::MFVSR";
  case PPCISD::MTVSRA: return "PPCISD::MTVSRA";
  case PPCISD::MTVSRZ: return "PPCISD::MTVSRZ";
  case PPCISD::SINT_VEC_TO_FP: return "PPCISD::SINT_VEC_TO_FP";
  case PPCISD::UINT_VEC_TO_FP: return "PPCISD::UINT_VEC_TO_FP";
  case PPCISD::ANDI_rec_1_EQ_BIT:
    return "PPCISD::ANDI_rec_1_EQ_BIT";
  case PPCISD::ANDI_rec_1_GT_BIT:
    return "PPCISD::ANDI_rec_1_GT_BIT";
  case PPCISD::VCMP: return "PPCISD::VCMP";
  case PPCISD::VCMPo: return "PPCISD::VCMPo";
  case PPCISD::LBRX: return "PPCISD::LBRX";
  case PPCISD::STBRX: return "PPCISD::STBRX";
  case PPCISD::LFIWAX: return "PPCISD::LFIWAX";
  case PPCISD::LFIWZX: return "PPCISD::LFIWZX";
  case PPCISD::LXSIZX: return "PPCISD::LXSIZX";
  case PPCISD::STXSIX: return "PPCISD::STXSIX";
  case PPCISD::VEXTS: return "PPCISD::VEXTS";
  case PPCISD::SExtVElems: return "PPCISD::SExtVElems";
  case PPCISD::LXVD2X: return "PPCISD::LXVD2X";
  case PPCISD::STXVD2X: return "PPCISD::STXVD2X";
  case PPCISD::LOAD_VEC_BE: return "PPCISD::LOAD_VEC_BE";
  case PPCISD::STORE_VEC_BE: return "PPCISD::STORE_VEC_BE";
  case PPCISD::ST_VSR_SCAL_INT:
    return "PPCISD::ST_VSR_SCAL_INT";
  case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH";
  case PPCISD::BDNZ: return "PPCISD::BDNZ";
  case PPCISD::BDZ: return "PPCISD::BDZ";
  case PPCISD::MFFS: return "PPCISD::MFFS";
  case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ";
  case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN";
  case PPCISD::CR6SET: return "PPCISD::CR6SET";
  case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET";
  case PPCISD::PPC32_GOT: return "PPCISD::PPC32_GOT";
  case PPCISD::PPC32_PICGOT: return "PPCISD::PPC32_PICGOT";
  case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
  case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L";
  case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS";
  case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA";
  case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L";
  case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR";
  case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
  case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA";
  case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L";
  case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR";
  case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
  case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
  case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L";
  case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT";
  case PPCISD::SC: return "PPCISD::SC";
  case PPCISD::CLRBHRB: return "PPCISD::CLRBHRB";
  case PPCISD::MFBHRBE: return "PPCISD::MFBHRBE";
  case PPCISD::RFEBB: return "PPCISD::RFEBB";
  case PPCISD::XXSWAPD: return "PPCISD::XXSWAPD";
  case PPCISD::SWAP_NO_CHAIN: return "PPCISD::SWAP_NO_CHAIN";
  case PPCISD::VABSD: return "PPCISD::VABSD";
  case PPCISD::QVFPERM: return "PPCISD::QVFPERM";
  case PPCISD::QVGPCI: return "PPCISD::QVGPCI";
  case PPCISD::QVALIGNI: return "PPCISD::QVALIGNI";
  case PPCISD::QVESPLATI: return "PPCISD::QVESPLATI";
  case PPCISD::QBFLT: return "PPCISD::QBFLT";
  case PPCISD::QVLFSb: return "PPCISD::QVLFSb";
  case PPCISD::BUILD_FP128: return "PPCISD::BUILD_FP128";
  case PPCISD::BUILD_SPE64: return "PPCISD::BUILD_SPE64";
  case PPCISD::EXTRACT_SPE: return "PPCISD::EXTRACT_SPE";
  case PPCISD::EXTSWSLI: return "PPCISD::EXTSWSLI";
  case PPCISD::LD_VSX_LH: return "PPCISD::LD_VSX_LH";
  case PPCISD::FP_EXTEND_HALF: return "PPCISD::FP_EXTEND_HALF";
  case PPCISD::LD_SPLAT: return "PPCISD::LD_SPLAT";
  }
  return nullptr;
}

EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
                                          EVT VT) const {
  if (!VT.isVector())
    return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;

  if (Subtarget.hasQPX())
    return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());

  return VT.changeVectorElementTypeToInteger();
}

bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
  return true;
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
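/// For example, on a big-endian target with two different inputs
/// (ShuffleKind 0), vpkuhum keeps the low-order byte of each halfword of the
/// concatenated inputs, so (modulo undef elements) the only mask accepted
/// below is <1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31>.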
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 1;
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i),   i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j))
        return false;
  }
  return true;
}

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+3))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 2;
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1))
        return false;
  }
  return true;
}

/// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
/// current subtarget.
///
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
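/// For example, with ShuffleKind 0 (big-endian, two different inputs) the
/// checks below accept only the mask that keeps the low-order word of each
/// doubleword, i.e. (modulo undef elements)
/// <4,5,6,7,12,13,14,15,20,21,22,23,28,29,30,31>.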
bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  const PPCSubtarget& Subtarget =
      static_cast<const PPCSubtarget&>(DAG.getSubtarget());
  if (!Subtarget.hasP8Vector())
    return false;

  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+4) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+5) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+6) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+7))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2)   ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+3))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 4;
    for (unsigned i = 0; i != 8; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i   ), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1 ), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2 ), i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3 ), i*2+j+3) ||
          !isConstantOrUndef(N->getMaskElt(i+8 ), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9 ), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1, 2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
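/// For example, a big-endian two-input byte merge-low (vmrglb: UnitSize 1,
/// ShuffleKind 0) interleaves the low-order (high-index) halves of the two
/// inputs, so the expected mask is
/// <8,24,9,25,10,26,11,27,12,28,13,29,14,30,15,31>.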
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  }
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1, 2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  }
}

/**
 * Common function used to match vmrgew and vmrgow shuffles.
 *
 * The indexOffset determines whether to look for even or odd words in
 * the shuffle mask. This is based on the endianness of the target
 * machine.
 *   - Little Endian:
 *     - Use offset of 0 to check for odd elements
 *     - Use offset of 4 to check for even elements
 *   - Big Endian:
 *     - Use offset of 0 to check for even elements
 *     - Use offset of 4 to check for odd elements
 * A detailed description of the vector element ordering for little endian and
 * big endian can be found at
 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
 * ("Targeting your applications - what little endian and big endian IBM XL
 * C/C++ compiler differences mean to you").
 *
 * The mask to the shuffle vector instruction specifies the indices of the
 * elements from the two input vectors to place in the result. The elements
 * are numbered in array-access order, starting with the first vector. These
 * vectors are always of type v16i8, thus each vector will contain 16
 * byte-sized elements. More info on the shuffle vector can be found in the
 * http://llvm.org/docs/LangRef.html#shufflevector-instruction
 * Language Reference.
 *
 * The RHSStartValue indicates whether the same input vectors are used (unary)
 * or two different input vectors are used, based on the following:
 *   - If the instruction uses the same vector for both inputs, the range of
 *     the indices will be 0 to 15. In this case, the RHSStart value passed
 *     should be 0.
 *   - If the instruction has two different vectors then the range of the
 *     indices will be 0 to 31.
 *     In this case, the RHSStart value passed should be 16 (indices 0-15
 *     specify elements in the first vector while indices 16 to 31 specify
 *     elements in the second vector).
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] IndexOffset Specifies whether to look for even or odd elements
 * \param[in] RHSStartValue Specifies the starting index for the righthand
 *   input vector to the shuffle_vector instruction
 * \return true iff this shuffle vector represents an even or odd word merge
 */
static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
                     unsigned RHSStartValue) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;

  for (unsigned i = 0; i < 2; ++i)
    for (unsigned j = 0; j < 4; ++j)
      if (!isConstantOrUndef(N->getMaskElt(i*4+j),
                             i*RHSStartValue+j+IndexOffset) ||
          !isConstantOrUndef(N->getMaskElt(i*4+j+8),
                             i*RHSStartValue+j+IndexOffset+8))
        return false;
  return true;
}

/**
 * Determine if the specified shuffle mask is suitable for the vmrgew or
 * vmrgow instructions.
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
 * \param[in] ShuffleKind Identify the type of merge:
 *   - 0 = big-endian merge with two different inputs;
 *   - 1 = either-endian merge with two identical inputs;
 *   - 2 = little-endian merge with two different inputs (inputs are swapped
 *     for little-endian merges).
 * \param[in] DAG The current SelectionDAG
 * \return true iff this shuffle mask is suitable for a vmrgew or vmrgow merge
 */
bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                              unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    unsigned indexOffset = CheckEven ? 4 : 0;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  } else {
    unsigned indexOffset = CheckEven ? 0 : 4;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 0) // Normal
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  }
}

/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
/// The ShuffleKind distinguishes between big-endian operations with two
/// different inputs (0), either-endian operations with two identical inputs
/// (1), and little-endian operations with two different inputs (2). For the
/// latter, the input operands are swapped (see PPCInstrAltivec.td).
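/// For example, on a big-endian target with two different inputs
/// (ShuffleKind 0), the mask <3,4,5,...,18> shifts the concatenated inputs
/// left by 3 bytes, so 3 is returned; with ShuffleKind 2 the same consecutive
/// pattern yields 16 minus the raw shift, because the operands are swapped on
/// little-endian targets.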
int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
                             SelectionDAG &DAG) {
  if (N->getValueType(0) != MVT::v16i8)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 16) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;

  ShiftAmt -= i;
  bool isLE = DAG.getDataLayout().isLittleEndian();

  if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
        return -1;
  } else if (ShuffleKind == 1) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
        return -1;
  } else
    return -1;

  if (isLE)
    ShiftAmt = 16 - ShiftAmt;

  return ShiftAmt;
}

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// one of the splat operations (VSPLTB/VSPLTH/VSPLTW/XXSPLTW/LXVDSX/etc.).
bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
  assert(N->getValueType(0) == MVT::v16i8 && isPowerOf2_32(EltSize) &&
         EltSize <= 8 && "Can only handle 1,2,4,8 byte element sizes");

  // The consecutive indices need to specify an element, not part of two
  // different elements. So abandon ship early if this isn't the case.
  if (N->getMaskElt(0) % EltSize != 0)
    return false;

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = N->getMaskElt(0);

  // FIXME: Handle UNDEF elements too!
  if (ElementBase >= 16)
    return false;

  // Check that the indices are consecutive, in the case of a multi-byte
  // element splatted with a v16i8 mask.
  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
      return false;

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
        return false;
  }
  return true;
}

/// Check that the mask is shuffling N-byte elements. Within each N-byte
/// element of the mask, the indices can be either increasing or decreasing,
/// as long as they are consecutive.
/// \param[in] N the shuffle vector SD Node to analyze
/// \param[in] Width the element width in bytes, can be 2/4/8/16 (HalfWord/
///   Word/DoubleWord/QuadWord).
/// \param[in] StepLen the index delta between adjacent bytes within an
///   element: 1 when the indices increase, -1 when they decrease.
/// \return true iff the mask is shuffling N-byte elements.
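/// For example, with Width == 4 and StepLen == 1 the mask
/// <4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11> is accepted (each word's bytes
/// are consecutive and word-aligned), while with StepLen == -1 a
/// byte-reversed-word mask such as <3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12>
/// is accepted because each 4-byte group decreases consecutively.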
static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
                                   int StepLen) {
  assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
         "Unexpected element width.");
  assert((StepLen == 1 || StepLen == -1) && "Unexpected step length.");

  unsigned NumOfElem = 16 / Width;
  unsigned MaskVal[16]; // Width is never greater than 16
  for (unsigned i = 0; i < NumOfElem; ++i) {
    MaskVal[0] = N->getMaskElt(i * Width);
    if ((StepLen == 1) && (MaskVal[0] % Width)) {
      return false;
    } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
      return false;
    }

    for (unsigned int j = 1; j < Width; ++j) {
      MaskVal[j] = N->getMaskElt(i * Width + j);
      if (MaskVal[j] != MaskVal[j-1] + StepLen) {
        return false;
      }
    }
  }

  return true;
}

bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                          unsigned &InsertAtByte, bool &Swap, bool IsLE) {
  if (!isNByteElemShuffleMask(N, 4, 1))
    return false;

  // Now we look at mask elements 0,4,8,12.
  unsigned M0 = N->getMaskElt(0) / 4;
  unsigned M1 = N->getMaskElt(4) / 4;
  unsigned M2 = N->getMaskElt(8) / 4;
  unsigned M3 = N->getMaskElt(12) / 4;
  unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
  unsigned BigEndianShifts[] = { 3, 0, 1, 2 };

  // Below, let H and L be arbitrary elements of the shuffle mask
  // where H is in the range [4,7] and L is in the range [0,3].
  // H, 1, 2, 3 or L, 5, 6, 7
  if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
      (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
    InsertAtByte = IsLE ? 12 : 0;
    Swap = M0 < 4;
    return true;
  }
  // 0, H, 2, 3 or 4, L, 6, 7
  if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
      (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
    InsertAtByte = IsLE ? 8 : 4;
    Swap = M1 < 4;
    return true;
  }
  // 0, 1, H, 3 or 4, 5, L, 7
  if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
      (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
    InsertAtByte = IsLE ? 4 : 8;
    Swap = M2 < 4;
    return true;
  }
  // 0, 1, 2, H or 4, 5, 6, L
  if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
      (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
    ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
    InsertAtByte = IsLE ? 0 : 12;
    Swap = M3 < 4;
    return true;
  }

  // If both vector operands for the shuffle are the same vector, the mask
  // will contain only elements from the first one and the second one will be
  // undef.
  if (N->getOperand(1).isUndef()) {
    ShiftElts = 0;
    Swap = true;
    unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
    if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 12 : 0;
      return true;
    }
    if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 8 : 4;
      return true;
    }
    if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
      InsertAtByte = IsLE ? 4 : 8;
      return true;
    }
    if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
      InsertAtByte = IsLE ? 0 : 12;
      return true;
    }
  }

  return false;
}

bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                               bool &Swap, bool IsLE) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
  // Ensure each byte index of the word is consecutive.
  if (!isNByteElemShuffleMask(N, 4, 1))
    return false;

  // Now we look at mask elements 0,4,8,12, which are the beginning of words.
  unsigned M0 = N->getMaskElt(0) / 4;
  unsigned M1 = N->getMaskElt(4) / 4;
  unsigned M2 = N->getMaskElt(8) / 4;
  unsigned M3 = N->getMaskElt(12) / 4;

  // If both vector operands for the shuffle are the same vector, the mask
  // will contain only elements from the first one and the second one will be
  // undef.
  if (N->getOperand(1).isUndef()) {
    assert(M0 < 4 && "Indexing into an undef vector?");
    if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
      return false;

    ShiftElts = IsLE ? (4 - M0) % 4 : M0;
    Swap = false;
    return true;
  }

  // Ensure each word index of the ShuffleVector Mask is consecutive.
  if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)
    return false;

  if (IsLE) {
    if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
      // Input vectors don't need to be swapped if the leading element
      // of the result is one of the 3 left elements of the second vector
      // (or if there is no shift to be done at all).
      Swap = false;
      ShiftElts = (8 - M0) % 8;
    } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
      // Input vectors need to be swapped if the leading element
      // of the result is one of the 3 left elements of the first vector
      // (or if we're shifting by 4 - thereby simply swapping the vectors).
      Swap = true;
      ShiftElts = (4 - M0) % 4;
    }

    return true;
  } else { // BE
    if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
      // Input vectors don't need to be swapped if the leading element
      // of the result is one of the 4 elements of the first vector.
      Swap = false;
      ShiftElts = M0;
    } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
      // Input vectors need to be swapped if the leading element
      // of the result is one of the 4 elements of the right vector.
      Swap = true;
      ShiftElts = M0 - 4;
    }

    return true;
  }
}

/// Return true if the mask reverses the bytes within each \p Width-byte
/// element of the (v16i8-typed) shuffle vector \p N.
static bool isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  if (!isNByteElemShuffleMask(N, Width, -1))
    return false;

  for (int i = 0; i < 16; i += Width)
    if (N->getMaskElt(i) != i + Width - 1)
      return false;

  return true;
}
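// For example, isXXBRWShuffleMask accepts exactly the word byte-reverse mask
// <3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12>: each 4-byte element is taken
// in decreasing byte order, matching the xxbrw byte-reverse operation.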
bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 2);
}

bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 4);
}

bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 8);
}

bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 16);
}

/// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap
/// if the inputs to the instruction should be swapped and set \p DM to the
/// value for the immediate.
/// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI
/// AND element 0 of the result comes from the first input (LE) or second input
/// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered.
/// \return true iff the given mask of shuffle node \p N is a XXPERMDI shuffle
/// mask.
bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
                                bool &Swap, bool IsLE) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  // Ensure each byte index of the double word is consecutive.
  if (!isNByteElemShuffleMask(N, 8, 1))
    return false;

  unsigned M0 = N->getMaskElt(0) / 8;
  unsigned M1 = N->getMaskElt(8) / 8;
  assert(((M0 | M1) < 4) && "A mask element out of bounds?");

  // If both vector operands for the shuffle are the same vector, the mask
  // will contain only elements from the first one and the second one will be
  // undef.
  if (N->getOperand(1).isUndef()) {
    if ((M0 | M1) < 2) {
      DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
      Swap = false;
      return true;
    } else
      return false;
  }

  if (IsLE) {
    if (M0 > 1 && M1 < 2) {
      Swap = false;
    } else if (M0 < 2 && M1 > 1) {
      M0 = (M0 + 2) % 4;
      M1 = (M1 + 2) % 4;
      Swap = true;
    } else
      return false;

    // Note: if control flow comes here that means Swap is already set above
    DM = (((~M1) & 1) << 1) + ((~M0) & 1);
    return true;
  } else { // BE
    if (M0 < 2 && M1 > 1) {
      Swap = false;
    } else if (M0 > 1 && M1 < 2) {
      M0 = (M0 + 2) % 4;
      M1 = (M1 + 2) % 4;
      Swap = true;
    } else
      return false;

    // Note: if control flow comes here that means Swap is already set above
    DM = (M0 << 1) + (M1 & 1);
    return true;
  }
}

/// getSplatIdxForPPCMnemonics - Return the splat index as a value that is
/// appropriate for PPC mnemonics (which have a big-endian bias; namely,
/// elements are counted from the left of the vector register).
unsigned PPC::getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
                                         SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  assert(isSplatShuffleMask(SVOp, EltSize));
  // E.g. for a v4i32 splat (EltSize == 4) on little-endian, mask element 0
  // equal to 0 names the word that the PPC mnemonics number 3.
  if (DAG.getDataLayout().isLittleEndian())
    return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
  else
    return SVOp->getMaskElt(0) / EltSize;
}

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted. The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDValue OpVal(nullptr, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize; // Number of BV entries per spltval.
    SDValue UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across chunks.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).isUndef()) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();

      if (!UniquedVals[i&(Multiple-1)].getNode())
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDValue();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk. See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1. If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (!UniquedVals[i].getNode()) continue;  // Must have been undefs.

      LeadingZero &= isNullConstant(UniquedVals[i]);
      LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(0, SDLoc(N), MVT::i32);  // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
      if (Val < 16)  // 0,0,0,4 -> vspltisw(4)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }
    if (LeadingOnes) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
      if (Val >= -16)  // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }

    return SDValue();
  }

  // Check to see if this buildvec has a single non-undef value in its
  // elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).isUndef()) continue;
    if (!OpVal.getNode())
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDValue();
  }

  if (!OpVal.getNode()) return SDValue();  // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = EltSize;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getZExtValue();
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat. The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDValue();

  // If the element value is larger than the splat value, check if it consists
  // of a repeated bit pattern of size ByteSize.
  if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
    return SDValue();

  // Properly sign extend the value.
  int MaskVal = SignExtend32(Value, ByteSize * 8);

  // If this is zero, don't match; zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDValue();

  // Finally, if this value fits in a 5-bit sext field, return it.
  if (SignExtend32<5>(MaskVal) == MaskVal)
    return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
  return SDValue();
}

/// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift
/// amount, otherwise return -1.
int PPC::isQVALIGNIShuffleMask(SDNode *N) {
  EVT VT = N->getValueType(0);
  if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 4) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  // Check the rest of the elements to see if they are consecutive.
  for (++i; i != 4; ++i)
    if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
      return -1;

  return ShiftAmt;
}

//===----------------------------------------------------------------------===//
//  Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value. If so, this returns true and the
/// immediate.
bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
  if (!isa<ConstantSDNode>(N))
    return false;

  Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}
bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
  return isIntS16Immediate(Op.getNode(), Imm);
}

/// SelectAddressEVXRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base,
                                               SDValue &Index,
                                               SelectionDAG &DAG) const {
  for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
       UI != E; ++UI) {
    if (MemSDNode *Memop = dyn_cast<MemSDNode>(*UI)) {
      if (Memop->getMemoryVT() == MVT::f64) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }
  return false;
}

/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation. Returns false if it
/// can be more efficiently represented as [r+imm]. If \p EncodingAlignment is
/// non-zero and N can be represented by a base register plus a signed 16-bit
/// displacement, make a more precise judgment by checking (displacement % \p
/// EncodingAlignment).
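/// For example, (add %r, 100) is rejected here when 100 satisfies the
/// encoding alignment, since it is better encoded as [r+imm], while
/// (add %ra, %rb) is accepted with Base = %ra and Index = %rb. With
/// \p EncodingAlignment == 4, an offset such as 6 is not a multiple of 4,
/// so that add falls through and is selected as [r+r] instead.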
bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
                                            SDValue &Index, SelectionDAG &DAG,
                                            unsigned EncodingAlignment) const {
  int16_t imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    // SPE f64 loads/stores cannot handle a 16-bit offset (they only support
    // 8-bit offsets), so check for the EVX [r+r] form first.
    if (hasSPE() && SelectAddressEVXRegReg(N, Base, Index, DAG))
      return true;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!EncodingAlignment || !(imm % EncodingAlignment)))
      return false; // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false; // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!EncodingAlignment || !(imm % EncodingAlignment)))
      return false; // r+i; let [r+imm] selection fold it if it can.

    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are
    // provably disjoint.
    KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));

    if (LHSKnown.Zero.getBoolValue()) {
      KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // carry.
      if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}

// If we happen to be doing an i64 load or store into a stack slot that has
// less than a 4-byte alignment, then the frame-index elimination may need to
// use an indexed load or store instruction (because the offset may not be a
// multiple of 4). The extra register needed to hold the offset comes from the
// register scavenger, and it is possible that the scavenger will need to use
// an emergency spill slot. As a result, we need to make sure that a spill slot
// is allocated when doing an i64 load/store into a less-than-4-byte-aligned
// stack slot.
static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
  // FIXME: This does not handle the LWA case.
  if (VT != MVT::i64)
    return;

  // NOTE: We'll exclude negative FIs here, which come from argument
  // lowering, because there are no known test cases triggering this problem
  // using packed structures (or similar). We can remove this exclusion if
  // we find such a test case. The reason why this is so test-case driven is
  // because this entire 'fixup' is only to prevent crashes (from the
  // register scavenger) on not-really-valid inputs. For example, if we have:
  //   %a = alloca i1
  //   %b = bitcast i1* %a to i64*
  //   store i64 0, i64* %b
  // then the store should really be marked as 'align 1', but is not. If it
  // were marked as 'align 1' then the indexed form would have been
  // instruction-selected initially, and the problem this 'fixup' is preventing
  // won't happen regardless.
  if (FrameIdx < 0)
    return;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  unsigned Align = MFI.getObjectAlignment(FrameIdx);
  if (Align >= 4)
    return;

  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setHasNonRISpills();
}

/// Returns true if the address N can be represented by a base register plus
/// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg. If \p EncodingAlignment is non-zero, only accept
/// displacements that are multiples of that value.
bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
                                            SDValue &Base,
                                            SelectionDAG &DAG,
                                            unsigned EncodingAlignment) const {
  // FIXME dl should come from parent load or store, not from address
  SDLoc dl(N);
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG, EncodingAlignment))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    int16_t imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!EncodingAlignment || (imm % EncodingAlignment) == 0)) {
      Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
        fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true; // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0); // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true; // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    int16_t imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!EncodingAlignment || (imm % EncodingAlignment) == 0)) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));

      if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // carry.
        if (FrameIndexSDNode *FI =
              dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
          Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
          fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
        } else {
          Base = N.getOperand(0);
        }
        Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.

    // If this address fits entirely in a 16-bit sext immediate field, codegen
    // this as "d, 0".
    int16_t Imm;
    if (isIntS16Immediate(CN, Imm) &&
        (!EncodingAlignment || (Imm % EncodingAlignment) == 0)) {
      Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
      Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                             CN->getValueType(0));
      return true;
    }

    // Handle 32-bit sext immediates with LIS + addr mode.
    if ((CN->getValueType(0) == MVT::i32 ||
         (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
        (!EncodingAlignment || (CN->getZExtValue() % EncodingAlignment) == 0)) {
      int Addr = (int)CN->getZExtValue();

      // Otherwise, break this down into an LIS + disp. For example,
      // 0x12348000 becomes lis 0x1235 plus a displacement of -32768, since
      // 0x12350000 + (-32768) == 0x12348000.
      Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);

      Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
                                   MVT::i32);
      unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
      Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
    fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
  } else
    Base = N;
  return true; // [r+0]
}

/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
                                                SDValue &Index,
                                                SelectionDAG &DAG) const {
  // Check to see if we can easily represent this as an [r+r] address. This
  // will fail if it thinks that the address is more profitably represented as
  // reg+imm, e.g. where imm = 0.
  if (SelectAddressRegReg(N, Base, Index, DAG))
    return true;

  // If the address is the result of an add, we will utilize the fact that the
  // address calculation includes an implicit add. However, we can reduce
  // register pressure if we do not materialize a constant just for use as the
  // index register. Therefore we only fold away the add if it is not an add
  // of a value and a 16-bit signed constant where both operands have a single
  // use.
  int16_t imm = 0;
  if (N.getOpcode() == ISD::ADD &&
      (!isIntS16Immediate(N.getOperand(1), imm) ||
       !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

  // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                         N.getValueType());
  Index = N;
  return true;
}

/// Returns true if we should use a direct load into vector instruction
/// (such as lxsd or lfd), instead of a load into gpr + direct move sequence.
static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget& ST) {

  // If there are any uses other than scalar to vector, then we should
  // keep it as a scalar load -> direct move pattern to prevent multiple
  // loads.
  LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
  if (!LD)
    return false;

  EVT MemVT = LD->getMemoryVT();
  if (!MemVT.isSimple())
    return false;
  switch (MemVT.getSimpleVT().SimpleTy) {
  case MVT::i64:
    break;
  case MVT::i32:
    if (!ST.hasP8Vector())
      return false;
    break;
  case MVT::i16:
  case MVT::i8:
    if (!ST.hasP9Vector())
      return false;
    break;
  default:
    return false;
  }

  SDValue LoadedVal(N, 0);
  if (!LoadedVal.hasOneUse())
    return false;

  for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end();
       UI != UE; ++UI)
    if (UI.getUse().get().getResNo() == 0 &&
        UI->getOpcode() != ISD::SCALAR_TO_VECTOR)
      return false;

  return true;
}

/// getPreIndexedAddressParts - Returns true, and sets the base pointer,
/// offset pointer and addressing mode by reference, if this node's address
/// can be legally represented as a pre-indexed load/store address.
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  if (DisablePPCPreinc) return false;

  bool isLoad = true;
  SDValue Ptr;
  EVT VT;
  unsigned Alignment;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    Alignment = LD->getAlignment();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
    Alignment = ST->getAlignment();
    isLoad = false;
  } else
    return false;

  // Do not generate pre-inc forms for specific loads that feed
  // scalar_to_vector instructions, because we can fold these into a more
  // efficient instruction instead (such as LXSD).
  if (isLoad && usePartialVectorLoads(N, Subtarget)) {
    return false;
  }

  // PowerPC doesn't have preinc load/store instructions for vectors (except
  // for QPX, which does have preinc r+r forms).
  if (VT.isVector()) {
    if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) {
      return false;
    } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) {
      AM = ISD::PRE_INC;
      return true;
    }
  }

  if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
    // Common code will reject creating a pre-inc form if the base pointer
    // is a frame index, or if N is a store and the base pointer is either
    // the same as or a predecessor of the value being stored. Check for
    // those situations here, and try with swapped Base/Offset instead.
    bool Swap = false;

    if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
      Swap = true;
    else if (!isLoad) {
      SDValue Val = cast<StoreSDNode>(N)->getValue();
      if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
        Swap = true;
    }

    if (Swap)
      std::swap(Base, Offset);

    AM = ISD::PRE_INC;
    return true;
  }

  // LDU/STU can only handle immediates that are a multiple of 4.
  if (VT != MVT::i64) {
    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 0))
      return false;
  } else {
    // LDU/STU need an address with at least 4-byte alignment.
    if (Alignment < 4)
      return false;

    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 4))
      return false;
  }

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of
    // sext i32 to i64 when addr mode is r+i.
    if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
        LD->getExtensionType() == ISD::SEXTLOAD &&
        isa<ConstantSDNode>(Offset))
      return false;
  }

  AM = ISD::PRE_INC;
  return true;
}

//===----------------------------------------------------------------------===//
//  LowerOperation implementation
//===----------------------------------------------------------------------===//

/// Compute the HiOpFlags and LoOpFlags target MO flags used when referencing
/// a label, adding the PIC flag when generating position-independent code.
static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
                               unsigned &HiOpFlags, unsigned &LoOpFlags,
                               const GlobalValue *GV = nullptr) {
  HiOpFlags = PPCII::MO_HA;
  LoOpFlags = PPCII::MO_LO;

  // Don't use the pic base if not in PIC relocation model.
  if (IsPIC) {
    HiOpFlags |= PPCII::MO_PIC_FLAG;
    LoOpFlags |= PPCII::MO_PIC_FLAG;
  }
}

static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
                             SelectionDAG &DAG) {
  SDLoc DL(HiPart);
  EVT PtrVT = HiPart.getValueType();
  SDValue Zero = DAG.getConstant(0, DL, PtrVT);

  SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
  SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);

  // With PIC, the first instruction is actually "GR+hi(&G)".
  if (isPIC)
    Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);

  // Generate non-pic code that has direct accesses to the constant pool.
  // The address of the global is just (hi(&g)+lo(&g)).
  return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
}

static void setUsesTOCBasePtr(MachineFunction &MF) {
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setUsesTOCBasePtr();
}

static void setUsesTOCBasePtr(SelectionDAG &DAG) {
  setUsesTOCBasePtr(DAG.getMachineFunction());
}

SDValue PPCTargetLowering::getTOCEntry(SelectionDAG &DAG, const SDLoc &dl,
                                       SDValue GA) const {
  const bool Is64Bit = Subtarget.isPPC64();
  EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
  SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT)
                        : Subtarget.isAIXABI()
                              ? DAG.getRegister(PPC::R2, VT)
                              : DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);
  SDValue Ops[] = { GA, Reg };
  return DAG.getMemIntrinsicNode(
      PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
      MachinePointerInfo::getGOT(DAG.getMachineFunction()), 0,
      MachineMemOperand::MOLoad);
}

SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  const Constant *C = CP->getConstVal();

  // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
    setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0);
    return getTOCEntry(DAG, SDLoc(CP), GA);
  }

  unsigned MOHiFlag, MOLoFlag;
  bool IsPIC = isPositionIndependent();
  getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);

  if (IsPIC && Subtarget.isSVR4ABI()) {
    SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(),
                                           PPCII::MO_PIC_FLAG);
    return getTOCEntry(DAG, SDLoc(CP), GA);
  }

  SDValue CPIHi =
      DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag);
  SDValue CPILo =
      DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag);
  return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG);
}

// For 64-bit PowerPC, prefer the more compact relative encodings.
// This trades 32 bits per jump table entry for one or two instructions
// at the jump site.
unsigned PPCTargetLowering::getJumpTableEncoding() const {
  if (isJumpTableRelative())
    return MachineJumpTableInfo::EK_LabelDifference32;

  return TargetLowering::getJumpTableEncoding();
}

bool PPCTargetLowering::isJumpTableRelative() const {
  if (UseAbsoluteJumpTables)
    return false;
  if (Subtarget.isPPC64() || Subtarget.isAIXABI())
    return true;
  return TargetLowering::isJumpTableRelative();
}

SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                    SelectionDAG &DAG) const {
  if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
    return TargetLowering::getPICJumpTableRelocBase(Table, DAG);

  switch (getTargetMachine().getCodeModel()) {
  case CodeModel::Small:
  case CodeModel::Medium:
    return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
  default:
    return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(),
                       getPointerTy(DAG.getDataLayout()));
  }
}

const MCExpr *
PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                                unsigned JTI,
                                                MCContext &Ctx) const {
  if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
    return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);

  switch (getTargetMachine().getCodeModel()) {
  case CodeModel::Small:
  case CodeModel::Medium:
    return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
  default:
    return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
  }
}

SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);

  // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
2807 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { 2808 setUsesTOCBasePtr(DAG); 2809 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 2810 return getTOCEntry(DAG, SDLoc(JT), GA); 2811 } 2812 2813 unsigned MOHiFlag, MOLoFlag; 2814 bool IsPIC = isPositionIndependent(); 2815 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2816 2817 if (IsPIC && Subtarget.isSVR4ABI()) { 2818 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, 2819 PPCII::MO_PIC_FLAG); 2820 return getTOCEntry(DAG, SDLoc(GA), GA); 2821 } 2822 2823 SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag); 2824 SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag); 2825 return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG); 2826 } 2827 2828 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op, 2829 SelectionDAG &DAG) const { 2830 EVT PtrVT = Op.getValueType(); 2831 BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op); 2832 const BlockAddress *BA = BASDN->getBlockAddress(); 2833 2834 // 64-bit SVR4 ABI and AIX ABI code are always position-independent. 2835 // The actual BlockAddress is stored in the TOC. 2836 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { 2837 setUsesTOCBasePtr(DAG); 2838 SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()); 2839 return getTOCEntry(DAG, SDLoc(BASDN), GA); 2840 } 2841 2842 // 32-bit position-independent ELF stores the BlockAddress in the .got. 2843 if (Subtarget.is32BitELFABI() && isPositionIndependent()) 2844 return getTOCEntry( 2845 DAG, SDLoc(BASDN), 2846 DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset())); 2847 2848 unsigned MOHiFlag, MOLoFlag; 2849 bool IsPIC = isPositionIndependent(); 2850 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2851 SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag); 2852 SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag); 2853 return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG); 2854 } 2855 2856 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, 2857 SelectionDAG &DAG) const { 2858 // FIXME: TLS addresses currently use medium model code sequences, 2859 // which is the most useful form. Eventually support for small and 2860 // large models could be added if users need it, at the cost of 2861 // additional complexity. 2862 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2863 if (DAG.getTarget().useEmulatedTLS()) 2864 return LowerToTLSEmulatedModel(GA, DAG); 2865 2866 SDLoc dl(GA); 2867 const GlobalValue *GV = GA->getGlobal(); 2868 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2869 bool is64bit = Subtarget.isPPC64(); 2870 const Module *M = DAG.getMachineFunction().getFunction().getParent(); 2871 PICLevel::Level picLevel = M->getPICLevel(); 2872 2873 const TargetMachine &TM = getTargetMachine(); 2874 TLSModel::Model Model = TM.getTLSModel(GV); 2875 2876 if (Model == TLSModel::LocalExec) { 2877 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2878 PPCII::MO_TPREL_HA); 2879 SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2880 PPCII::MO_TPREL_LO); 2881 SDValue TLSReg = is64bit ? 
DAG.getRegister(PPC::X13, MVT::i64) 2882 : DAG.getRegister(PPC::R2, MVT::i32); 2883 2884 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg); 2885 return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi); 2886 } 2887 2888 if (Model == TLSModel::InitialExec) { 2889 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2890 SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2891 PPCII::MO_TLS); 2892 SDValue GOTPtr; 2893 if (is64bit) { 2894 setUsesTOCBasePtr(DAG); 2895 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2896 GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, 2897 PtrVT, GOTReg, TGA); 2898 } else { 2899 if (!TM.isPositionIndependent()) 2900 GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT); 2901 else if (picLevel == PICLevel::SmallPIC) 2902 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2903 else 2904 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2905 } 2906 SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, 2907 PtrVT, TGA, GOTPtr); 2908 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS); 2909 } 2910 2911 if (Model == TLSModel::GeneralDynamic) { 2912 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2913 SDValue GOTPtr; 2914 if (is64bit) { 2915 setUsesTOCBasePtr(DAG); 2916 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2917 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT, 2918 GOTReg, TGA); 2919 } else { 2920 if (picLevel == PICLevel::SmallPIC) 2921 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2922 else 2923 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2924 } 2925 return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT, 2926 GOTPtr, TGA, TGA); 2927 } 2928 2929 if (Model == TLSModel::LocalDynamic) { 2930 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2931 SDValue GOTPtr; 2932 if (is64bit) { 2933 setUsesTOCBasePtr(DAG); 2934 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2935 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT, 2936 GOTReg, TGA); 2937 } else { 2938 if (picLevel == PICLevel::SmallPIC) 2939 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2940 else 2941 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2942 } 2943 SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl, 2944 PtrVT, GOTPtr, TGA, TGA); 2945 SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl, 2946 PtrVT, TLSAddr, TGA); 2947 return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA); 2948 } 2949 2950 llvm_unreachable("Unknown TLS model!"); 2951 } 2952 2953 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op, 2954 SelectionDAG &DAG) const { 2955 EVT PtrVT = Op.getValueType(); 2956 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); 2957 SDLoc DL(GSDN); 2958 const GlobalValue *GV = GSDN->getGlobal(); 2959 2960 // 64-bit SVR4 ABI & AIX ABI code is always position-independent. 2961 // The actual address of the GlobalValue is stored in the TOC. 
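  // A minimal sketch, assuming the medium code model: the TOC path below
  // typically becomes
  //   addis 3, 2, g@toc@ha
  //   ld    3, g@toc@l(3)
  // while the LowerLabelRef fallback at the end of this function pairs the
  // PPCISD::Hi/Lo nodes into a hi16/lo16 (lis/addi-style) sequence.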
2962 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { 2963 setUsesTOCBasePtr(DAG); 2964 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset()); 2965 return getTOCEntry(DAG, DL, GA); 2966 } 2967 2968 unsigned MOHiFlag, MOLoFlag; 2969 bool IsPIC = isPositionIndependent(); 2970 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV); 2971 2972 if (IsPIC && Subtarget.isSVR4ABI()) { 2973 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 2974 GSDN->getOffset(), 2975 PPCII::MO_PIC_FLAG); 2976 return getTOCEntry(DAG, DL, GA); 2977 } 2978 2979 SDValue GAHi = 2980 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag); 2981 SDValue GALo = 2982 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag); 2983 2984 return LowerLabelRef(GAHi, GALo, IsPIC, DAG); 2985 } 2986 2987 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 2988 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 2989 SDLoc dl(Op); 2990 2991 if (Op.getValueType() == MVT::v2i64) { 2992 // When the operands themselves are v2i64 values, we need to do something 2993 // special because VSX has no underlying comparison operations for these. 2994 if (Op.getOperand(0).getValueType() == MVT::v2i64) { 2995 // Equality can be handled by casting to the legal type for Altivec 2996 // comparisons, everything else needs to be expanded. 2997 if (CC == ISD::SETEQ || CC == ISD::SETNE) { 2998 return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, 2999 DAG.getSetCC(dl, MVT::v4i32, 3000 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)), 3001 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)), 3002 CC)); 3003 } 3004 3005 return SDValue(); 3006 } 3007 3008 // We handle most of these in the usual way. 3009 return Op; 3010 } 3011 3012 // If we're comparing for equality to zero, expose the fact that this is 3013 // implemented as a ctlz/srl pair on ppc, so that the dag combiner can 3014 // fold the new nodes. 3015 if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG)) 3016 return V; 3017 3018 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 3019 // Leave comparisons against 0 and -1 alone for now, since they're usually 3020 // optimized. FIXME: revisit this when we can custom lower all setcc 3021 // optimizations. 3022 if (C->isAllOnesValue() || C->isNullValue()) 3023 return SDValue(); 3024 } 3025 3026 // If we have an integer seteq/setne, turn it into a compare against zero 3027 // by xor'ing the rhs with the lhs, which is faster than setting a 3028 // condition register, reading it back out, and masking the correct bit. The 3029 // normal approach here uses sub to do this instead of xor. Using xor exposes 3030 // the result to other bit-twiddling opportunities. 
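  // Worked example: "seteq %a, %b" on i32 becomes "seteq (xor %a, %b), 0",
  // and the compare against zero can then lower to cntlzw plus a right shift
  // by 5, since ctlz yields 32 only for a zero input.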
3031   EVT LHSVT = Op.getOperand(0).getValueType();
3032   if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
3033     EVT VT = Op.getValueType();
3034     SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
3035                               Op.getOperand(1));
3036     return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
3037   }
3038   return SDValue();
3039 }
3040
3041 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
3042   SDNode *Node = Op.getNode();
3043   EVT VT = Node->getValueType(0);
3044   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3045   SDValue InChain = Node->getOperand(0);
3046   SDValue VAListPtr = Node->getOperand(1);
3047   const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
3048   SDLoc dl(Node);
3049
3050   assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
3051
3052   // Load gpr_index (a zero-extended byte).
3053   SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3054                                     VAListPtr, MachinePointerInfo(SV), MVT::i8);
3055   InChain = GprIndex.getValue(1);
3056
3057   if (VT == MVT::i64) {
3058     // Check if GprIndex is even.
3059     SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
3060                                  DAG.getConstant(1, dl, MVT::i32));
3061     SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
3062                                 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
3063     SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
3064                                           DAG.getConstant(1, dl, MVT::i32));
3065     // Align GprIndex to be even if it isn't (i64 needs an even/odd GPR pair).
3066     GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
3067                            GprIndex);
3068   }
3069
3070   // fpr_index is stored 1 byte after gpr_index.
3071   SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3072                                DAG.getConstant(1, dl, MVT::i32));
3073
3074   // Load fpr_index (a zero-extended byte).
3075   SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3076                                     FprPtr, MachinePointerInfo(SV), MVT::i8);
3077   InChain = FprIndex.getValue(1);
3078
3079   SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3080                                        DAG.getConstant(8, dl, MVT::i32));
3081
3082   SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3083                                         DAG.getConstant(4, dl, MVT::i32));
3084
3085   // Load the two area pointers.
3086   SDValue OverflowArea =
3087       DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
3088   InChain = OverflowArea.getValue(1);
3089
3090   SDValue RegSaveArea =
3091       DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
3092   InChain = RegSaveArea.getValue(1);
3093
3094   // CC is true while index < 8; overflow_area is selected once index >= 8.
3095   SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
3096                             DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);
3097
3098   // Adjustment constant: gpr_index * 4 (or fpr_index * 8).
3099   SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
3100                                     VT.isInteger() ? GprIndex : FprIndex,
3101                                     DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
3102                                                     MVT::i32));
3103
3104   // OurReg = RegSaveArea + RegConstant
3105   SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
3106                                RegConstant);
3107
3108   // Floating-point values live 32 bytes into the RegSaveArea.
3109   if (VT.isFloatingPoint())
3110     OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
3111                          DAG.getConstant(32, dl, MVT::i32));
3112
3113   // Increase {f,g}pr_index by 1 (or 2 if VT is i64).
3114   SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3115                                    VT.isInteger() ? GprIndex : FprIndex,
3116                                    DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
3117                                                    MVT::i32));
3118
3119   InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
3120                               VT.isInteger() ?
VAListPtr : FprPtr,
3121                               MachinePointerInfo(SV), MVT::i8);
3122
3123   // Determine whether we should load from reg_save_area or overflow_area.
3124   SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
3125
3126   // Advance overflow_area by 4/8 once the gpr/fpr index has reached 8.
3127   SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
3128                                           DAG.getConstant(VT.isInteger() ? 4 : 8,
3129                                                           dl, MVT::i32));
3130
3131   OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
3132                              OverflowAreaPlusN);
3133
3134   InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
3135                               MachinePointerInfo(), MVT::i32);
3136
3137   return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
3138 }
3139
3140 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
3141   assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
3142
3143   // We have to copy the entire va_list struct:
3144   // 2*sizeof(char) + 2 bytes of padding + 2*sizeof(char*) = 12 bytes.
3145   return DAG.getMemcpy(Op.getOperand(0), Op,
3146                        Op.getOperand(1), Op.getOperand(2),
3147                        DAG.getConstant(12, SDLoc(Op), MVT::i32), 8, false, true,
3148                        false, MachinePointerInfo(), MachinePointerInfo());
3149 }
3150
3151 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
3152                                                   SelectionDAG &DAG) const {
3153   if (Subtarget.isAIXABI())
3154     report_fatal_error("ADJUST_TRAMPOLINE operation is not supported on AIX.");
3155
3156   return Op.getOperand(0);
3157 }
3158
3159 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
3160                                                 SelectionDAG &DAG) const {
3161   if (Subtarget.isAIXABI())
3162     report_fatal_error("INIT_TRAMPOLINE operation is not supported on AIX.");
3163
3164   SDValue Chain = Op.getOperand(0);
3165   SDValue Trmp = Op.getOperand(1); // trampoline
3166   SDValue FPtr = Op.getOperand(2); // nested function
3167   SDValue Nest = Op.getOperand(3); // 'nest' parameter value
3168   SDLoc dl(Op);
3169
3170   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3171   bool isPPC64 = (PtrVT == MVT::i64);
3172   Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
3173
3174   TargetLowering::ArgListTy Args;
3175   TargetLowering::ArgListEntry Entry;
3176
3177   Entry.Ty = IntPtrTy;
3178   Entry.Node = Trmp; Args.push_back(Entry);
3179
3180   // TrampSize == (isPPC64 ? 48 : 40);
3181   Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
3182                                isPPC64 ? MVT::i64 : MVT::i32);
3183   Args.push_back(Entry);
3184
3185   Entry.Node = FPtr; Args.push_back(Entry);
3186   Entry.Node = Nest; Args.push_back(Entry);
3187
3188   // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg).
3189   TargetLowering::CallLoweringInfo CLI(DAG);
3190   CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
3191       CallingConv::C, Type::getVoidTy(*DAG.getContext()),
3192       DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args));
3193
3194   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
3195   return CallResult.second;
3196 }
3197
3198 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3199   MachineFunction &MF = DAG.getMachineFunction();
3200   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3201   EVT PtrVT = getPointerTy(MF.getDataLayout());
3202
3203   SDLoc dl(Op);
3204
3205   if (Subtarget.isPPC64()) {
3206     // vastart just stores the address of the VarArgsFrameIndex slot into the
3207     // memory location argument.
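    // As a sketch (register names below are placeholders, not the actual
    // allocation), this makes va_start a single store on PPC64:
    //   addi rTMP, r1, <vararg-area-offset>
    //   std  rTMP, 0(rAP)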
3208 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 3209 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 3210 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 3211 MachinePointerInfo(SV)); 3212 } 3213 3214 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct. 3215 // We suppose the given va_list is already allocated. 3216 // 3217 // typedef struct { 3218 // char gpr; /* index into the array of 8 GPRs 3219 // * stored in the register save area 3220 // * gpr=0 corresponds to r3, 3221 // * gpr=1 to r4, etc. 3222 // */ 3223 // char fpr; /* index into the array of 8 FPRs 3224 // * stored in the register save area 3225 // * fpr=0 corresponds to f1, 3226 // * fpr=1 to f2, etc. 3227 // */ 3228 // char *overflow_arg_area; 3229 // /* location on stack that holds 3230 // * the next overflow argument 3231 // */ 3232 // char *reg_save_area; 3233 // /* where r3:r10 and f1:f8 (if saved) 3234 // * are stored 3235 // */ 3236 // } va_list[1]; 3237 3238 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32); 3239 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32); 3240 SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(), 3241 PtrVT); 3242 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 3243 PtrVT); 3244 3245 uint64_t FrameOffset = PtrVT.getSizeInBits()/8; 3246 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT); 3247 3248 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; 3249 SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT); 3250 3251 uint64_t FPROffset = 1; 3252 SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT); 3253 3254 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 3255 3256 // Store first byte : number of int regs 3257 SDValue firstStore = 3258 DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1), 3259 MachinePointerInfo(SV), MVT::i8); 3260 uint64_t nextOffset = FPROffset; 3261 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1), 3262 ConstFPROffset); 3263 3264 // Store second byte : number of float regs 3265 SDValue secondStore = 3266 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, 3267 MachinePointerInfo(SV, nextOffset), MVT::i8); 3268 nextOffset += StackOffset; 3269 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset); 3270 3271 // Store second word : arguments given on stack 3272 SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, 3273 MachinePointerInfo(SV, nextOffset)); 3274 nextOffset += FrameOffset; 3275 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset); 3276 3277 // Store third word : arguments given in registers 3278 return DAG.getStore(thirdStore, dl, FR, nextPtr, 3279 MachinePointerInfo(SV, nextOffset)); 3280 } 3281 3282 /// FPR - The set of FP registers that should be allocated for arguments 3283 /// on Darwin and AIX. 3284 static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, 3285 PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10, 3286 PPC::F11, PPC::F12, PPC::F13}; 3287 3288 /// QFPR - The set of QPX registers that should be allocated for arguments. 3289 static const MCPhysReg QFPR[] = { 3290 PPC::QF1, PPC::QF2, PPC::QF3, PPC::QF4, PPC::QF5, PPC::QF6, PPC::QF7, 3291 PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13}; 3292 3293 /// CalculateStackSlotSize - Calculates the size reserved for this argument on 3294 /// the stack. 
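/// For example, with PtrByteSize == 8 an i32 argument reserves a full 8-byte
/// slot and a 12-byte byval aggregate is rounded up to 16 bytes, while a
/// member of a consecutive-register array keeps its packed size.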
3295 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, 3296 unsigned PtrByteSize) { 3297 unsigned ArgSize = ArgVT.getStoreSize(); 3298 if (Flags.isByVal()) 3299 ArgSize = Flags.getByValSize(); 3300 3301 // Round up to multiples of the pointer size, except for array members, 3302 // which are always packed. 3303 if (!Flags.isInConsecutiveRegs()) 3304 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3305 3306 return ArgSize; 3307 } 3308 3309 /// CalculateStackSlotAlignment - Calculates the alignment of this argument 3310 /// on the stack. 3311 static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT, 3312 ISD::ArgFlagsTy Flags, 3313 unsigned PtrByteSize) { 3314 unsigned Align = PtrByteSize; 3315 3316 // Altivec parameters are padded to a 16 byte boundary. 3317 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 3318 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 3319 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 3320 ArgVT == MVT::v1i128 || ArgVT == MVT::f128) 3321 Align = 16; 3322 // QPX vector types stored in double-precision are padded to a 32 byte 3323 // boundary. 3324 else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1) 3325 Align = 32; 3326 3327 // ByVal parameters are aligned as requested. 3328 if (Flags.isByVal()) { 3329 unsigned BVAlign = Flags.getByValAlign(); 3330 if (BVAlign > PtrByteSize) { 3331 if (BVAlign % PtrByteSize != 0) 3332 llvm_unreachable( 3333 "ByVal alignment is not a multiple of the pointer size"); 3334 3335 Align = BVAlign; 3336 } 3337 } 3338 3339 // Array members are always packed to their original alignment. 3340 if (Flags.isInConsecutiveRegs()) { 3341 // If the array member was split into multiple registers, the first 3342 // needs to be aligned to the size of the full type. (Except for 3343 // ppcf128, which is only aligned as its f64 components.) 3344 if (Flags.isSplit() && OrigVT != MVT::ppcf128) 3345 Align = OrigVT.getStoreSize(); 3346 else 3347 Align = ArgVT.getStoreSize(); 3348 } 3349 3350 return Align; 3351 } 3352 3353 /// CalculateStackSlotUsed - Return whether this argument will use its 3354 /// stack slot (instead of being passed in registers). ArgOffset, 3355 /// AvailableFPRs, and AvailableVRs must hold the current argument 3356 /// position, and will be updated to account for this argument. 3357 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, 3358 ISD::ArgFlagsTy Flags, 3359 unsigned PtrByteSize, 3360 unsigned LinkageSize, 3361 unsigned ParamAreaSize, 3362 unsigned &ArgOffset, 3363 unsigned &AvailableFPRs, 3364 unsigned &AvailableVRs, bool HasQPX) { 3365 bool UseMemory = false; 3366 3367 // Respect alignment of argument on the stack. 3368 unsigned Align = 3369 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 3370 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 3371 // If there's no space left in the argument save area, we must 3372 // use memory (this check also catches zero-sized arguments). 3373 if (ArgOffset >= LinkageSize + ParamAreaSize) 3374 UseMemory = true; 3375 3376 // Allocate argument on the stack. 
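  // (Worked example: a 16-byte-aligned vector arriving with ArgOffset == 40
  // was just rounded up to 48 above; adding its slot size below then moves
  // ArgOffset to 64.)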
3377 ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 3378 if (Flags.isInConsecutiveRegsLast()) 3379 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3380 // If we overran the argument save area, we must use memory 3381 // (this check catches arguments passed partially in memory) 3382 if (ArgOffset > LinkageSize + ParamAreaSize) 3383 UseMemory = true; 3384 3385 // However, if the argument is actually passed in an FPR or a VR, 3386 // we don't use memory after all. 3387 if (!Flags.isByVal()) { 3388 if (ArgVT == MVT::f32 || ArgVT == MVT::f64 || 3389 // QPX registers overlap with the scalar FP registers. 3390 (HasQPX && (ArgVT == MVT::v4f32 || 3391 ArgVT == MVT::v4f64 || 3392 ArgVT == MVT::v4i1))) 3393 if (AvailableFPRs > 0) { 3394 --AvailableFPRs; 3395 return false; 3396 } 3397 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 3398 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 3399 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 3400 ArgVT == MVT::v1i128 || ArgVT == MVT::f128) 3401 if (AvailableVRs > 0) { 3402 --AvailableVRs; 3403 return false; 3404 } 3405 } 3406 3407 return UseMemory; 3408 } 3409 3410 /// EnsureStackAlignment - Round stack frame size up from NumBytes to 3411 /// ensure minimum alignment required for target. 3412 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering, 3413 unsigned NumBytes) { 3414 unsigned TargetAlign = Lowering->getStackAlignment(); 3415 unsigned AlignMask = TargetAlign - 1; 3416 NumBytes = (NumBytes + AlignMask) & ~AlignMask; 3417 return NumBytes; 3418 } 3419 3420 SDValue PPCTargetLowering::LowerFormalArguments( 3421 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3422 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3423 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3424 if (Subtarget.isAIXABI()) 3425 return LowerFormalArguments_AIX(Chain, CallConv, isVarArg, Ins, dl, DAG, 3426 InVals); 3427 if (Subtarget.is64BitELFABI()) 3428 return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG, 3429 InVals); 3430 if (Subtarget.is32BitELFABI()) 3431 return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG, 3432 InVals); 3433 3434 return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, dl, DAG, 3435 InVals); 3436 } 3437 3438 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4( 3439 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3440 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3441 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3442 3443 // 32-bit SVR4 ABI Stack Frame Layout: 3444 // +-----------------------------------+ 3445 // +--> | Back chain | 3446 // | +-----------------------------------+ 3447 // | | Floating-point register save area | 3448 // | +-----------------------------------+ 3449 // | | General register save area | 3450 // | +-----------------------------------+ 3451 // | | CR save word | 3452 // | +-----------------------------------+ 3453 // | | VRSAVE save word | 3454 // | +-----------------------------------+ 3455 // | | Alignment padding | 3456 // | +-----------------------------------+ 3457 // | | Vector register save area | 3458 // | +-----------------------------------+ 3459 // | | Local variable space | 3460 // | +-----------------------------------+ 3461 // | | Parameter list area | 3462 // | +-----------------------------------+ 3463 // | | LR save word | 3464 // | +-----------------------------------+ 3465 // SP--> +--- | Back chain | 3466 // 
+-----------------------------------+ 3467 // 3468 // Specifications: 3469 // System V Application Binary Interface PowerPC Processor Supplement 3470 // AltiVec Technology Programming Interface Manual 3471 3472 MachineFunction &MF = DAG.getMachineFunction(); 3473 MachineFrameInfo &MFI = MF.getFrameInfo(); 3474 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3475 3476 EVT PtrVT = getPointerTy(MF.getDataLayout()); 3477 // Potential tail calls could cause overwriting of argument stack slots. 3478 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 3479 (CallConv == CallingConv::Fast)); 3480 unsigned PtrByteSize = 4; 3481 3482 // Assign locations to all of the incoming arguments. 3483 SmallVector<CCValAssign, 16> ArgLocs; 3484 PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 3485 *DAG.getContext()); 3486 3487 // Reserve space for the linkage area on the stack. 3488 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3489 CCInfo.AllocateStack(LinkageSize, PtrByteSize); 3490 if (useSoftFloat()) 3491 CCInfo.PreAnalyzeFormalArguments(Ins); 3492 3493 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4); 3494 CCInfo.clearWasPPCF128(); 3495 3496 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 3497 CCValAssign &VA = ArgLocs[i]; 3498 3499 // Arguments stored in registers. 3500 if (VA.isRegLoc()) { 3501 const TargetRegisterClass *RC; 3502 EVT ValVT = VA.getValVT(); 3503 3504 switch (ValVT.getSimpleVT().SimpleTy) { 3505 default: 3506 llvm_unreachable("ValVT not supported by formal arguments Lowering"); 3507 case MVT::i1: 3508 case MVT::i32: 3509 RC = &PPC::GPRCRegClass; 3510 break; 3511 case MVT::f32: 3512 if (Subtarget.hasP8Vector()) 3513 RC = &PPC::VSSRCRegClass; 3514 else if (Subtarget.hasSPE()) 3515 RC = &PPC::GPRCRegClass; 3516 else 3517 RC = &PPC::F4RCRegClass; 3518 break; 3519 case MVT::f64: 3520 if (Subtarget.hasVSX()) 3521 RC = &PPC::VSFRCRegClass; 3522 else if (Subtarget.hasSPE()) 3523 // SPE passes doubles in GPR pairs. 3524 RC = &PPC::GPRCRegClass; 3525 else 3526 RC = &PPC::F8RCRegClass; 3527 break; 3528 case MVT::v16i8: 3529 case MVT::v8i16: 3530 case MVT::v4i32: 3531 RC = &PPC::VRRCRegClass; 3532 break; 3533 case MVT::v4f32: 3534 RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass; 3535 break; 3536 case MVT::v2f64: 3537 case MVT::v2i64: 3538 RC = &PPC::VRRCRegClass; 3539 break; 3540 case MVT::v4f64: 3541 RC = &PPC::QFRCRegClass; 3542 break; 3543 case MVT::v4i1: 3544 RC = &PPC::QBRCRegClass; 3545 break; 3546 } 3547 3548 SDValue ArgValue; 3549 // Transform the arguments stored in physical registers into 3550 // virtual ones. 3551 if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) { 3552 assert(i + 1 < e && "No second half of double precision argument"); 3553 unsigned RegLo = MF.addLiveIn(VA.getLocReg(), RC); 3554 unsigned RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC); 3555 SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32); 3556 SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32); 3557 if (!Subtarget.isLittleEndian()) 3558 std::swap (ArgValueLo, ArgValueHi); 3559 ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo, 3560 ArgValueHi); 3561 } else { 3562 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 3563 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, 3564 ValVT == MVT::i1 ? 
MVT::i32 : ValVT); 3565 if (ValVT == MVT::i1) 3566 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue); 3567 } 3568 3569 InVals.push_back(ArgValue); 3570 } else { 3571 // Argument stored in memory. 3572 assert(VA.isMemLoc()); 3573 3574 // Get the extended size of the argument type in stack 3575 unsigned ArgSize = VA.getLocVT().getStoreSize(); 3576 // Get the actual size of the argument type 3577 unsigned ObjSize = VA.getValVT().getStoreSize(); 3578 unsigned ArgOffset = VA.getLocMemOffset(); 3579 // Stack objects in PPC32 are right justified. 3580 ArgOffset += ArgSize - ObjSize; 3581 int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, isImmutable); 3582 3583 // Create load nodes to retrieve arguments from the stack. 3584 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3585 InVals.push_back( 3586 DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo())); 3587 } 3588 } 3589 3590 // Assign locations to all of the incoming aggregate by value arguments. 3591 // Aggregates passed by value are stored in the local variable space of the 3592 // caller's stack frame, right above the parameter list area. 3593 SmallVector<CCValAssign, 16> ByValArgLocs; 3594 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), 3595 ByValArgLocs, *DAG.getContext()); 3596 3597 // Reserve stack space for the allocations in CCInfo. 3598 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 3599 3600 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal); 3601 3602 // Area that is at least reserved in the caller of this function. 3603 unsigned MinReservedArea = CCByValInfo.getNextStackOffset(); 3604 MinReservedArea = std::max(MinReservedArea, LinkageSize); 3605 3606 // Set the size that is at least reserved in caller of this function. Tail 3607 // call optimized function's reserved stack space needs to be aligned so that 3608 // taking the difference between two stack areas will result in an aligned 3609 // stack. 3610 MinReservedArea = 3611 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 3612 FuncInfo->setMinReservedArea(MinReservedArea); 3613 3614 SmallVector<SDValue, 8> MemOps; 3615 3616 // If the function takes variable number of arguments, make a frame index for 3617 // the start of the first vararg value... for expansion of llvm.va_start. 3618 if (isVarArg) { 3619 static const MCPhysReg GPArgRegs[] = { 3620 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 3621 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 3622 }; 3623 const unsigned NumGPArgRegs = array_lengthof(GPArgRegs); 3624 3625 static const MCPhysReg FPArgRegs[] = { 3626 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 3627 PPC::F8 3628 }; 3629 unsigned NumFPArgRegs = array_lengthof(FPArgRegs); 3630 3631 if (useSoftFloat() || hasSPE()) 3632 NumFPArgRegs = 0; 3633 3634 FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs)); 3635 FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs)); 3636 3637 // Make room for NumGPArgRegs and NumFPArgRegs. 
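    // (E.g. with hard-float enabled, Depth below is 8 * 4 + 8 * 8 = 96 bytes
    // on 32-bit SVR4.)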
3638 int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 + 3639 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8; 3640 3641 FuncInfo->setVarArgsStackOffset( 3642 MFI.CreateFixedObject(PtrVT.getSizeInBits()/8, 3643 CCInfo.getNextStackOffset(), true)); 3644 3645 FuncInfo->setVarArgsFrameIndex(MFI.CreateStackObject(Depth, 8, false)); 3646 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 3647 3648 // The fixed integer arguments of a variadic function are stored to the 3649 // VarArgsFrameIndex on the stack so that they may be loaded by 3650 // dereferencing the result of va_next. 3651 for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) { 3652 // Get an existing live-in vreg, or add a new one. 3653 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]); 3654 if (!VReg) 3655 VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass); 3656 3657 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3658 SDValue Store = 3659 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 3660 MemOps.push_back(Store); 3661 // Increment the address by four for the next argument to store 3662 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT); 3663 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3664 } 3665 3666 // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6 3667 // is set. 3668 // The double arguments are stored to the VarArgsFrameIndex 3669 // on the stack. 3670 for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) { 3671 // Get an existing live-in vreg, or add a new one. 3672 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]); 3673 if (!VReg) 3674 VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass); 3675 3676 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64); 3677 SDValue Store = 3678 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 3679 MemOps.push_back(Store); 3680 // Increment the address by eight for the next argument to store 3681 SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl, 3682 PtrVT); 3683 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3684 } 3685 } 3686 3687 if (!MemOps.empty()) 3688 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 3689 3690 return Chain; 3691 } 3692 3693 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3694 // value to MVT::i64 and then truncate to the correct register size. 3695 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, 3696 EVT ObjectVT, SelectionDAG &DAG, 3697 SDValue ArgVal, 3698 const SDLoc &dl) const { 3699 if (Flags.isSExt()) 3700 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal, 3701 DAG.getValueType(ObjectVT)); 3702 else if (Flags.isZExt()) 3703 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal, 3704 DAG.getValueType(ObjectVT)); 3705 3706 return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal); 3707 } 3708 3709 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4( 3710 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3711 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3712 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3713 // TODO: add description of PPC stack frame format, or at least some docs. 
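  // Rough sketch in the meantime (PPCFrameLowering remains the source of
  // truth): the 64-bit ELF caller frame starts with the linkage area (back
  // chain, CR save, LR save and reserved doublewords; 48 bytes under ELFv1,
  // 32 under ELFv2), followed by a parameter save area of at least eight
  // doublewords, which ELFv2 makes optional.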
3714 // 3715 bool isELFv2ABI = Subtarget.isELFv2ABI(); 3716 bool isLittleEndian = Subtarget.isLittleEndian(); 3717 MachineFunction &MF = DAG.getMachineFunction(); 3718 MachineFrameInfo &MFI = MF.getFrameInfo(); 3719 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3720 3721 assert(!(CallConv == CallingConv::Fast && isVarArg) && 3722 "fastcc not supported on varargs functions"); 3723 3724 EVT PtrVT = getPointerTy(MF.getDataLayout()); 3725 // Potential tail calls could cause overwriting of argument stack slots. 3726 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 3727 (CallConv == CallingConv::Fast)); 3728 unsigned PtrByteSize = 8; 3729 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3730 3731 static const MCPhysReg GPR[] = { 3732 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 3733 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 3734 }; 3735 static const MCPhysReg VR[] = { 3736 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 3737 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 3738 }; 3739 3740 const unsigned Num_GPR_Regs = array_lengthof(GPR); 3741 const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13; 3742 const unsigned Num_VR_Regs = array_lengthof(VR); 3743 const unsigned Num_QFPR_Regs = Num_FPR_Regs; 3744 3745 // Do a first pass over the arguments to determine whether the ABI 3746 // guarantees that our caller has allocated the parameter save area 3747 // on its stack frame. In the ELFv1 ABI, this is always the case; 3748 // in the ELFv2 ABI, it is true if this is a vararg function or if 3749 // any parameter is located in a stack slot. 3750 3751 bool HasParameterArea = !isELFv2ABI || isVarArg; 3752 unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize; 3753 unsigned NumBytes = LinkageSize; 3754 unsigned AvailableFPRs = Num_FPR_Regs; 3755 unsigned AvailableVRs = Num_VR_Regs; 3756 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 3757 if (Ins[i].Flags.isNest()) 3758 continue; 3759 3760 if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags, 3761 PtrByteSize, LinkageSize, ParamAreaSize, 3762 NumBytes, AvailableFPRs, AvailableVRs, 3763 Subtarget.hasQPX())) 3764 HasParameterArea = true; 3765 } 3766 3767 // Add DAG nodes to load the arguments or copy them out of registers. On 3768 // entry to a function on PPC, the arguments start after the linkage area, 3769 // although the first ones are often in registers. 3770 3771 unsigned ArgOffset = LinkageSize; 3772 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 3773 unsigned &QFPR_idx = FPR_idx; 3774 SmallVector<SDValue, 8> MemOps; 3775 Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin(); 3776 unsigned CurArgIdx = 0; 3777 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 3778 SDValue ArgVal; 3779 bool needsLoad = false; 3780 EVT ObjectVT = Ins[ArgNo].VT; 3781 EVT OrigVT = Ins[ArgNo].ArgVT; 3782 unsigned ObjSize = ObjectVT.getStoreSize(); 3783 unsigned ArgSize = ObjSize; 3784 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 3785 if (Ins[ArgNo].isOrigArg()) { 3786 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); 3787 CurArgIdx = Ins[ArgNo].getOrigArgIndex(); 3788 } 3789 // We re-align the argument offset for each argument, except when using the 3790 // fast calling convention, when we need to make sure we do that only when 3791 // we'll actually use a stack slot. 3792 unsigned CurArgOffset, Align; 3793 auto ComputeArgOffset = [&]() { 3794 /* Respect alignment of argument on the stack. 
*/ 3795 Align = CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize); 3796 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 3797 CurArgOffset = ArgOffset; 3798 }; 3799 3800 if (CallConv != CallingConv::Fast) { 3801 ComputeArgOffset(); 3802 3803 /* Compute GPR index associated with argument offset. */ 3804 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 3805 GPR_idx = std::min(GPR_idx, Num_GPR_Regs); 3806 } 3807 3808 // FIXME the codegen can be much improved in some cases. 3809 // We do not have to keep everything in memory. 3810 if (Flags.isByVal()) { 3811 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit"); 3812 3813 if (CallConv == CallingConv::Fast) 3814 ComputeArgOffset(); 3815 3816 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 3817 ObjSize = Flags.getByValSize(); 3818 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3819 // Empty aggregate parameters do not take up registers. Examples: 3820 // struct { } a; 3821 // union { } b; 3822 // int c[0]; 3823 // etc. However, we have to provide a place-holder in InVals, so 3824 // pretend we have an 8-byte item at the current address for that 3825 // purpose. 3826 if (!ObjSize) { 3827 int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true); 3828 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3829 InVals.push_back(FIN); 3830 continue; 3831 } 3832 3833 // Create a stack object covering all stack doublewords occupied 3834 // by the argument. If the argument is (fully or partially) on 3835 // the stack, or if the argument is fully in registers but the 3836 // caller has allocated the parameter save anyway, we can refer 3837 // directly to the caller's stack frame. Otherwise, create a 3838 // local copy in our own frame. 3839 int FI; 3840 if (HasParameterArea || 3841 ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize) 3842 FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true); 3843 else 3844 FI = MFI.CreateStackObject(ArgSize, Align, false); 3845 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3846 3847 // Handle aggregates smaller than 8 bytes. 3848 if (ObjSize < PtrByteSize) { 3849 // The value of the object is its address, which differs from the 3850 // address of the enclosing doubleword on big-endian systems. 3851 SDValue Arg = FIN; 3852 if (!isLittleEndian) { 3853 SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT); 3854 Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff); 3855 } 3856 InVals.push_back(Arg); 3857 3858 if (GPR_idx != Num_GPR_Regs) { 3859 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3860 FuncInfo->addLiveInAttr(VReg, Flags); 3861 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3862 SDValue Store; 3863 3864 if (ObjSize==1 || ObjSize==2 || ObjSize==4) { 3865 EVT ObjType = (ObjSize == 1 ? MVT::i8 : 3866 (ObjSize == 2 ? MVT::i16 : MVT::i32)); 3867 Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg, 3868 MachinePointerInfo(&*FuncArg), ObjType); 3869 } else { 3870 // For sizes that don't fit a truncating store (3, 5, 6, 7), 3871 // store the whole register as-is to the parameter save area 3872 // slot. 3873 Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 3874 MachinePointerInfo(&*FuncArg)); 3875 } 3876 3877 MemOps.push_back(Store); 3878 } 3879 // Whether we copied from a register or not, advance the offset 3880 // into the parameter save area by a full doubleword. 
3881 ArgOffset += PtrByteSize; 3882 continue; 3883 } 3884 3885 // The value of the object is its address, which is the address of 3886 // its first stack doubleword. 3887 InVals.push_back(FIN); 3888 3889 // Store whatever pieces of the object are in registers to memory. 3890 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 3891 if (GPR_idx == Num_GPR_Regs) 3892 break; 3893 3894 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3895 FuncInfo->addLiveInAttr(VReg, Flags); 3896 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3897 SDValue Addr = FIN; 3898 if (j) { 3899 SDValue Off = DAG.getConstant(j, dl, PtrVT); 3900 Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off); 3901 } 3902 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr, 3903 MachinePointerInfo(&*FuncArg, j)); 3904 MemOps.push_back(Store); 3905 ++GPR_idx; 3906 } 3907 ArgOffset += ArgSize; 3908 continue; 3909 } 3910 3911 switch (ObjectVT.getSimpleVT().SimpleTy) { 3912 default: llvm_unreachable("Unhandled argument type!"); 3913 case MVT::i1: 3914 case MVT::i32: 3915 case MVT::i64: 3916 if (Flags.isNest()) { 3917 // The 'nest' parameter, if any, is passed in R11. 3918 unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass); 3919 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3920 3921 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3922 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3923 3924 break; 3925 } 3926 3927 // These can be scalar arguments or elements of an integer array type 3928 // passed directly. Clang may use those instead of "byval" aggregate 3929 // types to avoid forcing arguments to memory unnecessarily. 3930 if (GPR_idx != Num_GPR_Regs) { 3931 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3932 FuncInfo->addLiveInAttr(VReg, Flags); 3933 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3934 3935 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3936 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3937 // value to MVT::i64 and then truncate to the correct register size. 3938 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3939 } else { 3940 if (CallConv == CallingConv::Fast) 3941 ComputeArgOffset(); 3942 3943 needsLoad = true; 3944 ArgSize = PtrByteSize; 3945 } 3946 if (CallConv != CallingConv::Fast || needsLoad) 3947 ArgOffset += 8; 3948 break; 3949 3950 case MVT::f32: 3951 case MVT::f64: 3952 // These can be scalar arguments or elements of a float array type 3953 // passed directly. The latter are used to implement ELFv2 homogenous 3954 // float aggregates. 3955 if (FPR_idx != Num_FPR_Regs) { 3956 unsigned VReg; 3957 3958 if (ObjectVT == MVT::f32) 3959 VReg = MF.addLiveIn(FPR[FPR_idx], 3960 Subtarget.hasP8Vector() 3961 ? &PPC::VSSRCRegClass 3962 : &PPC::F4RCRegClass); 3963 else 3964 VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX() 3965 ? &PPC::VSFRCRegClass 3966 : &PPC::F8RCRegClass); 3967 3968 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3969 ++FPR_idx; 3970 } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) { 3971 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8 3972 // once we support fp <-> gpr moves. 3973 3974 // This can only ever happen in the presence of f32 array types, 3975 // since otherwise we never run out of FPRs before running out 3976 // of GPRs. 
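        // (Illustration: a homogeneous aggregate of 16 floats uses f1-f13 for
        // its first 13 elements; the remaining elements arrive here packed
        // two per doubleword in GPRs, hence the parity-based shift below.)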
3977 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3978 FuncInfo->addLiveInAttr(VReg, Flags); 3979 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3980 3981 if (ObjectVT == MVT::f32) { 3982 if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0)) 3983 ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal, 3984 DAG.getConstant(32, dl, MVT::i32)); 3985 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal); 3986 } 3987 3988 ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal); 3989 } else { 3990 if (CallConv == CallingConv::Fast) 3991 ComputeArgOffset(); 3992 3993 needsLoad = true; 3994 } 3995 3996 // When passing an array of floats, the array occupies consecutive 3997 // space in the argument area; only round up to the next doubleword 3998 // at the end of the array. Otherwise, each float takes 8 bytes. 3999 if (CallConv != CallingConv::Fast || needsLoad) { 4000 ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize; 4001 ArgOffset += ArgSize; 4002 if (Flags.isInConsecutiveRegsLast()) 4003 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 4004 } 4005 break; 4006 case MVT::v4f32: 4007 case MVT::v4i32: 4008 case MVT::v8i16: 4009 case MVT::v16i8: 4010 case MVT::v2f64: 4011 case MVT::v2i64: 4012 case MVT::v1i128: 4013 case MVT::f128: 4014 if (!Subtarget.hasQPX()) { 4015 // These can be scalar arguments or elements of a vector array type 4016 // passed directly. The latter are used to implement ELFv2 homogenous 4017 // vector aggregates. 4018 if (VR_idx != Num_VR_Regs) { 4019 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 4020 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 4021 ++VR_idx; 4022 } else { 4023 if (CallConv == CallingConv::Fast) 4024 ComputeArgOffset(); 4025 needsLoad = true; 4026 } 4027 if (CallConv != CallingConv::Fast || needsLoad) 4028 ArgOffset += 16; 4029 break; 4030 } // not QPX 4031 4032 assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 && 4033 "Invalid QPX parameter type"); 4034 LLVM_FALLTHROUGH; 4035 4036 case MVT::v4f64: 4037 case MVT::v4i1: 4038 // QPX vectors are treated like their scalar floating-point subregisters 4039 // (except that they're larger). 4040 unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32; 4041 if (QFPR_idx != Num_QFPR_Regs) { 4042 const TargetRegisterClass *RC; 4043 switch (ObjectVT.getSimpleVT().SimpleTy) { 4044 case MVT::v4f64: RC = &PPC::QFRCRegClass; break; 4045 case MVT::v4f32: RC = &PPC::QSRCRegClass; break; 4046 default: RC = &PPC::QBRCRegClass; break; 4047 } 4048 4049 unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC); 4050 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 4051 ++QFPR_idx; 4052 } else { 4053 if (CallConv == CallingConv::Fast) 4054 ComputeArgOffset(); 4055 needsLoad = true; 4056 } 4057 if (CallConv != CallingConv::Fast || needsLoad) 4058 ArgOffset += Sz; 4059 break; 4060 } 4061 4062 // We need to load the argument to a virtual register if we determined 4063 // above that we ran out of physical registers of the appropriate type. 4064 if (needsLoad) { 4065 if (ObjSize < ArgSize && !isLittleEndian) 4066 CurArgOffset += ArgSize - ObjSize; 4067 int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable); 4068 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4069 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo()); 4070 } 4071 4072 InVals.push_back(ArgVal); 4073 } 4074 4075 // Area that is at least reserved in the caller of this function. 
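  // (Under ELFv1 this is never less than the 48-byte linkage area plus the
  // 64-byte parameter save area; under ELFv2 it can shrink to just the
  // linkage area when no parameter save area is needed.)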
4076   unsigned MinReservedArea;
4077   if (HasParameterArea)
4078     MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
4079   else
4080     MinReservedArea = LinkageSize;
4081
4082   // Set the size that is at least reserved in caller of this function. Tail
4083   // call optimized functions' reserved stack space needs to be aligned so that
4084   // taking the difference between two stack areas will result in an aligned
4085   // stack.
4086   MinReservedArea =
4087       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4088   FuncInfo->setMinReservedArea(MinReservedArea);
4089
4090   // If the function takes variable number of arguments, make a frame index for
4091   // the start of the first vararg value... for expansion of llvm.va_start.
4092   if (isVarArg) {
4093     int Depth = ArgOffset;
4094
4095     FuncInfo->setVarArgsFrameIndex(
4096         MFI.CreateFixedObject(PtrByteSize, Depth, true));
4097     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4098
4099     // If this function is vararg, store any remaining integer argument regs
4100     // to their spots on the stack so that they may be loaded by dereferencing
4101     // the result of va_next.
4102     for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4103          GPR_idx < Num_GPR_Regs; ++GPR_idx) {
4104       unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4105       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4106       SDValue Store =
4107           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4108       MemOps.push_back(Store);
4109       // Increment the address by eight for the next argument to store.
4110       SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
4111       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4112     }
4113   }
4114
4115   if (!MemOps.empty())
4116     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4117
4118   return Chain;
4119 }
4120
4121 SDValue PPCTargetLowering::LowerFormalArguments_Darwin(
4122     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
4123     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4124     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4125   // TODO: add description of PPC stack frame format, or at least some docs.
4126   //
4127   MachineFunction &MF = DAG.getMachineFunction();
4128   MachineFrameInfo &MFI = MF.getFrameInfo();
4129   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
4130
4131   EVT PtrVT = getPointerTy(MF.getDataLayout());
4132   bool isPPC64 = PtrVT == MVT::i64;
4133   // Potential tail calls could cause overwriting of argument stack slots.
4134   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
4135                        (CallConv == CallingConv::Fast));
4136   unsigned PtrByteSize = isPPC64 ? 8 : 4;
4137   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4138   unsigned ArgOffset = LinkageSize;
4139   // Area that is at least reserved in caller of this function.
4140   unsigned MinReservedArea = ArgOffset;
4141
4142   static const MCPhysReg GPR_32[] = {  // 32-bit registers.
4143     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
4144     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
4145   };
4146   static const MCPhysReg GPR_64[] = {  // 64-bit registers.
4147     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4148     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4149   };
4150   static const MCPhysReg VR[] = {
4151     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4152     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4153   };
4154
4155   const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
4156   const unsigned Num_FPR_Regs = useSoftFloat() ?
0 : 13; 4157 const unsigned Num_VR_Regs = array_lengthof( VR); 4158 4159 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 4160 4161 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32; 4162 4163 // In 32-bit non-varargs functions, the stack space for vectors is after the 4164 // stack space for non-vectors. We do not use this space unless we have 4165 // too many vectors to fit in registers, something that only occurs in 4166 // constructed examples:), but we have to walk the arglist to figure 4167 // that out...for the pathological case, compute VecArgOffset as the 4168 // start of the vector parameter area. Computing VecArgOffset is the 4169 // entire point of the following loop. 4170 unsigned VecArgOffset = ArgOffset; 4171 if (!isVarArg && !isPPC64) { 4172 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; 4173 ++ArgNo) { 4174 EVT ObjectVT = Ins[ArgNo].VT; 4175 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 4176 4177 if (Flags.isByVal()) { 4178 // ObjSize is the true size, ArgSize rounded up to multiple of regs. 4179 unsigned ObjSize = Flags.getByValSize(); 4180 unsigned ArgSize = 4181 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 4182 VecArgOffset += ArgSize; 4183 continue; 4184 } 4185 4186 switch(ObjectVT.getSimpleVT().SimpleTy) { 4187 default: llvm_unreachable("Unhandled argument type!"); 4188 case MVT::i1: 4189 case MVT::i32: 4190 case MVT::f32: 4191 VecArgOffset += 4; 4192 break; 4193 case MVT::i64: // PPC64 4194 case MVT::f64: 4195 // FIXME: We are guaranteed to be !isPPC64 at this point. 4196 // Does MVT::i64 apply? 4197 VecArgOffset += 8; 4198 break; 4199 case MVT::v4f32: 4200 case MVT::v4i32: 4201 case MVT::v8i16: 4202 case MVT::v16i8: 4203 // Nothing to do, we're only looking at Nonvector args here. 4204 break; 4205 } 4206 } 4207 } 4208 // We've found where the vector parameter area in memory is. Skip the 4209 // first 12 parameters; these don't use that memory. 4210 VecArgOffset = ((VecArgOffset+15)/16)*16; 4211 VecArgOffset += 12*16; 4212 4213 // Add DAG nodes to load the arguments or copy them out of registers. On 4214 // entry to a function on PPC, the arguments start after the linkage area, 4215 // although the first ones are often in registers. 4216 4217 SmallVector<SDValue, 8> MemOps; 4218 unsigned nAltivecParamsAtEnd = 0; 4219 Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin(); 4220 unsigned CurArgIdx = 0; 4221 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 4222 SDValue ArgVal; 4223 bool needsLoad = false; 4224 EVT ObjectVT = Ins[ArgNo].VT; 4225 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 4226 unsigned ArgSize = ObjSize; 4227 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 4228 if (Ins[ArgNo].isOrigArg()) { 4229 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); 4230 CurArgIdx = Ins[ArgNo].getOrigArgIndex(); 4231 } 4232 unsigned CurArgOffset = ArgOffset; 4233 4234 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary. 4235 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 || 4236 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) { 4237 if (isVarArg || isPPC64) { 4238 MinReservedArea = ((MinReservedArea+15)/16)*16; 4239 MinReservedArea += CalculateStackSlotSize(ObjectVT, 4240 Flags, 4241 PtrByteSize); 4242 } else nAltivecParamsAtEnd++; 4243 } else 4244 // Calculate min reserved area. 4245 MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT, 4246 Flags, 4247 PtrByteSize); 4248 4249 // FIXME the codegen can be much improved in some cases. 4250 // We do not have to keep everything in memory. 
4251 if (Flags.isByVal()) { 4252 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit"); 4253 4254 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 4255 ObjSize = Flags.getByValSize(); 4256 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 4257 // Objects of size 1 and 2 are right justified, everything else is 4258 // left justified. This means the memory address is adjusted forwards. 4259 if (ObjSize==1 || ObjSize==2) { 4260 CurArgOffset = CurArgOffset + (4 - ObjSize); 4261 } 4262 // The value of the object is its address. 4263 int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, false, true); 4264 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4265 InVals.push_back(FIN); 4266 if (ObjSize==1 || ObjSize==2) { 4267 if (GPR_idx != Num_GPR_Regs) { 4268 unsigned VReg; 4269 if (isPPC64) 4270 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 4271 else 4272 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 4273 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 4274 EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16; 4275 SDValue Store = 4276 DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, 4277 MachinePointerInfo(&*FuncArg), ObjType); 4278 MemOps.push_back(Store); 4279 ++GPR_idx; 4280 } 4281 4282 ArgOffset += PtrByteSize; 4283 4284 continue; 4285 } 4286 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 4287 // Store whatever pieces of the object are in registers 4288 // to memory. ArgOffset will be the address of the beginning 4289 // of the object. 4290 if (GPR_idx != Num_GPR_Regs) { 4291 unsigned VReg; 4292 if (isPPC64) 4293 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 4294 else 4295 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 4296 int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true); 4297 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4298 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 4299 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 4300 MachinePointerInfo(&*FuncArg, j)); 4301 MemOps.push_back(Store); 4302 ++GPR_idx; 4303 ArgOffset += PtrByteSize; 4304 } else { 4305 ArgOffset += ArgSize - (ArgOffset-CurArgOffset); 4306 break; 4307 } 4308 } 4309 continue; 4310 } 4311 4312 switch (ObjectVT.getSimpleVT().SimpleTy) { 4313 default: llvm_unreachable("Unhandled argument type!"); 4314 case MVT::i1: 4315 case MVT::i32: 4316 if (!isPPC64) { 4317 if (GPR_idx != Num_GPR_Regs) { 4318 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 4319 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 4320 4321 if (ObjectVT == MVT::i1) 4322 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal); 4323 4324 ++GPR_idx; 4325 } else { 4326 needsLoad = true; 4327 ArgSize = PtrByteSize; 4328 } 4329 // All int arguments reserve stack space in the Darwin ABI. 4330 ArgOffset += PtrByteSize; 4331 break; 4332 } 4333 LLVM_FALLTHROUGH; 4334 case MVT::i64: // PPC64 4335 if (GPR_idx != Num_GPR_Regs) { 4336 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 4337 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 4338 4339 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 4340 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 4341 // value to MVT::i64 and then truncate to the correct register size. 4342 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 4343 4344 ++GPR_idx; 4345 } else { 4346 needsLoad = true; 4347 ArgSize = PtrByteSize; 4348 } 4349 // All int arguments reserve stack space in the Darwin ABI. 
4350 ArgOffset += 8; 4351 break; 4352 4353 case MVT::f32: 4354 case MVT::f64: 4355 // Every 4 bytes of argument space consumes one of the GPRs available for 4356 // argument passing. 4357 if (GPR_idx != Num_GPR_Regs) { 4358 ++GPR_idx; 4359 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64) 4360 ++GPR_idx; 4361 } 4362 if (FPR_idx != Num_FPR_Regs) { 4363 unsigned VReg; 4364 4365 if (ObjectVT == MVT::f32) 4366 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass); 4367 else 4368 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass); 4369 4370 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 4371 ++FPR_idx; 4372 } else { 4373 needsLoad = true; 4374 } 4375 4376 // All FP arguments reserve stack space in the Darwin ABI. 4377 ArgOffset += isPPC64 ? 8 : ObjSize; 4378 break; 4379 case MVT::v4f32: 4380 case MVT::v4i32: 4381 case MVT::v8i16: 4382 case MVT::v16i8: 4383 // Note that vector arguments in registers don't reserve stack space, 4384 // except in varargs functions. 4385 if (VR_idx != Num_VR_Regs) { 4386 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 4387 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 4388 if (isVarArg) { 4389 while ((ArgOffset % 16) != 0) { 4390 ArgOffset += PtrByteSize; 4391 if (GPR_idx != Num_GPR_Regs) 4392 GPR_idx++; 4393 } 4394 ArgOffset += 16; 4395 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64? 4396 } 4397 ++VR_idx; 4398 } else { 4399 if (!isVarArg && !isPPC64) { 4400 // Vectors go after all the nonvectors. 4401 CurArgOffset = VecArgOffset; 4402 VecArgOffset += 16; 4403 } else { 4404 // Vectors are aligned. 4405 ArgOffset = ((ArgOffset+15)/16)*16; 4406 CurArgOffset = ArgOffset; 4407 ArgOffset += 16; 4408 } 4409 needsLoad = true; 4410 } 4411 break; 4412 } 4413 4414 // We need to load the argument to a virtual register if we determined above 4415 // that we ran out of physical registers of the appropriate type. 4416 if (needsLoad) { 4417 int FI = MFI.CreateFixedObject(ObjSize, 4418 CurArgOffset + (ArgSize - ObjSize), 4419 isImmutable); 4420 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4421 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo()); 4422 } 4423 4424 InVals.push_back(ArgVal); 4425 } 4426 4427 // Allow for Altivec parameters at the end, if needed. 4428 if (nAltivecParamsAtEnd) { 4429 MinReservedArea = ((MinReservedArea+15)/16)*16; 4430 MinReservedArea += 16*nAltivecParamsAtEnd; 4431 } 4432 4433 // Area that is at least reserved in the caller of this function. 4434 MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize); 4435 4436 // Set the size that is at least reserved in caller of this function. Tail 4437 // call optimized functions' reserved stack space needs to be aligned so that 4438 // taking the difference between two stack areas will result in an aligned 4439 // stack. 4440 MinReservedArea = 4441 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 4442 FuncInfo->setMinReservedArea(MinReservedArea); 4443 4444 // If the function takes variable number of arguments, make a frame index for 4445 // the start of the first vararg value... for expansion of llvm.va_start. 
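// Hedged illustration: for a 32-bit `int f(int a, ...)`, r3 carries 'a', and
// the loop below spills the remaining argument GPRs (r4..r10) to consecutive
// pointer-size stack slots starting at VarArgsFrameIndex, so va_arg can walk
// them with simple pointer arithmetic.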
4446 if (isVarArg) {
4447 int Depth = ArgOffset;
4448
4449 FuncInfo->setVarArgsFrameIndex(
4450 MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
4451 Depth, true));
4452 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4453
4454 // If this function is vararg, store any remaining integer argument regs
4455 // to their spots on the stack so that they may be loaded by dereferencing
4456 // the result of va_next.
4457 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
4458 unsigned VReg;
4459
4460 if (isPPC64)
4461 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4462 else
4463 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4464
4465 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4466 SDValue Store =
4467 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4468 MemOps.push_back(Store);
4469 // Increment the address by the pointer size for the next argument to store.
4470 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
4471 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4472 }
4473 }
4474
4475 if (!MemOps.empty())
4476 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4477
4478 return Chain;
4479 }
4480
4481 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
4482 /// adjusted to accommodate the arguments for the tailcall.
4483 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
4484 unsigned ParamSize) {
4485
4486 if (!isTailCall) return 0;
4487
4488 PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
4489 unsigned CallerMinReservedArea = FI->getMinReservedArea();
4490 int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
4491 // Remember only if the new adjustment is bigger.
4492 if (SPDiff < FI->getTailCallSPDelta())
4493 FI->setTailCallSPDelta(SPDiff);
4494
4495 return SPDiff;
4496 }
4497
4498 static bool isFunctionGlobalAddress(SDValue Callee);
4499
4500 static bool
4501 callsShareTOCBase(const Function *Caller, SDValue Callee,
4502 const TargetMachine &TM) {
4503 // Callee is either a GlobalAddress or an ExternalSymbol. ExternalSymbols
4504 // don't have enough information to determine if the caller and callee share
4505 // the same TOC base, so we have to pessimistically assume they don't for
4506 // correctness.
4507 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4508 if (!G)
4509 return false;
4510
4511 const GlobalValue *GV = G->getGlobal();
4512 // The medium and large code models are expected to provide a sufficiently
4513 // large TOC to provide all data addressing needs of a module with a
4514 // single TOC. Since each module will be addressed with a single TOC, we
4515 // only need to check that caller and callee don't cross dso boundaries.
4516 if (CodeModel::Medium == TM.getCodeModel() ||
4517 CodeModel::Large == TM.getCodeModel())
4518 return TM.shouldAssumeDSOLocal(*Caller->getParent(), GV);
4519
4520 // Otherwise we need to ensure callee and caller are in the same section,
4521 // since the linker may allocate multiple TOCs, and we don't know which
4522 // sections will belong to the same TOC base.
4523
4524 if (!GV->isStrongDefinitionForLinker())
4525 return false;
4526
4527 // Any explicitly-specified sections and section prefixes must also match.
4528 // Also, if we're using -ffunction-sections, then each function is always in
4529 // a different section (the same is true for COMDAT functions).
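// For example, building with -ffunction-sections places the caller and callee
// in distinct sections (e.g. .text.foo and .text.bar), so the checks below
// fail and we conservatively report distinct TOC bases.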
4530 if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
4531 GV->getSection() != Caller->getSection())
4532 return false;
4533 if (const auto *F = dyn_cast<Function>(GV)) {
4534 if (F->getSectionPrefix() != Caller->getSectionPrefix())
4535 return false;
4536 }
4537
4538 // If the callee might be interposed, then we can't assume the ultimate call
4539 // target will be in the same section. Even in cases where we can assume that
4540 // interposition won't happen, in any case where the linker might insert a
4541 // stub to allow for interposition, we must generate code as though
4542 // interposition might occur. To understand why this matters, consider a
4543 // situation where: a -> b -> c where the arrows indicate calls. b and c are
4544 // in the same section, but a is in a different module (i.e. has a different
4545 // TOC base pointer). If the linker allows for interposition between b and c,
4546 // then it will generate a stub for the call edge between b and c which will
4547 // save the TOC pointer into the designated stack slot allocated by b. If we
4548 // return true here, and therefore allow a tail call between b and c, that
4549 // stack slot won't exist and the b -> c stub will end up saving b's TOC base
4550 // pointer into the stack slot allocated by a (where the a -> b stub saved
4551 // a's TOC base pointer). If we're not considering a tail call, but rather,
4552 // whether a nop is needed after the call instruction in b, because the linker
4553 // will insert a stub, it might complain about a missing nop if we omit it
4554 // (although many don't complain in this case).
4555 if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
4556 return false;
4557
4558 return true;
4559 }
4560
4561 static bool
4562 needStackSlotPassParameters(const PPCSubtarget &Subtarget,
4563 const SmallVectorImpl<ISD::OutputArg> &Outs) {
4564 assert(Subtarget.is64BitELFABI());
4565
4566 const unsigned PtrByteSize = 8;
4567 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4568
4569 static const MCPhysReg GPR[] = {
4570 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4571 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4572 };
4573 static const MCPhysReg VR[] = {
4574 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4575 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4576 };
4577
4578 const unsigned NumGPRs = array_lengthof(GPR);
4579 const unsigned NumFPRs = 13;
4580 const unsigned NumVRs = array_lengthof(VR);
4581 const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
4582
4583 unsigned NumBytes = LinkageSize;
4584 unsigned AvailableFPRs = NumFPRs;
4585 unsigned AvailableVRs = NumVRs;
4586
4587 for (const ISD::OutputArg& Param : Outs) {
4588 if (Param.Flags.isNest()) continue;
4589
4590 if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags,
4591 PtrByteSize, LinkageSize, ParamAreaSize,
4592 NumBytes, AvailableFPRs, AvailableVRs,
4593 Subtarget.hasQPX()))
4594 return true;
4595 }
4596 return false;
4597 }
4598
4599 static bool
4600 hasSameArgumentList(const Function *CallerFn, ImmutableCallSite CS) {
4601 if (CS.arg_size() != CallerFn->arg_size())
4602 return false;
4603
4604 ImmutableCallSite::arg_iterator CalleeArgIter = CS.arg_begin();
4605 ImmutableCallSite::arg_iterator CalleeArgEnd = CS.arg_end();
4606 Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();
4607
4608 for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
4609 const Value* CalleeArg = *CalleeArgIter;
4610 const Value* CallerArg =
&(*CallerArgIter);
4611 if (CalleeArg == CallerArg)
4612 continue;
4613
4614 // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
4615 // tail call @callee([4 x i64] undef, [4 x i64] %b)
4616 // }
4617 // 1st argument of the callee is undef and has the same type as the caller's.
4618 if (CalleeArg->getType() == CallerArg->getType() &&
4619 isa<UndefValue>(CalleeArg))
4620 continue;
4621
4622 return false;
4623 }
4624
4625 return true;
4626 }
4627
4628 // Returns true if TCO is possible between the caller's and callee's
4629 // calling conventions.
4630 static bool
4631 areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
4632 CallingConv::ID CalleeCC) {
4633 // Tail calls are possible with fastcc and ccc.
4634 auto isTailCallableCC = [] (CallingConv::ID CC){
4635 return CC == CallingConv::C || CC == CallingConv::Fast;
4636 };
4637 if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
4638 return false;
4639
4640 // We can safely tail call both fastcc and ccc callees from a c calling
4641 // convention caller. If the caller is fastcc, we may have less stack space
4642 // than a non-fastcc caller with the same signature so disable tail-calls in
4643 // that case.
4644 return CallerCC == CallingConv::C || CallerCC == CalleeCC;
4645 }
4646
4647 bool
4648 PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
4649 SDValue Callee,
4650 CallingConv::ID CalleeCC,
4651 ImmutableCallSite CS,
4652 bool isVarArg,
4653 const SmallVectorImpl<ISD::OutputArg> &Outs,
4654 const SmallVectorImpl<ISD::InputArg> &Ins,
4655 SelectionDAG& DAG) const {
4656 bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
4657
4658 if (DisableSCO && !TailCallOpt) return false;
4659
4660 // Variadic argument functions are not supported.
4661 if (isVarArg) return false;
4662
4663 auto &Caller = DAG.getMachineFunction().getFunction();
4664 // Check that the calling conventions are compatible for TCO.
4665 if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
4666 return false;
4667
4668 // A caller containing any byval parameter is not supported.
4669 if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
4670 return false;
4671
4672 // A callee containing any byval parameter is not supported either.
4673 // Note: This is a quick workaround, because in some cases, e.g.
4674 // caller's stack size > callee's stack size, we are still able to apply
4675 // sibling call optimization. For example, gcc is able to do SCO for caller1
4676 // in the following example, but not for caller2.
4677 // struct test {
4678 // long int a;
4679 // char ary[56];
4680 // } gTest;
4681 // __attribute__((noinline)) int callee(struct test v, struct test *b) {
4682 // b->a = v.a;
4683 // return 0;
4684 // }
4685 // void caller1(struct test a, struct test c, struct test *b) {
4686 // callee(gTest, b); }
4687 // void caller2(struct test *b) { callee(gTest, b); }
4688 if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
4689 return false;
4690
4691 // If the callee and caller use different calling conventions, we cannot pass
4692 // parameters on the stack since the offsets for the parameter area may differ.
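// Illustrative case: a fastcc caller may not have allocated the 64-byte
// register home area in its parameter save area, while a ccc callee that
// spills arguments to the stack expects those slots at fixed offsets, so the
// check below rejects the pair.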
4693 if (Caller.getCallingConv() != CalleeCC &&
4694 needStackSlotPassParameters(Subtarget, Outs))
4695 return false;
4696
4697 // No TCO/SCO on indirect calls because the caller has to restore its TOC.
4698 if (!isFunctionGlobalAddress(Callee) &&
4699 !isa<ExternalSymbolSDNode>(Callee))
4700 return false;
4701
4702 // If the caller and callee potentially have different TOC bases then we
4703 // cannot tail call since we need to restore the TOC pointer after the call.
4704 // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
4705 if (!callsShareTOCBase(&Caller, Callee, getTargetMachine()))
4706 return false;
4707
4708 // TCO allows altering callee ABI, so we don't have to check further.
4709 if (CalleeCC == CallingConv::Fast && TailCallOpt)
4710 return true;
4711
4712 if (DisableSCO) return false;
4713
4714 // If the callee uses the same argument list as the caller, we can apply SCO
4715 // in this case. Otherwise, we need to check whether the callee needs stack
4716 // slots for passing arguments.
4717 if (!hasSameArgumentList(&Caller, CS) &&
4718 needStackSlotPassParameters(Subtarget, Outs)) {
4719 return false;
4720 }
4721
4722 return true;
4723 }
4724
4725 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
4726 /// for tail call optimization. Targets which want to do tail call
4727 /// optimization should implement this function.
4728 bool
4729 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
4730 CallingConv::ID CalleeCC,
4731 bool isVarArg,
4732 const SmallVectorImpl<ISD::InputArg> &Ins,
4733 SelectionDAG& DAG) const {
4734 if (!getTargetMachine().Options.GuaranteedTailCallOpt)
4735 return false;
4736
4737 // Variable argument functions are not supported.
4738 if (isVarArg)
4739 return false;
4740
4741 MachineFunction &MF = DAG.getMachineFunction();
4742 CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
4743 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
4744 // Functions containing by val parameters are not supported.
4745 for (unsigned i = 0; i != Ins.size(); i++) {
4746 ISD::ArgFlagsTy Flags = Ins[i].Flags;
4747 if (Flags.isByVal()) return false;
4748 }
4749
4750 // Non-PIC/GOT tail calls are supported.
4751 if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
4752 return true;
4753
4754 // At the moment we can only do local tail calls (in same module, hidden
4755 // or protected) if we are generating PIC.
4756 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
4757 return G->getGlobal()->hasHiddenVisibility()
4758 || G->getGlobal()->hasProtectedVisibility();
4759 }
4760
4761 return false;
4762 }
4763
4764 /// isBLACompatibleAddress - Return the immediate to use if the specified
4765 /// 32-bit value is representable in the immediate field of a BxA instruction.
4766 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
4767 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
4768 if (!C) return nullptr;
4769
4770 int Addr = C->getZExtValue();
4771 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero.
4772 SignExtend32<26>(Addr) != Addr)
4773 return nullptr; // Top 6 bits have to be sext of immediate.
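// Illustrative values: Addr = 0x0F00 passes both checks and is encoded below
// as 0x0F00 >> 2; Addr = 0x0F02 fails the low-bits test, and Addr = 0x4000000
// fails the sign-extension test since it does not fit a 26-bit signed field.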
4774
4775 return DAG
4776 .getConstant(
4777 (int)C->getZExtValue() >> 2, SDLoc(Op),
4778 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
4779 .getNode();
4780 }
4781
4782 namespace {
4783
4784 struct TailCallArgumentInfo {
4785 SDValue Arg;
4786 SDValue FrameIdxOp;
4787 int FrameIdx = 0;
4788
4789 TailCallArgumentInfo() = default;
4790 };
4791
4792 } // end anonymous namespace
4793
4794 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
4795 static void StoreTailCallArgumentsToStackSlot(
4796 SelectionDAG &DAG, SDValue Chain,
4797 const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
4798 SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
4799 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
4800 SDValue Arg = TailCallArgs[i].Arg;
4801 SDValue FIN = TailCallArgs[i].FrameIdxOp;
4802 int FI = TailCallArgs[i].FrameIdx;
4803 // Store relative to framepointer.
4804 MemOpChains.push_back(DAG.getStore(
4805 Chain, dl, Arg, FIN,
4806 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
4807 }
4808 }
4809
4810 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
4811 /// the appropriate stack slot for the tail call optimized function call.
4812 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
4813 SDValue OldRetAddr, SDValue OldFP,
4814 int SPDiff, const SDLoc &dl) {
4815 if (SPDiff) {
4816 // Calculate the new stack slot for the return address.
4817 MachineFunction &MF = DAG.getMachineFunction();
4818 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
4819 const PPCFrameLowering *FL = Subtarget.getFrameLowering();
4820 bool isPPC64 = Subtarget.isPPC64();
4821 int SlotSize = isPPC64 ? 8 : 4;
4822 int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
4823 int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize,
4824 NewRetAddrLoc, true);
4825 EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4826 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
4827 Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
4828 MachinePointerInfo::getFixedStack(MF, NewRetAddr));
4829 }
4830 return Chain;
4831 }
4832
4833 /// CalculateTailCallArgDest - Remember the argument for later processing.
4834 /// Calculate the position of the argument.
4835 static void
4836 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
4837 SDValue Arg, int SPDiff, unsigned ArgOffset,
4838 SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
4839 int Offset = ArgOffset + SPDiff;
4840 uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
4841 int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
4842 EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4843 SDValue FIN = DAG.getFrameIndex(FI, VT);
4844 TailCallArgumentInfo Info;
4845 Info.Arg = Arg;
4846 Info.FrameIdxOp = FIN;
4847 Info.FrameIdx = FI;
4848 TailCallArguments.push_back(Info);
4849 }
4850
4851 /// EmitTailCallLoadFPAndRetAddr - Emit loads from the frame pointer and return
4852 /// address stack slots. Returns the chain as result and the loaded values in
4853 /// LROpOut/FPOpOut. Used when tail calling.
4854 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
4855 SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
4856 SDValue &FPOpOut, const SDLoc &dl) const {
4857 if (SPDiff) {
4858 // Load the LR and FP stack slot for later adjusting.
4859 EVT VT = Subtarget.isPPC64() ?
MVT::i64 : MVT::i32; 4860 LROpOut = getReturnAddrFrameIndex(DAG); 4861 LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo()); 4862 Chain = SDValue(LROpOut.getNode(), 1); 4863 } 4864 return Chain; 4865 } 4866 4867 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 4868 /// by "Src" to address "Dst" of size "Size". Alignment information is 4869 /// specified by the specific parameter attribute. The copy will be passed as 4870 /// a byval function parameter. 4871 /// Sometimes what we are copying is the end of a larger object, the part that 4872 /// does not fit in registers. 4873 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, 4874 SDValue Chain, ISD::ArgFlagsTy Flags, 4875 SelectionDAG &DAG, const SDLoc &dl) { 4876 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32); 4877 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 4878 false, false, false, MachinePointerInfo(), 4879 MachinePointerInfo()); 4880 } 4881 4882 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of 4883 /// tail calls. 4884 static void LowerMemOpCallTo( 4885 SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg, 4886 SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64, 4887 bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains, 4888 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) { 4889 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4890 if (!isTailCall) { 4891 if (isVector) { 4892 SDValue StackPtr; 4893 if (isPPC64) 4894 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 4895 else 4896 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 4897 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 4898 DAG.getConstant(ArgOffset, dl, PtrVT)); 4899 } 4900 MemOpChains.push_back( 4901 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo())); 4902 // Calculate and remember argument location. 4903 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset, 4904 TailCallArguments); 4905 } 4906 4907 static void 4908 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, 4909 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp, 4910 SDValue FPOp, 4911 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) { 4912 // Emit a sequence of copyto/copyfrom virtual registers for arguments that 4913 // might overwrite each other in case of tail call optimization. 4914 SmallVector<SDValue, 8> MemOpChains2; 4915 // Do not flag preceding copytoreg stuff together with the following stuff. 4916 InFlag = SDValue(); 4917 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, 4918 MemOpChains2, dl); 4919 if (!MemOpChains2.empty()) 4920 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2); 4921 4922 // Store the return address to the appropriate stack slot. 4923 Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl); 4924 4925 // Emit callseq_end just before tailcall node. 4926 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 4927 DAG.getIntPtrConstant(0, dl, true), InFlag, dl); 4928 InFlag = Chain.getValue(1); 4929 } 4930 4931 // Is this global address that of a function that can be called by name? (as 4932 // opposed to something that must hold a descriptor for an indirect call). 
4933 static bool isFunctionGlobalAddress(SDValue Callee) {
4934 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
4935 if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
4936 Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
4937 return false;
4938
4939 return G->getGlobal()->getValueType()->isFunctionTy();
4940 }
4941
4942 return false;
4943 }
4944
4945 SDValue PPCTargetLowering::LowerCallResult(
4946 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
4947 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4948 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4949 SmallVector<CCValAssign, 16> RVLocs;
4950 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
4951 *DAG.getContext());
4952
4953 CCRetInfo.AnalyzeCallResult(
4954 Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
4955 ? RetCC_PPC_Cold
4956 : RetCC_PPC);
4957
4958 // Copy all of the result registers out of their specified physreg.
4959 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
4960 CCValAssign &VA = RVLocs[i];
4961 assert(VA.isRegLoc() && "Can only return in registers!");
4962
4963 SDValue Val;
4964
4965 if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
4966 SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
4967 InFlag);
4968 Chain = Lo.getValue(1);
4969 InFlag = Lo.getValue(2);
4970 VA = RVLocs[++i]; // skip ahead to next loc
4971 SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
4972 InFlag);
4973 Chain = Hi.getValue(1);
4974 InFlag = Hi.getValue(2);
4975 if (!Subtarget.isLittleEndian())
4976 std::swap (Lo, Hi);
4977 Val = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, Lo, Hi);
4978 } else {
4979 Val = DAG.getCopyFromReg(Chain, dl,
4980 VA.getLocReg(), VA.getLocVT(), InFlag);
4981 Chain = Val.getValue(1);
4982 InFlag = Val.getValue(2);
4983 }
4984
4985 switch (VA.getLocInfo()) {
4986 default: llvm_unreachable("Unknown loc info!");
4987 case CCValAssign::Full: break;
4988 case CCValAssign::AExt:
4989 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
4990 break;
4991 case CCValAssign::ZExt:
4992 Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
4993 DAG.getValueType(VA.getValVT()));
4994 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
4995 break;
4996 case CCValAssign::SExt:
4997 Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
4998 DAG.getValueType(VA.getValVT()));
4999 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5000 break;
5001 }
5002
5003 InVals.push_back(Val);
5004 }
5005
5006 return Chain;
5007 }
5008
5009 static bool isIndirectCall(const SDValue &Callee, SelectionDAG &DAG,
5010 const PPCSubtarget &Subtarget, bool isPatchPoint) {
5011 // PatchPoint calls are not indirect.
5012 if (isPatchPoint)
5013 return false;
5014
5015 if (isFunctionGlobalAddress(Callee) || dyn_cast<ExternalSymbolSDNode>(Callee))
5016 return false;
5017
5018 // Darwin and 32-bit ELF can use a BLA. The descriptor-based ABIs cannot,
5019 // because the immediate function pointer points to a descriptor instead of
5020 // a function entry point. The ELFv2 ABI cannot use a BLA because the function
5021 // pointer immediate points to the global entry point, while the BLA would
5022 // need to jump to the local entry point (see rL211174).
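// For example (hedged): on 32-bit ELF, calling an absolute address such as
// (void (*)())0x0F00 satisfies isBLACompatibleAddress() and can be emitted as
// a direct 'bla', so it is not treated as an indirect call here.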
5023 if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI() &&
5024 isBLACompatibleAddress(Callee, DAG))
5025 return false;
5026
5027 return true;
5028 }
5029
5030 static unsigned getCallOpcode(PPCTargetLowering::CallFlags CFlags,
5031 const Function &Caller,
5032 const SDValue &Callee,
5033 const PPCSubtarget &Subtarget,
5034 const TargetMachine &TM) {
5035 if (CFlags.IsTailCall)
5036 return PPCISD::TC_RETURN;
5037
5038 // This is a call through a function pointer.
5039 if (CFlags.IsIndirect) {
5040 // AIX and the 64-bit ELF ABIs need to maintain the TOC pointer across
5041 // indirect calls. The save of the caller's TOC pointer to the stack will be
5042 // inserted into the DAG as part of call lowering. The restore of the TOC
5043 // pointer is modeled by using a pseudo instruction for the call opcode that
5044 // represents the two-instruction sequence of an indirect branch and link,
5045 // immediately followed by a load of the TOC pointer from the stack save
5046 // slot into gpr2.
5047 if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI())
5048 return PPCISD::BCTRL_LOAD_TOC;
5049
5050 // An indirect call that does not need a TOC restore.
5051 return PPCISD::BCTRL;
5052 }
5053
5054 // The ABIs that maintain a TOC pointer across calls need to have a nop
5055 // immediately following the call instruction if the caller and callee may
5056 // have different TOC bases. At link time if the linker determines the calls
5057 // may not share a TOC base, the call is redirected to a trampoline inserted
5058 // by the linker. The trampoline will (among other things) save the caller's
5059 // TOC pointer at an ABI designated offset in the linkage area and the linker
5060 // will rewrite the nop to be a load of the TOC pointer from the linkage area
5061 // into gpr2.
5062 if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI())
5063 return callsShareTOCBase(&Caller, Callee, TM) ? PPCISD::CALL
5064 : PPCISD::CALL_NOP;
5065
5066 return PPCISD::CALL;
5067 }
5068
5069 static bool isValidAIXExternalSymSDNode(StringRef SymName) {
5070 return StringSwitch<bool>(SymName)
5071 .Cases("__divdi3", "__fixunsdfdi", "__floatundidf", "__floatundisf",
5072 "__moddi3", "__udivdi3", "__umoddi3", true)
5073 .Cases("ceil", "floor", "memcpy", "memmove", "memset", "round", true)
5074 .Default(false);
5075 }
5076
5077 static SDValue transformCallee(const SDValue &Callee, SelectionDAG &DAG,
5078 const SDLoc &dl, const PPCSubtarget &Subtarget) {
5079 if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI())
5080 if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
5081 return SDValue(Dest, 0);
5082
5083 // Returns true if the callee is local, and false otherwise.
5084 auto isLocalCallee = [&]() {
5085 const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
5086 const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
5087 const GlobalValue *GV = G ? G->getGlobal() : nullptr;
5088
5089 return DAG.getTarget().shouldAssumeDSOLocal(*Mod, GV) &&
5090 !dyn_cast_or_null<GlobalIFunc>(GV);
5091 };
5092
5093 // The PLT is only used in 32-bit ELF PIC mode. Attempting to use the PLT in
5094 // a static relocation model causes some versions of GNU LD (2.17.50, at
5095 // least) to force BSS-PLT, instead of secure-PLT, even if all objects are
5096 // built with secure-PLT.
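// Hedged illustration: in 32-bit ELF PIC mode a call to an external function
// 'foo' gets the MO_PLT flag below and is emitted as 'bl foo@plt'; for a
// dso-local callee the flag is omitted and a plain 'bl foo' suffices.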
5097 bool UsePlt = 5098 Subtarget.is32BitELFABI() && !isLocalCallee() && 5099 Subtarget.getTargetMachine().getRelocationModel() == Reloc::PIC_; 5100 5101 // On AIX, direct function calls reference the symbol for the function's 5102 // entry point, which is named by prepending a "." before the function's 5103 // C-linkage name. 5104 const auto getAIXFuncEntryPointSymbolSDNode = 5105 [&](StringRef FuncName, bool IsDeclaration, 5106 const XCOFF::StorageClass &SC) { 5107 auto &Context = DAG.getMachineFunction().getMMI().getContext(); 5108 5109 MCSymbolXCOFF *S = cast<MCSymbolXCOFF>( 5110 Context.getOrCreateSymbol(Twine(".") + Twine(FuncName))); 5111 5112 if (IsDeclaration && !S->hasContainingCsect()) { 5113 // On AIX, an undefined symbol needs to be associated with a 5114 // MCSectionXCOFF to get the correct storage mapping class. 5115 // In this case, XCOFF::XMC_PR. 5116 MCSectionXCOFF *Sec = Context.getXCOFFSection( 5117 S->getName(), XCOFF::XMC_PR, XCOFF::XTY_ER, SC, 5118 SectionKind::getMetadata()); 5119 S->setContainingCsect(Sec); 5120 } 5121 5122 MVT PtrVT = 5123 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 5124 return DAG.getMCSymbol(S, PtrVT); 5125 }; 5126 5127 if (isFunctionGlobalAddress(Callee)) { 5128 const GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee); 5129 const GlobalValue *GV = G->getGlobal(); 5130 5131 if (!Subtarget.isAIXABI()) 5132 return DAG.getTargetGlobalAddress(GV, dl, Callee.getValueType(), 0, 5133 UsePlt ? PPCII::MO_PLT : 0); 5134 5135 assert(!isa<GlobalIFunc>(GV) && "IFunc is not supported on AIX."); 5136 const GlobalObject *GO = cast<GlobalObject>(GV); 5137 const XCOFF::StorageClass SC = 5138 TargetLoweringObjectFileXCOFF::getStorageClassForGlobal(GO); 5139 return getAIXFuncEntryPointSymbolSDNode(GO->getName(), GO->isDeclaration(), 5140 SC); 5141 } 5142 5143 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 5144 const char *SymName = S->getSymbol(); 5145 if (!Subtarget.isAIXABI()) 5146 return DAG.getTargetExternalSymbol(SymName, Callee.getValueType(), 5147 UsePlt ? PPCII::MO_PLT : 0); 5148 5149 // If there exists a user-declared function whose name is the same as the 5150 // ExternalSymbol's, then we pick up the user-declared version. 5151 const Module *Mod = DAG.getMachineFunction().getFunction().getParent(); 5152 if (const Function *F = 5153 dyn_cast_or_null<Function>(Mod->getNamedValue(SymName))) { 5154 const XCOFF::StorageClass SC = 5155 TargetLoweringObjectFileXCOFF::getStorageClassForGlobal(F); 5156 return getAIXFuncEntryPointSymbolSDNode(F->getName(), F->isDeclaration(), 5157 SC); 5158 } 5159 5160 // TODO: Remove this when the support for ExternalSymbolSDNode is complete. 5161 if (isValidAIXExternalSymSDNode(SymName)) { 5162 return getAIXFuncEntryPointSymbolSDNode(SymName, true, XCOFF::C_EXT); 5163 } 5164 5165 report_fatal_error("Unexpected ExternalSymbolSDNode: " + Twine(SymName)); 5166 } 5167 5168 // No transformation needed. 5169 assert(Callee.getNode() && "What no callee?"); 5170 return Callee; 5171 } 5172 5173 static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart) { 5174 assert(CallSeqStart.getOpcode() == ISD::CALLSEQ_START && 5175 "Expected a CALLSEQ_STARTSDNode."); 5176 5177 // The last operand is the chain, except when the node has glue. If the node 5178 // has glue, then the last operand is the glue, and the chain is the second 5179 // last operand. 
5180 SDValue LastValue = CallSeqStart.getValue(CallSeqStart->getNumValues() - 1);
5181 if (LastValue.getValueType() != MVT::Glue)
5182 return LastValue;
5183
5184 return CallSeqStart.getValue(CallSeqStart->getNumValues() - 2);
5185 }
5186
5187 // Creates the node that moves a function's address into the count register
5188 // to prepare for an indirect call instruction.
5189 static void prepareIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5190 SDValue &Glue, SDValue &Chain,
5191 const SDLoc &dl) {
5192 SDValue MTCTROps[] = {Chain, Callee, Glue};
5193 EVT ReturnTypes[] = {MVT::Other, MVT::Glue};
5194 Chain = DAG.getNode(PPCISD::MTCTR, dl, makeArrayRef(ReturnTypes, 2),
5195 makeArrayRef(MTCTROps, Glue.getNode() ? 3 : 2));
5196 // The glue is the second value produced.
5197 Glue = Chain.getValue(1);
5198 }
5199
5200 static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5201 SDValue &Glue, SDValue &Chain,
5202 SDValue CallSeqStart,
5203 ImmutableCallSite CS, const SDLoc &dl,
5204 bool hasNest,
5205 const PPCSubtarget &Subtarget) {
5206 // Function pointers in the 64-bit SVR4 ABI do not point to the function
5207 // entry point, but to the function descriptor (the function entry point
5208 // address is part of the function descriptor though).
5209 // The function descriptor is a three doubleword structure with the
5210 // following fields: function entry point, TOC base address and
5211 // environment pointer.
5212 // Thus for a call through a function pointer, the following actions need
5213 // to be performed:
5214 // 1. Save the TOC of the caller in the TOC save area of its stack
5215 // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
5216 // 2. Load the address of the function entry point from the function
5217 // descriptor.
5218 // 3. Load the TOC of the callee from the function descriptor into r2.
5219 // 4. Load the environment pointer from the function descriptor into
5220 // r11.
5221 // 5. Branch to the function entry point address.
5222 // 6. On return of the callee, the TOC of the caller needs to be
5223 // restored (this is done in FinishCall()).
5224 //
5225 // The loads are scheduled at the beginning of the call sequence, and the
5226 // register copies are flagged together to ensure that no other
5227 // operations can be scheduled in between. E.g. without flagging the
5228 // copies together, a TOC access in the caller could be scheduled between
5229 // the assignment of the callee TOC and the branch to the callee, which leads
5230 // to incorrect code.
5231
5232 // Start by loading the function address from the descriptor.
5233 SDValue LDChain = getOutputChainFromCallSeq(CallSeqStart);
5234 auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
5235 ? (MachineMemOperand::MODereferenceable |
5236 MachineMemOperand::MOInvariant)
5237 : MachineMemOperand::MONone;
5238
5239 MachinePointerInfo MPI(CS ? CS.getCalledValue() : nullptr);
5240
5241 // Registers used in building the DAG.
5242 const MCRegister EnvPtrReg = Subtarget.getEnvironmentPointerRegister();
5243 const MCRegister TOCReg = Subtarget.getTOCPointerRegister();
5244
5245 // Offsets of descriptor members.
5246 const unsigned TOCAnchorOffset = Subtarget.descriptorTOCAnchorOffset();
5247 const unsigned EnvPtrOffset = Subtarget.descriptorEnvironmentPointerOffset();
5248
5249 const MVT RegVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
5250 const unsigned Alignment = Subtarget.isPPC64() ? 8 : 4;
5251
5252 // One load for the function's entry point address.
5253 SDValue LoadFuncPtr = DAG.getLoad(RegVT, dl, LDChain, Callee, MPI, 5254 Alignment, MMOFlags); 5255 5256 // One for loading the TOC anchor for the module that contains the called 5257 // function. 5258 SDValue TOCOff = DAG.getIntPtrConstant(TOCAnchorOffset, dl); 5259 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, Callee, TOCOff); 5260 SDValue TOCPtr = 5261 DAG.getLoad(RegVT, dl, LDChain, AddTOC, 5262 MPI.getWithOffset(TOCAnchorOffset), Alignment, MMOFlags); 5263 5264 // One for loading the environment pointer. 5265 SDValue PtrOff = DAG.getIntPtrConstant(EnvPtrOffset, dl); 5266 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, RegVT, Callee, PtrOff); 5267 SDValue LoadEnvPtr = 5268 DAG.getLoad(RegVT, dl, LDChain, AddPtr, 5269 MPI.getWithOffset(EnvPtrOffset), Alignment, MMOFlags); 5270 5271 5272 // Then copy the newly loaded TOC anchor to the TOC pointer. 5273 SDValue TOCVal = DAG.getCopyToReg(Chain, dl, TOCReg, TOCPtr, Glue); 5274 Chain = TOCVal.getValue(0); 5275 Glue = TOCVal.getValue(1); 5276 5277 // If the function call has an explicit 'nest' parameter, it takes the 5278 // place of the environment pointer. 5279 assert((!hasNest || !Subtarget.isAIXABI()) && 5280 "Nest parameter is not supported on AIX."); 5281 if (!hasNest) { 5282 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, EnvPtrReg, LoadEnvPtr, Glue); 5283 Chain = EnvVal.getValue(0); 5284 Glue = EnvVal.getValue(1); 5285 } 5286 5287 // The rest of the indirect call sequence is the same as the non-descriptor 5288 // DAG. 5289 prepareIndirectCall(DAG, LoadFuncPtr, Glue, Chain, dl); 5290 } 5291 5292 static void 5293 buildCallOperands(SmallVectorImpl<SDValue> &Ops, 5294 PPCTargetLowering::CallFlags CFlags, const SDLoc &dl, 5295 SelectionDAG &DAG, 5296 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, 5297 SDValue Glue, SDValue Chain, SDValue &Callee, int SPDiff, 5298 const PPCSubtarget &Subtarget) { 5299 const bool IsPPC64 = Subtarget.isPPC64(); 5300 // MVT for a general purpose register. 5301 const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32; 5302 5303 // First operand is always the chain. 5304 Ops.push_back(Chain); 5305 5306 // If it's a direct call pass the callee as the second operand. 5307 if (!CFlags.IsIndirect) 5308 Ops.push_back(Callee); 5309 else { 5310 assert(!CFlags.IsPatchPoint && "Patch point calls are not indirect."); 5311 5312 // For the TOC based ABIs, we have saved the TOC pointer to the linkage area 5313 // on the stack (this would have been done in `LowerCall_64SVR4` or 5314 // `LowerCall_AIX`). The call instruction is a pseudo instruction that 5315 // represents both the indirect branch and a load that restores the TOC 5316 // pointer from the linkage area. The operand for the TOC restore is an add 5317 // of the TOC save offset to the stack pointer. This must be the second 5318 // operand: after the chain input but before any other variadic arguments. 5319 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { 5320 const MCRegister StackPtrReg = Subtarget.getStackPointerRegister(); 5321 5322 SDValue StackPtr = DAG.getRegister(StackPtrReg, RegVT); 5323 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset(); 5324 SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl); 5325 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, StackPtr, TOCOff); 5326 Ops.push_back(AddTOC); 5327 } 5328 5329 // Add the register used for the environment pointer. 
5330 if (Subtarget.usesFunctionDescriptors() && !CFlags.HasNest) 5331 Ops.push_back(DAG.getRegister(Subtarget.getEnvironmentPointerRegister(), 5332 RegVT)); 5333 5334 5335 // Add CTR register as callee so a bctr can be emitted later. 5336 if (CFlags.IsTailCall) 5337 Ops.push_back(DAG.getRegister(IsPPC64 ? PPC::CTR8 : PPC::CTR, RegVT)); 5338 } 5339 5340 // If this is a tail call add stack pointer delta. 5341 if (CFlags.IsTailCall) 5342 Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32)); 5343 5344 // Add argument registers to the end of the list so that they are known live 5345 // into the call. 5346 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 5347 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 5348 RegsToPass[i].second.getValueType())); 5349 5350 // We cannot add R2/X2 as an operand here for PATCHPOINT, because there is 5351 // no way to mark dependencies as implicit here. 5352 // We will add the R2/X2 dependency in EmitInstrWithCustomInserter. 5353 if ((Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) && 5354 !CFlags.IsPatchPoint) 5355 Ops.push_back(DAG.getRegister(Subtarget.getTOCPointerRegister(), RegVT)); 5356 5357 // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls 5358 if (CFlags.IsVarArg && Subtarget.is32BitELFABI()) 5359 Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32)); 5360 5361 // Add a register mask operand representing the call-preserved registers. 5362 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); 5363 const uint32_t *Mask = 5364 TRI->getCallPreservedMask(DAG.getMachineFunction(), CFlags.CallConv); 5365 assert(Mask && "Missing call preserved mask for calling convention"); 5366 Ops.push_back(DAG.getRegisterMask(Mask)); 5367 5368 // If the glue is valid, it is the last operand. 5369 if (Glue.getNode()) 5370 Ops.push_back(Glue); 5371 } 5372 5373 SDValue PPCTargetLowering::FinishCall( 5374 CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG, 5375 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue Glue, 5376 SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff, 5377 unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins, 5378 SmallVectorImpl<SDValue> &InVals, ImmutableCallSite CS) const { 5379 5380 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) 5381 setUsesTOCBasePtr(DAG); 5382 5383 unsigned CallOpc = 5384 getCallOpcode(CFlags, DAG.getMachineFunction().getFunction(), Callee, 5385 Subtarget, DAG.getTarget()); 5386 5387 if (!CFlags.IsIndirect) 5388 Callee = transformCallee(Callee, DAG, dl, Subtarget); 5389 else if (Subtarget.usesFunctionDescriptors()) 5390 prepareDescriptorIndirectCall(DAG, Callee, Glue, Chain, CallSeqStart, CS, 5391 dl, CFlags.HasNest, Subtarget); 5392 else 5393 prepareIndirectCall(DAG, Callee, Glue, Chain, dl); 5394 5395 // Build the operand list for the call instruction. 5396 SmallVector<SDValue, 8> Ops; 5397 buildCallOperands(Ops, CFlags, dl, DAG, RegsToPass, Glue, Chain, Callee, 5398 SPDiff, Subtarget); 5399 5400 // Emit tail call. 
5401 if (CFlags.IsTailCall) { 5402 assert(((Callee.getOpcode() == ISD::Register && 5403 cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) || 5404 Callee.getOpcode() == ISD::TargetExternalSymbol || 5405 Callee.getOpcode() == ISD::TargetGlobalAddress || 5406 isa<ConstantSDNode>(Callee)) && 5407 "Expecting a global address, external symbol, absolute value or " 5408 "register"); 5409 assert(CallOpc == PPCISD::TC_RETURN && 5410 "Unexpected call opcode for a tail call."); 5411 DAG.getMachineFunction().getFrameInfo().setHasTailCall(); 5412 return DAG.getNode(CallOpc, dl, MVT::Other, Ops); 5413 } 5414 5415 std::array<EVT, 2> ReturnTypes = {{MVT::Other, MVT::Glue}}; 5416 Chain = DAG.getNode(CallOpc, dl, ReturnTypes, Ops); 5417 Glue = Chain.getValue(1); 5418 5419 // When performing tail call optimization the callee pops its arguments off 5420 // the stack. Account for this here so these bytes can be pushed back on in 5421 // PPCFrameLowering::eliminateCallFramePseudoInstr. 5422 int BytesCalleePops = (CFlags.CallConv == CallingConv::Fast && 5423 getTargetMachine().Options.GuaranteedTailCallOpt) 5424 ? NumBytes 5425 : 0; 5426 5427 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 5428 DAG.getIntPtrConstant(BytesCalleePops, dl, true), 5429 Glue, dl); 5430 Glue = Chain.getValue(1); 5431 5432 return LowerCallResult(Chain, Glue, CFlags.CallConv, CFlags.IsVarArg, Ins, dl, 5433 DAG, InVals); 5434 } 5435 5436 SDValue 5437 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 5438 SmallVectorImpl<SDValue> &InVals) const { 5439 SelectionDAG &DAG = CLI.DAG; 5440 SDLoc &dl = CLI.DL; 5441 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 5442 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 5443 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 5444 SDValue Chain = CLI.Chain; 5445 SDValue Callee = CLI.Callee; 5446 bool &isTailCall = CLI.IsTailCall; 5447 CallingConv::ID CallConv = CLI.CallConv; 5448 bool isVarArg = CLI.IsVarArg; 5449 bool isPatchPoint = CLI.IsPatchPoint; 5450 ImmutableCallSite CS = CLI.CS; 5451 5452 if (isTailCall) { 5453 if (Subtarget.useLongCalls() && !(CS && CS.isMustTailCall())) 5454 isTailCall = false; 5455 else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) 5456 isTailCall = 5457 IsEligibleForTailCallOptimization_64SVR4(Callee, CallConv, CS, 5458 isVarArg, Outs, Ins, DAG); 5459 else 5460 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, 5461 Ins, DAG); 5462 if (isTailCall) { 5463 ++NumTailCalls; 5464 if (!getTargetMachine().Options.GuaranteedTailCallOpt) 5465 ++NumSiblingCalls; 5466 5467 assert(isa<GlobalAddressSDNode>(Callee) && 5468 "Callee should be an llvm::Function object."); 5469 LLVM_DEBUG( 5470 const GlobalValue *GV = 5471 cast<GlobalAddressSDNode>(Callee)->getGlobal(); 5472 const unsigned Width = 5473 80 - strlen("TCO caller: ") - strlen(", callee linkage: 0, 0"); 5474 dbgs() << "TCO caller: " 5475 << left_justify(DAG.getMachineFunction().getName(), Width) 5476 << ", callee linkage: " << GV->getVisibility() << ", " 5477 << GV->getLinkage() << "\n"); 5478 } 5479 } 5480 5481 if (!isTailCall && CS && CS.isMustTailCall()) 5482 report_fatal_error("failed to perform tail call elimination on a call " 5483 "site marked musttail"); 5484 5485 // When long calls (i.e. indirect calls) are always used, calls are always 5486 // made via function pointer. If we have a function name, first translate it 5487 // into a pointer. 
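// e.g. under a long-call configuration (clang's -mlongcall), a direct
// 'bl foo' might not reach the callee, so foo's address is materialized via
// LowerGlobalAddress() below and the call proceeds through the count register.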
5488 if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
5489 !isTailCall)
5490 Callee = LowerGlobalAddress(Callee, DAG);
5491
5492 CallFlags CFlags(
5493 CallConv, isTailCall, isVarArg, isPatchPoint,
5494 isIndirectCall(Callee, DAG, Subtarget, isPatchPoint),
5495 // hasNest
5496 Subtarget.is64BitELFABI() &&
5497 any_of(Outs, [](ISD::OutputArg Arg) { return Arg.Flags.isNest(); }));
5498
5499 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
5500 return LowerCall_64SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5501 InVals, CS);
5502
5503 if (Subtarget.isSVR4ABI())
5504 return LowerCall_32SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5505 InVals, CS);
5506
5507 if (Subtarget.isAIXABI())
5508 return LowerCall_AIX(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5509 InVals, CS);
5510
5511 return LowerCall_Darwin(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5512 InVals, CS);
5513 }
5514
5515 SDValue PPCTargetLowering::LowerCall_32SVR4(
5516 SDValue Chain, SDValue Callee, CallFlags CFlags,
5517 const SmallVectorImpl<ISD::OutputArg> &Outs,
5518 const SmallVectorImpl<SDValue> &OutVals,
5519 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5520 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5521 ImmutableCallSite CS) const {
5522 // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
5523 // of the 32-bit SVR4 ABI stack frame layout.
5524
5525 const CallingConv::ID CallConv = CFlags.CallConv;
5526 const bool IsVarArg = CFlags.IsVarArg;
5527 const bool IsTailCall = CFlags.IsTailCall;
5528
5529 assert((CallConv == CallingConv::C ||
5530 CallConv == CallingConv::Cold ||
5531 CallConv == CallingConv::Fast) && "Unknown calling convention!");
5532
5533 unsigned PtrByteSize = 4;
5534
5535 MachineFunction &MF = DAG.getMachineFunction();
5536
5537 // Mark this function as potentially containing a tail call. As a consequence
5538 // the frame pointer will be used for dynamic stack allocation and for
5539 // restoring the caller's stack pointer in this function's epilogue. This is
5540 // done because the tail-called function might overwrite the value in this
5541 // function's (MF) stack pointer stack slot 0(SP).
5542 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5543 CallConv == CallingConv::Fast)
5544 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5545
5546 // Count how many bytes are to be pushed on the stack, including the linkage
5547 // area, parameter list area and the part of the local variable space which
5548 // contains copies of aggregates which are passed by value.
5549
5550 // Assign locations to all of the outgoing arguments.
5551 SmallVector<CCValAssign, 16> ArgLocs;
5552 PPCCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
5553
5554 // Reserve space for the linkage area on the stack.
5555 CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
5556 PtrByteSize);
5557 if (useSoftFloat())
5558 CCInfo.PreAnalyzeCallOperands(Outs);
5559
5560 if (IsVarArg) {
5561 // Handle fixed and variable vector arguments differently.
5562 // Fixed vector arguments go into registers as long as registers are
5563 // available. Variable vector arguments always go into memory.
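// Hedged example: for a call `f(v, x)` to `void f(vector int, ...)` where both
// operands are vectors, the fixed first argument may be assigned v2 by
// CC_PPC32_SVR4, while the variadic vector is sent to memory by
// CC_PPC32_SVR4_VarArg.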
5564 unsigned NumArgs = Outs.size();
5565
5566 for (unsigned i = 0; i != NumArgs; ++i) {
5567 MVT ArgVT = Outs[i].VT;
5568 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
5569 bool Result;
5570
5571 if (Outs[i].IsFixed) {
5572 Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
5573 CCInfo);
5574 } else {
5575 Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
5576 ArgFlags, CCInfo);
5577 }
5578
5579 if (Result) {
5580 #ifndef NDEBUG
5581 errs() << "Call operand #" << i << " has unhandled type "
5582 << EVT(ArgVT).getEVTString() << "\n";
5583 #endif
5584 llvm_unreachable(nullptr);
5585 }
5586 }
5587 } else {
5588 // All arguments are treated the same.
5589 CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
5590 }
5591 CCInfo.clearWasPPCF128();
5592
5593 // Assign locations to all of the outgoing aggregate by value arguments.
5594 SmallVector<CCValAssign, 16> ByValArgLocs;
5595 CCState CCByValInfo(CallConv, IsVarArg, MF, ByValArgLocs, *DAG.getContext());
5596
5597 // Reserve stack space for the allocations in CCInfo.
5598 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
5599
5600 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
5601
5602 // Size of the linkage area, parameter list area and the part of the local
5603 // variable space where copies of aggregates which are passed by value are
5604 // stored.
5605 unsigned NumBytes = CCByValInfo.getNextStackOffset();
5606
5607 // Calculate by how many bytes the stack has to be adjusted in case of tail
5608 // call optimization.
5609 int SPDiff = CalculateTailCallSPDiff(DAG, IsTailCall, NumBytes);
5610
5611 // Adjust the stack pointer for the new arguments...
5612 // These operations are automatically eliminated by the prolog/epilog pass.
5613 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5614 SDValue CallSeqStart = Chain;
5615
5616 // Load the return address and frame pointer so they can be moved somewhere
5617 // else later.
5618 SDValue LROp, FPOp;
5619 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5620
5621 // Set up a copy of the stack pointer for use loading and storing any
5622 // arguments that may not fit in the registers available for argument
5623 // passing.
5624 SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5625
5626 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5627 SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5628 SmallVector<SDValue, 8> MemOpChains;
5629
5630 bool seenFloatArg = false;
5631 // Walk the register/memloc assignments, inserting copies/loads.
5632 // i - Tracks the index into the list of registers allocated for the call
5633 // RealArgIdx - Tracks the index into the list of actual function arguments
5634 // j - Tracks the index into the list of byval arguments
5635 for (unsigned i = 0, RealArgIdx = 0, j = 0, e = ArgLocs.size();
5636 i != e;
5637 ++i, ++RealArgIdx) {
5638 CCValAssign &VA = ArgLocs[i];
5639 SDValue Arg = OutVals[RealArgIdx];
5640 ISD::ArgFlagsTy Flags = Outs[RealArgIdx].Flags;
5641
5642 if (Flags.isByVal()) {
5643 // Argument is an aggregate which is passed by value, thus we need to
5644 // create a copy of it in the local variable space of the current stack
5645 // frame (which is the stack frame of the caller) and pass the address of
5646 // this copy to the callee.
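// Illustration (assumed sizes): for `void g(struct S s)` with sizeof(S) == 12,
// CreateCopyOfByValArgument() below memcpy's the 12 bytes into the caller's
// own frame and the copy's address is what actually reaches the callee, so the
// callee may modify the copy freely.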
5647 assert((j < ByValArgLocs.size()) && "Index out of bounds!");
5648 CCValAssign &ByValVA = ByValArgLocs[j++];
5649 assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
5650
5651 // Memory reserved in the local variable space of the caller's stack frame.
5652 unsigned LocMemOffset = ByValVA.getLocMemOffset();
5653
5654 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5655 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5656 StackPtr, PtrOff);
5657
5658 // Create a copy of the argument in the local area of the current
5659 // stack frame.
5660 SDValue MemcpyCall =
5661 CreateCopyOfByValArgument(Arg, PtrOff,
5662 CallSeqStart.getNode()->getOperand(0),
5663 Flags, DAG, dl);
5664
5665 // This must go outside the CALLSEQ_START..END.
5666 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0,
5667 SDLoc(MemcpyCall));
5668 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5669 NewCallSeqStart.getNode());
5670 Chain = CallSeqStart = NewCallSeqStart;
5671
5672 // Pass the address of the aggregate copy on the stack either in a
5673 // physical register or in the parameter list area of the current stack
5674 // frame to the callee.
5675 Arg = PtrOff;
5676 }
5677
5678 // When useCRBits() is true, there can be i1 arguments.
5679 // This is because getRegisterType(MVT::i1) => MVT::i1,
5680 // while for other integer types getRegisterType() => MVT::i32.
5681 // Extend i1 and ensure the callee will get i32.
5682 if (Arg.getValueType() == MVT::i1)
5683 Arg = DAG.getNode(Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
5684 dl, MVT::i32, Arg);
5685
5686 if (VA.isRegLoc()) {
5687 seenFloatArg |= VA.getLocVT().isFloatingPoint();
5688 // Put argument in a physical register.
5689 if (Subtarget.hasSPE() && Arg.getValueType() == MVT::f64) {
5690 bool IsLE = Subtarget.isLittleEndian();
5691 SDValue SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5692 DAG.getIntPtrConstant(IsLE ? 0 : 1, dl));
5693 RegsToPass.push_back(std::make_pair(VA.getLocReg(), SVal.getValue(0)));
5694 SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5695 DAG.getIntPtrConstant(IsLE ? 1 : 0, dl));
5696 RegsToPass.push_back(std::make_pair(ArgLocs[++i].getLocReg(),
5697 SVal.getValue(0)));
5698 } else
5699 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
5700 } else {
5701 // Put argument in the parameter list area of the current stack frame.
5702 assert(VA.isMemLoc());
5703 unsigned LocMemOffset = VA.getLocMemOffset();
5704
5705 if (!IsTailCall) {
5706 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5707 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5708 StackPtr, PtrOff);
5709
5710 MemOpChains.push_back(
5711 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
5712 } else {
5713 // Calculate and remember argument location.
5714 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
5715 TailCallArguments);
5716 }
5717 }
5718 }
5719
5720 if (!MemOpChains.empty())
5721 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
5722
5723 // Build a sequence of copy-to-reg nodes chained together with token chain
5724 // and flag operands which copy the outgoing args into the appropriate regs.
5725 SDValue InFlag;
5726 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
5727 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
5728 RegsToPass[i].second, InFlag);
5729 InFlag = Chain.getValue(1);
5730 }
5731
5732 // Set CR bit 6 to true if this is a vararg call with floating args passed in
5733 // registers.
5734 if (IsVarArg) {
5735 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
5736 SDValue Ops[] = { Chain, InFlag };
5737
5738 Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
5739 dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));
5740
5741 InFlag = Chain.getValue(1);
5742 }
5743
5744 if (IsTailCall)
5745 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
5746 TailCallArguments);
5747
5748 return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
5749 Callee, SPDiff, NumBytes, Ins, InVals, CS);
5750 }
5751
5752 // Copy an argument into memory, being careful to do this outside the
5753 // call sequence for the call to which the argument belongs.
5754 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
5755 SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
5756 SelectionDAG &DAG, const SDLoc &dl) const {
5757 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
5758 CallSeqStart.getNode()->getOperand(0),
5759 Flags, DAG, dl);
5760 // The MEMCPY must go outside the CALLSEQ_START..END.
5761 int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
5762 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
5763 SDLoc(MemcpyCall));
5764 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5765 NewCallSeqStart.getNode());
5766 return NewCallSeqStart;
5767 }
5768
5769 SDValue PPCTargetLowering::LowerCall_64SVR4(
5770 SDValue Chain, SDValue Callee, CallFlags CFlags,
5771 const SmallVectorImpl<ISD::OutputArg> &Outs,
5772 const SmallVectorImpl<SDValue> &OutVals,
5773 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5774 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5775 ImmutableCallSite CS) const {
5776 bool isELFv2ABI = Subtarget.isELFv2ABI();
5777 bool isLittleEndian = Subtarget.isLittleEndian();
5778 unsigned NumOps = Outs.size();
5779 bool IsSibCall = false;
5780 bool IsFastCall = CFlags.CallConv == CallingConv::Fast;
5781
5782 EVT PtrVT = getPointerTy(DAG.getDataLayout());
5783 unsigned PtrByteSize = 8;
5784
5785 MachineFunction &MF = DAG.getMachineFunction();
5786
5787 if (CFlags.IsTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
5788 IsSibCall = true;
5789
5790 // Mark this function as potentially containing a tail call. As a consequence
5791 // the frame pointer will be used for dynamic stack allocation and for
5792 // restoring the caller's stack pointer in this function's epilogue. This is
5793 // done because the tail-called function might overwrite the value in this
5794 // function's (MF) stack pointer stack slot 0(SP).
5795 if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
5796 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5797
5798 assert(!(IsFastCall && CFlags.IsVarArg) &&
5799 "fastcc not supported on varargs functions");
5800
5801 // Count how many bytes are to be pushed on the stack, including the linkage
5802 // area, and parameter passing area. On ELFv1, the linkage area is 48 bytes
5803 // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
5804 // area is 32 bytes reserved space for [SP][CR][LR][TOC].
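// Illustrative linkage-area layout, per the description above (byte offsets
// from the stack pointer):
//   ELFv1: 0 back chain | 8 CR save | 16 LR save | 24, 32 reserved | 40 TOC
//   ELFv2: 0 back chain | 8 CR save | 16 LR save | 24 TOC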
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  unsigned NumBytes = LinkageSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
  unsigned &QFPR_idx = FPR_idx;

  static const MCPhysReg GPR[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned NumGPRs = array_lengthof(GPR);
  const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
  const unsigned NumVRs = array_lengthof(VR);
  const unsigned NumQFPRs = NumFPRs;

  // On ELFv2, we can avoid allocating the parameter area if all the arguments
  // can be passed to the callee in registers.
  // For the fast calling convention, there is another check below.
  // Note: We should keep this consistent with LowerFormalArguments_64SVR4().
  bool HasParameterArea = !isELFv2ABI || CFlags.IsVarArg || IsFastCall;
  if (!HasParameterArea) {
    unsigned ParamAreaSize = NumGPRs * PtrByteSize;
    unsigned AvailableFPRs = NumFPRs;
    unsigned AvailableVRs = NumVRs;
    unsigned NumBytesTmp = NumBytes;
    for (unsigned i = 0; i != NumOps; ++i) {
      if (Outs[i].Flags.isNest()) continue;
      if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags,
                                 PtrByteSize, LinkageSize, ParamAreaSize,
                                 NumBytesTmp, AvailableFPRs, AvailableVRs,
                                 Subtarget.hasQPX()))
        HasParameterArea = true;
    }
  }

  // When using the fast calling convention, we don't provide backing for
  // arguments that will be in registers.
  unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;

  // Avoid allocating the parameter area for fastcc functions if all the
  // arguments can be passed in registers.
  if (IsFastCall)
    HasParameterArea = false;

  // Add up all the space actually used.
  for (unsigned i = 0; i != NumOps; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    EVT ArgVT = Outs[i].VT;
    EVT OrigVT = Outs[i].ArgVT;

    if (Flags.isNest())
      continue;

    if (IsFastCall) {
      if (Flags.isByVal()) {
        NumGPRsUsed += (Flags.getByValSize()+7)/8;
        if (NumGPRsUsed > NumGPRs)
          HasParameterArea = true;
      } else {
        switch (ArgVT.getSimpleVT().SimpleTy) {
        default: llvm_unreachable("Unexpected ValueType for argument!");
        case MVT::i1:
        case MVT::i32:
        case MVT::i64:
          if (++NumGPRsUsed <= NumGPRs)
            continue;
          break;
        case MVT::v4i32:
        case MVT::v8i16:
        case MVT::v16i8:
        case MVT::v2f64:
        case MVT::v2i64:
        case MVT::v1i128:
        case MVT::f128:
          if (++NumVRsUsed <= NumVRs)
            continue;
          break;
        case MVT::v4f32:
          // When using QPX, this is handled like an FP register; otherwise
          // it is an Altivec register.
          if (Subtarget.hasQPX()) {
            if (++NumFPRsUsed <= NumFPRs)
              continue;
          } else {
            if (++NumVRsUsed <= NumVRs)
              continue;
          }
          break;
        case MVT::f32:
        case MVT::f64:
        case MVT::v4f64: // QPX
        case MVT::v4i1:  // QPX
          if (++NumFPRsUsed <= NumFPRs)
            continue;
          break;
        }
        HasParameterArea = true;
      }
    }

    /* Respect alignment of argument on the stack. */
    unsigned Align =
      CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
    NumBytes = ((NumBytes + Align - 1) / Align) * Align;

    NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
    if (Flags.isInConsecutiveRegsLast())
      NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
  }

  unsigned NumBytesActuallyUsed = NumBytes;

  // In the old ELFv1 ABI, the prolog code of the callee may store up to 8
  // GPR argument registers to the stack, allowing va_start to index over
  // them in memory if the callee is variadic.
  // Because we cannot tell if this is needed on the caller side, we have to
  // conservatively assume that it is needed. As such, make sure we have at
  // least enough stack space for the caller to store the 8 GPRs.
  // In the ELFv2 ABI, we allocate the parameter area iff a callee
  // really requires memory operands, e.g. a vararg function.
  if (HasParameterArea)
    NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
  else
    NumBytes = LinkageSize;

  // A tail call needs the stack to be aligned.
  if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
    NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);

  int SPDiff = 0;

  // Calculate by how many bytes the stack has to be adjusted in case of tail
  // call optimization.
  if (!IsSibCall)
    SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);

  // To protect arguments on the stack from being clobbered in a tail call,
  // force all the loads to happen before doing any other lowering.
  if (CFlags.IsTailCall)
    Chain = DAG.getStackArgumentTokenFactor(Chain);

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
  SDValue CallSeqStart = Chain;

  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
  SDValue LROp, FPOp;
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);

  // Set up a copy of the stack pointer for use in loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);

  // Figure out which arguments are going to go in registers, and which in
  // memory. Also, if this is a vararg function, floating point operations
  // must be stored to our stack, and loaded into integer regs as well, if
  // any integer regs are available for argument passing.
  unsigned ArgOffset = LinkageSize;

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<TailCallArgumentInfo, 8> TailCallArguments;

  SmallVector<SDValue, 8> MemOpChains;
  for (unsigned i = 0; i != NumOps; ++i) {
    SDValue Arg = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    EVT ArgVT = Outs[i].VT;
    EVT OrigVT = Outs[i].ArgVT;

    // PtrOff will be used to store the current argument to the stack if a
    // register cannot be found for it.
    SDValue PtrOff;

    // We re-align the argument offset for each argument, except when using
    // the fast calling convention, where we need to make sure we do that
    // only when we'll actually use a stack slot.
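    // (Arithmetic sketch: an argument requiring 16-byte alignment when
    // ArgOffset is currently 40 is rounded up by the lambda below to
    // ((40 + 15) / 16) * 16 == 48.)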
    auto ComputePtrOff = [&]() {
      /* Respect alignment of argument on the stack. */
      unsigned Align =
        CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
      ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;

      PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());

      PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
    };

    if (!IsFastCall) {
      ComputePtrOff();

      /* Compute GPR index associated with argument offset. */
      GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
      GPR_idx = std::min(GPR_idx, NumGPRs);
    }

    // Promote integers to 64-bit values.
    if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
      // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
      unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
      Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
    }

    // FIXME: memcpy is used way more than necessary. Correctness first.
    // Note: "by value" is code for passing a structure by value, not
    // basic types.
    if (Flags.isByVal()) {
      // Note: Size includes alignment padding, so
      //   struct x { short a; char b; }
      // will have Size = 4. With #pragma pack(1), it will have Size = 3.
      // These are the proper values we need for right-justifying the
      // aggregate in a parameter register.
      unsigned Size = Flags.getByValSize();

      // An empty aggregate parameter takes up no storage and no
      // registers.
      if (Size == 0)
        continue;

      if (IsFastCall)
        ComputePtrOff();

      // All aggregates smaller than 8 bytes must be passed right-justified.
      if (Size==1 || Size==2 || Size==4) {
        EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
        if (GPR_idx != NumGPRs) {
          SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
                                        MachinePointerInfo(), VT);
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));

          ArgOffset += PtrByteSize;
          continue;
        }
      }

      if (GPR_idx == NumGPRs && Size < 8) {
        SDValue AddPtr = PtrOff;
        if (!isLittleEndian) {
          SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
                                          PtrOff.getValueType());
          AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
        }
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
                                                          CallSeqStart,
                                                          Flags, DAG, dl);
        ArgOffset += PtrByteSize;
        continue;
      }
      // Copy the entire object into memory. There are cases where
      // gcc-generated code assumes it is there, even if it could be put
      // entirely into registers. (This is not what the doc says.)

      // FIXME: The above statement is likely due to a misunderstanding of the
      // documents. All arguments must be copied into the parameter area BY
      // THE CALLEE in the event that the callee takes the address of any
      // formal argument. That has not yet been implemented. However, it is
      // reasonable to use the stack area as a staging area for the register
      // load.

      // Skip this for small aggregates, as we will use the same slot for a
      // right-justified copy, below.
      if (Size >= 8)
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
                                                          CallSeqStart,
                                                          Flags, DAG, dl);

      // When a register is available, pass a small aggregate right-justified.
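      // (Worked example: on big-endian, a 3-byte aggregate is memcpy'd to
      // offset 8 - 3 == 5 within the doubleword slot, so the doubleword load
      // below places it in the low-order bytes of the GPR.)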
      if (Size < 8 && GPR_idx != NumGPRs) {
        // The easiest way to get this right-justified in a register
        // is to copy the structure into the rightmost portion of a
        // local variable slot, then load the whole slot into the
        // register.
        // FIXME: The memcpy seems to produce pretty awful code for
        // small aggregates, particularly for packed ones.
        // FIXME: It would be preferable to use the slot in the
        // parameter save area instead of a new local variable.
        SDValue AddPtr = PtrOff;
        if (!isLittleEndian) {
          SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType());
          AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
        }
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
                                                          CallSeqStart,
                                                          Flags, DAG, dl);

        // Load the slot into the register.
        SDValue Load =
            DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo());
        MemOpChains.push_back(Load.getValue(1));
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));

        // Done with this argument.
        ArgOffset += PtrByteSize;
        continue;
      }

      // For aggregates larger than PtrByteSize, copy the pieces of the
      // object that fit into registers from the parameter save area.
      for (unsigned j=0; j<Size; j+=PtrByteSize) {
        SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
        SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
        if (GPR_idx != NumGPRs) {
          SDValue Load =
              DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
          break;
        }
      }
      continue;
    }

    switch (Arg.getSimpleValueType().SimpleTy) {
    default: llvm_unreachable("Unexpected ValueType for argument!");
    case MVT::i1:
    case MVT::i32:
    case MVT::i64:
      if (Flags.isNest()) {
        // The 'nest' parameter, if any, is passed in R11.
        RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
        break;
      }

      // These can be scalar arguments or elements of an integer array type
      // passed directly. Clang may use those instead of "byval" aggregate
      // types to avoid forcing arguments to memory unnecessarily.
      if (GPR_idx != NumGPRs) {
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
      } else {
        if (IsFastCall)
          ComputePtrOff();

        assert(HasParameterArea &&
               "Parameter area must exist to pass an argument in memory.");
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         true, CFlags.IsTailCall, false, MemOpChains,
                         TailCallArguments, dl);
        if (IsFastCall)
          ArgOffset += PtrByteSize;
      }
      if (!IsFastCall)
        ArgOffset += PtrByteSize;
      break;
    case MVT::f32:
    case MVT::f64: {
      // These can be scalar arguments or elements of a float array type
      // passed directly. The latter are used to implement ELFv2 homogeneous
      // float aggregates.

      // Named arguments go into FPRs first, and once they overflow, the
      // remaining arguments go into GPRs and then the parameter save area.
      // Unnamed arguments for vararg functions always go to GPRs and
      // then the parameter save area. For now, put all arguments to vararg
      // routines always in both locations (FPR *and* GPR or stack slot).
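      // (Illustration of the policy above: for a call like
      // printf("%g", x), the double x lands in an FPR and is also mirrored
      // into a GPR or stack slot so va_arg can locate it.)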
      bool NeedGPROrStack = CFlags.IsVarArg || FPR_idx == NumFPRs;
      bool NeededLoad = false;

      // First load the argument into the next available FPR.
      if (FPR_idx != NumFPRs)
        RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));

      // Next, load the argument into a GPR or stack slot if needed.
      if (!NeedGPROrStack)
        ;
      else if (GPR_idx != NumGPRs && !IsFastCall) {
        // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
        // once we support fp <-> gpr moves.

        // In the non-vararg case, this can only ever happen in the
        // presence of f32 array types, since otherwise we never run
        // out of FPRs before running out of GPRs.
        SDValue ArgVal;

        // Double values are always passed in a single GPR.
        if (Arg.getValueType() != MVT::f32) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);

        // Non-array float values are extended and passed in a GPR.
        } else if (!Flags.isInConsecutiveRegs()) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);

        // If we have an array of floats, we collect every odd element
        // together with its predecessor into one GPR.
        } else if (ArgOffset % PtrByteSize != 0) {
          SDValue Lo, Hi;
          Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
          Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          if (!isLittleEndian)
            std::swap(Lo, Hi);
          ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);

        // The final element, if even, goes into the first half of a GPR.
        } else if (Flags.isInConsecutiveRegsLast()) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
          if (!isLittleEndian)
            ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
                                 DAG.getConstant(32, dl, MVT::i32));

        // Non-final even elements are skipped; they will be handled
        // together with the subsequent argument on the next go-around.
        } else
          ArgVal = SDValue();

        if (ArgVal.getNode())
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
      } else {
        if (IsFastCall)
          ComputePtrOff();

        // Single-precision floating-point values are mapped to the
        // second (rightmost) word of the stack doubleword.
        if (Arg.getValueType() == MVT::f32 &&
            !isLittleEndian && !Flags.isInConsecutiveRegs()) {
          SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
          PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
        }

        assert(HasParameterArea &&
               "Parameter area must exist to pass an argument in memory.");
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         true, CFlags.IsTailCall, false, MemOpChains,
                         TailCallArguments, dl);

        NeededLoad = true;
      }
      // When passing an array of floats, the array occupies consecutive
      // space in the argument area; only round up to the next doubleword
      // at the end of the array. Otherwise, each float takes 8 bytes.
      if (!IsFastCall || NeededLoad) {
        ArgOffset += (Arg.getValueType() == MVT::f32 &&
                      Flags.isInConsecutiveRegs()) ? 4 : 8;
        if (Flags.isInConsecutiveRegsLast())
          ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      }
      break;
    }
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
    case MVT::v2f64:
    case MVT::v2i64:
    case MVT::v1i128:
    case MVT::f128:
      if (!Subtarget.hasQPX()) {
        // These can be scalar arguments or elements of a vector array type
        // passed directly. The latter are used to implement ELFv2 homogeneous
        // vector aggregates.

        // For a varargs call, named arguments go into VRs or on the stack as
        // usual; unnamed arguments always go to the stack or the
        // corresponding GPRs when within range. For now, we always put the
        // value in both locations (or even all three).
        if (CFlags.IsVarArg) {
          assert(HasParameterArea &&
                 "Parameter area must exist if we have a varargs call.");
          // We could elide this store in the case where the object fits
          // entirely in R registers. Maybe later.
          SDValue Store =
              DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
          MemOpChains.push_back(Store);
          if (VR_idx != NumVRs) {
            SDValue Load =
                DAG.getLoad(MVT::v4f32, dl, Store, PtrOff,
                            MachinePointerInfo());
            MemOpChains.push_back(Load.getValue(1));
            RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
          }
          ArgOffset += 16;
          for (unsigned i=0; i<16; i+=PtrByteSize) {
            if (GPR_idx == NumGPRs)
              break;
            SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
                                     DAG.getConstant(i, dl, PtrVT));
            SDValue Load =
                DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
            MemOpChains.push_back(Load.getValue(1));
            RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          }
          break;
        }

        // Non-varargs Altivec params go into VRs or on the stack.
        if (VR_idx != NumVRs) {
          RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
        } else {
          if (IsFastCall)
            ComputePtrOff();

          assert(HasParameterArea &&
                 "Parameter area must exist to pass an argument in memory.");
          LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                           true, CFlags.IsTailCall, true, MemOpChains,
                           TailCallArguments, dl);
          if (IsFastCall)
            ArgOffset += 16;
        }

        if (!IsFastCall)
          ArgOffset += 16;
        break;
      } // not QPX

      assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 &&
             "Invalid QPX parameter type");

      LLVM_FALLTHROUGH;
    case MVT::v4f64:
    case MVT::v4i1: {
      bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32;
      if (CFlags.IsVarArg) {
        assert(HasParameterArea &&
               "Parameter area must exist if we have a varargs call.");
        // We could elide this store in the case where the object fits
        // entirely in R registers. Maybe later.
        SDValue Store =
            DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
        MemOpChains.push_back(Store);
        if (QFPR_idx != NumQFPRs) {
          SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl,
                                     Store, PtrOff, MachinePointerInfo());
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load));
        }
        ArgOffset += (IsF32 ? 16 : 32);
        for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) {
          if (GPR_idx == NumGPRs)
            break;
          SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
                                   DAG.getConstant(i, dl, PtrVT));
          SDValue Load =
              DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
        }
        break;
      }

      // Non-varargs QPX params go into registers or on the stack.
      if (QFPR_idx != NumQFPRs) {
        RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg));
      } else {
        if (IsFastCall)
          ComputePtrOff();

        assert(HasParameterArea &&
               "Parameter area must exist to pass an argument in memory.");
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         true, CFlags.IsTailCall, true, MemOpChains,
                         TailCallArguments, dl);
        if (IsFastCall)
          ArgOffset += (IsF32 ? 16 : 32);
      }

      if (!IsFastCall)
        ArgOffset += (IsF32 ? 16 : 32);
      break;
    }
    }
  }

  assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
         "mismatch in size of parameter area");
  (void)NumBytesActuallyUsed;

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Check if this is an indirect call (MTCTR/BCTRL).
  // See prepareDescriptorIndirectCall and buildCallOperands for more
  // information about calls through function pointers in the 64-bit SVR4 ABI.
  if (CFlags.IsIndirect) {
    assert(!CFlags.IsTailCall && "Indirect tail calls not supported");
    // Load r2 into a virtual register and store it to the TOC save area.
    setUsesTOCBasePtr(DAG);
    SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
    // TOC save area offset.
    unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
    SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
    SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
    Chain = DAG.getStore(
        Val.getValue(1), dl, Val, AddPtr,
        MachinePointerInfo::getStack(DAG.getMachineFunction(),
                                     TOCSaveOffset));
    // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
    // This does not mean the MTCTR instruction must use R12; it's easier
    // to model this as an extra parameter, so do that.
    if (isELFv2ABI && !CFlags.IsPatchPoint)
      RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
  }

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  if (CFlags.IsTailCall && !IsSibCall)
    PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
                    TailCallArguments);

  return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
                    Callee, SPDiff, NumBytes, Ins, InVals, CS);
}

SDValue PPCTargetLowering::LowerCall_Darwin(
    SDValue Chain, SDValue Callee, CallFlags CFlags,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    ImmutableCallSite CS) const {
  unsigned NumOps = Outs.size();

  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;
  unsigned PtrByteSize = isPPC64 ? 8 : 4;

  MachineFunction &MF = DAG.getMachineFunction();

  // Mark this function as potentially containing a function that contains a
  // tail call. As a consequence, the frame pointer will be used for dynamic
  // alloc and for restoring the caller's stack pointer in this function's
  // epilog. This is done because the called function might, via a tail call,
  // overwrite the value in this function's (MF) stack pointer stack slot
  // 0(SP).
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CFlags.CallConv == CallingConv::Fast)
    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area and the parameter passing area. We start with 24/48 bytes, which is
  // prereserved space for [SP][CR][LR][3 x unused].
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  unsigned NumBytes = LinkageSize;

  // Add up all the space actually used.
  // In 32-bit non-varargs calls, Altivec parameters all go at the end;
  // usually they all go in registers, but we must reserve stack space for
  // them for possible use by the caller. In varargs or 64-bit calls,
  // parameters are assigned stack space in order, with padding so Altivec
  // parameters are 16-byte aligned.
  unsigned nAltivecParamsAtEnd = 0;
  for (unsigned i = 0; i != NumOps; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    EVT ArgVT = Outs[i].VT;
    // Varargs Altivec parameters are padded to a 16-byte boundary.
    if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
        ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
        ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
      if (!CFlags.IsVarArg && !isPPC64) {
        // Non-varargs Altivec parameters go after all the non-Altivec
        // parameters; handle those later so we know how much padding we need.
        nAltivecParamsAtEnd++;
        continue;
      }
      // Varargs and 64-bit Altivec parameters are padded to a 16-byte
      // boundary.
      NumBytes = ((NumBytes+15)/16)*16;
    }
    NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
  }

  // Allow for Altivec parameters at the end, if needed.
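  // (Arithmetic sketch: with NumBytes == 56 and two Altivec params at the
  // end, the size is first rounded up to 64 and then grows by 2 * 16 to 96.)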
  if (nAltivecParamsAtEnd) {
    NumBytes = ((NumBytes+15)/16)*16;
    NumBytes += 16*nAltivecParamsAtEnd;
  }

  // The prolog code of the callee may store up to 8 GPR argument registers to
  // the stack, allowing va_start to index over them in memory if the callee
  // is variadic.
  // Because we cannot tell if this is needed on the caller side, we have to
  // conservatively assume that it is needed. As such, make sure we have at
  // least enough stack space for the caller to store the 8 GPRs.
  NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);

  // A tail call needs the stack to be aligned.
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CFlags.CallConv == CallingConv::Fast)
    NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);

  // Calculate by how many bytes the stack has to be adjusted in case of tail
  // call optimization.
  int SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);

  // To protect arguments on the stack from being clobbered in a tail call,
  // force all the loads to happen before doing any other lowering.
  if (CFlags.IsTailCall)
    Chain = DAG.getStackArgumentTokenFactor(Chain);

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
  SDValue CallSeqStart = Chain;

  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
  SDValue LROp, FPOp;
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);

  // Set up a copy of the stack pointer for use in loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr;
  if (isPPC64)
    StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
  else
    StackPtr = DAG.getRegister(PPC::R1, MVT::i32);

  // Figure out which arguments are going to go in registers, and which in
  // memory. Also, if this is a vararg function, floating point operations
  // must be stored to our stack, and loaded into integer regs as well, if
  // any integer regs are available for argument passing.
  unsigned ArgOffset = LinkageSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;

  static const MCPhysReg GPR_32[] = {           // 32-bit registers.
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  static const MCPhysReg GPR_64[] = {           // 64-bit registers.
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };
  const unsigned NumGPRs = array_lengthof(GPR_32);
  const unsigned NumFPRs = 13;
  const unsigned NumVRs = array_lengthof(VR);

  const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<TailCallArgumentInfo, 8> TailCallArguments;

  SmallVector<SDValue, 8> MemOpChains;
  for (unsigned i = 0; i != NumOps; ++i) {
    SDValue Arg = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;

    // PtrOff will be used to store the current argument to the stack if a
    // register cannot be found for it.
    SDValue PtrOff;

    PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());

    PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);

    // On PPC64, promote integers to 64-bit values.
    if (isPPC64 && Arg.getValueType() == MVT::i32) {
      // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
      unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
      Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
    }

    // FIXME: memcpy is used way more than necessary. Correctness first.
    // Note: "by value" is code for passing a structure by value, not
    // basic types.
    if (Flags.isByVal()) {
      unsigned Size = Flags.getByValSize();
      // Very small objects are passed right-justified. Everything else is
      // passed left-justified.
      if (Size==1 || Size==2) {
        EVT VT = (Size==1) ? MVT::i8 : MVT::i16;
        if (GPR_idx != NumGPRs) {
          SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
                                        MachinePointerInfo(), VT);
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));

          ArgOffset += PtrByteSize;
        } else {
          SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
                                          PtrOff.getValueType());
          SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
          Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
                                                            CallSeqStart,
                                                            Flags, DAG, dl);
          ArgOffset += PtrByteSize;
        }
        continue;
      }
      // Copy the entire object into memory. There are cases where
      // gcc-generated code assumes it is there, even if it could be put
      // entirely into registers. (This is not what the doc says.)
      Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
                                                        CallSeqStart,
                                                        Flags, DAG, dl);

      // For small aggregates (Darwin only) and aggregates >= PtrByteSize,
      // copy the pieces of the object that fit into registers from the
      // parameter save area.
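      // (Example: on 32-bit Darwin a 10-byte aggregate spans three 4-byte
      // pieces; the loop below loads as many of those pieces as there are
      // free GPRs and leaves the rest in the memory copy made above.)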
      for (unsigned j=0; j<Size; j+=PtrByteSize) {
        SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
        SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
        if (GPR_idx != NumGPRs) {
          SDValue Load =
              DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
          break;
        }
      }
      continue;
    }

    switch (Arg.getSimpleValueType().SimpleTy) {
    default: llvm_unreachable("Unexpected ValueType for argument!");
    case MVT::i1:
    case MVT::i32:
    case MVT::i64:
      if (GPR_idx != NumGPRs) {
        if (Arg.getValueType() == MVT::i1)
          Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg);

        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
      } else {
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         isPPC64, CFlags.IsTailCall, false, MemOpChains,
                         TailCallArguments, dl);
      }
      ArgOffset += PtrByteSize;
      break;
    case MVT::f32:
    case MVT::f64:
      if (FPR_idx != NumFPRs) {
        RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));

        if (CFlags.IsVarArg) {
          SDValue Store =
              DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
          MemOpChains.push_back(Store);

          // Float varargs are always shadowed in available integer registers.
          if (GPR_idx != NumGPRs) {
            SDValue Load =
                DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
            MemOpChains.push_back(Load.getValue(1));
            RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          }
          if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
              !isPPC64) {
            SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
            PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
            SDValue Load =
                DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
            MemOpChains.push_back(Load.getValue(1));
            RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          }
        } else {
          // If we have any FPRs remaining, we may also have GPRs remaining.
          // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
          // GPRs.
          if (GPR_idx != NumGPRs)
            ++GPR_idx;
          if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
              !isPPC64)   // PPC64 has 64-bit GPRs, obviously. :)
            ++GPR_idx;
        }
      } else
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         isPPC64, CFlags.IsTailCall, false, MemOpChains,
                         TailCallArguments, dl);
      if (isPPC64)
        ArgOffset += 8;
      else
        ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
      break;
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
      if (CFlags.IsVarArg) {
        // These go aligned on the stack, or in the corresponding R registers
        // when within range. The Darwin PPC ABI doc claims they also go in
        // V registers; in fact gcc does this only for arguments that are
        // prototyped, not for those that match the ellipsis. We do it for
        // all arguments; this seems to work.
        while (ArgOffset % 16 != 0) {
          ArgOffset += PtrByteSize;
          if (GPR_idx != NumGPRs)
            GPR_idx++;
        }
        // We could elide this store in the case where the object fits
        // entirely in R registers. Maybe later.
        PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
                             DAG.getConstant(ArgOffset, dl, PtrVT));
        SDValue Store =
            DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
        MemOpChains.push_back(Store);
        if (VR_idx != NumVRs) {
          SDValue Load =
              DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
        }
        ArgOffset += 16;
        for (unsigned i=0; i<16; i+=PtrByteSize) {
          if (GPR_idx == NumGPRs)
            break;
          SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
                                   DAG.getConstant(i, dl, PtrVT));
          SDValue Load =
              DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
        }
        break;
      }

      // Non-varargs Altivec params generally go in registers, but have
      // stack space allocated at the end.
      if (VR_idx != NumVRs) {
        // Doesn't have GPR space allocated.
        RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
      } else if (nAltivecParamsAtEnd==0) {
        // We are emitting Altivec params in order.
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         isPPC64, CFlags.IsTailCall, true, MemOpChains,
                         TailCallArguments, dl);
        ArgOffset += 16;
      }
      break;
    }
  }
  // If all Altivec parameters fit in registers, as they usually do,
  // they get stack space following the non-Altivec parameters. We
  // don't track this here because nobody below needs it.
  // If there are more Altivec parameters than fit in registers, emit
  // the stores here.
  if (!CFlags.IsVarArg && nAltivecParamsAtEnd > NumVRs) {
    unsigned j = 0;
    // Offset is aligned; skip the first 12 params, which go in V registers.
    ArgOffset = ((ArgOffset+15)/16)*16;
    ArgOffset += 12*16;
    for (unsigned i = 0; i != NumOps; ++i) {
      SDValue Arg = OutVals[i];
      EVT ArgType = Outs[i].VT;
      if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
          ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
        if (++j > NumVRs) {
          SDValue PtrOff;
          // We are emitting Altivec params in order.
          LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                           isPPC64, CFlags.IsTailCall, true, MemOpChains,
                           TailCallArguments, dl);
          ArgOffset += 16;
        }
      }
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // On Darwin, R12 must contain the address of an indirect callee. This does
  // not mean the MTCTR instruction must use R12; it's easier to model this as
  // an extra parameter, so do that.
  if (CFlags.IsIndirect) {
    assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
    RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 :
                                                   PPC::R12), Callee));
  }

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  if (CFlags.IsTailCall)
    PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
                    TailCallArguments);

  return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
                    Callee, SPDiff, NumBytes, Ins, InVals, CS);
}

static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
                   CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                   CCState &State) {

  if (ValVT == MVT::f128)
    report_fatal_error("f128 is unimplemented on AIX.");

  if (ArgFlags.isByVal())
    report_fatal_error("Passing structure by value is unimplemented.");

  if (ArgFlags.isNest())
    report_fatal_error("Nest arguments are unimplemented.");

  if (ValVT.isVector() || LocVT.isVector())
    report_fatal_error("Vector arguments are unimplemented on AIX.");

  const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>(
      State.getMachineFunction().getSubtarget());
  const bool IsPPC64 = Subtarget.isPPC64();
  const unsigned PtrByteSize = IsPPC64 ? 8 : 4;

  static const MCPhysReg GPR_32[] = {// 32-bit registers.
      PPC::R3, PPC::R4, PPC::R5, PPC::R6,
      PPC::R7, PPC::R8, PPC::R9, PPC::R10};
  static const MCPhysReg GPR_64[] = {// 64-bit registers.
      PPC::X3, PPC::X4, PPC::X5, PPC::X6,
      PPC::X7, PPC::X8, PPC::X9, PPC::X10};

  // Arguments always reserve space in the parameter save area.
  switch (ValVT.SimpleTy) {
  default:
    report_fatal_error("Unhandled value type for argument.");
  case MVT::i64:
    // i64 arguments should have been split to i32 for PPC32.
    assert(IsPPC64 && "PPC32 should have split i64 values.");
    LLVM_FALLTHROUGH;
  case MVT::i1:
  case MVT::i32:
    State.AllocateStack(PtrByteSize, PtrByteSize);
    if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) {
      MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
      // Promote integers if needed.
      if (ValVT.getSizeInBits() < RegVT.getSizeInBits())
        LocInfo = ArgFlags.isSExt() ? CCValAssign::LocInfo::SExt
                                    : CCValAssign::LocInfo::ZExt;
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
    } else
      report_fatal_error("Handling of placing parameters on the stack is "
                         "unimplemented!");
    return false;

  case MVT::f32:
  case MVT::f64: {
    // The parameter save area (PSA) is reserved even if the float passes in
    // an FPR.
    const unsigned StoreSize = LocVT.getStoreSize();
    // Floats are always 4-byte aligned in the PSA on AIX.
    // This includes f64 in 64-bit mode for ABI compatibility.
    State.AllocateStack(IsPPC64 ? 8 : StoreSize, 4);
    if (unsigned Reg = State.AllocateReg(FPR))
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    else
      report_fatal_error("Handling of placing parameters on the stack is "
                         "unimplemented!");

    // AIX requires that GPRs are reserved for float arguments.
    // Successfully reserved GPRs are only initialized for vararg calls.
    MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
    for (unsigned I = 0; I < StoreSize; I += PtrByteSize) {
      if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) {
        if (State.isVarArg()) {
          // Custom handling is required for:
          //   f64 in PPC32, which needs to be split into 2 GPRs.
          //   f32 in PPC64, which needs to occupy only the lower 32 bits of
          //   a 64-bit GPR.
          State.addLoc(
              CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo));
        }
      } else if (State.isVarArg()) {
        report_fatal_error("Handling of placing parameters on the stack is "
                           "unimplemented!");
      }
    }

    return false;
  }
  }
  return true;
}

static const TargetRegisterClass *getRegClassForSVT(MVT::SimpleValueType SVT,
                                                    bool IsPPC64) {
  assert((IsPPC64 || SVT != MVT::i64) &&
         "i64 should have been split for 32-bit codegen.");

  switch (SVT) {
  default:
    report_fatal_error("Unexpected value type for formal argument");
  case MVT::i1:
  case MVT::i32:
  case MVT::i64:
    return IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
  case MVT::f32:
    return &PPC::F4RCRegClass;
  case MVT::f64:
    return &PPC::F8RCRegClass;
  }
}

static SDValue truncateScalarIntegerArg(ISD::ArgFlagsTy Flags, EVT ValVT,
                                        SelectionDAG &DAG, SDValue ArgValue,
                                        MVT LocVT, const SDLoc &dl) {
  assert(ValVT.isScalarInteger() && LocVT.isScalarInteger());
  assert(ValVT.getSizeInBits() < LocVT.getSizeInBits());

  if (Flags.isSExt())
    ArgValue = DAG.getNode(ISD::AssertSext, dl, LocVT, ArgValue,
                           DAG.getValueType(ValVT));
  else if (Flags.isZExt())
    ArgValue = DAG.getNode(ISD::AssertZext, dl, LocVT, ArgValue,
                           DAG.getValueType(ValVT));

  return DAG.getNode(ISD::TRUNCATE, dl, ValVT, ArgValue);
}

SDValue PPCTargetLowering::LowerFormalArguments_AIX(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  assert((CallConv == CallingConv::C || CallConv == CallingConv::Cold ||
          CallConv == CallingConv::Fast) &&
         "Unexpected calling convention!");

  if (isVarArg)
    report_fatal_error("This call type is unimplemented on AIX.");

  if (getTargetMachine().Options.GuaranteedTailCallOpt)
    report_fatal_error("Tail call support is unimplemented on AIX.");

  if (useSoftFloat())
    report_fatal_error("Soft float support is unimplemented on AIX.");

  const PPCSubtarget &Subtarget =
      static_cast<const PPCSubtarget &>(DAG.getSubtarget());
  if (Subtarget.hasQPX())
    report_fatal_error("QPX is not supported on AIX.");

  const bool IsPPC64 = Subtarget.isPPC64();
  const unsigned PtrByteSize = IsPPC64 ? 8 : 4;

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  MachineFunction &MF = DAG.getMachineFunction();
  CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());

  // Reserve space for the linkage area on the stack.
  const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  // On AIX a minimum of 8 words is saved to the parameter save area.
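  // (That is, 8 * PtrByteSize: 32 bytes on 32-bit AIX and 64 bytes on
  // 64-bit AIX.)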
  const unsigned MinParameterSaveArea = 8 * PtrByteSize;
  CCInfo.AllocateStack(LinkageSize + MinParameterSaveArea, PtrByteSize);
  CCInfo.AnalyzeFormalArguments(Ins, CC_AIX);

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue ArgValue;
    ISD::ArgFlagsTy Flags = Ins[i].Flags;
    if (VA.isRegLoc()) {
      EVT ValVT = VA.getValVT();
      MVT LocVT = VA.getLocVT();
      MVT::SimpleValueType SVT = ValVT.getSimpleVT().SimpleTy;
      unsigned VReg =
          MF.addLiveIn(VA.getLocReg(), getRegClassForSVT(SVT, IsPPC64));
      ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
      if (ValVT.isScalarInteger() &&
          (ValVT.getSizeInBits() < LocVT.getSizeInBits())) {
        ArgValue =
            truncateScalarIntegerArg(Flags, ValVT, DAG, ArgValue, LocVT, dl);
      }
      InVals.push_back(ArgValue);
    } else {
      report_fatal_error("Handling of formal arguments on the stack is "
                         "unimplemented!");
    }
  }

  // Area that is at least reserved in the caller of this function.
  unsigned MinReservedArea = CCInfo.getNextStackOffset();

  // Set the size that is at least reserved in the caller of this function.
  // A tail-call-optimized function's reserved stack space needs to be
  // aligned so that taking the difference between two stack areas will
  // result in an aligned stack.
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setMinReservedArea(MinReservedArea);

  return Chain;
}

SDValue PPCTargetLowering::LowerCall_AIX(
    SDValue Chain, SDValue Callee, CallFlags CFlags,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    ImmutableCallSite CS) const {

  assert((CFlags.CallConv == CallingConv::C ||
          CFlags.CallConv == CallingConv::Cold ||
          CFlags.CallConv == CallingConv::Fast) &&
         "Unexpected calling convention!");

  if (CFlags.IsPatchPoint)
    report_fatal_error("This call type is unimplemented on AIX.");

  const PPCSubtarget& Subtarget =
      static_cast<const PPCSubtarget&>(DAG.getSubtarget());
  if (Subtarget.hasQPX())
    report_fatal_error("QPX is not supported on AIX.");
  if (Subtarget.hasAltivec())
    report_fatal_error("Altivec support is unimplemented on AIX.");

  MachineFunction &MF = DAG.getMachineFunction();
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CFlags.CallConv, CFlags.IsVarArg, MF, ArgLocs,
                 *DAG.getContext());

  // Reserve space for the linkage save area (LSA) on the stack.
  // In both PPC32 and PPC64 there are 6 reserved slots in the LSA:
  //   [SP][CR][LR][2 x reserved][TOC].
  // The LSA is 24 bytes (6 x 4) in PPC32 and 48 bytes (6 x 8) in PPC64.
  const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  const bool IsPPC64 = Subtarget.isPPC64();
  const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
  CCInfo.AllocateStack(LinkageSize, PtrByteSize);
  CCInfo.AnalyzeCallOperands(Outs, CC_AIX);

  // The prolog code of the callee may store up to 8 GPR argument registers to
  // the stack, allowing va_start to index over them in memory if the callee
  // is variadic.
  // Because we cannot tell if this is needed on the caller side, we have to
  // conservatively assume that it is needed. As such, make sure we have at
  // least enough stack space for the caller to store the 8 GPRs.
  const unsigned MinParameterSaveAreaSize = 8 * PtrByteSize;
  const unsigned NumBytes = LinkageSize + MinParameterSaveAreaSize;

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
  SDValue CallSeqStart = Chain;

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;

  for (unsigned I = 0, E = ArgLocs.size(); I != E;) {
    CCValAssign &VA = ArgLocs[I++];

    if (VA.isMemLoc())
      report_fatal_error("Handling of placing parameters on the stack is "
                         "unimplemented!");
    if (!VA.isRegLoc())
      report_fatal_error(
          "Unexpected non-register location for function call argument.");

    SDValue Arg = OutVals[VA.getValNo()];

    if (!VA.needsCustom()) {
      switch (VA.getLocInfo()) {
      default:
        report_fatal_error("Unexpected argument extension type.");
      case CCValAssign::Full:
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      }
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));

      continue;
    }

    // Custom handling is used for GPR initializations for vararg float
    // arguments.
    assert(CFlags.IsVarArg && VA.getValVT().isFloatingPoint() &&
           VA.getLocVT().isInteger() &&
           "Unexpected custom register handling for calling convention.");

    SDValue ArgAsInt =
        DAG.getBitcast(MVT::getIntegerVT(VA.getValVT().getSizeInBits()), Arg);

    if (Arg.getValueType().getStoreSize() == VA.getLocVT().getStoreSize())
      // f32 in 32-bit GPR
      // f64 in 64-bit GPR
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgAsInt));
    else if (Arg.getValueType().getSizeInBits() <
             VA.getLocVT().getSizeInBits())
      // f32 in 64-bit GPR.
      RegsToPass.push_back(std::make_pair(
          VA.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, VA.getLocVT())));
    else {
      // f64 in two 32-bit GPRs
      // The 2 GPRs are marked custom and expected to be adjacent in ArgLocs.
      assert(Arg.getValueType() == MVT::f64 && CFlags.IsVarArg && !IsPPC64 &&
             "Unexpected custom register for argument!");
      CCValAssign &GPR1 = VA;
      SDValue MSWAsI64 = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgAsInt,
                                     DAG.getConstant(32, dl, MVT::i8));
      RegsToPass.push_back(std::make_pair(
          GPR1.getLocReg(), DAG.getZExtOrTrunc(MSWAsI64, dl, MVT::i32)));
      assert(I != E && "A second custom GPR is expected!");
      CCValAssign &GPR2 = ArgLocs[I++];
      assert(GPR2.isRegLoc() && GPR2.getValNo() == GPR1.getValNo() &&
             GPR2.needsCustom() && "A second custom GPR is expected!");
      RegsToPass.push_back(std::make_pair(
          GPR2.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, MVT::i32)));
    }
  }

  // For indirect calls, we need to save the TOC base to the stack for
  // restoration after the call.
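  // (Sketch of the sequence below: copy the TOC pointer out of its register
  // and store it at StackPtr + TOCSaveOffset, the dedicated TOC save slot in
  // the linkage area, so it can be reloaded once the callee returns.)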
  if (CFlags.IsIndirect) {
    assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
    const MCRegister TOCBaseReg = Subtarget.getTOCPointerRegister();
    const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
    const MVT PtrVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
    const unsigned TOCSaveOffset =
        Subtarget.getFrameLowering()->getTOCSaveOffset();

    setUsesTOCBasePtr(DAG);
    SDValue Val = DAG.getCopyFromReg(Chain, dl, TOCBaseReg, PtrVT);
    SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
    SDValue StackPtr = DAG.getRegister(StackPtrReg, PtrVT);
    SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
    Chain = DAG.getStore(
        Val.getValue(1), dl, Val, AddPtr,
        MachinePointerInfo::getStack(DAG.getMachineFunction(),
                                     TOCSaveOffset));
  }

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (auto Reg : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, InFlag);
    InFlag = Chain.getValue(1);
  }

  const int SPDiff = 0;
  return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
                    Callee, SPDiff, NumBytes, Ins, InVals, CS);
}

bool
PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
                                  MachineFunction &MF, bool isVarArg,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(
      Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
                ? RetCC_PPC_Cold
                : RetCC_PPC);
}

SDValue
PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &dl, SelectionDAG &DAG) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeReturn(Outs,
                       (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
                           ? RetCC_PPC_Cold
                           : RetCC_PPC);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0, RealResIdx = 0; i != RVLocs.size(); ++i, ++RealResIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = OutVals[RealResIdx];

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    }
    if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
      bool isLittleEndian = Subtarget.isLittleEndian();
      // Legalize ret f64 -> ret 2 x i32.
      SDValue SVal =
          DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
                      DAG.getIntPtrConstant(isLittleEndian ? 0 : 1, dl));
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
      RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
      SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
                         DAG.getIntPtrConstant(isLittleEndian ? 1 : 0, dl));
      Flag = Chain.getValue(1);
      VA = RVLocs[++i]; // skip ahead to next loc
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
    } else
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const MCPhysReg *I =
      TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
  if (I) {
    for (; *I; ++I) {

      if (PPC::G8RCRegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::i64));
      else if (PPC::F8RCRegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
      else if (PPC::CRRCRegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::i1));
      else if (PPC::VRRCRegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::Other));
      else
        llvm_unreachable("Unexpected register class in CSRsViaCopy!");
    }
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
}

SDValue
PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc dl(Op);

  // Get the correct type for integers.
  EVT IntVT = Op.getValueType();

  // Get the inputs.
  SDValue Chain = Op.getOperand(0);
  SDValue FPSIdx = getFramePointerFrameIndex(DAG);
  // Build a DYNAREAOFFSET node.
  SDValue Ops[2] = {Chain, FPSIdx};
  SDVTList VTs = DAG.getVTList(IntVT);
  return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
}

SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
                                             SelectionDAG &DAG) const {
  // When we pop the dynamic allocation we need to restore the SP link.
  SDLoc dl(Op);

  // Get the correct type for pointers.
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // Construct the stack pointer operand.
  bool isPPC64 = Subtarget.isPPC64();
  unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
  SDValue StackPtr = DAG.getRegister(SP, PtrVT);

  // Get the operands for the STACKRESTORE.
  SDValue Chain = Op.getOperand(0);
  SDValue SaveSP = Op.getOperand(1);

  // Load the old link SP.
  SDValue LoadLinkSP =
      DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());

  // Restore the stack pointer.
  Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);

  // Store the old link SP.
  return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
}

SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool isPPC64 = Subtarget.isPPC64();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Get the current return address save index. The users of this index will
  // be primarily DYNALLOC instructions.
7304 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7305 int RASI = FI->getReturnAddrSaveIndex();
7306
7307 // If the return address save index hasn't been defined yet.
7308 if (!RASI) {
7309 // Find out the fixed offset of the return address save area.
7310 int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
7311 // Allocate the frame index for the return address save area.
7312 RASI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, LROffset, false);
7313 // Save the result.
7314 FI->setReturnAddrSaveIndex(RASI);
7315 }
7316 return DAG.getFrameIndex(RASI, PtrVT);
7317 }
7318
7319 SDValue
7320 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG &DAG) const {
7321 MachineFunction &MF = DAG.getMachineFunction();
7322 bool isPPC64 = Subtarget.isPPC64();
7323 EVT PtrVT = getPointerTy(MF.getDataLayout());
7324
7325 // Get current frame pointer save index. The users of this index will be
7326 // primarily DYNALLOC instructions.
7327 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7328 int FPSI = FI->getFramePointerSaveIndex();
7329
7330 // If the frame pointer save index hasn't been defined yet.
7331 if (!FPSI) {
7332 // Find out the fixed offset of the frame pointer save area.
7333 int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
7334 // Allocate the frame index for the frame pointer save area.
7335 FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, FPOffset, true);
7336 // Save the result.
7337 FI->setFramePointerSaveIndex(FPSI);
7338 }
7339 return DAG.getFrameIndex(FPSI, PtrVT);
7340 }
7341
7342 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
7343 SelectionDAG &DAG) const {
7344 // Get the inputs.
7345 SDValue Chain = Op.getOperand(0);
7346 SDValue Size = Op.getOperand(1);
7347 SDLoc dl(Op);
7348
7349 // Get the correct type for pointers.
7350 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7351 // Negate the size.
7352 SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
7353 DAG.getConstant(0, dl, PtrVT), Size);
7354 // Construct a node for the frame pointer save index.
7355 SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7356 // Build a DYNALLOC node.
7357 SDValue Ops[3] = { Chain, NegSize, FPSIdx };
7358 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
7359 return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
7360 }
7361
7362 SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
7363 SelectionDAG &DAG) const {
7364 MachineFunction &MF = DAG.getMachineFunction();
7365
7366 bool isPPC64 = Subtarget.isPPC64();
7367 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7368
7369 int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false);
7370 return DAG.getFrameIndex(FI, PtrVT);
7371 }
7372
7373 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
7374 SelectionDAG &DAG) const {
7375 SDLoc DL(Op);
7376 return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
7377 DAG.getVTList(MVT::i32, MVT::Other),
7378 Op.getOperand(0), Op.getOperand(1));
7379 }
7380
7381 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
7382 SelectionDAG &DAG) const {
7383 SDLoc DL(Op);
7384 return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
7385 Op.getOperand(0), Op.getOperand(1));
7386 }
7387
7388 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
7389 if (Op.getValueType().isVector())
7390 return LowerVectorLoad(Op, DAG);
7391
7392 assert(Op.getValueType() == MVT::i1 &&
7393 "Custom lowering only for i1 loads");
7394
7395 // First, load the byte via an extending load to a full GPR-width value,
// then truncate down to 1 bit.
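// A sketch of the nodes built below, assuming a 64-bit target where the
// extending load produces an i64:
//
//   NewLD  = EXTLOAD i8 -> i64, BasePtr   ; byte load, upper bits unspecified
//   Result = TRUNCATE NewLD to i1         ; keep only bit 0
//
// The truncated value and the new load's chain result are then merged so
// that users of the original i1 load's chain stay ordered after the byte
// load.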
7396 7397 SDLoc dl(Op); 7398 LoadSDNode *LD = cast<LoadSDNode>(Op); 7399 7400 SDValue Chain = LD->getChain(); 7401 SDValue BasePtr = LD->getBasePtr(); 7402 MachineMemOperand *MMO = LD->getMemOperand(); 7403 7404 SDValue NewLD = 7405 DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain, 7406 BasePtr, MVT::i8, MMO); 7407 SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD); 7408 7409 SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) }; 7410 return DAG.getMergeValues(Ops, dl); 7411 } 7412 7413 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { 7414 if (Op.getOperand(1).getValueType().isVector()) 7415 return LowerVectorStore(Op, DAG); 7416 7417 assert(Op.getOperand(1).getValueType() == MVT::i1 && 7418 "Custom lowering only for i1 stores"); 7419 7420 // First, zero extend to 32 bits, then use a truncating store to 8 bits. 7421 7422 SDLoc dl(Op); 7423 StoreSDNode *ST = cast<StoreSDNode>(Op); 7424 7425 SDValue Chain = ST->getChain(); 7426 SDValue BasePtr = ST->getBasePtr(); 7427 SDValue Value = ST->getValue(); 7428 MachineMemOperand *MMO = ST->getMemOperand(); 7429 7430 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()), 7431 Value); 7432 return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO); 7433 } 7434 7435 // FIXME: Remove this once the ANDI glue bug is fixed: 7436 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { 7437 assert(Op.getValueType() == MVT::i1 && 7438 "Custom lowering only for i1 results"); 7439 7440 SDLoc DL(Op); 7441 return DAG.getNode(PPCISD::ANDI_rec_1_GT_BIT, DL, MVT::i1, Op.getOperand(0)); 7442 } 7443 7444 SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op, 7445 SelectionDAG &DAG) const { 7446 7447 // Implements a vector truncate that fits in a vector register as a shuffle. 7448 // We want to legalize vector truncates down to where the source fits in 7449 // a vector register (and target is therefore smaller than vector register 7450 // size). At that point legalization will try to custom lower the sub-legal 7451 // result and get here - where we can contain the truncate as a single target 7452 // operation. 7453 7454 // For example a trunc <2 x i16> to <2 x i8> could be visualized as follows: 7455 // <MSB1|LSB1, MSB2|LSB2> to <LSB1, LSB2> 7456 // 7457 // We will implement it for big-endian ordering as this (where x denotes 7458 // undefined): 7459 // < MSB1|LSB1, MSB2|LSB2, uu, uu, uu, uu, uu, uu> to 7460 // < LSB1, LSB2, u, u, u, u, u, u, u, u, u, u, u, u, u, u> 7461 // 7462 // The same operation in little-endian ordering will be: 7463 // <uu, uu, uu, uu, uu, uu, LSB2|MSB2, LSB1|MSB1> to 7464 // <u, u, u, u, u, u, u, u, u, u, u, u, u, u, LSB2, LSB1> 7465 7466 assert(Op.getValueType().isVector() && "Vector type expected."); 7467 7468 SDLoc DL(Op); 7469 SDValue N1 = Op.getOperand(0); 7470 unsigned SrcSize = N1.getValueType().getSizeInBits(); 7471 assert(SrcSize <= 128 && "Source must fit in an Altivec/VSX vector"); 7472 SDValue WideSrc = SrcSize == 128 ? N1 : widenVec(DAG, N1, DL); 7473 7474 EVT TrgVT = Op.getValueType(); 7475 unsigned TrgNumElts = TrgVT.getVectorNumElements(); 7476 EVT EltVT = TrgVT.getVectorElementType(); 7477 unsigned WideNumElts = 128 / EltVT.getSizeInBits(); 7478 EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts); 7479 7480 // First list the elements we want to keep. 
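// For example (a sketch), truncating v8i16 to v8i8 on a little-endian
// subtarget: SrcSize = 128, SizeMult = 2 and WideNumElts = 16, so the loop
// below collects the even bytes <0, 2, 4, 6, 8, 10, 12, 14> -- the LSBs of
// each halfword -- and the tail of the mask is then padded with indices
// that point into the UNDEF second shuffle operand.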
7481 unsigned SizeMult = SrcSize / TrgVT.getSizeInBits(); 7482 SmallVector<int, 16> ShuffV; 7483 if (Subtarget.isLittleEndian()) 7484 for (unsigned i = 0; i < TrgNumElts; ++i) 7485 ShuffV.push_back(i * SizeMult); 7486 else 7487 for (unsigned i = 1; i <= TrgNumElts; ++i) 7488 ShuffV.push_back(i * SizeMult - 1); 7489 7490 // Populate the remaining elements with undefs. 7491 for (unsigned i = TrgNumElts; i < WideNumElts; ++i) 7492 // ShuffV.push_back(i + WideNumElts); 7493 ShuffV.push_back(WideNumElts + 1); 7494 7495 SDValue Conv = DAG.getNode(ISD::BITCAST, DL, WideVT, WideSrc); 7496 return DAG.getVectorShuffle(WideVT, DL, Conv, DAG.getUNDEF(WideVT), ShuffV); 7497 } 7498 7499 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when 7500 /// possible. 7501 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 7502 // Not FP? Not a fsel. 7503 if (!Op.getOperand(0).getValueType().isFloatingPoint() || 7504 !Op.getOperand(2).getValueType().isFloatingPoint()) 7505 return Op; 7506 7507 bool HasNoInfs = DAG.getTarget().Options.NoInfsFPMath; 7508 bool HasNoNaNs = DAG.getTarget().Options.NoNaNsFPMath; 7509 // We might be able to do better than this under some circumstances, but in 7510 // general, fsel-based lowering of select is a finite-math-only optimization. 7511 // For more information, see section F.3 of the 2.06 ISA specification. 7512 // With ISA 3.0, we have xsmaxcdp/xsmincdp which are OK to emit even in the 7513 // presence of infinities. 7514 if (!Subtarget.hasP9Vector() && (!HasNoInfs || !HasNoNaNs)) 7515 return Op; 7516 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 7517 7518 EVT ResVT = Op.getValueType(); 7519 EVT CmpVT = Op.getOperand(0).getValueType(); 7520 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 7521 SDValue TV = Op.getOperand(2), FV = Op.getOperand(3); 7522 SDLoc dl(Op); 7523 7524 if (Subtarget.hasP9Vector() && LHS == TV && RHS == FV) { 7525 switch (CC) { 7526 default: 7527 // Not a min/max but with finite math, we may still be able to use fsel. 7528 if (HasNoInfs && HasNoNaNs) 7529 break; 7530 return Op; 7531 case ISD::SETOGT: 7532 case ISD::SETGT: 7533 return DAG.getNode(PPCISD::XSMAXCDP, dl, Op.getValueType(), LHS, RHS); 7534 case ISD::SETOLT: 7535 case ISD::SETLT: 7536 return DAG.getNode(PPCISD::XSMINCDP, dl, Op.getValueType(), LHS, RHS); 7537 } 7538 } 7539 7540 // TODO: Propagate flags from the select rather than global settings. 7541 SDNodeFlags Flags; 7542 Flags.setNoInfs(true); 7543 Flags.setNoNaNs(true); 7544 7545 // If the RHS of the comparison is a 0.0, we don't need to do the 7546 // subtraction at all. 7547 SDValue Sel1; 7548 if (isFloatingPointZero(RHS)) 7549 switch (CC) { 7550 default: break; // SETUO etc aren't handled by fsel. 
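// The equality cases below chain two fsels: FSEL(C, TV, FV) yields TV
// whenever C >= -0.0, so Sel1 = FSEL(LHS, TV, FV) covers LHS >= 0, and
// FSEL(-LHS, Sel1, FV) then rejects LHS > 0, leaving TV only when
// LHS == 0.0. SETNE simply swaps TV and FV before falling through.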
7551 case ISD::SETNE: 7552 std::swap(TV, FV); 7553 LLVM_FALLTHROUGH; 7554 case ISD::SETEQ: 7555 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 7556 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 7557 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV); 7558 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 7559 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 7560 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 7561 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV); 7562 case ISD::SETULT: 7563 case ISD::SETLT: 7564 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 7565 LLVM_FALLTHROUGH; 7566 case ISD::SETOGE: 7567 case ISD::SETGE: 7568 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 7569 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 7570 return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV); 7571 case ISD::SETUGT: 7572 case ISD::SETGT: 7573 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 7574 LLVM_FALLTHROUGH; 7575 case ISD::SETOLE: 7576 case ISD::SETLE: 7577 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 7578 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 7579 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 7580 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV); 7581 } 7582 7583 SDValue Cmp; 7584 switch (CC) { 7585 default: break; // SETUO etc aren't handled by fsel. 7586 case ISD::SETNE: 7587 std::swap(TV, FV); 7588 LLVM_FALLTHROUGH; 7589 case ISD::SETEQ: 7590 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags); 7591 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 7592 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 7593 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 7594 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 7595 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 7596 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 7597 DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV); 7598 case ISD::SETULT: 7599 case ISD::SETLT: 7600 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags); 7601 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 7602 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 7603 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 7604 case ISD::SETOGE: 7605 case ISD::SETGE: 7606 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags); 7607 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 7608 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 7609 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 7610 case ISD::SETUGT: 7611 case ISD::SETGT: 7612 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags); 7613 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 7614 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 7615 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 7616 case ISD::SETOLE: 7617 case ISD::SETLE: 7618 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags); 7619 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 7620 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 7621 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 7622 } 7623 return Op; 7624 } 7625 7626 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI, 7627 SelectionDAG &DAG, 7628 const SDLoc &dl) const { 7629 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 7630 SDValue Src = Op.getOperand(0); 7631 if (Src.getValueType() == 
MVT::f32) 7632 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 7633 7634 SDValue Tmp; 7635 switch (Op.getSimpleValueType().SimpleTy) { 7636 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 7637 case MVT::i32: 7638 Tmp = DAG.getNode( 7639 Op.getOpcode() == ISD::FP_TO_SINT 7640 ? PPCISD::FCTIWZ 7641 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 7642 dl, MVT::f64, Src); 7643 break; 7644 case MVT::i64: 7645 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 7646 "i64 FP_TO_UINT is supported only with FPCVT"); 7647 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 7648 PPCISD::FCTIDUZ, 7649 dl, MVT::f64, Src); 7650 break; 7651 } 7652 7653 // Convert the FP value to an int value through memory. 7654 bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() && 7655 (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()); 7656 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64); 7657 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex(); 7658 MachinePointerInfo MPI = 7659 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI); 7660 7661 // Emit a store to the stack slot. 7662 SDValue Chain; 7663 if (i32Stack) { 7664 MachineFunction &MF = DAG.getMachineFunction(); 7665 MachineMemOperand *MMO = 7666 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4); 7667 SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr }; 7668 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 7669 DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO); 7670 } else 7671 Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI); 7672 7673 // Result is a load from the stack slot. If loading 4 bytes, make sure to 7674 // add in a bias on big endian. 7675 if (Op.getValueType() == MVT::i32 && !i32Stack) { 7676 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 7677 DAG.getConstant(4, dl, FIPtr.getValueType())); 7678 MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4); 7679 } 7680 7681 RLI.Chain = Chain; 7682 RLI.Ptr = FIPtr; 7683 RLI.MPI = MPI; 7684 } 7685 7686 /// Custom lowers floating point to integer conversions to use 7687 /// the direct move instructions available in ISA 2.07 to avoid the 7688 /// need for load/store combinations. 7689 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op, 7690 SelectionDAG &DAG, 7691 const SDLoc &dl) const { 7692 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 7693 SDValue Src = Op.getOperand(0); 7694 7695 if (Src.getValueType() == MVT::f32) 7696 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 7697 7698 SDValue Tmp; 7699 switch (Op.getSimpleValueType().SimpleTy) { 7700 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 7701 case MVT::i32: 7702 Tmp = DAG.getNode( 7703 Op.getOpcode() == ISD::FP_TO_SINT 7704 ? PPCISD::FCTIWZ 7705 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 7706 dl, MVT::f64, Src); 7707 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp); 7708 break; 7709 case MVT::i64: 7710 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 7711 "i64 FP_TO_UINT is supported only with FPCVT"); 7712 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? 
PPCISD::FCTIDZ : 7713 PPCISD::FCTIDUZ, 7714 dl, MVT::f64, Src); 7715 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp); 7716 break; 7717 } 7718 return Tmp; 7719 } 7720 7721 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 7722 const SDLoc &dl) const { 7723 7724 // FP to INT conversions are legal for f128. 7725 if (EnableQuadPrecision && (Op->getOperand(0).getValueType() == MVT::f128)) 7726 return Op; 7727 7728 // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on 7729 // PPC (the libcall is not available). 7730 if (Op.getOperand(0).getValueType() == MVT::ppcf128) { 7731 if (Op.getValueType() == MVT::i32) { 7732 if (Op.getOpcode() == ISD::FP_TO_SINT) { 7733 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 7734 MVT::f64, Op.getOperand(0), 7735 DAG.getIntPtrConstant(0, dl)); 7736 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 7737 MVT::f64, Op.getOperand(0), 7738 DAG.getIntPtrConstant(1, dl)); 7739 7740 // Add the two halves of the long double in round-to-zero mode. 7741 SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi); 7742 7743 // Now use a smaller FP_TO_SINT. 7744 return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res); 7745 } 7746 if (Op.getOpcode() == ISD::FP_TO_UINT) { 7747 const uint64_t TwoE31[] = {0x41e0000000000000LL, 0}; 7748 APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31)); 7749 SDValue Tmp = DAG.getConstantFP(APF, dl, MVT::ppcf128); 7750 // X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X 7751 // FIXME: generated code sucks. 7752 // TODO: Are there fast-math-flags to propagate to this FSUB? 7753 SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128, 7754 Op.getOperand(0), Tmp); 7755 True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True); 7756 True = DAG.getNode(ISD::ADD, dl, MVT::i32, True, 7757 DAG.getConstant(0x80000000, dl, MVT::i32)); 7758 SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, 7759 Op.getOperand(0)); 7760 return DAG.getSelectCC(dl, Op.getOperand(0), Tmp, True, False, 7761 ISD::SETGE); 7762 } 7763 } 7764 7765 return SDValue(); 7766 } 7767 7768 if (Subtarget.hasDirectMove() && Subtarget.isPPC64()) 7769 return LowerFP_TO_INTDirectMove(Op, DAG, dl); 7770 7771 ReuseLoadInfo RLI; 7772 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 7773 7774 return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI, 7775 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges); 7776 } 7777 7778 // We're trying to insert a regular store, S, and then a load, L. If the 7779 // incoming value, O, is a load, we might just be able to have our load use the 7780 // address used by O. However, we don't know if anything else will store to 7781 // that address before we can load from it. To prevent this situation, we need 7782 // to insert our load, L, into the chain as a peer of O. To do this, we give L 7783 // the same chain operand as O, we create a token factor from the chain results 7784 // of O and L, and we replace all uses of O's chain result with that token 7785 // factor (see spliceIntoChain below for this last part). 
7786 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT, 7787 ReuseLoadInfo &RLI, 7788 SelectionDAG &DAG, 7789 ISD::LoadExtType ET) const { 7790 SDLoc dl(Op); 7791 if (ET == ISD::NON_EXTLOAD && 7792 (Op.getOpcode() == ISD::FP_TO_UINT || 7793 Op.getOpcode() == ISD::FP_TO_SINT) && 7794 isOperationLegalOrCustom(Op.getOpcode(), 7795 Op.getOperand(0).getValueType())) { 7796 7797 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 7798 return true; 7799 } 7800 7801 LoadSDNode *LD = dyn_cast<LoadSDNode>(Op); 7802 if (!LD || LD->getExtensionType() != ET || LD->isVolatile() || 7803 LD->isNonTemporal()) 7804 return false; 7805 if (LD->getMemoryVT() != MemVT) 7806 return false; 7807 7808 RLI.Ptr = LD->getBasePtr(); 7809 if (LD->isIndexed() && !LD->getOffset().isUndef()) { 7810 assert(LD->getAddressingMode() == ISD::PRE_INC && 7811 "Non-pre-inc AM on PPC?"); 7812 RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr, 7813 LD->getOffset()); 7814 } 7815 7816 RLI.Chain = LD->getChain(); 7817 RLI.MPI = LD->getPointerInfo(); 7818 RLI.IsDereferenceable = LD->isDereferenceable(); 7819 RLI.IsInvariant = LD->isInvariant(); 7820 RLI.Alignment = LD->getAlignment(); 7821 RLI.AAInfo = LD->getAAInfo(); 7822 RLI.Ranges = LD->getRanges(); 7823 7824 RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1); 7825 return true; 7826 } 7827 7828 // Given the head of the old chain, ResChain, insert a token factor containing 7829 // it and NewResChain, and make users of ResChain now be users of that token 7830 // factor. 7831 // TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead. 7832 void PPCTargetLowering::spliceIntoChain(SDValue ResChain, 7833 SDValue NewResChain, 7834 SelectionDAG &DAG) const { 7835 if (!ResChain) 7836 return; 7837 7838 SDLoc dl(NewResChain); 7839 7840 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 7841 NewResChain, DAG.getUNDEF(MVT::Other)); 7842 assert(TF.getNode() != NewResChain.getNode() && 7843 "A new TF really is required here"); 7844 7845 DAG.ReplaceAllUsesOfValueWith(ResChain, TF); 7846 DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain); 7847 } 7848 7849 /// Analyze profitability of direct move 7850 /// prefer float load to int load plus direct move 7851 /// when there is no integer use of int load 7852 bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const { 7853 SDNode *Origin = Op.getOperand(0).getNode(); 7854 if (Origin->getOpcode() != ISD::LOAD) 7855 return true; 7856 7857 // If there is no LXSIBZX/LXSIHZX, like Power8, 7858 // prefer direct move if the memory size is 1 or 2 bytes. 7859 MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand(); 7860 if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2) 7861 return true; 7862 7863 for (SDNode::use_iterator UI = Origin->use_begin(), 7864 UE = Origin->use_end(); 7865 UI != UE; ++UI) { 7866 7867 // Only look at the users of the loaded value. 7868 if (UI.getUse().get().getResNo() != 0) 7869 continue; 7870 7871 if (UI->getOpcode() != ISD::SINT_TO_FP && 7872 UI->getOpcode() != ISD::UINT_TO_FP) 7873 return true; 7874 } 7875 7876 return false; 7877 } 7878 7879 /// Custom lowers integer to floating point conversions to use 7880 /// the direct move instructions available in ISA 2.07 to avoid the 7881 /// need for load/store combinations. 
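/// For example (a sketch, not the exact final assembly), a signed i32 to
/// f64 conversion becomes a GPR-to-VSR move (PPCISD::MTVSRA) feeding a
/// PPCISD::FCFID, rather than a stack store followed by a floating-point
/// load and convert.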
7882 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op, 7883 SelectionDAG &DAG, 7884 const SDLoc &dl) const { 7885 assert((Op.getValueType() == MVT::f32 || 7886 Op.getValueType() == MVT::f64) && 7887 "Invalid floating point type as target of conversion"); 7888 assert(Subtarget.hasFPCVT() && 7889 "Int to FP conversions with direct moves require FPCVT"); 7890 SDValue FP; 7891 SDValue Src = Op.getOperand(0); 7892 bool SinglePrec = Op.getValueType() == MVT::f32; 7893 bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32; 7894 bool Signed = Op.getOpcode() == ISD::SINT_TO_FP; 7895 unsigned ConvOp = Signed ? (SinglePrec ? PPCISD::FCFIDS : PPCISD::FCFID) : 7896 (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU); 7897 7898 if (WordInt) { 7899 FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ, 7900 dl, MVT::f64, Src); 7901 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP); 7902 } 7903 else { 7904 FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src); 7905 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP); 7906 } 7907 7908 return FP; 7909 } 7910 7911 static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl) { 7912 7913 EVT VecVT = Vec.getValueType(); 7914 assert(VecVT.isVector() && "Expected a vector type."); 7915 assert(VecVT.getSizeInBits() < 128 && "Vector is already full width."); 7916 7917 EVT EltVT = VecVT.getVectorElementType(); 7918 unsigned WideNumElts = 128 / EltVT.getSizeInBits(); 7919 EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts); 7920 7921 unsigned NumConcat = WideNumElts / VecVT.getVectorNumElements(); 7922 SmallVector<SDValue, 16> Ops(NumConcat); 7923 Ops[0] = Vec; 7924 SDValue UndefVec = DAG.getUNDEF(VecVT); 7925 for (unsigned i = 1; i < NumConcat; ++i) 7926 Ops[i] = UndefVec; 7927 7928 return DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Ops); 7929 } 7930 7931 SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG, 7932 const SDLoc &dl) const { 7933 7934 unsigned Opc = Op.getOpcode(); 7935 assert((Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP) && 7936 "Unexpected conversion type"); 7937 assert((Op.getValueType() == MVT::v2f64 || Op.getValueType() == MVT::v4f32) && 7938 "Supports conversions to v2f64/v4f32 only."); 7939 7940 bool SignedConv = Opc == ISD::SINT_TO_FP; 7941 bool FourEltRes = Op.getValueType() == MVT::v4f32; 7942 7943 SDValue Wide = widenVec(DAG, Op.getOperand(0), dl); 7944 EVT WideVT = Wide.getValueType(); 7945 unsigned WideNumElts = WideVT.getVectorNumElements(); 7946 MVT IntermediateVT = FourEltRes ? MVT::v4i32 : MVT::v2i64; 7947 7948 SmallVector<int, 16> ShuffV; 7949 for (unsigned i = 0; i < WideNumElts; ++i) 7950 ShuffV.push_back(i + WideNumElts); 7951 7952 int Stride = FourEltRes ? WideNumElts / 4 : WideNumElts / 2; 7953 int SaveElts = FourEltRes ? 4 : 2; 7954 if (Subtarget.isLittleEndian()) 7955 for (int i = 0; i < SaveElts; i++) 7956 ShuffV[i * Stride] = i; 7957 else 7958 for (int i = 1; i <= SaveElts; i++) 7959 ShuffV[i * Stride - 1] = i - 1; 7960 7961 SDValue ShuffleSrc2 = 7962 SignedConv ? DAG.getUNDEF(WideVT) : DAG.getConstant(0, dl, WideVT); 7963 SDValue Arrange = DAG.getVectorShuffle(WideVT, dl, Wide, ShuffleSrc2, ShuffV); 7964 unsigned ExtendOp = 7965 SignedConv ? 
(unsigned)PPCISD::SExtVElems : (unsigned)ISD::BITCAST; 7966 7967 SDValue Extend; 7968 if (!Subtarget.hasP9Altivec() && SignedConv) { 7969 Arrange = DAG.getBitcast(IntermediateVT, Arrange); 7970 Extend = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, IntermediateVT, Arrange, 7971 DAG.getValueType(Op.getOperand(0).getValueType())); 7972 } else 7973 Extend = DAG.getNode(ExtendOp, dl, IntermediateVT, Arrange); 7974 7975 return DAG.getNode(Opc, dl, Op.getValueType(), Extend); 7976 } 7977 7978 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op, 7979 SelectionDAG &DAG) const { 7980 SDLoc dl(Op); 7981 7982 EVT InVT = Op.getOperand(0).getValueType(); 7983 EVT OutVT = Op.getValueType(); 7984 if (OutVT.isVector() && OutVT.isFloatingPoint() && 7985 isOperationCustom(Op.getOpcode(), InVT)) 7986 return LowerINT_TO_FPVector(Op, DAG, dl); 7987 7988 // Conversions to f128 are legal. 7989 if (EnableQuadPrecision && (Op.getValueType() == MVT::f128)) 7990 return Op; 7991 7992 if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) { 7993 if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64) 7994 return SDValue(); 7995 7996 SDValue Value = Op.getOperand(0); 7997 // The values are now known to be -1 (false) or 1 (true). To convert this 7998 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 7999 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 8000 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 8001 8002 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64); 8003 8004 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 8005 8006 if (Op.getValueType() != MVT::v4f64) 8007 Value = DAG.getNode(ISD::FP_ROUND, dl, 8008 Op.getValueType(), Value, 8009 DAG.getIntPtrConstant(1, dl)); 8010 return Value; 8011 } 8012 8013 // Don't handle ppc_fp128 here; let it be lowered to a libcall. 8014 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) 8015 return SDValue(); 8016 8017 if (Op.getOperand(0).getValueType() == MVT::i1) 8018 return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0), 8019 DAG.getConstantFP(1.0, dl, Op.getValueType()), 8020 DAG.getConstantFP(0.0, dl, Op.getValueType())); 8021 8022 // If we have direct moves, we can do all the conversion, skip the store/load 8023 // however, without FPCVT we can't do most conversions. 8024 if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) && 8025 Subtarget.isPPC64() && Subtarget.hasFPCVT()) 8026 return LowerINT_TO_FPDirectMove(Op, DAG, dl); 8027 8028 assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) && 8029 "UINT_TO_FP is supported only with FPCVT"); 8030 8031 // If we have FCFIDS, then use it when converting to single-precision. 8032 // Otherwise, convert to double-precision and then round. 8033 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) 8034 ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS 8035 : PPCISD::FCFIDS) 8036 : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU 8037 : PPCISD::FCFID); 8038 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) 8039 ? MVT::f32 8040 : MVT::f64; 8041 8042 if (Op.getOperand(0).getValueType() == MVT::i64) { 8043 SDValue SINT = Op.getOperand(0); 8044 // When converting to single-precision, we actually need to convert 8045 // to double-precision first and then round to single-precision. 8046 // To avoid double-rounding effects during that operation, we have 8047 // to prepare the input operand. 
Bits that might be truncated when 8048 // converting to double-precision are replaced by a bit that won't 8049 // be lost at this stage, but is below the single-precision rounding 8050 // position. 8051 // 8052 // However, if -enable-unsafe-fp-math is in effect, accept double 8053 // rounding to avoid the extra overhead. 8054 if (Op.getValueType() == MVT::f32 && 8055 !Subtarget.hasFPCVT() && 8056 !DAG.getTarget().Options.UnsafeFPMath) { 8057 8058 // Twiddle input to make sure the low 11 bits are zero. (If this 8059 // is the case, we are guaranteed the value will fit into the 53 bit 8060 // mantissa of an IEEE double-precision value without rounding.) 8061 // If any of those low 11 bits were not zero originally, make sure 8062 // bit 12 (value 2048) is set instead, so that the final rounding 8063 // to single-precision gets the correct result. 8064 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64, 8065 SINT, DAG.getConstant(2047, dl, MVT::i64)); 8066 Round = DAG.getNode(ISD::ADD, dl, MVT::i64, 8067 Round, DAG.getConstant(2047, dl, MVT::i64)); 8068 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT); 8069 Round = DAG.getNode(ISD::AND, dl, MVT::i64, 8070 Round, DAG.getConstant(-2048, dl, MVT::i64)); 8071 8072 // However, we cannot use that value unconditionally: if the magnitude 8073 // of the input value is small, the bit-twiddling we did above might 8074 // end up visibly changing the output. Fortunately, in that case, we 8075 // don't need to twiddle bits since the original input will convert 8076 // exactly to double-precision floating-point already. Therefore, 8077 // construct a conditional to use the original value if the top 11 8078 // bits are all sign-bit copies, and use the rounded value computed 8079 // above otherwise. 8080 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64, 8081 SINT, DAG.getConstant(53, dl, MVT::i32)); 8082 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64, 8083 Cond, DAG.getConstant(1, dl, MVT::i64)); 8084 Cond = DAG.getSetCC(dl, MVT::i32, 8085 Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT); 8086 8087 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT); 8088 } 8089 8090 ReuseLoadInfo RLI; 8091 SDValue Bits; 8092 8093 MachineFunction &MF = DAG.getMachineFunction(); 8094 if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) { 8095 Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI, 8096 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges); 8097 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 8098 } else if (Subtarget.hasLFIWAX() && 8099 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) { 8100 MachineMemOperand *MMO = 8101 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 8102 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 8103 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 8104 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl, 8105 DAG.getVTList(MVT::f64, MVT::Other), 8106 Ops, MVT::i32, MMO); 8107 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 8108 } else if (Subtarget.hasFPCVT() && 8109 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) { 8110 MachineMemOperand *MMO = 8111 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 8112 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 8113 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 8114 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl, 8115 DAG.getVTList(MVT::f64, MVT::Other), 8116 Ops, MVT::i32, MMO); 8117 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 8118 } else if (((Subtarget.hasLFIWAX() && 8119 SINT.getOpcode() == ISD::SIGN_EXTEND) || 8120 
(Subtarget.hasFPCVT() && 8121 SINT.getOpcode() == ISD::ZERO_EXTEND)) && 8122 SINT.getOperand(0).getValueType() == MVT::i32) { 8123 MachineFrameInfo &MFI = MF.getFrameInfo(); 8124 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 8125 8126 int FrameIdx = MFI.CreateStackObject(4, 4, false); 8127 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 8128 8129 SDValue Store = 8130 DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx, 8131 MachinePointerInfo::getFixedStack( 8132 DAG.getMachineFunction(), FrameIdx)); 8133 8134 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 8135 "Expected an i32 store"); 8136 8137 RLI.Ptr = FIdx; 8138 RLI.Chain = Store; 8139 RLI.MPI = 8140 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 8141 RLI.Alignment = 4; 8142 8143 MachineMemOperand *MMO = 8144 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 8145 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 8146 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 8147 Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ? 8148 PPCISD::LFIWZX : PPCISD::LFIWAX, 8149 dl, DAG.getVTList(MVT::f64, MVT::Other), 8150 Ops, MVT::i32, MMO); 8151 } else 8152 Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT); 8153 8154 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits); 8155 8156 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) 8157 FP = DAG.getNode(ISD::FP_ROUND, dl, 8158 MVT::f32, FP, DAG.getIntPtrConstant(0, dl)); 8159 return FP; 8160 } 8161 8162 assert(Op.getOperand(0).getValueType() == MVT::i32 && 8163 "Unhandled INT_TO_FP type in custom expander!"); 8164 // Since we only generate this in 64-bit mode, we can take advantage of 8165 // 64-bit registers. In particular, sign extend the input value into the 8166 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 8167 // then lfd it and fcfid it. 8168 MachineFunction &MF = DAG.getMachineFunction(); 8169 MachineFrameInfo &MFI = MF.getFrameInfo(); 8170 EVT PtrVT = getPointerTy(MF.getDataLayout()); 8171 8172 SDValue Ld; 8173 if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) { 8174 ReuseLoadInfo RLI; 8175 bool ReusingLoad; 8176 if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI, 8177 DAG))) { 8178 int FrameIdx = MFI.CreateStackObject(4, 4, false); 8179 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 8180 8181 SDValue Store = 8182 DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 8183 MachinePointerInfo::getFixedStack( 8184 DAG.getMachineFunction(), FrameIdx)); 8185 8186 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 8187 "Expected an i32 store"); 8188 8189 RLI.Ptr = FIdx; 8190 RLI.Chain = Store; 8191 RLI.MPI = 8192 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 8193 RLI.Alignment = 4; 8194 } 8195 8196 MachineMemOperand *MMO = 8197 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 8198 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 8199 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 8200 Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ? 
8201 PPCISD::LFIWZX : PPCISD::LFIWAX,
8202 dl, DAG.getVTList(MVT::f64, MVT::Other),
8203 Ops, MVT::i32, MMO);
8204 if (ReusingLoad)
8205 spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
8206 } else {
8207 assert(Subtarget.isPPC64() &&
8208 "i32->FP without LFIWAX supported only on PPC64");
8209
8210 int FrameIdx = MFI.CreateStackObject(8, 8, false);
8211 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8212
8213 SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64,
8214 Op.getOperand(0));
8215
8216 // STD the extended value into the stack slot.
8217 SDValue Store = DAG.getStore(
8218 DAG.getEntryNode(), dl, Ext64, FIdx,
8219 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8220
8221 // Load the value as a double.
8222 Ld = DAG.getLoad(
8223 MVT::f64, dl, Store, FIdx,
8224 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8225 }
8226
8227 // FCFID it and return it.
8228 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
8229 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
8230 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
8231 DAG.getIntPtrConstant(0, dl));
8232 return FP;
8233 }
8234
8235 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
8236 SelectionDAG &DAG) const {
8237 SDLoc dl(Op);
8238 /*
8239 The rounding mode is in bits 30:31 of FPSCR, and has the following
8240 settings:
8241 00 Round to nearest
8242 01 Round to 0
8243 10 Round to +inf
8244 11 Round to -inf
8245
8246 FLT_ROUNDS, on the other hand, expects the following:
8247 -1 Undefined
8248 0 Round to 0
8249 1 Round to nearest
8250 2 Round to +inf
8251 3 Round to -inf
8252
8253 To perform the conversion, we do:
8254 ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1)); e.g. RN = 0b10 (round to +inf) gives (2 ^ ((~2 & 0x3) >> 1)) = 2.
8255 */
8256
8257 MachineFunction &MF = DAG.getMachineFunction();
8258 EVT VT = Op.getValueType();
8259 EVT PtrVT = getPointerTy(MF.getDataLayout());
8260
8261 // Save FP Control Word to register
8262 EVT NodeTys[] = {
8263 MVT::f64, // return register
8264 MVT::Glue // unused in this context
8265 };
8266 SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None);
8267
8268 // Save FP register to stack slot
8269 int SSFI = MF.getFrameInfo().CreateStackObject(8, 8, false);
8270 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
8271 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain, StackSlot,
8272 MachinePointerInfo());
8273
8274 // Load FP Control Word from low 32 bits of stack slot.
8275 SDValue Four = DAG.getConstant(4, dl, PtrVT);
8276 SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
8277 SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo());
8278
8279 // Transform as necessary
8280 SDValue CWD1 =
8281 DAG.getNode(ISD::AND, dl, MVT::i32,
8282 CWD, DAG.getConstant(3, dl, MVT::i32));
8283 SDValue CWD2 =
8284 DAG.getNode(ISD::SRL, dl, MVT::i32,
8285 DAG.getNode(ISD::AND, dl, MVT::i32,
8286 DAG.getNode(ISD::XOR, dl, MVT::i32,
8287 CWD, DAG.getConstant(3, dl, MVT::i32)),
8288 DAG.getConstant(3, dl, MVT::i32)),
8289 DAG.getConstant(1, dl, MVT::i32));
8290
8291 SDValue RetVal =
8292 DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
8293
8294 return DAG.getNode((VT.getSizeInBits() < 16 ?
8295 ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal); 8296 } 8297 8298 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const { 8299 EVT VT = Op.getValueType(); 8300 unsigned BitWidth = VT.getSizeInBits(); 8301 SDLoc dl(Op); 8302 assert(Op.getNumOperands() == 3 && 8303 VT == Op.getOperand(1).getValueType() && 8304 "Unexpected SHL!"); 8305 8306 // Expand into a bunch of logical ops. Note that these ops 8307 // depend on the PPC behavior for oversized shift amounts. 8308 SDValue Lo = Op.getOperand(0); 8309 SDValue Hi = Op.getOperand(1); 8310 SDValue Amt = Op.getOperand(2); 8311 EVT AmtVT = Amt.getValueType(); 8312 8313 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 8314 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 8315 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt); 8316 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1); 8317 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3); 8318 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 8319 DAG.getConstant(-BitWidth, dl, AmtVT)); 8320 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5); 8321 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 8322 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt); 8323 SDValue OutOps[] = { OutLo, OutHi }; 8324 return DAG.getMergeValues(OutOps, dl); 8325 } 8326 8327 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { 8328 EVT VT = Op.getValueType(); 8329 SDLoc dl(Op); 8330 unsigned BitWidth = VT.getSizeInBits(); 8331 assert(Op.getNumOperands() == 3 && 8332 VT == Op.getOperand(1).getValueType() && 8333 "Unexpected SRL!"); 8334 8335 // Expand into a bunch of logical ops. Note that these ops 8336 // depend on the PPC behavior for oversized shift amounts. 8337 SDValue Lo = Op.getOperand(0); 8338 SDValue Hi = Op.getOperand(1); 8339 SDValue Amt = Op.getOperand(2); 8340 EVT AmtVT = Amt.getValueType(); 8341 8342 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 8343 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 8344 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 8345 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 8346 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 8347 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 8348 DAG.getConstant(-BitWidth, dl, AmtVT)); 8349 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5); 8350 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 8351 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt); 8352 SDValue OutOps[] = { OutLo, OutHi }; 8353 return DAG.getMergeValues(OutOps, dl); 8354 } 8355 8356 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { 8357 SDLoc dl(Op); 8358 EVT VT = Op.getValueType(); 8359 unsigned BitWidth = VT.getSizeInBits(); 8360 assert(Op.getNumOperands() == 3 && 8361 VT == Op.getOperand(1).getValueType() && 8362 "Unexpected SRA!"); 8363 8364 // Expand into a bunch of logical ops, followed by a select_cc. 
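// Unlike SRL_PARTS, an oversized arithmetic shift must fill the low result
// with copies of the sign bit rather than zeroes, so the PPC "oversized
// shifts yield zero" behavior is not sufficient on its own: Tmp4 below is
// the correct low part when Amt <= BitWidth, Tmp6 = SRA(Hi, Amt - BitWidth)
// is correct when Amt > BitWidth, and the select_cc on (Amt - BitWidth)
// picks between the two. The high part is always SRA(Hi, Amt).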
8365 SDValue Lo = Op.getOperand(0); 8366 SDValue Hi = Op.getOperand(1); 8367 SDValue Amt = Op.getOperand(2); 8368 EVT AmtVT = Amt.getValueType(); 8369 8370 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 8371 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 8372 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 8373 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 8374 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 8375 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 8376 DAG.getConstant(-BitWidth, dl, AmtVT)); 8377 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5); 8378 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt); 8379 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT), 8380 Tmp4, Tmp6, ISD::SETLE); 8381 SDValue OutOps[] = { OutLo, OutHi }; 8382 return DAG.getMergeValues(OutOps, dl); 8383 } 8384 8385 //===----------------------------------------------------------------------===// 8386 // Vector related lowering. 8387 // 8388 8389 /// BuildSplatI - Build a canonical splati of Val with an element size of 8390 /// SplatSize. Cast the result to VT. 8391 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, 8392 SelectionDAG &DAG, const SDLoc &dl) { 8393 static const MVT VTys[] = { // canonical VT to use for each size. 8394 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 8395 }; 8396 8397 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 8398 8399 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 8400 if (Val == -1) 8401 SplatSize = 1; 8402 8403 EVT CanonicalVT = VTys[SplatSize-1]; 8404 8405 // Build a canonical splat for this value. 8406 return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT)); 8407 } 8408 8409 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the 8410 /// specified intrinsic ID. 8411 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG, 8412 const SDLoc &dl, EVT DestVT = MVT::Other) { 8413 if (DestVT == MVT::Other) DestVT = Op.getValueType(); 8414 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 8415 DAG.getConstant(IID, dl, MVT::i32), Op); 8416 } 8417 8418 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the 8419 /// specified intrinsic ID. 8420 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, 8421 SelectionDAG &DAG, const SDLoc &dl, 8422 EVT DestVT = MVT::Other) { 8423 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 8424 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 8425 DAG.getConstant(IID, dl, MVT::i32), LHS, RHS); 8426 } 8427 8428 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 8429 /// specified intrinsic ID. 8430 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, 8431 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl, 8432 EVT DestVT = MVT::Other) { 8433 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 8434 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 8435 DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2); 8436 } 8437 8438 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 8439 /// amount. The result has the specified value type. 8440 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT, 8441 SelectionDAG &DAG, const SDLoc &dl) { 8442 // Force LHS/RHS to be the right type. 
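// The mask built below selects bytes Amt .. Amt+15 of the LHS:RHS byte
// concatenation, matching the (big-endian element order) semantics of the
// vsldoi instruction this shuffle is intended to become.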
8443 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS); 8444 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS); 8445 8446 int Ops[16]; 8447 for (unsigned i = 0; i != 16; ++i) 8448 Ops[i] = i + Amt; 8449 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops); 8450 return DAG.getNode(ISD::BITCAST, dl, VT, T); 8451 } 8452 8453 /// Do we have an efficient pattern in a .td file for this node? 8454 /// 8455 /// \param V - pointer to the BuildVectorSDNode being matched 8456 /// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves? 8457 /// 8458 /// There are some patterns where it is beneficial to keep a BUILD_VECTOR 8459 /// node as a BUILD_VECTOR node rather than expanding it. The patterns where 8460 /// the opposite is true (expansion is beneficial) are: 8461 /// - The node builds a vector out of integers that are not 32 or 64-bits 8462 /// - The node builds a vector out of constants 8463 /// - The node is a "load-and-splat" 8464 /// In all other cases, we will choose to keep the BUILD_VECTOR. 8465 static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V, 8466 bool HasDirectMove, 8467 bool HasP8Vector) { 8468 EVT VecVT = V->getValueType(0); 8469 bool RightType = VecVT == MVT::v2f64 || 8470 (HasP8Vector && VecVT == MVT::v4f32) || 8471 (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32)); 8472 if (!RightType) 8473 return false; 8474 8475 bool IsSplat = true; 8476 bool IsLoad = false; 8477 SDValue Op0 = V->getOperand(0); 8478 8479 // This function is called in a block that confirms the node is not a constant 8480 // splat. So a constant BUILD_VECTOR here means the vector is built out of 8481 // different constants. 8482 if (V->isConstant()) 8483 return false; 8484 for (int i = 0, e = V->getNumOperands(); i < e; ++i) { 8485 if (V->getOperand(i).isUndef()) 8486 return false; 8487 // We want to expand nodes that represent load-and-splat even if the 8488 // loaded value is a floating point truncation or conversion to int. 8489 if (V->getOperand(i).getOpcode() == ISD::LOAD || 8490 (V->getOperand(i).getOpcode() == ISD::FP_ROUND && 8491 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) || 8492 (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT && 8493 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) || 8494 (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT && 8495 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD)) 8496 IsLoad = true; 8497 // If the operands are different or the input is not a load and has more 8498 // uses than just this BV node, then it isn't a splat. 8499 if (V->getOperand(i) != Op0 || 8500 (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode()))) 8501 IsSplat = false; 8502 } 8503 return !(IsSplat && IsLoad); 8504 } 8505 8506 // Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128. 
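// This pattern shows up when a quad-precision value is assembled from two
// i64 halves; matching it here combines the halves with a single
// PPCISD::BUILD_FP128 node instead of letting the generic legalizer expand
// the bitcast.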
8507 SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const { 8508 8509 SDLoc dl(Op); 8510 SDValue Op0 = Op->getOperand(0); 8511 8512 if (!EnableQuadPrecision || 8513 (Op.getValueType() != MVT::f128 ) || 8514 (Op0.getOpcode() != ISD::BUILD_PAIR) || 8515 (Op0.getOperand(0).getValueType() != MVT::i64) || 8516 (Op0.getOperand(1).getValueType() != MVT::i64)) 8517 return SDValue(); 8518 8519 return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0), 8520 Op0.getOperand(1)); 8521 } 8522 8523 static const SDValue *getNormalLoadInput(const SDValue &Op) { 8524 const SDValue *InputLoad = &Op; 8525 if (InputLoad->getOpcode() == ISD::BITCAST) 8526 InputLoad = &InputLoad->getOperand(0); 8527 if (InputLoad->getOpcode() == ISD::SCALAR_TO_VECTOR) 8528 InputLoad = &InputLoad->getOperand(0); 8529 if (InputLoad->getOpcode() != ISD::LOAD) 8530 return nullptr; 8531 LoadSDNode *LD = cast<LoadSDNode>(*InputLoad); 8532 return ISD::isNormalLoad(LD) ? InputLoad : nullptr; 8533 } 8534 8535 // If this is a case we can't handle, return null and let the default 8536 // expansion code take care of it. If we CAN select this case, and if it 8537 // selects to a single instruction, return Op. Otherwise, if we can codegen 8538 // this case more efficiently than a constant pool load, lower it to the 8539 // sequence of ops that should be used. 8540 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, 8541 SelectionDAG &DAG) const { 8542 SDLoc dl(Op); 8543 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 8544 assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR"); 8545 8546 if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) { 8547 // We first build an i32 vector, load it into a QPX register, 8548 // then convert it to a floating-point vector and compare it 8549 // to a zero vector to get the boolean result. 
8550 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 8551 int FrameIdx = MFI.CreateStackObject(16, 16, false); 8552 MachinePointerInfo PtrInfo = 8553 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 8554 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 8555 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 8556 8557 assert(BVN->getNumOperands() == 4 && 8558 "BUILD_VECTOR for v4i1 does not have 4 operands"); 8559 8560 bool IsConst = true; 8561 for (unsigned i = 0; i < 4; ++i) { 8562 if (BVN->getOperand(i).isUndef()) continue; 8563 if (!isa<ConstantSDNode>(BVN->getOperand(i))) { 8564 IsConst = false; 8565 break; 8566 } 8567 } 8568 8569 if (IsConst) { 8570 Constant *One = 8571 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0); 8572 Constant *NegOne = 8573 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0); 8574 8575 Constant *CV[4]; 8576 for (unsigned i = 0; i < 4; ++i) { 8577 if (BVN->getOperand(i).isUndef()) 8578 CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext())); 8579 else if (isNullConstant(BVN->getOperand(i))) 8580 CV[i] = NegOne; 8581 else 8582 CV[i] = One; 8583 } 8584 8585 Constant *CP = ConstantVector::get(CV); 8586 SDValue CPIdx = DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()), 8587 16 /* alignment */); 8588 8589 SDValue Ops[] = {DAG.getEntryNode(), CPIdx}; 8590 SDVTList VTs = DAG.getVTList({MVT::v4i1, /*chain*/ MVT::Other}); 8591 return DAG.getMemIntrinsicNode( 8592 PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32, 8593 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 8594 } 8595 8596 SmallVector<SDValue, 4> Stores; 8597 for (unsigned i = 0; i < 4; ++i) { 8598 if (BVN->getOperand(i).isUndef()) continue; 8599 8600 unsigned Offset = 4*i; 8601 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 8602 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 8603 8604 unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize(); 8605 if (StoreSize > 4) { 8606 Stores.push_back( 8607 DAG.getTruncStore(DAG.getEntryNode(), dl, BVN->getOperand(i), Idx, 8608 PtrInfo.getWithOffset(Offset), MVT::i32)); 8609 } else { 8610 SDValue StoreValue = BVN->getOperand(i); 8611 if (StoreSize < 4) 8612 StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue); 8613 8614 Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, StoreValue, Idx, 8615 PtrInfo.getWithOffset(Offset))); 8616 } 8617 } 8618 8619 SDValue StoreChain; 8620 if (!Stores.empty()) 8621 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 8622 else 8623 StoreChain = DAG.getEntryNode(); 8624 8625 // Now load from v4i32 into the QPX register; this will extend it to 8626 // v4i64 but not yet convert it to a floating point. Nevertheless, this 8627 // is typed as v4f64 because the QPX register integer states are not 8628 // explicitly represented. 8629 8630 SDValue Ops[] = {StoreChain, 8631 DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32), 8632 FIdx}; 8633 SDVTList VTs = DAG.getVTList({MVT::v4f64, /*chain*/ MVT::Other}); 8634 8635 SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, 8636 dl, VTs, Ops, MVT::v4i32, PtrInfo); 8637 LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 8638 DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32), 8639 LoadedVect); 8640 8641 SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::v4f64); 8642 8643 return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ); 8644 } 8645 8646 // All other QPX vectors are handled by generic code. 
8647 if (Subtarget.hasQPX())
8648 return SDValue();
8649
8650 // Check if this is a splat of a constant value.
8651 APInt APSplatBits, APSplatUndef;
8652 unsigned SplatBitSize;
8653 bool HasAnyUndefs;
8654 if (!BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
8655 HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
8656 SplatBitSize > 32) {
8657
8658 const SDValue *InputLoad = getNormalLoadInput(Op.getOperand(0));
8659 // Handle load-and-splat patterns as we have instructions that will do this
8660 // in one go.
8661 if (InputLoad && DAG.isSplatValue(Op, true)) {
8662 LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
8663
8664 // We have handling for 4 and 8 byte elements.
8665 unsigned ElementSize = LD->getMemoryVT().getScalarSizeInBits();
8666
8667 // To check for a single use of this load, we have to check for vector
8668 // width (128 bits) / ElementSize uses (since each operand of the
8669 // BUILD_VECTOR is a separate use of the value).
8670 if (InputLoad->getNode()->hasNUsesOfValue(128 / ElementSize, 0) &&
8671 ((Subtarget.hasVSX() && ElementSize == 64) ||
8672 (Subtarget.hasP9Vector() && ElementSize == 32))) {
8673 SDValue Ops[] = {
8674 LD->getChain(), // Chain
8675 LD->getBasePtr(), // Ptr
8676 DAG.getValueType(Op.getValueType()) // VT
8677 };
8678 return
8679 DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl,
8680 DAG.getVTList(Op.getValueType(), MVT::Other),
8681 Ops, LD->getMemoryVT(), LD->getMemOperand());
8682 }
8683 }
8684
8685 // BUILD_VECTOR nodes that are not constant splats of up to 32-bits can be
8686 // lowered to VSX instructions under certain conditions.
8687 // Without VSX, there is no pattern more efficient than expanding the node.
8688 if (Subtarget.hasVSX() &&
8689 haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
8690 Subtarget.hasP8Vector()))
8691 return Op;
8692 return SDValue();
8693 }
8694
8695 unsigned SplatBits = APSplatBits.getZExtValue();
8696 unsigned SplatUndef = APSplatUndef.getZExtValue();
8697 unsigned SplatSize = SplatBitSize / 8;
8698
8699 // First, handle single instruction cases.
8700
8701 // All zeros?
8702 if (SplatBits == 0) {
8703 // Canonicalize all zero vectors to be v4i32.
8704 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
8705 SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
8706 Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
8707 }
8708 return Op;
8709 }
8710
8711 // We have XXSPLTIB for constant splats one byte wide.
8712 // FIXME: SplatBits is an unsigned int being cast to an int while passing it
8713 // as an argument to BuildSplatI. Given SplatSize == 1 it is okay here.
8714 if (Subtarget.hasP9Vector() && SplatSize == 1)
8715 return BuildSplatI(SplatBits, SplatSize, Op.getValueType(), DAG, dl);
8716
8717 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
8718 int32_t SextVal = (int32_t(SplatBits << (32-SplatBitSize)) >>
8719 (32-SplatBitSize));
8720 if (SextVal >= -16 && SextVal <= 15)
8721 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl);
8722
8723 // Two instruction sequences.
8724
8725 // If this value is in the range [-32,30] and is even, use:
8726 // VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
8727 // If this value is in the range [17,31] and is odd, use:
8728 // VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
8729 // If this value is in the range [-31,-17] and is odd, use:
8730 // VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
8731 // Note the last two are three-instruction sequences.
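// For example (a sketch): a splat of 24 becomes vspltisw 12 followed by an
// add of the splat with itself (12 + 12 == 24), while a splat of 27
// becomes vspltisw 11, vspltisw -16, and a subtract (11 - (-16) == 27).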
  if (SextVal >= -32 && SextVal <= 31) {
    // To avoid having these optimizations undone by constant folding,
    // we convert to a pseudo that will be expanded later into one of
    // the above forms.
    SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
    EVT VT = (SplatSize == 1 ? MVT::v16i8 :
              (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
    SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
    SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
    if (VT == Op.getValueType())
      return RetVal;
    else
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
  }

  // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is
  // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important
  // for fneg/fabs.
  if (SplatSize == 4 && SplatBits == (0x7FFFFFFF & ~SplatUndef)) {
    // Make -1 and vspltisw -1:
    SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);

    // Make the VSLW intrinsic, computing 0x8000_0000.
    SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
                                   OnesV, DAG, dl);

    // xor by OnesV to invert it.
    Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
    return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
  }

  // Check to see if this is one of the wide variety of 'vsplti*, binop self'
  // cases.
  static const signed char SplatCsts[] = {
    -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
    -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
  };

  for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
    // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
    // cases which are ambiguous (e.g. formation of 0x8000_0000).
    int i = SplatCsts[idx];

    // Figure out what shift amount will be used by altivec if shifted by i in
    // this splat size.
    unsigned TypeShiftAmt = i & (SplatBitSize-1);

    // vsplti + shl self.
    if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
        Intrinsic::ppc_altivec_vslw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + srl self.
    if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
        Intrinsic::ppc_altivec_vsrw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + sra self. Note that, unlike the srl case above, this uses an
    // arithmetic shift of the splat constant.
    if (SextVal == (i >> TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
        Intrinsic::ppc_altivec_vsraw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + rol self.
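    // For example, a v4i32 splat of 0xFFF0FFFF is vspltisw(-16) rotated
    // left by 16 (vrlw), since -16 & 31 == 16.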
8812 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) | 8813 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) { 8814 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 8815 static const unsigned IIDs[] = { // Intrinsic to use for each size. 8816 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0, 8817 Intrinsic::ppc_altivec_vrlw 8818 }; 8819 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 8820 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 8821 } 8822 8823 // t = vsplti c, result = vsldoi t, t, 1 8824 if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) { 8825 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 8826 unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1; 8827 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 8828 } 8829 // t = vsplti c, result = vsldoi t, t, 2 8830 if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) { 8831 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 8832 unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2; 8833 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 8834 } 8835 // t = vsplti c, result = vsldoi t, t, 3 8836 if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) { 8837 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 8838 unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3; 8839 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 8840 } 8841 } 8842 8843 return SDValue(); 8844 } 8845 8846 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 8847 /// the specified operations to build the shuffle. 8848 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 8849 SDValue RHS, SelectionDAG &DAG, 8850 const SDLoc &dl) { 8851 unsigned OpNum = (PFEntry >> 26) & 0x0F; 8852 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 8853 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 8854 8855 enum { 8856 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 8857 OP_VMRGHW, 8858 OP_VMRGLW, 8859 OP_VSPLTISW0, 8860 OP_VSPLTISW1, 8861 OP_VSPLTISW2, 8862 OP_VSPLTISW3, 8863 OP_VSLDOI4, 8864 OP_VSLDOI8, 8865 OP_VSLDOI12 8866 }; 8867 8868 if (OpNum == OP_COPY) { 8869 if (LHSID == (1*9+2)*9+3) return LHS; 8870 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 8871 return RHS; 8872 } 8873 8874 SDValue OpLHS, OpRHS; 8875 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 8876 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 8877 8878 int ShufIdxs[16]; 8879 switch (OpNum) { 8880 default: llvm_unreachable("Unknown i32 permute!"); 8881 case OP_VMRGHW: 8882 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 8883 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 8884 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 8885 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 8886 break; 8887 case OP_VMRGLW: 8888 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 8889 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 8890 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 8891 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 8892 break; 8893 case OP_VSPLTISW0: 8894 for (unsigned i = 0; i != 16; ++i) 8895 ShufIdxs[i] = (i&3)+0; 8896 break; 8897 case OP_VSPLTISW1: 8898 for (unsigned i = 0; i != 16; ++i) 8899 ShufIdxs[i] = (i&3)+4; 
    break;
  case OP_VSPLTISW2:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+8;
    break;
  case OP_VSPLTISW3:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+12;
    break;
  case OP_VSLDOI4:
    return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
  case OP_VSLDOI8:
    return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
  case OP_VSLDOI12:
    return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
  }
  EVT VT = OpLHS.getValueType();
  OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
  OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
  SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
  return DAG.getNode(ISD::BITCAST, dl, VT, T);
}

/// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
/// by the VINSERTB instruction introduced in ISA 3.0; otherwise return a
/// default-constructed SDValue.
SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
                                           SelectionDAG &DAG) const {
  const unsigned BytesInVector = 16;
  bool IsLE = Subtarget.isLittleEndian();
  SDLoc dl(N);
  SDValue V1 = N->getOperand(0);
  SDValue V2 = N->getOperand(1);
  unsigned ShiftElts = 0, InsertAtByte = 0;
  bool Swap = false;

  // Shifts required to get the byte we want at element 7.
  unsigned LittleEndianShifts[] = {8, 7, 6, 5, 4, 3, 2, 1,
                                   0, 15, 14, 13, 12, 11, 10, 9};
  unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
                                1, 2, 3, 4, 5, 6, 7, 8};

  ArrayRef<int> Mask = N->getMask();
  int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};

  // For each mask element, find out if we're just inserting something
  // from V2 into V1 or vice versa.
  // Possible permutations inserting an element from V2 into V1:
  //   X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
  //   0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
  //   ...
  //   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
  // Inserting from V1 into V2 will be similar, except the mask range will be
  // [16,31].

  bool FoundCandidate = false;
  // If both vector operands for the shuffle are the same vector, the mask
  // will contain only elements from the first one and the second one will be
  // undef.
  unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
  // Go through the mask of bytes to find an element that's being moved
  // from one vector to the other.
  for (unsigned i = 0; i < BytesInVector; ++i) {
    unsigned CurrentElement = Mask[i];
    // If the 2nd operand is undefined, we should only look for the VINSERTB
    // source element in the mask.
    if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
      continue;

    bool OtherElementsInOrder = true;
    // Examine the other elements in the Mask to see if they're in original
    // order.
    for (unsigned j = 0; j < BytesInVector; ++j) {
      if (j == i)
        continue;
      // If CurrentElement is from V1 [0,15], then we expect the rest of the
      // Mask to be from V2 [16,31] and vice versa, unless the 2nd operand is
      // undefined, in which case we assume we're always picking from the 1st
      // operand.
      int MaskOffset =
          (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector
                                                            : 0;
      if (Mask[j] != OriginalOrder[j] + MaskOffset) {
        OtherElementsInOrder = false;
        break;
      }
    }
    // If the other elements are in original order, record the number of
    // shifts we need to get the element we want into element 7, and also
    // which byte in the vector we should insert into.
    if (OtherElementsInOrder) {
      // If the 2nd operand is undefined, we assume no shifts and no swapping.
      if (V2.isUndef()) {
        ShiftElts = 0;
        Swap = false;
      } else {
        // Only need the last 4 bits for shifts because operands will be
        // swapped if CurrentElement is >= 2^4.
        ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
                         : BigEndianShifts[CurrentElement & 0xF];
        Swap = CurrentElement < BytesInVector;
      }
      InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
      FoundCandidate = true;
      break;
    }
  }

  if (!FoundCandidate)
    return SDValue();

  // Candidate found, construct the proper SDAG sequence with VINSERTB,
  // optionally with VECSHL if a shift is required.
  if (Swap)
    std::swap(V1, V2);
  if (V2.isUndef())
    V2 = V1;
  if (ShiftElts) {
    SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
                              DAG.getConstant(ShiftElts, dl, MVT::i32));
    return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl,
                       DAG.getConstant(InsertAtByte, dl, MVT::i32));
  }
  return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2,
                     DAG.getConstant(InsertAtByte, dl, MVT::i32));
}

/// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled
/// by the VINSERTH instruction introduced in ISA 3.0; otherwise return a
/// default-constructed SDValue.
SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
                                           SelectionDAG &DAG) const {
  const unsigned NumHalfWords = 8;
  const unsigned BytesInVector = NumHalfWords * 2;
  // Check that the shuffle is on half-words.
  if (!isNByteElemShuffleMask(N, 2, 1))
    return SDValue();

  bool IsLE = Subtarget.isLittleEndian();
  SDLoc dl(N);
  SDValue V1 = N->getOperand(0);
  SDValue V2 = N->getOperand(1);
  unsigned ShiftElts = 0, InsertAtByte = 0;
  bool Swap = false;

  // Shifts required to get the half-word we want at element 3.
  unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
  unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};

  uint32_t Mask = 0;
  uint32_t OriginalOrderLow = 0x1234567;
  uint32_t OriginalOrderHigh = 0x89ABCDEF;
  // Now we look at mask elements 0,2,4,6,8,10,12,14. Pack the mask into a
  // 32-bit space, only needing a 4-bit nibble per element.
  for (unsigned i = 0; i < NumHalfWords; ++i) {
    unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
    Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift);
  }

  // For each mask element, find out if we're just inserting something
  // from V2 into V1 or vice versa. Possible permutations inserting an element
  // from V2 into V1:
  //   X, 1, 2, 3, 4, 5, 6, 7
  //   0, X, 2, 3, 4, 5, 6, 7
  //   0, 1, X, 3, 4, 5, 6, 7
  //   0, 1, 2, X, 4, 5, 6, 7
  //   0, 1, 2, 3, X, 5, 6, 7
  //   0, 1, 2, 3, 4, X, 6, 7
  //   0, 1, 2, 3, 4, 5, X, 7
  //   0, 1, 2, 3, 4, 5, 6, X
  // Inserting from V1 into V2 will be similar, except the mask range will be
  // [8,15].
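  // For example, the half-word mask <0,1,10,3,4,5,6,7> packs to 0x01A34567:
  // nibble 2 holds the inserted element (element 10, i.e. element 2 of V2),
  // and the remaining nibbles match the original order 0x01234567.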

  bool FoundCandidate = false;
  // Go through the mask of half-words to find an element that's being moved
  // from one vector to the other.
  for (unsigned i = 0; i < NumHalfWords; ++i) {
    unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
    uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF;
    uint32_t MaskOtherElts = ~(0xF << MaskShift);
    uint32_t TargetOrder = 0x0;

    // If both vector operands for the shuffle are the same vector, the mask
    // will contain only elements from the first one and the second one will be
    // undef.
    if (V2.isUndef()) {
      ShiftElts = 0;
      unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
      TargetOrder = OriginalOrderLow;
      Swap = false;
      // Skip if this isn't the correct element or if the mask of the other
      // elements doesn't match our expected order.
      if (MaskOneElt == VINSERTHSrcElem &&
          (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
        InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
        FoundCandidate = true;
        break;
      }
    } else { // If both operands are defined.
      // Target order is [8,15] if the current mask is between [0,7].
      TargetOrder =
          (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
      // Skip if the mask of the other elements doesn't match our expected
      // order.
      if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
        // We only need the last 3 bits for the number of shifts.
        ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
                         : BigEndianShifts[MaskOneElt & 0x7];
        InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
        Swap = MaskOneElt < NumHalfWords;
        FoundCandidate = true;
        break;
      }
    }
  }

  if (!FoundCandidate)
    return SDValue();

  // Candidate found, construct the proper SDAG sequence with VINSERTH,
  // optionally with VECSHL if a shift is required.
  if (Swap)
    std::swap(V1, V2);
  if (V2.isUndef())
    V2 = V1;
  SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
  if (ShiftElts) {
    // Double ShiftElts because we're left shifting on v16i8 type.
    SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
                              DAG.getConstant(2 * ShiftElts, dl, MVT::i32));
    SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl);
    SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
                              DAG.getConstant(InsertAtByte, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
  }
  SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
  SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
                            DAG.getConstant(InsertAtByte, dl, MVT::i32));
  return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
}

/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this
/// is a shuffle we can handle in a single instruction, return it. Otherwise,
/// return the code it can be lowered into. Worst case, it can always be
/// lowered into a vperm.
9140 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 9141 SelectionDAG &DAG) const { 9142 SDLoc dl(Op); 9143 SDValue V1 = Op.getOperand(0); 9144 SDValue V2 = Op.getOperand(1); 9145 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 9146 EVT VT = Op.getValueType(); 9147 bool isLittleEndian = Subtarget.isLittleEndian(); 9148 9149 unsigned ShiftElts, InsertAtByte; 9150 bool Swap = false; 9151 9152 // If this is a load-and-splat, we can do that with a single instruction 9153 // in some cases. However if the load has multiple uses, we don't want to 9154 // combine it because that will just produce multiple loads. 9155 const SDValue *InputLoad = getNormalLoadInput(V1); 9156 if (InputLoad && Subtarget.hasVSX() && V2.isUndef() && 9157 (PPC::isSplatShuffleMask(SVOp, 4) || PPC::isSplatShuffleMask(SVOp, 8)) && 9158 InputLoad->hasOneUse()) { 9159 bool IsFourByte = PPC::isSplatShuffleMask(SVOp, 4); 9160 int SplatIdx = 9161 PPC::getSplatIdxForPPCMnemonics(SVOp, IsFourByte ? 4 : 8, DAG); 9162 9163 LoadSDNode *LD = cast<LoadSDNode>(*InputLoad); 9164 // For 4-byte load-and-splat, we need Power9. 9165 if ((IsFourByte && Subtarget.hasP9Vector()) || !IsFourByte) { 9166 uint64_t Offset = 0; 9167 if (IsFourByte) 9168 Offset = isLittleEndian ? (3 - SplatIdx) * 4 : SplatIdx * 4; 9169 else 9170 Offset = isLittleEndian ? (1 - SplatIdx) * 8 : SplatIdx * 8; 9171 SDValue BasePtr = LD->getBasePtr(); 9172 if (Offset != 0) 9173 BasePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()), 9174 BasePtr, DAG.getIntPtrConstant(Offset, dl)); 9175 SDValue Ops[] = { 9176 LD->getChain(), // Chain 9177 BasePtr, // BasePtr 9178 DAG.getValueType(Op.getValueType()) // VT 9179 }; 9180 SDVTList VTL = 9181 DAG.getVTList(IsFourByte ? MVT::v4i32 : MVT::v2i64, MVT::Other); 9182 SDValue LdSplt = 9183 DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl, VTL, 9184 Ops, LD->getMemoryVT(), LD->getMemOperand()); 9185 if (LdSplt.getValueType() != SVOp->getValueType(0)) 9186 LdSplt = DAG.getBitcast(SVOp->getValueType(0), LdSplt); 9187 return LdSplt; 9188 } 9189 } 9190 if (Subtarget.hasP9Vector() && 9191 PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap, 9192 isLittleEndian)) { 9193 if (Swap) 9194 std::swap(V1, V2); 9195 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 9196 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2); 9197 if (ShiftElts) { 9198 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2, 9199 DAG.getConstant(ShiftElts, dl, MVT::i32)); 9200 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl, 9201 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 9202 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 9203 } 9204 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2, 9205 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 9206 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 9207 } 9208 9209 if (Subtarget.hasP9Altivec()) { 9210 SDValue NewISDNode; 9211 if ((NewISDNode = lowerToVINSERTH(SVOp, DAG))) 9212 return NewISDNode; 9213 9214 if ((NewISDNode = lowerToVINSERTB(SVOp, DAG))) 9215 return NewISDNode; 9216 } 9217 9218 if (Subtarget.hasVSX() && 9219 PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) { 9220 if (Swap) 9221 std::swap(V1, V2); 9222 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 9223 SDValue Conv2 = 9224 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? 
V1 : V2); 9225 9226 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2, 9227 DAG.getConstant(ShiftElts, dl, MVT::i32)); 9228 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl); 9229 } 9230 9231 if (Subtarget.hasVSX() && 9232 PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) { 9233 if (Swap) 9234 std::swap(V1, V2); 9235 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1); 9236 SDValue Conv2 = 9237 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2); 9238 9239 SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2, 9240 DAG.getConstant(ShiftElts, dl, MVT::i32)); 9241 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI); 9242 } 9243 9244 if (Subtarget.hasP9Vector()) { 9245 if (PPC::isXXBRHShuffleMask(SVOp)) { 9246 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 9247 SDValue ReveHWord = DAG.getNode(ISD::BSWAP, dl, MVT::v8i16, Conv); 9248 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord); 9249 } else if (PPC::isXXBRWShuffleMask(SVOp)) { 9250 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 9251 SDValue ReveWord = DAG.getNode(ISD::BSWAP, dl, MVT::v4i32, Conv); 9252 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord); 9253 } else if (PPC::isXXBRDShuffleMask(SVOp)) { 9254 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1); 9255 SDValue ReveDWord = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Conv); 9256 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord); 9257 } else if (PPC::isXXBRQShuffleMask(SVOp)) { 9258 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1); 9259 SDValue ReveQWord = DAG.getNode(ISD::BSWAP, dl, MVT::v1i128, Conv); 9260 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord); 9261 } 9262 } 9263 9264 if (Subtarget.hasVSX()) { 9265 if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) { 9266 int SplatIdx = PPC::getSplatIdxForPPCMnemonics(SVOp, 4, DAG); 9267 9268 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 9269 SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv, 9270 DAG.getConstant(SplatIdx, dl, MVT::i32)); 9271 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat); 9272 } 9273 9274 // Left shifts of 8 bytes are actually swaps. Convert accordingly. 9275 if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) { 9276 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1); 9277 SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv); 9278 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap); 9279 } 9280 } 9281 9282 if (Subtarget.hasQPX()) { 9283 if (VT.getVectorNumElements() != 4) 9284 return SDValue(); 9285 9286 if (V2.isUndef()) V2 = V1; 9287 9288 int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp); 9289 if (AlignIdx != -1) { 9290 return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2, 9291 DAG.getConstant(AlignIdx, dl, MVT::i32)); 9292 } else if (SVOp->isSplat()) { 9293 int SplatIdx = SVOp->getSplatIndex(); 9294 if (SplatIdx >= 4) { 9295 std::swap(V1, V2); 9296 SplatIdx -= 4; 9297 } 9298 9299 return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1, 9300 DAG.getConstant(SplatIdx, dl, MVT::i32)); 9301 } 9302 9303 // Lower this into a qvgpci/qvfperm pair. 9304 9305 // Compute the qvgpci literal 9306 unsigned idx = 0; 9307 for (unsigned i = 0; i < 4; ++i) { 9308 int m = SVOp->getMaskElt(i); 9309 unsigned mm = m >= 0 ? 
(unsigned) m : i; 9310 idx |= mm << (3-i)*3; 9311 } 9312 9313 SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64, 9314 DAG.getConstant(idx, dl, MVT::i32)); 9315 return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3); 9316 } 9317 9318 // Cases that are handled by instructions that take permute immediates 9319 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 9320 // selected by the instruction selector. 9321 if (V2.isUndef()) { 9322 if (PPC::isSplatShuffleMask(SVOp, 1) || 9323 PPC::isSplatShuffleMask(SVOp, 2) || 9324 PPC::isSplatShuffleMask(SVOp, 4) || 9325 PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) || 9326 PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) || 9327 PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 || 9328 PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) || 9329 PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) || 9330 PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) || 9331 PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) || 9332 PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) || 9333 PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) || 9334 (Subtarget.hasP8Altivec() && ( 9335 PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) || 9336 PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) || 9337 PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) { 9338 return Op; 9339 } 9340 } 9341 9342 // Altivec has a variety of "shuffle immediates" that take two vector inputs 9343 // and produce a fixed permutation. If any of these match, do not lower to 9344 // VPERM. 9345 unsigned int ShuffleKind = isLittleEndian ? 2 : 0; 9346 if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) || 9347 PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) || 9348 PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 || 9349 PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) || 9350 PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) || 9351 PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) || 9352 PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) || 9353 PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) || 9354 PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) || 9355 (Subtarget.hasP8Altivec() && ( 9356 PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) || 9357 PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) || 9358 PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG)))) 9359 return Op; 9360 9361 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 9362 // perfect shuffle table to emit an optimal matching sequence. 9363 ArrayRef<int> PermMask = SVOp->getMask(); 9364 9365 unsigned PFIndexes[4]; 9366 bool isFourElementShuffle = true; 9367 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number 9368 unsigned EltNo = 8; // Start out undef. 9369 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 9370 if (PermMask[i*4+j] < 0) 9371 continue; // Undef, ignore it. 9372 9373 unsigned ByteSource = PermMask[i*4+j]; 9374 if ((ByteSource & 3) != j) { 9375 isFourElementShuffle = false; 9376 break; 9377 } 9378 9379 if (EltNo == 8) { 9380 EltNo = ByteSource/4; 9381 } else if (EltNo != ByteSource/4) { 9382 isFourElementShuffle = false; 9383 break; 9384 } 9385 } 9386 PFIndexes[i] = EltNo; 9387 } 9388 9389 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 9390 // perfect shuffle vector to determine if it is cost effective to do this as 9391 // discrete instructions, or whether we should use a vperm. 9392 // For now, we skip this for little endian until such time as we have a 9393 // little-endian perfect shuffle table. 
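  // For example, the identity word shuffle <0,1,2,3> gives PFIndexes
  // {0,1,2,3}, i.e. perfect-shuffle table index ((0*9+1)*9+2)*9+3 == 102,
  // which GeneratePerfectShuffle recognizes as an OP_COPY of the LHS.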
9394 if (isFourElementShuffle && !isLittleEndian) { 9395 // Compute the index in the perfect shuffle table. 9396 unsigned PFTableIndex = 9397 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 9398 9399 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 9400 unsigned Cost = (PFEntry >> 30); 9401 9402 // Determining when to avoid vperm is tricky. Many things affect the cost 9403 // of vperm, particularly how many times the perm mask needs to be computed. 9404 // For example, if the perm mask can be hoisted out of a loop or is already 9405 // used (perhaps because there are multiple permutes with the same shuffle 9406 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of 9407 // the loop requires an extra register. 9408 // 9409 // As a compromise, we only emit discrete instructions if the shuffle can be 9410 // generated in 3 or fewer operations. When we have loop information 9411 // available, if this block is within a loop, we should avoid using vperm 9412 // for 3-operation perms and use a constant pool load instead. 9413 if (Cost < 3) 9414 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 9415 } 9416 9417 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant 9418 // vector that will get spilled to the constant pool. 9419 if (V2.isUndef()) V2 = V1; 9420 9421 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except 9422 // that it is in input element units, not in bytes. Convert now. 9423 9424 // For little endian, the order of the input vectors is reversed, and 9425 // the permutation mask is complemented with respect to 31. This is 9426 // necessary to produce proper semantics with the big-endian-biased vperm 9427 // instruction. 9428 EVT EltVT = V1.getValueType().getVectorElementType(); 9429 unsigned BytesPerElement = EltVT.getSizeInBits()/8; 9430 9431 SmallVector<SDValue, 16> ResultMask; 9432 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 9433 unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i]; 9434 9435 for (unsigned j = 0; j != BytesPerElement; ++j) 9436 if (isLittleEndian) 9437 ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j), 9438 dl, MVT::i32)); 9439 else 9440 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl, 9441 MVT::i32)); 9442 } 9443 9444 SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask); 9445 if (isLittleEndian) 9446 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), 9447 V2, V1, VPermMask); 9448 else 9449 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), 9450 V1, V2, VPermMask); 9451 } 9452 9453 /// getVectorCompareInfo - Given an intrinsic, return false if it is not a 9454 /// vector comparison. If it is, return true and fill in Opc/isDot with 9455 /// information about the intrinsic. 9456 static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc, 9457 bool &isDot, const PPCSubtarget &Subtarget) { 9458 unsigned IntrinsicID = 9459 cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue(); 9460 CompareOpc = -1; 9461 isDot = false; 9462 switch (IntrinsicID) { 9463 default: 9464 return false; 9465 // Comparison predicates. 
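  // The CompareOpc values below correspond to the extended-opcode fields of
  // the matching vcmp*/xvcmp* instructions.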
9466 case Intrinsic::ppc_altivec_vcmpbfp_p: 9467 CompareOpc = 966; 9468 isDot = true; 9469 break; 9470 case Intrinsic::ppc_altivec_vcmpeqfp_p: 9471 CompareOpc = 198; 9472 isDot = true; 9473 break; 9474 case Intrinsic::ppc_altivec_vcmpequb_p: 9475 CompareOpc = 6; 9476 isDot = true; 9477 break; 9478 case Intrinsic::ppc_altivec_vcmpequh_p: 9479 CompareOpc = 70; 9480 isDot = true; 9481 break; 9482 case Intrinsic::ppc_altivec_vcmpequw_p: 9483 CompareOpc = 134; 9484 isDot = true; 9485 break; 9486 case Intrinsic::ppc_altivec_vcmpequd_p: 9487 if (Subtarget.hasP8Altivec()) { 9488 CompareOpc = 199; 9489 isDot = true; 9490 } else 9491 return false; 9492 break; 9493 case Intrinsic::ppc_altivec_vcmpneb_p: 9494 case Intrinsic::ppc_altivec_vcmpneh_p: 9495 case Intrinsic::ppc_altivec_vcmpnew_p: 9496 case Intrinsic::ppc_altivec_vcmpnezb_p: 9497 case Intrinsic::ppc_altivec_vcmpnezh_p: 9498 case Intrinsic::ppc_altivec_vcmpnezw_p: 9499 if (Subtarget.hasP9Altivec()) { 9500 switch (IntrinsicID) { 9501 default: 9502 llvm_unreachable("Unknown comparison intrinsic."); 9503 case Intrinsic::ppc_altivec_vcmpneb_p: 9504 CompareOpc = 7; 9505 break; 9506 case Intrinsic::ppc_altivec_vcmpneh_p: 9507 CompareOpc = 71; 9508 break; 9509 case Intrinsic::ppc_altivec_vcmpnew_p: 9510 CompareOpc = 135; 9511 break; 9512 case Intrinsic::ppc_altivec_vcmpnezb_p: 9513 CompareOpc = 263; 9514 break; 9515 case Intrinsic::ppc_altivec_vcmpnezh_p: 9516 CompareOpc = 327; 9517 break; 9518 case Intrinsic::ppc_altivec_vcmpnezw_p: 9519 CompareOpc = 391; 9520 break; 9521 } 9522 isDot = true; 9523 } else 9524 return false; 9525 break; 9526 case Intrinsic::ppc_altivec_vcmpgefp_p: 9527 CompareOpc = 454; 9528 isDot = true; 9529 break; 9530 case Intrinsic::ppc_altivec_vcmpgtfp_p: 9531 CompareOpc = 710; 9532 isDot = true; 9533 break; 9534 case Intrinsic::ppc_altivec_vcmpgtsb_p: 9535 CompareOpc = 774; 9536 isDot = true; 9537 break; 9538 case Intrinsic::ppc_altivec_vcmpgtsh_p: 9539 CompareOpc = 838; 9540 isDot = true; 9541 break; 9542 case Intrinsic::ppc_altivec_vcmpgtsw_p: 9543 CompareOpc = 902; 9544 isDot = true; 9545 break; 9546 case Intrinsic::ppc_altivec_vcmpgtsd_p: 9547 if (Subtarget.hasP8Altivec()) { 9548 CompareOpc = 967; 9549 isDot = true; 9550 } else 9551 return false; 9552 break; 9553 case Intrinsic::ppc_altivec_vcmpgtub_p: 9554 CompareOpc = 518; 9555 isDot = true; 9556 break; 9557 case Intrinsic::ppc_altivec_vcmpgtuh_p: 9558 CompareOpc = 582; 9559 isDot = true; 9560 break; 9561 case Intrinsic::ppc_altivec_vcmpgtuw_p: 9562 CompareOpc = 646; 9563 isDot = true; 9564 break; 9565 case Intrinsic::ppc_altivec_vcmpgtud_p: 9566 if (Subtarget.hasP8Altivec()) { 9567 CompareOpc = 711; 9568 isDot = true; 9569 } else 9570 return false; 9571 break; 9572 9573 // VSX predicate comparisons use the same infrastructure 9574 case Intrinsic::ppc_vsx_xvcmpeqdp_p: 9575 case Intrinsic::ppc_vsx_xvcmpgedp_p: 9576 case Intrinsic::ppc_vsx_xvcmpgtdp_p: 9577 case Intrinsic::ppc_vsx_xvcmpeqsp_p: 9578 case Intrinsic::ppc_vsx_xvcmpgesp_p: 9579 case Intrinsic::ppc_vsx_xvcmpgtsp_p: 9580 if (Subtarget.hasVSX()) { 9581 switch (IntrinsicID) { 9582 case Intrinsic::ppc_vsx_xvcmpeqdp_p: 9583 CompareOpc = 99; 9584 break; 9585 case Intrinsic::ppc_vsx_xvcmpgedp_p: 9586 CompareOpc = 115; 9587 break; 9588 case Intrinsic::ppc_vsx_xvcmpgtdp_p: 9589 CompareOpc = 107; 9590 break; 9591 case Intrinsic::ppc_vsx_xvcmpeqsp_p: 9592 CompareOpc = 67; 9593 break; 9594 case Intrinsic::ppc_vsx_xvcmpgesp_p: 9595 CompareOpc = 83; 9596 break; 9597 case Intrinsic::ppc_vsx_xvcmpgtsp_p: 9598 CompareOpc = 75; 
9599 break; 9600 } 9601 isDot = true; 9602 } else 9603 return false; 9604 break; 9605 9606 // Normal Comparisons. 9607 case Intrinsic::ppc_altivec_vcmpbfp: 9608 CompareOpc = 966; 9609 break; 9610 case Intrinsic::ppc_altivec_vcmpeqfp: 9611 CompareOpc = 198; 9612 break; 9613 case Intrinsic::ppc_altivec_vcmpequb: 9614 CompareOpc = 6; 9615 break; 9616 case Intrinsic::ppc_altivec_vcmpequh: 9617 CompareOpc = 70; 9618 break; 9619 case Intrinsic::ppc_altivec_vcmpequw: 9620 CompareOpc = 134; 9621 break; 9622 case Intrinsic::ppc_altivec_vcmpequd: 9623 if (Subtarget.hasP8Altivec()) 9624 CompareOpc = 199; 9625 else 9626 return false; 9627 break; 9628 case Intrinsic::ppc_altivec_vcmpneb: 9629 case Intrinsic::ppc_altivec_vcmpneh: 9630 case Intrinsic::ppc_altivec_vcmpnew: 9631 case Intrinsic::ppc_altivec_vcmpnezb: 9632 case Intrinsic::ppc_altivec_vcmpnezh: 9633 case Intrinsic::ppc_altivec_vcmpnezw: 9634 if (Subtarget.hasP9Altivec()) 9635 switch (IntrinsicID) { 9636 default: 9637 llvm_unreachable("Unknown comparison intrinsic."); 9638 case Intrinsic::ppc_altivec_vcmpneb: 9639 CompareOpc = 7; 9640 break; 9641 case Intrinsic::ppc_altivec_vcmpneh: 9642 CompareOpc = 71; 9643 break; 9644 case Intrinsic::ppc_altivec_vcmpnew: 9645 CompareOpc = 135; 9646 break; 9647 case Intrinsic::ppc_altivec_vcmpnezb: 9648 CompareOpc = 263; 9649 break; 9650 case Intrinsic::ppc_altivec_vcmpnezh: 9651 CompareOpc = 327; 9652 break; 9653 case Intrinsic::ppc_altivec_vcmpnezw: 9654 CompareOpc = 391; 9655 break; 9656 } 9657 else 9658 return false; 9659 break; 9660 case Intrinsic::ppc_altivec_vcmpgefp: 9661 CompareOpc = 454; 9662 break; 9663 case Intrinsic::ppc_altivec_vcmpgtfp: 9664 CompareOpc = 710; 9665 break; 9666 case Intrinsic::ppc_altivec_vcmpgtsb: 9667 CompareOpc = 774; 9668 break; 9669 case Intrinsic::ppc_altivec_vcmpgtsh: 9670 CompareOpc = 838; 9671 break; 9672 case Intrinsic::ppc_altivec_vcmpgtsw: 9673 CompareOpc = 902; 9674 break; 9675 case Intrinsic::ppc_altivec_vcmpgtsd: 9676 if (Subtarget.hasP8Altivec()) 9677 CompareOpc = 967; 9678 else 9679 return false; 9680 break; 9681 case Intrinsic::ppc_altivec_vcmpgtub: 9682 CompareOpc = 518; 9683 break; 9684 case Intrinsic::ppc_altivec_vcmpgtuh: 9685 CompareOpc = 582; 9686 break; 9687 case Intrinsic::ppc_altivec_vcmpgtuw: 9688 CompareOpc = 646; 9689 break; 9690 case Intrinsic::ppc_altivec_vcmpgtud: 9691 if (Subtarget.hasP8Altivec()) 9692 CompareOpc = 711; 9693 else 9694 return false; 9695 break; 9696 } 9697 return true; 9698 } 9699 9700 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 9701 /// lower, do it, otherwise return null. 9702 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 9703 SelectionDAG &DAG) const { 9704 unsigned IntrinsicID = 9705 cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 9706 9707 SDLoc dl(Op); 9708 9709 if (IntrinsicID == Intrinsic::thread_pointer) { 9710 // Reads the thread pointer register, used for __builtin_thread_pointer. 9711 if (Subtarget.isPPC64()) 9712 return DAG.getRegister(PPC::X13, MVT::i64); 9713 return DAG.getRegister(PPC::R2, MVT::i32); 9714 } 9715 9716 // If this is a lowered altivec predicate compare, CompareOpc is set to the 9717 // opcode number of the comparison. 9718 int CompareOpc; 9719 bool isDot; 9720 if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget)) 9721 return SDValue(); // Don't custom lower most intrinsics. 9722 9723 // If this is a non-dot comparison, make the VCMP node and we are done. 
9724 if (!isDot) { 9725 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(), 9726 Op.getOperand(1), Op.getOperand(2), 9727 DAG.getConstant(CompareOpc, dl, MVT::i32)); 9728 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp); 9729 } 9730 9731 // Create the PPCISD altivec 'dot' comparison node. 9732 SDValue Ops[] = { 9733 Op.getOperand(2), // LHS 9734 Op.getOperand(3), // RHS 9735 DAG.getConstant(CompareOpc, dl, MVT::i32) 9736 }; 9737 EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue }; 9738 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 9739 9740 // Now that we have the comparison, emit a copy from the CR to a GPR. 9741 // This is flagged to the above dot comparison. 9742 SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32, 9743 DAG.getRegister(PPC::CR6, MVT::i32), 9744 CompNode.getValue(1)); 9745 9746 // Unpack the result based on how the target uses it. 9747 unsigned BitNo; // Bit # of CR6. 9748 bool InvertBit; // Invert result? 9749 switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) { 9750 default: // Can't happen, don't crash on invalid number though. 9751 case 0: // Return the value of the EQ bit of CR6. 9752 BitNo = 0; InvertBit = false; 9753 break; 9754 case 1: // Return the inverted value of the EQ bit of CR6. 9755 BitNo = 0; InvertBit = true; 9756 break; 9757 case 2: // Return the value of the LT bit of CR6. 9758 BitNo = 2; InvertBit = false; 9759 break; 9760 case 3: // Return the inverted value of the LT bit of CR6. 9761 BitNo = 2; InvertBit = true; 9762 break; 9763 } 9764 9765 // Shift the bit into the low position. 9766 Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags, 9767 DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32)); 9768 // Isolate the bit. 9769 Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags, 9770 DAG.getConstant(1, dl, MVT::i32)); 9771 9772 // If we are supposed to, toggle the bit. 9773 if (InvertBit) 9774 Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags, 9775 DAG.getConstant(1, dl, MVT::i32)); 9776 return Flags; 9777 } 9778 9779 SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op, 9780 SelectionDAG &DAG) const { 9781 // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to 9782 // the beginning of the argument list. 9783 int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1; 9784 SDLoc DL(Op); 9785 switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) { 9786 case Intrinsic::ppc_cfence: { 9787 assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument."); 9788 assert(Subtarget.isPPC64() && "Only 64-bit is supported for now."); 9789 return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other, 9790 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, 9791 Op.getOperand(ArgStart + 1)), 9792 Op.getOperand(0)), 9793 0); 9794 } 9795 default: 9796 break; 9797 } 9798 return SDValue(); 9799 } 9800 9801 SDValue PPCTargetLowering::LowerREM(SDValue Op, SelectionDAG &DAG) const { 9802 // Check for a DIV with the same operands as this REM. 9803 for (auto UI : Op.getOperand(1)->uses()) { 9804 if ((Op.getOpcode() == ISD::SREM && UI->getOpcode() == ISD::SDIV) || 9805 (Op.getOpcode() == ISD::UREM && UI->getOpcode() == ISD::UDIV)) 9806 if (UI->getOperand(0) == Op.getOperand(0) && 9807 UI->getOperand(1) == Op.getOperand(1)) 9808 return SDValue(); 9809 } 9810 return Op; 9811 } 9812 9813 // Lower scalar BSWAP64 to xxbrd. 
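// The sequence is: splat the scalar into both doublewords of a vector
// register (MTVSRDD), byte-swap the vector (XXBRD), and move the swapped
// doubleword back to a GPR (MFVSRD); the element index below selects the
// correct doubleword for the target endianness.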
9814 SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const { 9815 SDLoc dl(Op); 9816 // MTVSRDD 9817 Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0), 9818 Op.getOperand(0)); 9819 // XXBRD 9820 Op = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Op); 9821 // MFVSRD 9822 int VectorIndex = 0; 9823 if (Subtarget.isLittleEndian()) 9824 VectorIndex = 1; 9825 Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op, 9826 DAG.getTargetConstant(VectorIndex, dl, MVT::i32)); 9827 return Op; 9828 } 9829 9830 // ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be 9831 // compared to a value that is atomically loaded (atomic loads zero-extend). 9832 SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, 9833 SelectionDAG &DAG) const { 9834 assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP && 9835 "Expecting an atomic compare-and-swap here."); 9836 SDLoc dl(Op); 9837 auto *AtomicNode = cast<AtomicSDNode>(Op.getNode()); 9838 EVT MemVT = AtomicNode->getMemoryVT(); 9839 if (MemVT.getSizeInBits() >= 32) 9840 return Op; 9841 9842 SDValue CmpOp = Op.getOperand(2); 9843 // If this is already correctly zero-extended, leave it alone. 9844 auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits()); 9845 if (DAG.MaskedValueIsZero(CmpOp, HighBits)) 9846 return Op; 9847 9848 // Clear the high bits of the compare operand. 9849 unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1; 9850 SDValue NewCmpOp = 9851 DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp, 9852 DAG.getConstant(MaskVal, dl, MVT::i32)); 9853 9854 // Replace the existing compare operand with the properly zero-extended one. 9855 SmallVector<SDValue, 4> Ops; 9856 for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++) 9857 Ops.push_back(AtomicNode->getOperand(i)); 9858 Ops[2] = NewCmpOp; 9859 MachineMemOperand *MMO = AtomicNode->getMemOperand(); 9860 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other); 9861 auto NodeTy = 9862 (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16; 9863 return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO); 9864 } 9865 9866 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, 9867 SelectionDAG &DAG) const { 9868 SDLoc dl(Op); 9869 // Create a stack slot that is 16-byte aligned. 9870 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 9871 int FrameIdx = MFI.CreateStackObject(16, 16, false); 9872 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 9873 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 9874 9875 // Store the input value into Value#0 of the stack slot. 9876 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 9877 MachinePointerInfo()); 9878 // Load it out. 9879 return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo()); 9880 } 9881 9882 SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, 9883 SelectionDAG &DAG) const { 9884 assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT && 9885 "Should only be called for ISD::INSERT_VECTOR_ELT"); 9886 9887 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 9888 // We have legal lowering for constant indices but not for variable ones. 9889 if (!C) 9890 return SDValue(); 9891 9892 EVT VT = Op.getValueType(); 9893 SDLoc dl(Op); 9894 SDValue V1 = Op.getOperand(0); 9895 SDValue V2 = Op.getOperand(1); 9896 // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types. 
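  // For example, inserting element 3 of a v8i16 gives byte offset 6, which on
  // little endian is mirrored to (16 - 2) - 6 == 8 for the big-endian-biased
  // vector insert instruction.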
9897 if (VT == MVT::v8i16 || VT == MVT::v16i8) { 9898 SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2); 9899 unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8; 9900 unsigned InsertAtElement = C->getZExtValue(); 9901 unsigned InsertAtByte = InsertAtElement * BytesInEachElement; 9902 if (Subtarget.isLittleEndian()) { 9903 InsertAtByte = (16 - BytesInEachElement) - InsertAtByte; 9904 } 9905 return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz, 9906 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 9907 } 9908 return Op; 9909 } 9910 9911 SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, 9912 SelectionDAG &DAG) const { 9913 SDLoc dl(Op); 9914 SDNode *N = Op.getNode(); 9915 9916 assert(N->getOperand(0).getValueType() == MVT::v4i1 && 9917 "Unknown extract_vector_elt type"); 9918 9919 SDValue Value = N->getOperand(0); 9920 9921 // The first part of this is like the store lowering except that we don't 9922 // need to track the chain. 9923 9924 // The values are now known to be -1 (false) or 1 (true). To convert this 9925 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 9926 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 9927 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 9928 9929 // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to 9930 // understand how to form the extending load. 9931 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64); 9932 9933 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 9934 9935 // Now convert to an integer and store. 9936 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 9937 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32), 9938 Value); 9939 9940 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 9941 int FrameIdx = MFI.CreateStackObject(16, 16, false); 9942 MachinePointerInfo PtrInfo = 9943 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 9944 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 9945 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 9946 9947 SDValue StoreChain = DAG.getEntryNode(); 9948 SDValue Ops[] = {StoreChain, 9949 DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32), 9950 Value, FIdx}; 9951 SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other); 9952 9953 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, 9954 dl, VTs, Ops, MVT::v4i32, PtrInfo); 9955 9956 // Extract the value requested. 9957 unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 9958 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 9959 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 9960 9961 SDValue IntVal = 9962 DAG.getLoad(MVT::i32, dl, StoreChain, Idx, PtrInfo.getWithOffset(Offset)); 9963 9964 if (!Subtarget.useCRBits()) 9965 return IntVal; 9966 9967 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal); 9968 } 9969 9970 /// Lowering for QPX v4i1 loads 9971 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op, 9972 SelectionDAG &DAG) const { 9973 SDLoc dl(Op); 9974 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode()); 9975 SDValue LoadChain = LN->getChain(); 9976 SDValue BasePtr = LN->getBasePtr(); 9977 9978 if (Op.getValueType() == MVT::v4f64 || 9979 Op.getValueType() == MVT::v4f32) { 9980 EVT MemVT = LN->getMemoryVT(); 9981 unsigned Alignment = LN->getAlignment(); 9982 9983 // If this load is properly aligned, then it is legal. 
9984 if (Alignment >= MemVT.getStoreSize()) 9985 return Op; 9986 9987 EVT ScalarVT = Op.getValueType().getScalarType(), 9988 ScalarMemVT = MemVT.getScalarType(); 9989 unsigned Stride = ScalarMemVT.getStoreSize(); 9990 9991 SDValue Vals[4], LoadChains[4]; 9992 for (unsigned Idx = 0; Idx < 4; ++Idx) { 9993 SDValue Load; 9994 if (ScalarVT != ScalarMemVT) 9995 Load = DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain, 9996 BasePtr, 9997 LN->getPointerInfo().getWithOffset(Idx * Stride), 9998 ScalarMemVT, MinAlign(Alignment, Idx * Stride), 9999 LN->getMemOperand()->getFlags(), LN->getAAInfo()); 10000 else 10001 Load = DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr, 10002 LN->getPointerInfo().getWithOffset(Idx * Stride), 10003 MinAlign(Alignment, Idx * Stride), 10004 LN->getMemOperand()->getFlags(), LN->getAAInfo()); 10005 10006 if (Idx == 0 && LN->isIndexed()) { 10007 assert(LN->getAddressingMode() == ISD::PRE_INC && 10008 "Unknown addressing mode on vector load"); 10009 Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(), 10010 LN->getAddressingMode()); 10011 } 10012 10013 Vals[Idx] = Load; 10014 LoadChains[Idx] = Load.getValue(1); 10015 10016 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 10017 DAG.getConstant(Stride, dl, 10018 BasePtr.getValueType())); 10019 } 10020 10021 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 10022 SDValue Value = DAG.getBuildVector(Op.getValueType(), dl, Vals); 10023 10024 if (LN->isIndexed()) { 10025 SDValue RetOps[] = { Value, Vals[0].getValue(1), TF }; 10026 return DAG.getMergeValues(RetOps, dl); 10027 } 10028 10029 SDValue RetOps[] = { Value, TF }; 10030 return DAG.getMergeValues(RetOps, dl); 10031 } 10032 10033 assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower"); 10034 assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported"); 10035 10036 // To lower v4i1 from a byte array, we load the byte elements of the 10037 // vector and then reuse the BUILD_VECTOR logic. 10038 10039 SDValue VectElmts[4], VectElmtChains[4]; 10040 for (unsigned i = 0; i < 4; ++i) { 10041 SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType()); 10042 Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx); 10043 10044 VectElmts[i] = DAG.getExtLoad( 10045 ISD::EXTLOAD, dl, MVT::i32, LoadChain, Idx, 10046 LN->getPointerInfo().getWithOffset(i), MVT::i8, 10047 /* Alignment = */ 1, LN->getMemOperand()->getFlags(), LN->getAAInfo()); 10048 VectElmtChains[i] = VectElmts[i].getValue(1); 10049 } 10050 10051 LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains); 10052 SDValue Value = DAG.getBuildVector(MVT::v4i1, dl, VectElmts); 10053 10054 SDValue RVals[] = { Value, LoadChain }; 10055 return DAG.getMergeValues(RVals, dl); 10056 } 10057 10058 /// Lowering for QPX v4i1 stores 10059 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op, 10060 SelectionDAG &DAG) const { 10061 SDLoc dl(Op); 10062 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode()); 10063 SDValue StoreChain = SN->getChain(); 10064 SDValue BasePtr = SN->getBasePtr(); 10065 SDValue Value = SN->getValue(); 10066 10067 if (Value.getValueType() == MVT::v4f64 || 10068 Value.getValueType() == MVT::v4f32) { 10069 EVT MemVT = SN->getMemoryVT(); 10070 unsigned Alignment = SN->getAlignment(); 10071 10072 // If this store is properly aligned, then it is legal. 
10073 if (Alignment >= MemVT.getStoreSize()) 10074 return Op; 10075 10076 EVT ScalarVT = Value.getValueType().getScalarType(), 10077 ScalarMemVT = MemVT.getScalarType(); 10078 unsigned Stride = ScalarMemVT.getStoreSize(); 10079 10080 SDValue Stores[4]; 10081 for (unsigned Idx = 0; Idx < 4; ++Idx) { 10082 SDValue Ex = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value, 10083 DAG.getVectorIdxConstant(Idx, dl)); 10084 SDValue Store; 10085 if (ScalarVT != ScalarMemVT) 10086 Store = 10087 DAG.getTruncStore(StoreChain, dl, Ex, BasePtr, 10088 SN->getPointerInfo().getWithOffset(Idx * Stride), 10089 ScalarMemVT, MinAlign(Alignment, Idx * Stride), 10090 SN->getMemOperand()->getFlags(), SN->getAAInfo()); 10091 else 10092 Store = DAG.getStore(StoreChain, dl, Ex, BasePtr, 10093 SN->getPointerInfo().getWithOffset(Idx * Stride), 10094 MinAlign(Alignment, Idx * Stride), 10095 SN->getMemOperand()->getFlags(), SN->getAAInfo()); 10096 10097 if (Idx == 0 && SN->isIndexed()) { 10098 assert(SN->getAddressingMode() == ISD::PRE_INC && 10099 "Unknown addressing mode on vector store"); 10100 Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(), 10101 SN->getAddressingMode()); 10102 } 10103 10104 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 10105 DAG.getConstant(Stride, dl, 10106 BasePtr.getValueType())); 10107 Stores[Idx] = Store; 10108 } 10109 10110 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 10111 10112 if (SN->isIndexed()) { 10113 SDValue RetOps[] = { TF, Stores[0].getValue(1) }; 10114 return DAG.getMergeValues(RetOps, dl); 10115 } 10116 10117 return TF; 10118 } 10119 10120 assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported"); 10121 assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower"); 10122 10123 // The values are now known to be -1 (false) or 1 (true). To convert this 10124 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 10125 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 10126 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 10127 10128 // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to 10129 // understand how to form the extending load. 10130 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64); 10131 10132 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 10133 10134 // Now convert to an integer and store. 10135 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 10136 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32), 10137 Value); 10138 10139 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 10140 int FrameIdx = MFI.CreateStackObject(16, 16, false); 10141 MachinePointerInfo PtrInfo = 10142 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 10143 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 10144 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 10145 10146 SDValue Ops[] = {StoreChain, 10147 DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32), 10148 Value, FIdx}; 10149 SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other); 10150 10151 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, 10152 dl, VTs, Ops, MVT::v4i32, PtrInfo); 10153 10154 // Move data into the byte array. 
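  // The qvstfiw above stored four 32-bit words into the stack slot; load each
  // word back and truncating-store its low byte to the real destination.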
10155 SDValue Loads[4], LoadChains[4]; 10156 for (unsigned i = 0; i < 4; ++i) { 10157 unsigned Offset = 4*i; 10158 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 10159 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 10160 10161 Loads[i] = DAG.getLoad(MVT::i32, dl, StoreChain, Idx, 10162 PtrInfo.getWithOffset(Offset)); 10163 LoadChains[i] = Loads[i].getValue(1); 10164 } 10165 10166 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 10167 10168 SDValue Stores[4]; 10169 for (unsigned i = 0; i < 4; ++i) { 10170 SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType()); 10171 Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx); 10172 10173 Stores[i] = DAG.getTruncStore( 10174 StoreChain, dl, Loads[i], Idx, SN->getPointerInfo().getWithOffset(i), 10175 MVT::i8, /* Alignment = */ 1, SN->getMemOperand()->getFlags(), 10176 SN->getAAInfo()); 10177 } 10178 10179 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 10180 10181 return StoreChain; 10182 } 10183 10184 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { 10185 SDLoc dl(Op); 10186 if (Op.getValueType() == MVT::v4i32) { 10187 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 10188 10189 SDValue Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG, dl); 10190 SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);//+16 as shift amt. 10191 10192 SDValue RHSSwap = // = vrlw RHS, 16 10193 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl); 10194 10195 // Shrinkify inputs to v8i16. 10196 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS); 10197 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS); 10198 RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap); 10199 10200 // Low parts multiplied together, generating 32-bit results (we ignore the 10201 // top parts). 10202 SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh, 10203 LHS, RHS, DAG, dl, MVT::v4i32); 10204 10205 SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm, 10206 LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32); 10207 // Shift the high parts up 16 bits. 10208 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, 10209 Neg16, DAG, dl); 10210 return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd); 10211 } else if (Op.getValueType() == MVT::v8i16) { 10212 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 10213 10214 SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl); 10215 10216 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm, 10217 LHS, RHS, Zero, DAG, dl); 10218 } else if (Op.getValueType() == MVT::v16i8) { 10219 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 10220 bool isLittleEndian = Subtarget.isLittleEndian(); 10221 10222 // Multiply the even 8-bit parts, producing 16-bit sums. 10223 SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub, 10224 LHS, RHS, DAG, dl, MVT::v8i16); 10225 EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts); 10226 10227 // Multiply the odd 8-bit parts, producing 16-bit sums. 10228 SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub, 10229 LHS, RHS, DAG, dl, MVT::v8i16); 10230 OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts); 10231 10232 // Merge the results together. Because vmuleub and vmuloub are 10233 // instructions with a big-endian bias, we must reverse the 10234 // element numbering and reverse the meaning of "odd" and "even" 10235 // when generating little endian code. 
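    // The resulting interleave mask is <1,17,3,19,...> for big endian
    // (taking the low-order byte of each 16-bit product) and <0,16,2,18,...>
    // for little endian.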
    int Ops[16];
    for (unsigned i = 0; i != 8; ++i) {
      if (isLittleEndian) {
        Ops[i*2  ] = 2*i;
        Ops[i*2+1] = 2*i+16;
      } else {
        Ops[i*2  ] = 2*i+1;
        Ops[i*2+1] = 2*i+1+16;
      }
    }
    if (isLittleEndian)
      return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
    else
      return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
  } else {
    llvm_unreachable("Unknown mul to lower!");
  }
}

SDValue PPCTargetLowering::LowerABS(SDValue Op, SelectionDAG &DAG) const {

  assert(Op.getOpcode() == ISD::ABS && "Should only be called for ISD::ABS");

  EVT VT = Op.getValueType();
  assert(VT.isVector() &&
         "Only vector abs is set as custom; scalar abs shouldn't reach here!");
  assert((VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
          VT == MVT::v16i8) &&
         "Unexpected vector element type!");
  assert((VT != MVT::v2i64 || Subtarget.hasP8Altivec()) &&
         "Current subtarget doesn't support smax v2i64!");

  // For vector abs, it can be lowered to:
  // abs x
  // ==>
  // y = -x
  // smax(x, y)

  SDLoc dl(Op);
  SDValue X = Op.getOperand(0);
  SDValue Zero = DAG.getConstant(0, dl, VT);
  SDValue Y = DAG.getNode(ISD::SUB, dl, VT, Zero, X);

  // The SMAX patch (https://reviews.llvm.org/D47332) hasn't landed yet, so
  // use the intrinsics for now.
  // TODO: Use ISD::SMAX directly once the SMAX patch has landed.
  Intrinsic::ID BifID = Intrinsic::ppc_altivec_vmaxsw;
  if (VT == MVT::v2i64)
    BifID = Intrinsic::ppc_altivec_vmaxsd;
  else if (VT == MVT::v8i16)
    BifID = Intrinsic::ppc_altivec_vmaxsh;
  else if (VT == MVT::v16i8)
    BifID = Intrinsic::ppc_altivec_vmaxsb;

  return BuildIntrinsicOp(BifID, X, Y, DAG, dl, VT);
}

// Custom lowering for fpext v2f32 to v2f64
SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {

  assert(Op.getOpcode() == ISD::FP_EXTEND &&
         "Should only be called for ISD::FP_EXTEND");

  // We only want to custom lower an extend from v2f32 to v2f64.
  if (Op.getValueType() != MVT::v2f64 ||
      Op.getOperand(0).getValueType() != MVT::v2f32)
    return SDValue();

  SDLoc dl(Op);
  SDValue Op0 = Op.getOperand(0);

  switch (Op0.getOpcode()) {
  default:
    return SDValue();
  case ISD::EXTRACT_SUBVECTOR: {
    assert(Op0.getNumOperands() == 2 &&
           isa<ConstantSDNode>(Op0->getOperand(1)) &&
           "Node should have 2 operands with second one being a constant!");

    if (Op0.getOperand(0).getValueType() != MVT::v4f32)
      return SDValue();

    // Custom lowering is only done for the high or low doubleword.
    int Idx = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
    if (Idx % 2 != 0)
      return SDValue();

    // Since the input is v4f32, at this point Idx is either 0 or 2.
    // Shift to get the doubleword position we want.
    int DWord = Idx >> 1;

    // High and low word positions are different on little endian.
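    // (Vector elements are numbered from the opposite end of the register on
    // little endian, so the doubleword holding elements {0,1} in element
    // order is the other register doubleword; flip the index to match.)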
    if (Subtarget.isLittleEndian())
      DWord ^= 0x1;

    return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64,
                       Op0.getOperand(0), DAG.getConstant(DWord, dl, MVT::i32));
  }
  case ISD::FADD:
  case ISD::FMUL:
  case ISD::FSUB: {
    SDValue NewLoad[2];
    for (unsigned i = 0, ie = Op0.getNumOperands(); i != ie; ++i) {
      // Ensure both inputs are loads.
      SDValue LdOp = Op0.getOperand(i);
      if (LdOp.getOpcode() != ISD::LOAD)
        return SDValue();
      // Generate a new load node.
      LoadSDNode *LD = cast<LoadSDNode>(LdOp);
      SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
      NewLoad[i] = DAG.getMemIntrinsicNode(
          PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
          LD->getMemoryVT(), LD->getMemOperand());
    }
    SDValue NewOp =
        DAG.getNode(Op0.getOpcode(), SDLoc(Op0), MVT::v4f32, NewLoad[0],
                    NewLoad[1], Op0.getNode()->getFlags());
    return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewOp,
                       DAG.getConstant(0, dl, MVT::i32));
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op0);
    SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
    SDValue NewLd = DAG.getMemIntrinsicNode(
        PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
        LD->getMemoryVT(), LD->getMemOperand());
    return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewLd,
                       DAG.getConstant(0, dl, MVT::i32));
  }
  }
  llvm_unreachable("ERROR: Should return for all cases within switch.");
}

/// LowerOperation - Provide custom lowering hooks for some operations.
///
SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Wasn't expecting to be able to lower this!");
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
  case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
  case ISD::SETCC:              return LowerSETCC(Op, DAG);
  case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);

  // Variable argument lowering.
  case ISD::VASTART:            return LowerVASTART(Op, DAG);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::VACOPY:             return LowerVACOPY(Op, DAG);

  case ISD::STACKRESTORE:       return LowerSTACKRESTORE(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::GET_DYNAMIC_AREA_OFFSET:
    return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);

  // Exception handling lowering.
10394 case ISD::EH_DWARF_CFA: return LowerEH_DWARF_CFA(Op, DAG); 10395 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG); 10396 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG); 10397 10398 case ISD::LOAD: return LowerLOAD(Op, DAG); 10399 case ISD::STORE: return LowerSTORE(Op, DAG); 10400 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG); 10401 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 10402 case ISD::FP_TO_UINT: 10403 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, SDLoc(Op)); 10404 case ISD::UINT_TO_FP: 10405 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 10406 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 10407 10408 // Lower 64-bit shifts. 10409 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 10410 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 10411 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 10412 10413 // Vector-related lowering. 10414 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 10415 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 10416 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 10417 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 10418 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 10419 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 10420 case ISD::MUL: return LowerMUL(Op, DAG); 10421 case ISD::ABS: return LowerABS(Op, DAG); 10422 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG); 10423 10424 // For counter-based loop handling. 10425 case ISD::INTRINSIC_W_CHAIN: return SDValue(); 10426 10427 case ISD::BITCAST: return LowerBITCAST(Op, DAG); 10428 10429 // Frame & Return address. 10430 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 10431 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 10432 10433 case ISD::INTRINSIC_VOID: 10434 return LowerINTRINSIC_VOID(Op, DAG); 10435 case ISD::SREM: 10436 case ISD::UREM: 10437 return LowerREM(Op, DAG); 10438 case ISD::BSWAP: 10439 return LowerBSWAP(Op, DAG); 10440 case ISD::ATOMIC_CMP_SWAP: 10441 return LowerATOMIC_CMP_SWAP(Op, DAG); 10442 } 10443 } 10444 10445 void PPCTargetLowering::ReplaceNodeResults(SDNode *N, 10446 SmallVectorImpl<SDValue>&Results, 10447 SelectionDAG &DAG) const { 10448 SDLoc dl(N); 10449 switch (N->getOpcode()) { 10450 default: 10451 llvm_unreachable("Do not know how to custom type legalize this operation!"); 10452 case ISD::READCYCLECOUNTER: { 10453 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 10454 SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0)); 10455 10456 Results.push_back(RTB); 10457 Results.push_back(RTB.getValue(1)); 10458 Results.push_back(RTB.getValue(2)); 10459 break; 10460 } 10461 case ISD::INTRINSIC_W_CHAIN: { 10462 if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 10463 Intrinsic::loop_decrement) 10464 break; 10465 10466 assert(N->getValueType(0) == MVT::i1 && 10467 "Unexpected result type for CTR decrement intrinsic"); 10468 EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), 10469 N->getValueType(0)); 10470 SDVTList VTs = DAG.getVTList(SVT, MVT::Other); 10471 SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0), 10472 N->getOperand(1)); 10473 10474 Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt)); 10475 Results.push_back(NewInt.getValue(1)); 10476 break; 10477 } 10478 case ISD::VAARG: { 10479 if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64()) 10480 return; 10481 10482 EVT VT = 
N->getValueType(0);

    if (VT == MVT::i64) {
      SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);

      Results.push_back(NewNode);
      Results.push_back(NewNode.getValue(1));
    }
    return;
  }
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    // LowerFP_TO_INT() can only handle f32 and f64.
    if (N->getOperand(0).getValueType() == MVT::ppcf128)
      return;
    Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
    return;
  case ISD::TRUNCATE: {
    EVT TrgVT = N->getValueType(0);
    EVT OpVT = N->getOperand(0).getValueType();
    if (TrgVT.isVector() &&
        isOperationCustom(N->getOpcode(), TrgVT) &&
        OpVT.getSizeInBits() <= 128 &&
        isPowerOf2_32(OpVT.getVectorElementType().getSizeInBits()))
      Results.push_back(LowerTRUNCATEVector(SDValue(N, 0), DAG));
    return;
  }
  case ISD::BITCAST:
    // Don't handle bitcast here.
    return;
  }
}

//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *Func = Intrinsic::getDeclaration(M, Id);
  return Builder.CreateCall(Func, {});
}

// The mappings for emitLeading/TrailingFence are taken from
// http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                 Instruction *Inst,
                                                 AtomicOrdering Ord) const {
  if (Ord == AtomicOrdering::SequentiallyConsistent)
    return callIntrinsic(Builder, Intrinsic::ppc_sync);
  if (isReleaseOrStronger(Ord))
    return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
  return nullptr;
}

Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                  Instruction *Inst,
                                                  AtomicOrdering Ord) const {
  if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
    // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
    // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
    // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
    if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
      return Builder.CreateCall(
          Intrinsic::getDeclaration(
              Builder.GetInsertBlock()->getParent()->getParent(),
              Intrinsic::ppc_cfence, {Inst->getType()}),
          {Inst});
    // FIXME: Can use isync for rmw operations.
    return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
  }
  return nullptr;
}

MachineBasicBlock *
PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
                                    unsigned AtomicSize,
                                    unsigned BinOpcode,
                                    unsigned CmpOpcode,
                                    unsigned CmpPred) const {
  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
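  // The emitted pattern is a load-reserved/store-conditional retry loop:
  // l[bhwd]arx reloads the location, the optional BinOpcode computes the new
  // value, and st[bhwd]cx. retries the loop if the reservation was lost. For
  // min/max (CmpOpcode != 0), the loop instead compares first and exits early
  // when the value in memory is already the desired extreme, as sketched in
  // the block comments below.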
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  auto LoadMnemonic = PPC::LDARX;
  auto StoreMnemonic = PPC::STDCX;
  switch (AtomicSize) {
  default:
    llvm_unreachable("Unexpected size of atomic entity");
  case 1:
    LoadMnemonic = PPC::LBARX;
    StoreMnemonic = PPC::STBCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "Sizes below 4 bytes require partword atomics support");
    break;
  case 2:
    LoadMnemonic = PPC::LHARX;
    StoreMnemonic = PPC::STHCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "Sizes below 4 bytes require partword atomics support");
    break;
  case 4:
    LoadMnemonic = PPC::LWARX;
    StoreMnemonic = PPC::STWCX;
    break;
  case 8:
    LoadMnemonic = PPC::LDARX;
    StoreMnemonic = PPC::STDCX;
    break;
  }

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineFunction::iterator It = ++BB->getIterator();

  Register dest = MI.getOperand(0).getReg();
  Register ptrA = MI.getOperand(1).getReg();
  Register ptrB = MI.getOperand(2).getReg();
  Register incr = MI.getOperand(3).getReg();
  DebugLoc dl = MI.getDebugLoc();

  MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *loop2MBB =
      CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
  MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, loopMBB);
  if (CmpOpcode)
    F->insert(It, loop2MBB);
  F->insert(It, exitMBB);
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  MachineRegisterInfo &RegInfo = F->getRegInfo();
  Register TmpReg = (!BinOpcode) ? incr :
    RegInfo.createVirtualRegister(AtomicSize == 8 ? &PPC::G8RCRegClass
                                                  : &PPC::GPRCRegClass);

  //  thisMBB:
  //   ...
  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  //  loopMBB:
  //   l[wd]arx dest, ptr
  //   add r0, dest, incr
  //   st[wd]cx. r0, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB

  //  For max/min...
  //  loopMBB:
  //   l[wd]arx dest, ptr
  //   cmpl?[wd] incr, dest
  //   bgt exitMBB
  //  loop2MBB:
  //   st[wd]cx. incr, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB

  BB = loopMBB;
  BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
    .addReg(ptrA).addReg(ptrB);
  if (BinOpcode)
    BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
  if (CmpOpcode) {
    // Signed comparisons of byte or halfword values must be sign-extended.
    if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
      Register ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
      BuildMI(BB, dl, TII->get(AtomicSize == 1 ?
PPC::EXTSB : PPC::EXTSH), 10649 ExtReg).addReg(dest); 10650 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0) 10651 .addReg(incr).addReg(ExtReg); 10652 } else 10653 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0) 10654 .addReg(incr).addReg(dest); 10655 10656 BuildMI(BB, dl, TII->get(PPC::BCC)) 10657 .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB); 10658 BB->addSuccessor(loop2MBB); 10659 BB->addSuccessor(exitMBB); 10660 BB = loop2MBB; 10661 } 10662 BuildMI(BB, dl, TII->get(StoreMnemonic)) 10663 .addReg(TmpReg).addReg(ptrA).addReg(ptrB); 10664 BuildMI(BB, dl, TII->get(PPC::BCC)) 10665 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 10666 BB->addSuccessor(loopMBB); 10667 BB->addSuccessor(exitMBB); 10668 10669 // exitMBB: 10670 // ... 10671 BB = exitMBB; 10672 return BB; 10673 } 10674 10675 MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary( 10676 MachineInstr &MI, MachineBasicBlock *BB, 10677 bool is8bit, // operation 10678 unsigned BinOpcode, unsigned CmpOpcode, unsigned CmpPred) const { 10679 // If we support part-word atomic mnemonics, just use them 10680 if (Subtarget.hasPartwordAtomics()) 10681 return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, CmpOpcode, 10682 CmpPred); 10683 10684 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 10685 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 10686 // In 64 bit mode we have to use 64 bits for addresses, even though the 10687 // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address 10688 // registers without caring whether they're 32 or 64, but here we're 10689 // doing actual arithmetic on the addresses. 10690 bool is64bit = Subtarget.isPPC64(); 10691 bool isLittleEndian = Subtarget.isLittleEndian(); 10692 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 10693 10694 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 10695 MachineFunction *F = BB->getParent(); 10696 MachineFunction::iterator It = ++BB->getIterator(); 10697 10698 Register dest = MI.getOperand(0).getReg(); 10699 Register ptrA = MI.getOperand(1).getReg(); 10700 Register ptrB = MI.getOperand(2).getReg(); 10701 Register incr = MI.getOperand(3).getReg(); 10702 DebugLoc dl = MI.getDebugLoc(); 10703 10704 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 10705 MachineBasicBlock *loop2MBB = 10706 CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr; 10707 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 10708 F->insert(It, loopMBB); 10709 if (CmpOpcode) 10710 F->insert(It, loop2MBB); 10711 F->insert(It, exitMBB); 10712 exitMBB->splice(exitMBB->begin(), BB, 10713 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 10714 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 10715 10716 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10717 const TargetRegisterClass *RC = 10718 is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; 10719 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; 10720 10721 Register PtrReg = RegInfo.createVirtualRegister(RC); 10722 Register Shift1Reg = RegInfo.createVirtualRegister(GPRC); 10723 Register ShiftReg = 10724 isLittleEndian ? 
Shift1Reg : RegInfo.createVirtualRegister(GPRC); 10725 Register Incr2Reg = RegInfo.createVirtualRegister(GPRC); 10726 Register MaskReg = RegInfo.createVirtualRegister(GPRC); 10727 Register Mask2Reg = RegInfo.createVirtualRegister(GPRC); 10728 Register Mask3Reg = RegInfo.createVirtualRegister(GPRC); 10729 Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC); 10730 Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC); 10731 Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC); 10732 Register TmpDestReg = RegInfo.createVirtualRegister(GPRC); 10733 Register Ptr1Reg; 10734 Register TmpReg = 10735 (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC); 10736 10737 // thisMBB: 10738 // ... 10739 // fallthrough --> loopMBB 10740 BB->addSuccessor(loopMBB); 10741 10742 // The 4-byte load must be aligned, while a char or short may be 10743 // anywhere in the word. Hence all this nasty bookkeeping code. 10744 // add ptr1, ptrA, ptrB [copy if ptrA==0] 10745 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 10746 // xori shift, shift1, 24 [16] 10747 // rlwinm ptr, ptr1, 0, 0, 29 10748 // slw incr2, incr, shift 10749 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 10750 // slw mask, mask2, shift 10751 // loopMBB: 10752 // lwarx tmpDest, ptr 10753 // add tmp, tmpDest, incr2 10754 // andc tmp2, tmpDest, mask 10755 // and tmp3, tmp, mask 10756 // or tmp4, tmp3, tmp2 10757 // stwcx. tmp4, ptr 10758 // bne- loopMBB 10759 // fallthrough --> exitMBB 10760 // srw dest, tmpDest, shift 10761 if (ptrA != ZeroReg) { 10762 Ptr1Reg = RegInfo.createVirtualRegister(RC); 10763 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 10764 .addReg(ptrA) 10765 .addReg(ptrB); 10766 } else { 10767 Ptr1Reg = ptrB; 10768 } 10769 // We need use 32-bit subregister to avoid mismatch register class in 64-bit 10770 // mode. 10771 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg) 10772 .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0) 10773 .addImm(3) 10774 .addImm(27) 10775 .addImm(is8bit ? 28 : 27); 10776 if (!isLittleEndian) 10777 BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg) 10778 .addReg(Shift1Reg) 10779 .addImm(is8bit ? 24 : 16); 10780 if (is64bit) 10781 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 10782 .addReg(Ptr1Reg) 10783 .addImm(0) 10784 .addImm(61); 10785 else 10786 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 10787 .addReg(Ptr1Reg) 10788 .addImm(0) 10789 .addImm(0) 10790 .addImm(29); 10791 BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg).addReg(incr).addReg(ShiftReg); 10792 if (is8bit) 10793 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 10794 else { 10795 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 10796 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg) 10797 .addReg(Mask3Reg) 10798 .addImm(65535); 10799 } 10800 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 10801 .addReg(Mask2Reg) 10802 .addReg(ShiftReg); 10803 10804 BB = loopMBB; 10805 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 10806 .addReg(ZeroReg) 10807 .addReg(PtrReg); 10808 if (BinOpcode) 10809 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg) 10810 .addReg(Incr2Reg) 10811 .addReg(TmpDestReg); 10812 BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg) 10813 .addReg(TmpDestReg) 10814 .addReg(MaskReg); 10815 BuildMI(BB, dl, TII->get(PPC::AND), Tmp3Reg).addReg(TmpReg).addReg(MaskReg); 10816 if (CmpOpcode) { 10817 // For unsigned comparisons, we can directly compare the shifted values. 10818 // For signed comparisons we shift and sign extend. 
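    // (Both operands were shifted left by the same amount, so their unsigned
    // order is preserved and they can be compared in place, assuming the
    // incoming value is appropriately zero-extended; for the signed case, the
    // value is shifted back to the low bits and sign-extended so the
    // comparison sees the proper sign bit.)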
10819 Register SReg = RegInfo.createVirtualRegister(GPRC); 10820 BuildMI(BB, dl, TII->get(PPC::AND), SReg) 10821 .addReg(TmpDestReg) 10822 .addReg(MaskReg); 10823 unsigned ValueReg = SReg; 10824 unsigned CmpReg = Incr2Reg; 10825 if (CmpOpcode == PPC::CMPW) { 10826 ValueReg = RegInfo.createVirtualRegister(GPRC); 10827 BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg) 10828 .addReg(SReg) 10829 .addReg(ShiftReg); 10830 Register ValueSReg = RegInfo.createVirtualRegister(GPRC); 10831 BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg) 10832 .addReg(ValueReg); 10833 ValueReg = ValueSReg; 10834 CmpReg = incr; 10835 } 10836 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0) 10837 .addReg(CmpReg) 10838 .addReg(ValueReg); 10839 BuildMI(BB, dl, TII->get(PPC::BCC)) 10840 .addImm(CmpPred) 10841 .addReg(PPC::CR0) 10842 .addMBB(exitMBB); 10843 BB->addSuccessor(loop2MBB); 10844 BB->addSuccessor(exitMBB); 10845 BB = loop2MBB; 10846 } 10847 BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg).addReg(Tmp3Reg).addReg(Tmp2Reg); 10848 BuildMI(BB, dl, TII->get(PPC::STWCX)) 10849 .addReg(Tmp4Reg) 10850 .addReg(ZeroReg) 10851 .addReg(PtrReg); 10852 BuildMI(BB, dl, TII->get(PPC::BCC)) 10853 .addImm(PPC::PRED_NE) 10854 .addReg(PPC::CR0) 10855 .addMBB(loopMBB); 10856 BB->addSuccessor(loopMBB); 10857 BB->addSuccessor(exitMBB); 10858 10859 // exitMBB: 10860 // ... 10861 BB = exitMBB; 10862 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest) 10863 .addReg(TmpDestReg) 10864 .addReg(ShiftReg); 10865 return BB; 10866 } 10867 10868 llvm::MachineBasicBlock * 10869 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI, 10870 MachineBasicBlock *MBB) const { 10871 DebugLoc DL = MI.getDebugLoc(); 10872 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 10873 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 10874 10875 MachineFunction *MF = MBB->getParent(); 10876 MachineRegisterInfo &MRI = MF->getRegInfo(); 10877 10878 const BasicBlock *BB = MBB->getBasicBlock(); 10879 MachineFunction::iterator I = ++MBB->getIterator(); 10880 10881 Register DstReg = MI.getOperand(0).getReg(); 10882 const TargetRegisterClass *RC = MRI.getRegClass(DstReg); 10883 assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!"); 10884 Register mainDstReg = MRI.createVirtualRegister(RC); 10885 Register restoreDstReg = MRI.createVirtualRegister(RC); 10886 10887 MVT PVT = getPointerTy(MF->getDataLayout()); 10888 assert((PVT == MVT::i64 || PVT == MVT::i32) && 10889 "Invalid Pointer Size!"); 10890 // For v = setjmp(buf), we generate 10891 // 10892 // thisMBB: 10893 // SjLjSetup mainMBB 10894 // bl mainMBB 10895 // v_restore = 1 10896 // b sinkMBB 10897 // 10898 // mainMBB: 10899 // buf[LabelOffset] = LR 10900 // v_main = 0 10901 // 10902 // sinkMBB: 10903 // v = phi(main, restore) 10904 // 10905 10906 MachineBasicBlock *thisMBB = MBB; 10907 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 10908 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 10909 MF->insert(I, mainMBB); 10910 MF->insert(I, sinkMBB); 10911 10912 MachineInstrBuilder MIB; 10913 10914 // Transfer the remainder of BB and its successor edges to sinkMBB. 10915 sinkMBB->splice(sinkMBB->begin(), MBB, 10916 std::next(MachineBasicBlock::iterator(MI)), MBB->end()); 10917 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 10918 10919 // Note that the structure of the jmp_buf used here is not compatible 10920 // with that used by libc, and is not designed to be. 
Specifically, it
  // stores only those 'reserved' registers that LLVM does not otherwise
  // understand how to spill. Also, by convention, by the time this
  // intrinsic is called, Clang has already stored the frame address in the
  // first slot of the buffer and stack address in the third. Following the
  // X86 target code, we'll store the jump address in the second slot. We also
  // need to save the TOC pointer (R2) to handle jumps between shared
  // libraries, and that will be stored in the fourth slot. The thread
  // identifier (R13) is not affected.

  // thisMBB:
  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  const int64_t TOCOffset = 3 * PVT.getStoreSize();
  const int64_t BPOffset = 4 * PVT.getStoreSize();

  // Prepare the IP in a register.
  const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
  Register LabelReg = MRI.createVirtualRegister(PtrRC);
  Register BufReg = MI.getOperand(1).getReg();

  if (Subtarget.is64BitELFABI()) {
    setUsesTOCBasePtr(*MBB->getParent());
    MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
              .addReg(PPC::X2)
              .addImm(TOCOffset)
              .addReg(BufReg)
              .cloneMemRefs(MI);
  }

  // Naked functions never have a base pointer, and so we use r1. For all
  // other functions, this decision must be deferred until PEI.
  unsigned BaseReg;
  if (MF->getFunction().hasFnAttribute(Attribute::Naked))
    BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
  else
    BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;

  MIB = BuildMI(*thisMBB, MI, DL,
                TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
            .addReg(BaseReg)
            .addImm(BPOffset)
            .addReg(BufReg)
            .cloneMemRefs(MI);

  // Setup
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
  MIB.addRegMask(TRI->getNoPreservedMask());

  BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);

  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
            .addMBB(mainMBB);
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);

  thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
  thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());

  // mainMBB:
  //  mainDstReg = 0
  MIB =
      BuildMI(mainMBB, DL,
              TII->get(Subtarget.isPPC64() ?
PPC::MFLR8 : PPC::MFLR), LabelReg); 10982 10983 // Store IP 10984 if (Subtarget.isPPC64()) { 10985 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD)) 10986 .addReg(LabelReg) 10987 .addImm(LabelOffset) 10988 .addReg(BufReg); 10989 } else { 10990 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW)) 10991 .addReg(LabelReg) 10992 .addImm(LabelOffset) 10993 .addReg(BufReg); 10994 } 10995 MIB.cloneMemRefs(MI); 10996 10997 BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0); 10998 mainMBB->addSuccessor(sinkMBB); 10999 11000 // sinkMBB: 11001 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 11002 TII->get(PPC::PHI), DstReg) 11003 .addReg(mainDstReg).addMBB(mainMBB) 11004 .addReg(restoreDstReg).addMBB(thisMBB); 11005 11006 MI.eraseFromParent(); 11007 return sinkMBB; 11008 } 11009 11010 MachineBasicBlock * 11011 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI, 11012 MachineBasicBlock *MBB) const { 11013 DebugLoc DL = MI.getDebugLoc(); 11014 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 11015 11016 MachineFunction *MF = MBB->getParent(); 11017 MachineRegisterInfo &MRI = MF->getRegInfo(); 11018 11019 MVT PVT = getPointerTy(MF->getDataLayout()); 11020 assert((PVT == MVT::i64 || PVT == MVT::i32) && 11021 "Invalid Pointer Size!"); 11022 11023 const TargetRegisterClass *RC = 11024 (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; 11025 Register Tmp = MRI.createVirtualRegister(RC); 11026 // Since FP is only updated here but NOT referenced, it's treated as GPR. 11027 unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31; 11028 unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1; 11029 unsigned BP = 11030 (PVT == MVT::i64) 11031 ? PPC::X30 11032 : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29 11033 : PPC::R30); 11034 11035 MachineInstrBuilder MIB; 11036 11037 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 11038 const int64_t SPOffset = 2 * PVT.getStoreSize(); 11039 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 11040 const int64_t BPOffset = 4 * PVT.getStoreSize(); 11041 11042 Register BufReg = MI.getOperand(0).getReg(); 11043 11044 // Reload FP (the jumped-to function may not have had a 11045 // frame pointer, and if so, then its r31 will be restored 11046 // as necessary). 
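  // The buffer layout, in PVT-sized slots, matches what emitEHSjLjSetJmp
  // stores: slot 0 = frame pointer, 1 = IP (LabelOffset), 2 = SP, 3 = TOC
  // (R2), 4 = BP.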
11047 if (PVT == MVT::i64) { 11048 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP) 11049 .addImm(0) 11050 .addReg(BufReg); 11051 } else { 11052 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP) 11053 .addImm(0) 11054 .addReg(BufReg); 11055 } 11056 MIB.cloneMemRefs(MI); 11057 11058 // Reload IP 11059 if (PVT == MVT::i64) { 11060 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp) 11061 .addImm(LabelOffset) 11062 .addReg(BufReg); 11063 } else { 11064 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp) 11065 .addImm(LabelOffset) 11066 .addReg(BufReg); 11067 } 11068 MIB.cloneMemRefs(MI); 11069 11070 // Reload SP 11071 if (PVT == MVT::i64) { 11072 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP) 11073 .addImm(SPOffset) 11074 .addReg(BufReg); 11075 } else { 11076 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP) 11077 .addImm(SPOffset) 11078 .addReg(BufReg); 11079 } 11080 MIB.cloneMemRefs(MI); 11081 11082 // Reload BP 11083 if (PVT == MVT::i64) { 11084 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP) 11085 .addImm(BPOffset) 11086 .addReg(BufReg); 11087 } else { 11088 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP) 11089 .addImm(BPOffset) 11090 .addReg(BufReg); 11091 } 11092 MIB.cloneMemRefs(MI); 11093 11094 // Reload TOC 11095 if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) { 11096 setUsesTOCBasePtr(*MBB->getParent()); 11097 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2) 11098 .addImm(TOCOffset) 11099 .addReg(BufReg) 11100 .cloneMemRefs(MI); 11101 } 11102 11103 // Jump 11104 BuildMI(*MBB, MI, DL, 11105 TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp); 11106 BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR)); 11107 11108 MI.eraseFromParent(); 11109 return MBB; 11110 } 11111 11112 MachineBasicBlock * 11113 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, 11114 MachineBasicBlock *BB) const { 11115 if (MI.getOpcode() == TargetOpcode::STACKMAP || 11116 MI.getOpcode() == TargetOpcode::PATCHPOINT) { 11117 if (Subtarget.is64BitELFABI() && 11118 MI.getOpcode() == TargetOpcode::PATCHPOINT) { 11119 // Call lowering should have added an r2 operand to indicate a dependence 11120 // on the TOC base pointer value. It can't however, because there is no 11121 // way to mark the dependence as implicit there, and so the stackmap code 11122 // will confuse it with a regular operand. Instead, add the dependence 11123 // here. 11124 MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true)); 11125 } 11126 11127 return emitPatchPoint(MI, BB); 11128 } 11129 11130 if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 || 11131 MI.getOpcode() == PPC::EH_SjLj_SetJmp64) { 11132 return emitEHSjLjSetJmp(MI, BB); 11133 } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 || 11134 MI.getOpcode() == PPC::EH_SjLj_LongJmp64) { 11135 return emitEHSjLjLongJmp(MI, BB); 11136 } 11137 11138 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 11139 11140 // To "insert" these instructions we actually have to insert their 11141 // control-flow patterns. 
11142 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 11143 MachineFunction::iterator It = ++BB->getIterator(); 11144 11145 MachineFunction *F = BB->getParent(); 11146 11147 if (MI.getOpcode() == PPC::SELECT_CC_I4 || 11148 MI.getOpcode() == PPC::SELECT_CC_I8 || MI.getOpcode() == PPC::SELECT_I4 || 11149 MI.getOpcode() == PPC::SELECT_I8) { 11150 SmallVector<MachineOperand, 2> Cond; 11151 if (MI.getOpcode() == PPC::SELECT_CC_I4 || 11152 MI.getOpcode() == PPC::SELECT_CC_I8) 11153 Cond.push_back(MI.getOperand(4)); 11154 else 11155 Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET)); 11156 Cond.push_back(MI.getOperand(1)); 11157 11158 DebugLoc dl = MI.getDebugLoc(); 11159 TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond, 11160 MI.getOperand(2).getReg(), MI.getOperand(3).getReg()); 11161 } else if (MI.getOpcode() == PPC::SELECT_CC_F4 || 11162 MI.getOpcode() == PPC::SELECT_CC_F8 || 11163 MI.getOpcode() == PPC::SELECT_CC_F16 || 11164 MI.getOpcode() == PPC::SELECT_CC_QFRC || 11165 MI.getOpcode() == PPC::SELECT_CC_QSRC || 11166 MI.getOpcode() == PPC::SELECT_CC_QBRC || 11167 MI.getOpcode() == PPC::SELECT_CC_VRRC || 11168 MI.getOpcode() == PPC::SELECT_CC_VSFRC || 11169 MI.getOpcode() == PPC::SELECT_CC_VSSRC || 11170 MI.getOpcode() == PPC::SELECT_CC_VSRC || 11171 MI.getOpcode() == PPC::SELECT_CC_SPE4 || 11172 MI.getOpcode() == PPC::SELECT_CC_SPE || 11173 MI.getOpcode() == PPC::SELECT_F4 || 11174 MI.getOpcode() == PPC::SELECT_F8 || 11175 MI.getOpcode() == PPC::SELECT_F16 || 11176 MI.getOpcode() == PPC::SELECT_QFRC || 11177 MI.getOpcode() == PPC::SELECT_QSRC || 11178 MI.getOpcode() == PPC::SELECT_QBRC || 11179 MI.getOpcode() == PPC::SELECT_SPE || 11180 MI.getOpcode() == PPC::SELECT_SPE4 || 11181 MI.getOpcode() == PPC::SELECT_VRRC || 11182 MI.getOpcode() == PPC::SELECT_VSFRC || 11183 MI.getOpcode() == PPC::SELECT_VSSRC || 11184 MI.getOpcode() == PPC::SELECT_VSRC) { 11185 // The incoming instruction knows the destination vreg to set, the 11186 // condition code register to branch on, the true/false values to 11187 // select between, and a branch opcode to use. 11188 11189 // thisMBB: 11190 // ... 11191 // TrueVal = ... 11192 // cmpTY ccX, r1, r2 11193 // bCC copy1MBB 11194 // fallthrough --> copy0MBB 11195 MachineBasicBlock *thisMBB = BB; 11196 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 11197 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 11198 DebugLoc dl = MI.getDebugLoc(); 11199 F->insert(It, copy0MBB); 11200 F->insert(It, sinkMBB); 11201 11202 // Transfer the remainder of BB and its successor edges to sinkMBB. 11203 sinkMBB->splice(sinkMBB->begin(), BB, 11204 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 11205 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 11206 11207 // Next, add the true and fallthrough blocks as its successors. 
11208 BB->addSuccessor(copy0MBB); 11209 BB->addSuccessor(sinkMBB); 11210 11211 if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 || 11212 MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 || 11213 MI.getOpcode() == PPC::SELECT_F16 || 11214 MI.getOpcode() == PPC::SELECT_SPE4 || 11215 MI.getOpcode() == PPC::SELECT_SPE || 11216 MI.getOpcode() == PPC::SELECT_QFRC || 11217 MI.getOpcode() == PPC::SELECT_QSRC || 11218 MI.getOpcode() == PPC::SELECT_QBRC || 11219 MI.getOpcode() == PPC::SELECT_VRRC || 11220 MI.getOpcode() == PPC::SELECT_VSFRC || 11221 MI.getOpcode() == PPC::SELECT_VSSRC || 11222 MI.getOpcode() == PPC::SELECT_VSRC) { 11223 BuildMI(BB, dl, TII->get(PPC::BC)) 11224 .addReg(MI.getOperand(1).getReg()) 11225 .addMBB(sinkMBB); 11226 } else { 11227 unsigned SelectPred = MI.getOperand(4).getImm(); 11228 BuildMI(BB, dl, TII->get(PPC::BCC)) 11229 .addImm(SelectPred) 11230 .addReg(MI.getOperand(1).getReg()) 11231 .addMBB(sinkMBB); 11232 } 11233 11234 // copy0MBB: 11235 // %FalseValue = ... 11236 // # fallthrough to sinkMBB 11237 BB = copy0MBB; 11238 11239 // Update machine-CFG edges 11240 BB->addSuccessor(sinkMBB); 11241 11242 // sinkMBB: 11243 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 11244 // ... 11245 BB = sinkMBB; 11246 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg()) 11247 .addReg(MI.getOperand(3).getReg()) 11248 .addMBB(copy0MBB) 11249 .addReg(MI.getOperand(2).getReg()) 11250 .addMBB(thisMBB); 11251 } else if (MI.getOpcode() == PPC::ReadTB) { 11252 // To read the 64-bit time-base register on a 32-bit target, we read the 11253 // two halves. Should the counter have wrapped while it was being read, we 11254 // need to try again. 11255 // ... 11256 // readLoop: 11257 // mfspr Rx,TBU # load from TBU 11258 // mfspr Ry,TB # load from TB 11259 // mfspr Rz,TBU # load from TBU 11260 // cmpw crX,Rx,Rz # check if 'old'='new' 11261 // bne readLoop # branch if they're not equal 11262 // ... 11263 11264 MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB); 11265 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 11266 DebugLoc dl = MI.getDebugLoc(); 11267 F->insert(It, readMBB); 11268 F->insert(It, sinkMBB); 11269 11270 // Transfer the remainder of BB and its successor edges to sinkMBB. 
11271 sinkMBB->splice(sinkMBB->begin(), BB, 11272 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 11273 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 11274 11275 BB->addSuccessor(readMBB); 11276 BB = readMBB; 11277 11278 MachineRegisterInfo &RegInfo = F->getRegInfo(); 11279 Register ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 11280 Register LoReg = MI.getOperand(0).getReg(); 11281 Register HiReg = MI.getOperand(1).getReg(); 11282 11283 BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269); 11284 BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268); 11285 BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269); 11286 11287 Register CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 11288 11289 BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg) 11290 .addReg(HiReg) 11291 .addReg(ReadAgainReg); 11292 BuildMI(BB, dl, TII->get(PPC::BCC)) 11293 .addImm(PPC::PRED_NE) 11294 .addReg(CmpReg) 11295 .addMBB(readMBB); 11296 11297 BB->addSuccessor(readMBB); 11298 BB->addSuccessor(sinkMBB); 11299 } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8) 11300 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4); 11301 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16) 11302 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4); 11303 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32) 11304 BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4); 11305 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64) 11306 BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8); 11307 11308 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8) 11309 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND); 11310 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16) 11311 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND); 11312 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32) 11313 BB = EmitAtomicBinary(MI, BB, 4, PPC::AND); 11314 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64) 11315 BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8); 11316 11317 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8) 11318 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR); 11319 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16) 11320 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR); 11321 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32) 11322 BB = EmitAtomicBinary(MI, BB, 4, PPC::OR); 11323 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64) 11324 BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8); 11325 11326 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8) 11327 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR); 11328 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16) 11329 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR); 11330 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32) 11331 BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR); 11332 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64) 11333 BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8); 11334 11335 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8) 11336 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND); 11337 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16) 11338 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND); 11339 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32) 11340 BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND); 11341 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64) 11342 BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8); 11343 11344 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8) 11345 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF); 11346 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16) 11347 
BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF); 11348 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32) 11349 BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF); 11350 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64) 11351 BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8); 11352 11353 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8) 11354 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE); 11355 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16) 11356 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE); 11357 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32) 11358 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE); 11359 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64) 11360 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE); 11361 11362 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8) 11363 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE); 11364 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16) 11365 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE); 11366 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32) 11367 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE); 11368 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64) 11369 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE); 11370 11371 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8) 11372 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE); 11373 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16) 11374 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE); 11375 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32) 11376 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE); 11377 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64) 11378 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE); 11379 11380 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8) 11381 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE); 11382 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16) 11383 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE); 11384 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32) 11385 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE); 11386 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64) 11387 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE); 11388 11389 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8) 11390 BB = EmitPartwordAtomicBinary(MI, BB, true, 0); 11391 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16) 11392 BB = EmitPartwordAtomicBinary(MI, BB, false, 0); 11393 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32) 11394 BB = EmitAtomicBinary(MI, BB, 4, 0); 11395 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64) 11396 BB = EmitAtomicBinary(MI, BB, 8, 0); 11397 else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 || 11398 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 || 11399 (Subtarget.hasPartwordAtomics() && 11400 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) || 11401 (Subtarget.hasPartwordAtomics() && 11402 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) { 11403 bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64; 11404 11405 auto LoadMnemonic = PPC::LDARX; 11406 auto StoreMnemonic = PPC::STDCX; 11407 switch (MI.getOpcode()) { 11408 default: 11409 llvm_unreachable("Compare and swap of unknown size"); 11410 case PPC::ATOMIC_CMP_SWAP_I8: 11411 LoadMnemonic = PPC::LBARX; 11412 StoreMnemonic = PPC::STBCX; 11413 
      assert(Subtarget.hasPartwordAtomics() &&
             "No support for partword atomics.");
      break;
    case PPC::ATOMIC_CMP_SWAP_I16:
      LoadMnemonic = PPC::LHARX;
      StoreMnemonic = PPC::STHCX;
      assert(Subtarget.hasPartwordAtomics() &&
             "No support for partword atomics.");
      break;
    case PPC::ATOMIC_CMP_SWAP_I32:
      LoadMnemonic = PPC::LWARX;
      StoreMnemonic = PPC::STWCX;
      break;
    case PPC::ATOMIC_CMP_SWAP_I64:
      LoadMnemonic = PPC::LDARX;
      StoreMnemonic = PPC::STDCX;
      break;
    }
    Register dest = MI.getOperand(0).getReg();
    Register ptrA = MI.getOperand(1).getReg();
    Register ptrB = MI.getOperand(2).getReg();
    Register oldval = MI.getOperand(3).getReg();
    Register newval = MI.getOperand(4).getReg();
    DebugLoc dl = MI.getDebugLoc();

    MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
    F->insert(It, loop1MBB);
    F->insert(It, loop2MBB);
    F->insert(It, midMBB);
    F->insert(It, exitMBB);
    exitMBB->splice(exitMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    exitMBB->transferSuccessorsAndUpdatePHIs(BB);

    //  thisMBB:
    //   ...
    //   fallthrough --> loop1MBB
    BB->addSuccessor(loop1MBB);

    //  loop1MBB:
    //   l[bhwd]arx dest, ptr
    //   cmp[wd] dest, oldval
    //   bne- midMBB
    //  loop2MBB:
    //   st[bhwd]cx. newval, ptr
    //   bne- loop1MBB
    //   b exitMBB
    //  midMBB:
    //   st[bhwd]cx. dest, ptr
    //  exitMBB:
    BB = loop1MBB;
    BuildMI(BB, dl, TII->get(LoadMnemonic), dest).addReg(ptrA).addReg(ptrB);
    BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
        .addReg(oldval)
        .addReg(dest);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(PPC::PRED_NE)
        .addReg(PPC::CR0)
        .addMBB(midMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(midMBB);

    BB = loop2MBB;
    BuildMI(BB, dl, TII->get(StoreMnemonic))
        .addReg(newval)
        .addReg(ptrA)
        .addReg(ptrB);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(PPC::PRED_NE)
        .addReg(PPC::CR0)
        .addMBB(loop1MBB);
    BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
    BB->addSuccessor(loop1MBB);
    BB->addSuccessor(exitMBB);

    BB = midMBB;
    BuildMI(BB, dl, TII->get(StoreMnemonic))
        .addReg(dest)
        .addReg(ptrA)
        .addReg(ptrB);
    BB->addSuccessor(exitMBB);

    //  exitMBB:
    //   ...
    BB = exitMBB;
  } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
    // We must use 64-bit registers for addresses when targeting 64-bit,
    // since we're actually doing arithmetic on them. Other registers
    // can be 32-bit.
11504 bool is64bit = Subtarget.isPPC64(); 11505 bool isLittleEndian = Subtarget.isLittleEndian(); 11506 bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8; 11507 11508 Register dest = MI.getOperand(0).getReg(); 11509 Register ptrA = MI.getOperand(1).getReg(); 11510 Register ptrB = MI.getOperand(2).getReg(); 11511 Register oldval = MI.getOperand(3).getReg(); 11512 Register newval = MI.getOperand(4).getReg(); 11513 DebugLoc dl = MI.getDebugLoc(); 11514 11515 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 11516 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 11517 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 11518 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 11519 F->insert(It, loop1MBB); 11520 F->insert(It, loop2MBB); 11521 F->insert(It, midMBB); 11522 F->insert(It, exitMBB); 11523 exitMBB->splice(exitMBB->begin(), BB, 11524 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 11525 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 11526 11527 MachineRegisterInfo &RegInfo = F->getRegInfo(); 11528 const TargetRegisterClass *RC = 11529 is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; 11530 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; 11531 11532 Register PtrReg = RegInfo.createVirtualRegister(RC); 11533 Register Shift1Reg = RegInfo.createVirtualRegister(GPRC); 11534 Register ShiftReg = 11535 isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC); 11536 Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC); 11537 Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC); 11538 Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC); 11539 Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC); 11540 Register MaskReg = RegInfo.createVirtualRegister(GPRC); 11541 Register Mask2Reg = RegInfo.createVirtualRegister(GPRC); 11542 Register Mask3Reg = RegInfo.createVirtualRegister(GPRC); 11543 Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC); 11544 Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC); 11545 Register TmpDestReg = RegInfo.createVirtualRegister(GPRC); 11546 Register Ptr1Reg; 11547 Register TmpReg = RegInfo.createVirtualRegister(GPRC); 11548 Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 11549 // thisMBB: 11550 // ... 11551 // fallthrough --> loopMBB 11552 BB->addSuccessor(loop1MBB); 11553 11554 // The 4-byte load must be aligned, while a char or short may be 11555 // anywhere in the word. Hence all this nasty bookkeeping code. 11556 // add ptr1, ptrA, ptrB [copy if ptrA==0] 11557 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 11558 // xori shift, shift1, 24 [16] 11559 // rlwinm ptr, ptr1, 0, 0, 29 11560 // slw newval2, newval, shift 11561 // slw oldval2, oldval,shift 11562 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 11563 // slw mask, mask2, shift 11564 // and newval3, newval2, mask 11565 // and oldval3, oldval2, mask 11566 // loop1MBB: 11567 // lwarx tmpDest, ptr 11568 // and tmp, tmpDest, mask 11569 // cmpw tmp, oldval3 11570 // bne- midMBB 11571 // loop2MBB: 11572 // andc tmp2, tmpDest, mask 11573 // or tmp4, tmp2, newval3 11574 // stwcx. tmp4, ptr 11575 // bne- loop1MBB 11576 // b exitBB 11577 // midMBB: 11578 // stwcx. tmpDest, ptr 11579 // exitBB: 11580 // srw dest, tmpDest, shift 11581 if (ptrA != ZeroReg) { 11582 Ptr1Reg = RegInfo.createVirtualRegister(RC); 11583 BuildMI(BB, dl, TII->get(is64bit ? 
PPC::ADD8 : PPC::ADD4), Ptr1Reg) 11584 .addReg(ptrA) 11585 .addReg(ptrB); 11586 } else { 11587 Ptr1Reg = ptrB; 11588 } 11589 11590 // We need use 32-bit subregister to avoid mismatch register class in 64-bit 11591 // mode. 11592 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg) 11593 .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0) 11594 .addImm(3) 11595 .addImm(27) 11596 .addImm(is8bit ? 28 : 27); 11597 if (!isLittleEndian) 11598 BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg) 11599 .addReg(Shift1Reg) 11600 .addImm(is8bit ? 24 : 16); 11601 if (is64bit) 11602 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 11603 .addReg(Ptr1Reg) 11604 .addImm(0) 11605 .addImm(61); 11606 else 11607 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 11608 .addReg(Ptr1Reg) 11609 .addImm(0) 11610 .addImm(0) 11611 .addImm(29); 11612 BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg) 11613 .addReg(newval) 11614 .addReg(ShiftReg); 11615 BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg) 11616 .addReg(oldval) 11617 .addReg(ShiftReg); 11618 if (is8bit) 11619 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 11620 else { 11621 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 11622 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg) 11623 .addReg(Mask3Reg) 11624 .addImm(65535); 11625 } 11626 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 11627 .addReg(Mask2Reg) 11628 .addReg(ShiftReg); 11629 BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg) 11630 .addReg(NewVal2Reg) 11631 .addReg(MaskReg); 11632 BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg) 11633 .addReg(OldVal2Reg) 11634 .addReg(MaskReg); 11635 11636 BB = loop1MBB; 11637 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 11638 .addReg(ZeroReg) 11639 .addReg(PtrReg); 11640 BuildMI(BB, dl, TII->get(PPC::AND), TmpReg) 11641 .addReg(TmpDestReg) 11642 .addReg(MaskReg); 11643 BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0) 11644 .addReg(TmpReg) 11645 .addReg(OldVal3Reg); 11646 BuildMI(BB, dl, TII->get(PPC::BCC)) 11647 .addImm(PPC::PRED_NE) 11648 .addReg(PPC::CR0) 11649 .addMBB(midMBB); 11650 BB->addSuccessor(loop2MBB); 11651 BB->addSuccessor(midMBB); 11652 11653 BB = loop2MBB; 11654 BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg) 11655 .addReg(TmpDestReg) 11656 .addReg(MaskReg); 11657 BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg) 11658 .addReg(Tmp2Reg) 11659 .addReg(NewVal3Reg); 11660 BuildMI(BB, dl, TII->get(PPC::STWCX)) 11661 .addReg(Tmp4Reg) 11662 .addReg(ZeroReg) 11663 .addReg(PtrReg); 11664 BuildMI(BB, dl, TII->get(PPC::BCC)) 11665 .addImm(PPC::PRED_NE) 11666 .addReg(PPC::CR0) 11667 .addMBB(loop1MBB); 11668 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 11669 BB->addSuccessor(loop1MBB); 11670 BB->addSuccessor(exitMBB); 11671 11672 BB = midMBB; 11673 BuildMI(BB, dl, TII->get(PPC::STWCX)) 11674 .addReg(TmpDestReg) 11675 .addReg(ZeroReg) 11676 .addReg(PtrReg); 11677 BB->addSuccessor(exitMBB); 11678 11679 // exitMBB: 11680 // ... 11681 BB = exitMBB; 11682 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest) 11683 .addReg(TmpReg) 11684 .addReg(ShiftReg); 11685 } else if (MI.getOpcode() == PPC::FADDrtz) { 11686 // This pseudo performs an FADD with rounding mode temporarily forced 11687 // to round-to-zero. We emit this via custom inserter since the FPSCR 11688 // is not modeled at the SelectionDAG level. 
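    // Note: the MTFSB1 31 / MTFSB0 30 pair below writes 0b01, the
    // round-toward-zero encoding, into the two-bit RN field of the FPSCR;
    // the trailing MTFSFb then restores the saved rounding-mode state.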
11689 Register Dest = MI.getOperand(0).getReg(); 11690 Register Src1 = MI.getOperand(1).getReg(); 11691 Register Src2 = MI.getOperand(2).getReg(); 11692 DebugLoc dl = MI.getDebugLoc(); 11693 11694 MachineRegisterInfo &RegInfo = F->getRegInfo(); 11695 Register MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); 11696 11697 // Save FPSCR value. 11698 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg); 11699 11700 // Set rounding mode to round-to-zero. 11701 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31); 11702 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30); 11703 11704 // Perform addition. 11705 BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2); 11706 11707 // Restore FPSCR value. 11708 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg); 11709 } else if (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT || 11710 MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT || 11711 MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 || 11712 MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) { 11713 unsigned Opcode = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 || 11714 MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) 11715 ? PPC::ANDI8_rec 11716 : PPC::ANDI_rec; 11717 bool IsEQ = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT || 11718 MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8); 11719 11720 MachineRegisterInfo &RegInfo = F->getRegInfo(); 11721 Register Dest = RegInfo.createVirtualRegister( 11722 Opcode == PPC::ANDI_rec ? &PPC::GPRCRegClass : &PPC::G8RCRegClass); 11723 11724 DebugLoc Dl = MI.getDebugLoc(); 11725 BuildMI(*BB, MI, Dl, TII->get(Opcode), Dest) 11726 .addReg(MI.getOperand(1).getReg()) 11727 .addImm(1); 11728 BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY), 11729 MI.getOperand(0).getReg()) 11730 .addReg(IsEQ ? PPC::CR0EQ : PPC::CR0GT); 11731 } else if (MI.getOpcode() == PPC::TCHECK_RET) { 11732 DebugLoc Dl = MI.getDebugLoc(); 11733 MachineRegisterInfo &RegInfo = F->getRegInfo(); 11734 Register CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 11735 BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg); 11736 BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY), 11737 MI.getOperand(0).getReg()) 11738 .addReg(CRReg); 11739 } else if (MI.getOpcode() == PPC::TBEGIN_RET) { 11740 DebugLoc Dl = MI.getDebugLoc(); 11741 unsigned Imm = MI.getOperand(1).getImm(); 11742 BuildMI(*BB, MI, Dl, TII->get(PPC::TBEGIN)).addImm(Imm); 11743 BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY), 11744 MI.getOperand(0).getReg()) 11745 .addReg(PPC::CR0EQ); 11746 } else if (MI.getOpcode() == PPC::SETRNDi) { 11747 DebugLoc dl = MI.getDebugLoc(); 11748 Register OldFPSCRReg = MI.getOperand(0).getReg(); 11749 11750 // Save FPSCR value. 11751 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg); 11752 11753 // The floating point rounding mode is in the bits 62:63 of FPCSR, and has 11754 // the following settings: 11755 // 00 Round to nearest 11756 // 01 Round to 0 11757 // 10 Round to +inf 11758 // 11 Round to -inf 11759 11760 // When the operand is immediate, using the two least significant bits of 11761 // the immediate to set the bits 62:63 of FPSCR. 11762 unsigned Mode = MI.getOperand(1).getImm(); 11763 BuildMI(*BB, MI, dl, TII->get((Mode & 1) ? PPC::MTFSB1 : PPC::MTFSB0)) 11764 .addImm(31); 11765 11766 BuildMI(*BB, MI, dl, TII->get((Mode & 2) ? 
                                      PPC::MTFSB1 : PPC::MTFSB0))
        .addImm(30);
  } else if (MI.getOpcode() == PPC::SETRND) {
    DebugLoc dl = MI.getDebugLoc();

    // Copy a register from F8RCRegClass::SrcReg to G8RCRegClass::DestReg,
    // or copy a register from G8RCRegClass::SrcReg to F8RCRegClass::DestReg.
    // If the target doesn't have DirectMove, we should go through the stack to
    // do the conversion, because the target doesn't have instructions like
    // mtvsrd or mfvsrd to do it directly.
    auto copyRegFromG8RCOrF8RC = [&](unsigned DestReg, unsigned SrcReg) {
      if (Subtarget.hasDirectMove()) {
        BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), DestReg)
            .addReg(SrcReg);
      } else {
        // Use the stack to do the register copy.
        unsigned StoreOp = PPC::STD, LoadOp = PPC::LFD;
        MachineRegisterInfo &RegInfo = F->getRegInfo();
        const TargetRegisterClass *RC = RegInfo.getRegClass(SrcReg);
        if (RC == &PPC::F8RCRegClass) {
          // Copy a register from F8RCRegClass to G8RCRegClass.
          assert((RegInfo.getRegClass(DestReg) == &PPC::G8RCRegClass) &&
                 "Unsupported RegClass.");

          StoreOp = PPC::STFD;
          LoadOp = PPC::LD;
        } else {
          // Copy a register from G8RCRegClass to F8RCRegClass.
          assert((RegInfo.getRegClass(SrcReg) == &PPC::G8RCRegClass) &&
                 (RegInfo.getRegClass(DestReg) == &PPC::F8RCRegClass) &&
                 "Unsupported RegClass.");
        }

        MachineFrameInfo &MFI = F->getFrameInfo();
        int FrameIdx = MFI.CreateStackObject(8, 8, false);

        MachineMemOperand *MMOStore = F->getMachineMemOperand(
            MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
            MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx),
            MFI.getObjectAlignment(FrameIdx));

        // Store SrcReg to the stack.
        BuildMI(*BB, MI, dl, TII->get(StoreOp))
            .addReg(SrcReg)
            .addImm(0)
            .addFrameIndex(FrameIdx)
            .addMemOperand(MMOStore);

        MachineMemOperand *MMOLoad = F->getMachineMemOperand(
            MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
            MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
            MFI.getObjectAlignment(FrameIdx));

        // Load from the stack slot where SrcReg was stored into DestReg; this
        // completes the register-class conversion from RegClass::SrcReg to
        // RegClass::DestReg.
        BuildMI(*BB, MI, dl, TII->get(LoadOp), DestReg)
            .addImm(0)
            .addFrameIndex(FrameIdx)
            .addMemOperand(MMOLoad);
      }
    };

    Register OldFPSCRReg = MI.getOperand(0).getReg();

    // Save FPSCR value.
    BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);

    // When the operand is a gprc register, use its two least significant bits
    // and the mtfsf instruction to set bits 62:63 of FPSCR:
    //
    //   copy OldFPSCRTmpReg, OldFPSCRReg
    //   (INSERT_SUBREG ExtSrcReg, (IMPLICIT_DEF ImDefReg), SrcOp, 1)
    //   rldimi NewFPSCRTmpReg, ExtSrcReg, OldFPSCRReg, 0, 62
    //   copy NewFPSCRReg, NewFPSCRTmpReg
    //   mtfsf 255, NewFPSCRReg
    MachineOperand SrcOp = MI.getOperand(1);
    MachineRegisterInfo &RegInfo = F->getRegInfo();
    Register OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);

    copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg);

    Register ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
    Register ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);

    // The first operand of INSERT_SUBREG should be a register which has
    // subregisters; we only care about its RegClass, so we should use an
    // IMPLICIT_DEF register.
    BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), ImDefReg);
    BuildMI(*BB, MI, dl, TII->get(PPC::INSERT_SUBREG), ExtSrcReg)
        .addReg(ImDefReg)
        .add(SrcOp)
        .addImm(1);

    Register NewFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
    BuildMI(*BB, MI, dl, TII->get(PPC::RLDIMI), NewFPSCRTmpReg)
        .addReg(OldFPSCRTmpReg)
        .addReg(ExtSrcReg)
        .addImm(0)
        .addImm(62);

    Register NewFPSCRReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
    copyRegFromG8RCOrF8RC(NewFPSCRReg, NewFPSCRTmpReg);

    // The mask 255 means that bits 32:63 of NewFPSCRReg are placed into bits
    // 32:63 of FPSCR.
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF))
        .addImm(255)
        .addReg(NewFPSCRReg)
        .addImm(0)
        .addImm(0);
  } else {
    llvm_unreachable("Unexpected instr type to insert");
  }

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//

static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {
  // For the estimates, convergence is quadratic, so we essentially double the
  // number of digits correct after every iteration. For both FRE and FRSQRTE,
  // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(),
  // this is 2^-14. IEEE float has 23 digits and double has 52 digits.
  int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
  if (VT.getScalarType() == MVT::f64)
    RefinementSteps++;
  return RefinementSteps;
}

SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
                                           int Enabled, int &RefinementSteps,
                                           bool &UseOneConstNR,
                                           bool Reciprocal) const {
  EVT VT = Operand.getValueType();
  if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
      (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
      (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
      (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
      (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
      (VT == MVT::v4f64 && Subtarget.hasQPX())) {
    if (RefinementSteps == ReciprocalEstimate::Unspecified)
      RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);

    // The Newton-Raphson computation with a single constant does not provide
    // enough accuracy on some CPUs.
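    // As an illustration, each refinement step performs the standard
    // Newton-Raphson iteration for rsqrt (shown here for reference only;
    // the actual expansion is done by the generic DAG combiner):
    //   x_{n+1} = x_n * (1.5 - 0.5 * A * x_n * x_n)
    // which roughly doubles the number of correct bits per step.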
    UseOneConstNR = !Subtarget.needsTwoConstNR();
    return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
  }
  return SDValue();
}

SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
                                            int Enabled,
                                            int &RefinementSteps) const {
  EVT VT = Operand.getValueType();
  if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
      (VT == MVT::f64 && Subtarget.hasFRE()) ||
      (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
      (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
      (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
      (VT == MVT::v4f64 && Subtarget.hasQPX())) {
    if (RefinementSteps == ReciprocalEstimate::Unspecified)
      RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
    return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
  }
  return SDValue();
}

unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
  // Note: This functionality is used only when unsafe-fp-math is enabled, and
  // on cores with reciprocal estimates (which are used when unsafe-fp-math is
  // enabled for division), this functionality is redundant with the default
  // combiner logic (once the division -> reciprocal/multiply transformation
  // has taken place). As a result, this matters more for older cores than for
  // newer ones.

  // Combine multiple FDIVs with the same divisor into multiple FMULs by the
  // reciprocal if there are two or more FDIVs (for embedded cores with only
  // one FP pipeline) or three or more FDIVs (for generic OOO cores).
  switch (Subtarget.getCPUDirective()) {
  default:
    return 3;
  case PPC::DIR_440:
  case PPC::DIR_A2:
  case PPC::DIR_E500:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
    return 2;
  }
}

// isConsecutiveLSLoc needs to work even if all adds have not yet been
// collapsed, and so we need to look through chains of them.
static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
                                      int64_t &Offset, SelectionDAG &DAG) {
  if (DAG.isBaseWithConstantOffset(Loc)) {
    Base = Loc.getOperand(0);
    Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();

    // The base might itself be a base plus an offset, and if so, accumulate
    // that as well.
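    // For example, for Loc = (add (add %base, 16), 4) the recursion below
    // accumulates Offset = 20 and leaves Base = %base.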
    getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG);
  }
}

static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
                               unsigned Bytes, int Dist,
                               SelectionDAG &DAG) {
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;

  SDValue BaseLoc = Base->getBasePtr();
  if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
    const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    int FS = MFI.getObjectSize(FI);
    int BFS = MFI.getObjectSize(BFI);
    if (FS != BFS || FS != (int)Bytes)
      return false;
    return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist * Bytes);
  }

  SDValue Base1 = Loc, Base2 = BaseLoc;
  int64_t Offset1 = 0, Offset2 = 0;
  getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);
  getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);
  if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
    return true;

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const GlobalValue *GV1 = nullptr;
  const GlobalValue *GV2 = nullptr;
  Offset1 = 0;
  Offset2 = 0;
  bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
  bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
  if (isGA1 && isGA2 && GV1 == GV2)
    return Offset1 == (Offset2 + Dist * Bytes);
  return false;
}

// Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
// not enforce equality of the chain operands.
static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
                            unsigned Bytes, int Dist,
                            SelectionDAG &DAG) {
  if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
    EVT VT = LS->getMemoryVT();
    SDValue Loc = LS->getBasePtr();
    return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
  }

  if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
    EVT VT;
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    default: return false;
    case Intrinsic::ppc_qpx_qvlfd:
    case Intrinsic::ppc_qpx_qvlfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfs:
    case Intrinsic::ppc_qpx_qvlfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcd:
    case Intrinsic::ppc_qpx_qvlfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcs:
    case Intrinsic::ppc_qpx_qvlfcsa:
      VT = MVT::v2f32;
      break;
    case Intrinsic::ppc_qpx_qvlfiwa:
    case Intrinsic::ppc_qpx_qvlfiwz:
    case Intrinsic::ppc_altivec_lvx:
    case Intrinsic::ppc_altivec_lvxl:
    case Intrinsic::ppc_vsx_lxvw4x:
    case Intrinsic::ppc_vsx_lxvw4x_be:
      VT = MVT::v4i32;
      break;
    case Intrinsic::ppc_vsx_lxvd2x:
    case Intrinsic::ppc_vsx_lxvd2x_be:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_altivec_lvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_lvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_lvewx:
      VT = MVT::i32;
      break;
    }

    return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
  }

  if (N->getOpcode() == ISD::INTRINSIC_VOID) {
    EVT VT;
    switch
        (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    default: return false;
    case Intrinsic::ppc_qpx_qvstfd:
    case Intrinsic::ppc_qpx_qvstfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfs:
    case Intrinsic::ppc_qpx_qvstfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcd:
    case Intrinsic::ppc_qpx_qvstfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcs:
    case Intrinsic::ppc_qpx_qvstfcsa:
      VT = MVT::v2f32;
      break;
    case Intrinsic::ppc_qpx_qvstfiw:
    case Intrinsic::ppc_qpx_qvstfiwa:
    case Intrinsic::ppc_altivec_stvx:
    case Intrinsic::ppc_altivec_stvxl:
    case Intrinsic::ppc_vsx_stxvw4x:
      VT = MVT::v4i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_vsx_stxvw4x_be:
      VT = MVT::v4i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x_be:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_altivec_stvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_stvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_stvewx:
      VT = MVT::i32;
      break;
    }

    return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
  }

  return false;
}

// Return true if there is a nearby consecutive load to the one provided
// (regardless of alignment). We search up and down the chain, looking through
// token factors and other loads (but nothing else). As a result, a true result
// indicates that it is safe to create a new consecutive load adjacent to the
// load provided.
static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
  SDValue Chain = LD->getChain();
  EVT VT = LD->getMemoryVT();

  SmallSet<SDNode *, 16> LoadRoots;
  SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
  SmallSet<SDNode *, 16> Visited;

  // First, search up the chain, branching to follow all token-factor operands.
  // If we find a consecutive load, then we're done, otherwise, record all
  // nodes just above the top-level loads and token factors.
  while (!Queue.empty()) {
    SDNode *ChainNext = Queue.pop_back_val();
    if (!Visited.insert(ChainNext).second)
      continue;

    if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
      if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
        return true;

      if (!Visited.count(ChainLD->getChain().getNode()))
        Queue.push_back(ChainLD->getChain().getNode());
    } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
      for (const SDUse &O : ChainNext->ops())
        if (!Visited.count(O.getNode()))
          Queue.push_back(O.getNode());
    } else
      LoadRoots.insert(ChainNext);
  }

  // Second, search down the chain, starting from the top-level nodes recorded
  // in the first phase. These top-level nodes are the nodes just above all
  // loads and token factors. Starting with their uses, recursively look
  // through all loads (just the chain uses) and token factors to find a
  // consecutive load.
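  //
  // This second walk matters because a consecutive sibling load, e.g.
  //   (load %ptr)  next to  (load (add %ptr, 4))
  // need not be on LD's own chain; it may only be reachable by walking up to
  // a common chain root and back down through that root's other chain users.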
  Visited.clear();
  Queue.clear();

  for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
       IE = LoadRoots.end(); I != IE; ++I) {
    Queue.push_back(*I);

    while (!Queue.empty()) {
      SDNode *LoadRoot = Queue.pop_back_val();
      if (!Visited.insert(LoadRoot).second)
        continue;

      if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
        if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
          return true;

      for (SDNode::use_iterator UI = LoadRoot->use_begin(),
           UE = LoadRoot->use_end(); UI != UE; ++UI)
        if (((isa<MemSDNode>(*UI) &&
              cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
             UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
          Queue.push_back(*UI);
    }
  }

  return false;
}

/// This function is called when we have proved that a SETCC node can be
/// replaced by subtraction (and other supporting instructions) so that the
/// result of the comparison is kept in a GPR instead of a CR. This function is
/// purely for codegen purposes and has some flags to guide the codegen
/// process.
static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement,
                                     bool Swap, SDLoc &DL, SelectionDAG &DAG) {
  assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");

  // Zero extend the operands to the largest legal integer. Originally, they
  // must be of a strictly smaller size.
  auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0),
                         DAG.getConstant(Size, DL, MVT::i32));
  auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1),
                         DAG.getConstant(Size, DL, MVT::i32));

  // Swap if needed. Depends on the condition code.
  if (Swap)
    std::swap(Op0, Op1);

  // Subtract extended integers.
  auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1);

  // Move the sign bit to the least significant position and zero out the rest.
  // Now the least significant bit carries the result of original comparison.
  auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode,
                             DAG.getConstant(Size - 1, DL, MVT::i32));
  auto Final = Shifted;

  // Complement the result if needed. Based on the condition code.
  if (Complement)
    Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted,
                        DAG.getConstant(1, DL, MVT::i64));

  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final);
}

SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  // Size of integers being compared has a critical role in the following
  // analysis, so we prefer to do this when all types are legal.
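  //
  // For illustration, with i32 operands and a largest legal type of i64
  // (Size == 64), the transformation below rewrites
  //   (setult %a, %b)
  // as
  //   (trunc (srl (sub (zext %a), (zext %b)), 63))
  // since the sign bit of the 64-bit difference of two zero-extended values
  // is exactly the unsigned less-than result.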
  if (!DCI.isAfterLegalizeDAG())
    return SDValue();

  // If all users of SETCC extend its value to a legal integer type,
  // then we replace SETCC with a subtraction.
  for (SDNode::use_iterator UI = N->use_begin(),
       UE = N->use_end(); UI != UE; ++UI) {
    if (UI->getOpcode() != ISD::ZERO_EXTEND)
      return SDValue();
  }

  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
  auto OpSize = N->getOperand(0).getValueSizeInBits();

  unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();

  if (OpSize < Size) {
    switch (CC) {
    default: break;
    case ISD::SETULT:
      return generateEquivalentSub(N, Size, false, false, DL, DAG);
    case ISD::SETULE:
      return generateEquivalentSub(N, Size, true, true, DL, DAG);
    case ISD::SETUGT:
      return generateEquivalentSub(N, Size, false, true, DL, DAG);
    case ISD::SETUGE:
      return generateEquivalentSub(N, Size, true, false, DL, DAG);
    }
  }

  return SDValue();
}

SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);

  assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
  // If we're tracking CR bits, we need to be careful that we don't have:
  //   trunc(binary-ops(zext(x), zext(y)))
  // or
  //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
  // such that we're unnecessarily moving things into GPRs when it would be
  // better to keep them in CR bits.

  // Note that trunc here can be an actual i1 trunc, or can be the effective
  // truncation that comes from a setcc or select_cc.
  if (N->getOpcode() == ISD::TRUNCATE &&
      N->getValueType(0) != MVT::i1)
    return SDValue();

  if (N->getOperand(0).getValueType() != MVT::i32 &&
      N->getOperand(0).getValueType() != MVT::i64)
    return SDValue();

  if (N->getOpcode() == ISD::SETCC ||
      N->getOpcode() == ISD::SELECT_CC) {
    // If we're looking at a comparison, then we need to make sure that the
    // high bits (all except for the first) don't affect the result.
    ISD::CondCode CC =
      cast<CondCodeSDNode>(N->getOperand(
        N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
    unsigned OpBits = N->getOperand(0).getValueSizeInBits();

    if (ISD::isSignedIntSetCC(CC)) {
      if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
          DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
        return SDValue();
    } else if (ISD::isUnsignedIntSetCC(CC)) {
      if (!DAG.MaskedValueIsZero(N->getOperand(0),
                                 APInt::getHighBitsSet(OpBits, OpBits - 1)) ||
          !DAG.MaskedValueIsZero(N->getOperand(1),
                                 APInt::getHighBitsSet(OpBits, OpBits - 1)))
        return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
                                             : SDValue());
    } else {
      // This is neither a signed nor an unsigned comparison, just make sure
      // that the high bits are equal.
      KnownBits Op1Known = DAG.computeKnownBits(N->getOperand(0));
      KnownBits Op2Known = DAG.computeKnownBits(N->getOperand(1));

      // We don't really care about what is known about the first bit (if
      // anything), so clear it in all masks prior to comparing them.
      Op1Known.Zero.clearBit(0); Op1Known.One.clearBit(0);
      Op2Known.Zero.clearBit(0); Op2Known.One.clearBit(0);

      if (Op1Known.Zero != Op2Known.Zero || Op1Known.One != Op2Known.One)
        return SDValue();
    }
  }

  // We now know that the higher-order bits are irrelevant, we just need to
  // make sure that all of the intermediate operations are bit operations, and
  // all inputs are extensions.
  if (N->getOperand(0).getOpcode() != ISD::AND &&
      N->getOperand(0).getOpcode() != ISD::OR &&
      N->getOperand(0).getOpcode() != ISD::XOR &&
      N->getOperand(0).getOpcode() != ISD::SELECT &&
      N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
      N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
      N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
      N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
      N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
    return SDValue();

  if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
      N->getOperand(1).getOpcode() != ISD::AND &&
      N->getOperand(1).getOpcode() != ISD::OR &&
      N->getOperand(1).getOpcode() != ISD::XOR &&
      N->getOperand(1).getOpcode() != ISD::SELECT &&
      N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
      N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
      N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
      N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
      N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
    return SDValue();

  SmallVector<SDValue, 4> Inputs;
  SmallVector<SDValue, 8> BinOps, PromOps;
  SmallPtrSet<SDNode *, 16> Visited;

  for (unsigned i = 0; i < 2; ++i) {
    if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
          N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
          N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
         N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
        isa<ConstantSDNode>(N->getOperand(i)))
      Inputs.push_back(N->getOperand(i));
    else
      BinOps.push_back(N->getOperand(i));

    if (N->getOpcode() == ISD::TRUNCATE)
      break;
  }

  // Visit all inputs, collect all binary operations (and, or, xor and
  // select) that are all fed by extensions.
  while (!BinOps.empty()) {
    SDValue BinOp = BinOps.back();
    BinOps.pop_back();

    if (!Visited.insert(BinOp.getNode()).second)
      continue;

    PromOps.push_back(BinOp);

    for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
      // The condition of the select is not promoted.
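      // (Operand 0 of a SELECT is its i1 condition; for a SELECT_CC, only
      // operands 2 and 3 are the value operands, hence the index checks
      // below.)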
      if (BinOp.getOpcode() == ISD::SELECT && i == 0)
        continue;
      if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
        continue;

      if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
            BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
            BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
           BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
          isa<ConstantSDNode>(BinOp.getOperand(i))) {
        Inputs.push_back(BinOp.getOperand(i));
      } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
                 BinOp.getOperand(i).getOpcode() == ISD::OR ||
                 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
                 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
                 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
                 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
                 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
        BinOps.push_back(BinOp.getOperand(i));
      } else {
        // We have an input that is not an extension or another binary
        // operation; we'll abort this transformation.
        return SDValue();
      }
    }
  }

  // Make sure that this is a self-contained cluster of operations (which
  // is not quite the same thing as saying that everything has only one
  // use).
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
         UE = Inputs[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // Make sure that we're not going to promote the non-output-value
      // operand(s) or SELECT or SELECT_CC.
      // FIXME: Although we could sometimes handle this, and it does occur in
      // practice that one of the condition inputs to the select is also one of
      // the outputs, we currently can't deal with this.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == Inputs[i])
          return SDValue();
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == Inputs[i] ||
            User->getOperand(1) == Inputs[i])
          return SDValue();
      }
    }
  }

  for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
    for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
         UE = PromOps[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // Make sure that we're not going to promote the non-output-value
      // operand(s) or SELECT or SELECT_CC.
      // FIXME: Although we could sometimes handle this, and it does occur in
      // practice that one of the condition inputs to the select is also one of
      // the outputs, we currently can't deal with this.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == PromOps[i])
          return SDValue();
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == PromOps[i] ||
            User->getOperand(1) == PromOps[i])
          return SDValue();
      }
    }
  }

  // Replace all inputs with the extension operand.
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    // Constants may have users outside the cluster of to-be-promoted nodes,
    // and so we need to replace those as we do the promotions.
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;
    else
      DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
  }

  std::list<HandleSDNode> PromOpHandles;
  for (auto &PromOp : PromOps)
    PromOpHandles.emplace_back(PromOp);

  // Replace all operations (these are all the same, but have a different
  // (i1) return type). DAG.getNode will validate that the types of
  // a binary operator match, so go through the list in reverse so that
  // we've likely promoted both operands first. Any intermediate truncations or
  // extensions disappear.
  while (!PromOpHandles.empty()) {
    SDValue PromOp = PromOpHandles.back().getValue();
    PromOpHandles.pop_back();

    if (PromOp.getOpcode() == ISD::TRUNCATE ||
        PromOp.getOpcode() == ISD::SIGN_EXTEND ||
        PromOp.getOpcode() == ISD::ZERO_EXTEND ||
        PromOp.getOpcode() == ISD::ANY_EXTEND) {
      if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
          PromOp.getOperand(0).getValueType() != MVT::i1) {
        // The operand is not yet ready (see comment below).
        PromOpHandles.emplace_front(PromOp);
        continue;
      }

      SDValue RepValue = PromOp.getOperand(0);
      if (isa<ConstantSDNode>(RepValue))
        RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);

      DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
      continue;
    }

    unsigned C;
    switch (PromOp.getOpcode()) {
    default: C = 0; break;
    case ISD::SELECT: C = 1; break;
    case ISD::SELECT_CC: C = 2; break;
    }

    if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
         PromOp.getOperand(C).getValueType() != MVT::i1) ||
        (!isa<ConstantSDNode>(PromOp.getOperand(C + 1)) &&
         PromOp.getOperand(C + 1).getValueType() != MVT::i1)) {
      // The to-be-promoted operands of this node have not yet been
      // promoted (this should be rare because we're going through the
      // list backward, but if one of the operands has several users in
      // this cluster of to-be-promoted nodes, it is possible).
      PromOpHandles.emplace_front(PromOp);
      continue;
    }

    SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
                                PromOp.getNode()->op_end());

    // If there are any constant inputs, make sure they're replaced now.
    for (unsigned i = 0; i < 2; ++i)
      if (isa<ConstantSDNode>(Ops[C + i]))
        Ops[C + i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C + i]);

    DAG.ReplaceAllUsesOfValueWith(PromOp,
        DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
  }

  // Now we're left with the initial truncation itself.
  if (N->getOpcode() == ISD::TRUNCATE)
    return N->getOperand(0);

  // Otherwise, this is a comparison. The operands to be compared have just
  // changed type (to i1), but everything else is the same.
  return SDValue(N, 0);
}

SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);

  // If we're tracking CR bits, we need to be careful that we don't have:
  //   zext(binary-ops(trunc(x), trunc(y)))
  // or
  //   zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
  // such that we're unnecessarily moving things into CR bits that can more
  // efficiently stay in GPRs. Note that if we're not certain that the high
  // bits are set as required by the final extension, we still may need to do
  // some masking to get the proper behavior.

  // This same functionality is important on PPC64 when dealing with
  // 32-to-64-bit extensions; these occur often when 32-bit values are used as
  // the return values of functions. Because it is so similar, it is handled
  // here as well.

  if (N->getValueType(0) != MVT::i32 &&
      N->getValueType(0) != MVT::i64)
    return SDValue();

  if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
        (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
    return SDValue();

  if (N->getOperand(0).getOpcode() != ISD::AND &&
      N->getOperand(0).getOpcode() != ISD::OR &&
      N->getOperand(0).getOpcode() != ISD::XOR &&
      N->getOperand(0).getOpcode() != ISD::SELECT &&
      N->getOperand(0).getOpcode() != ISD::SELECT_CC)
    return SDValue();

  SmallVector<SDValue, 4> Inputs;
  SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
  SmallPtrSet<SDNode *, 16> Visited;

  // Visit all inputs, collect all binary operations (and, or, xor and
  // select) that are all fed by truncations.
  while (!BinOps.empty()) {
    SDValue BinOp = BinOps.back();
    BinOps.pop_back();

    if (!Visited.insert(BinOp.getNode()).second)
      continue;

    PromOps.push_back(BinOp);

    for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
      // The condition of the select is not promoted.
      if (BinOp.getOpcode() == ISD::SELECT && i == 0)
        continue;
      if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
        continue;

      if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
          isa<ConstantSDNode>(BinOp.getOperand(i))) {
        Inputs.push_back(BinOp.getOperand(i));
      } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
                 BinOp.getOperand(i).getOpcode() == ISD::OR ||
                 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
        BinOps.push_back(BinOp.getOperand(i));
      } else {
        // We have an input that is not a truncation or another binary
        // operation; we'll abort this transformation.
        return SDValue();
      }
    }
  }

  // The operands of a select that must be truncated when the select is
  // promoted because the operand is actually part of the to-be-promoted set.
  DenseMap<SDNode *, EVT> SelectTruncOp[2];

  // Make sure that this is a self-contained cluster of operations (which
  // is not quite the same thing as saying that everything has only one
  // use).
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
         UE = Inputs[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // If we're going to promote the non-output-value operand(s) or SELECT or
      // SELECT_CC, record them for truncation.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == Inputs[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                  User->getOperand(0).getValueType()));
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == Inputs[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                  User->getOperand(0).getValueType()));
        if (User->getOperand(1) == Inputs[i])
          SelectTruncOp[1].insert(std::make_pair(User,
                                  User->getOperand(1).getValueType()));
      }
    }
  }

  for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
    for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
         UE = PromOps[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // If we're going to promote the non-output-value operand(s) or SELECT or
      // SELECT_CC, record them for truncation.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == PromOps[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                  User->getOperand(0).getValueType()));
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == PromOps[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                  User->getOperand(0).getValueType()));
        if (User->getOperand(1) == PromOps[i])
          SelectTruncOp[1].insert(std::make_pair(User,
                                  User->getOperand(1).getValueType()));
      }
    }
  }

  unsigned PromBits = N->getOperand(0).getValueSizeInBits();
  bool ReallyNeedsExt = false;
  if (N->getOpcode() != ISD::ANY_EXTEND) {
    // If all of the inputs are not already sign/zero extended, then
    // we'll still need to do that at the end.
    for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
      if (isa<ConstantSDNode>(Inputs[i]))
        continue;

      unsigned OpBits =
        Inputs[i].getOperand(0).getValueSizeInBits();
      assert(PromBits < OpBits && "Truncation not to a smaller bit count?");

      if ((N->getOpcode() == ISD::ZERO_EXTEND &&
           !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
                                  APInt::getHighBitsSet(OpBits,
                                                        OpBits - PromBits))) ||
          (N->getOpcode() == ISD::SIGN_EXTEND &&
           DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
             (OpBits - (PromBits - 1)))) {
        ReallyNeedsExt = true;
        break;
      }
    }
  }

  // Replace all inputs, either with the truncation operand, or a
  // truncation or extension to the final output type.
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    // Constant inputs need to be replaced with the to-be-promoted nodes that
    // use them because they might have users outside of the cluster of
    // promoted nodes.
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    SDValue InSrc = Inputs[i].getOperand(0);
    if (Inputs[i].getValueType() == N->getValueType(0))
      DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
    else if (N->getOpcode() == ISD::SIGN_EXTEND)
      DAG.ReplaceAllUsesOfValueWith(Inputs[i],
          DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
    else if (N->getOpcode() == ISD::ZERO_EXTEND)
      DAG.ReplaceAllUsesOfValueWith(Inputs[i],
          DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
    else
      DAG.ReplaceAllUsesOfValueWith(Inputs[i],
          DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
  }

  std::list<HandleSDNode> PromOpHandles;
  for (auto &PromOp : PromOps)
    PromOpHandles.emplace_back(PromOp);

  // Replace all operations (these are all the same, but have a different
  // (promoted) return type). DAG.getNode will validate that the types of
  // a binary operator match, so go through the list in reverse so that
  // we've likely promoted both operands first.
  while (!PromOpHandles.empty()) {
    SDValue PromOp = PromOpHandles.back().getValue();
    PromOpHandles.pop_back();

    unsigned C;
    switch (PromOp.getOpcode()) {
    default: C = 0; break;
    case ISD::SELECT: C = 1; break;
    case ISD::SELECT_CC: C = 2; break;
    }

    if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
         PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
        (!isa<ConstantSDNode>(PromOp.getOperand(C + 1)) &&
         PromOp.getOperand(C + 1).getValueType() != N->getValueType(0))) {
      // The to-be-promoted operands of this node have not yet been
      // promoted (this should be rare because we're going through the
      // list backward, but if one of the operands has several users in
      // this cluster of to-be-promoted nodes, it is possible).
      PromOpHandles.emplace_front(PromOp);
      continue;
    }

    // For SELECT and SELECT_CC nodes, we do a similar check for any
    // to-be-promoted comparison inputs.
    if (PromOp.getOpcode() == ISD::SELECT ||
        PromOp.getOpcode() == ISD::SELECT_CC) {
      if ((SelectTruncOp[0].count(PromOp.getNode()) &&
           PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
          (SelectTruncOp[1].count(PromOp.getNode()) &&
           PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
        PromOpHandles.emplace_front(PromOp);
        continue;
      }
    }

    SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
                                PromOp.getNode()->op_end());

    // If this node has constant inputs, then they'll need to be promoted here.
    for (unsigned i = 0; i < 2; ++i) {
      if (!isa<ConstantSDNode>(Ops[C + i]))
        continue;
      if (Ops[C + i].getValueType() == N->getValueType(0))
        continue;

      if (N->getOpcode() == ISD::SIGN_EXTEND)
        Ops[C + i] = DAG.getSExtOrTrunc(Ops[C + i], dl, N->getValueType(0));
      else if (N->getOpcode() == ISD::ZERO_EXTEND)
        Ops[C + i] = DAG.getZExtOrTrunc(Ops[C + i], dl, N->getValueType(0));
      else
        Ops[C + i] = DAG.getAnyExtOrTrunc(Ops[C + i], dl, N->getValueType(0));
    }

    // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
    // truncate them again to the original value type.
    if (PromOp.getOpcode() == ISD::SELECT ||
        PromOp.getOpcode() == ISD::SELECT_CC) {
      auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
      if (SI0 != SelectTruncOp[0].end())
        Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
      auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
      if (SI1 != SelectTruncOp[1].end())
        Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
    }

    DAG.ReplaceAllUsesOfValueWith(PromOp,
        DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
  }

  // Now we're left with the initial extension itself.
  if (!ReallyNeedsExt)
    return N->getOperand(0);

  // To zero extend, just mask off everything except for the first bit (in the
  // i1 case).
  if (N->getOpcode() == ISD::ZERO_EXTEND)
    return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
                       DAG.getConstant(APInt::getLowBitsSet(
                                         N->getValueSizeInBits(0), PromBits),
                                       dl, N->getValueType(0)));

  assert(N->getOpcode() == ISD::SIGN_EXTEND &&
         "Invalid extension type");
  EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
  SDValue ShiftCst =
    DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
  return DAG.getNode(
      ISD::SRA, dl, N->getValueType(0),
      DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
      ShiftCst);
}

SDValue PPCTargetLowering::combineSetCC(SDNode *N,
                                        DAGCombinerInfo &DCI) const {
  assert(N->getOpcode() == ISD::SETCC &&
         "Should be called with a SETCC node");

  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
  if (CC == ISD::SETNE || CC == ISD::SETEQ) {
    SDValue LHS = N->getOperand(0);
    SDValue RHS = N->getOperand(1);

    // If there is a '0 - y' pattern, canonicalize the pattern to the RHS.
    if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
        LHS.hasOneUse())
      std::swap(LHS, RHS);

    // x == 0-y --> x+y == 0
    // x != 0-y --> x+y != 0
    if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
        RHS.hasOneUse()) {
      SDLoc DL(N);
      SelectionDAG &DAG = DCI.DAG;
      EVT VT = N->getValueType(0);
      EVT OpVT = LHS.getValueType();
      SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
      return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
    }
  }

  return DAGCombineTruncBoolExt(N, DCI);
}

// Is this an extending load from an f32 to an f64?
static bool isFPExtLoad(SDValue Op) {
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()))
    return LD->getExtensionType() == ISD::EXTLOAD &&
           Op.getValueType() == MVT::f64;
  return false;
}

/// Reduces the number of fp-to-int conversions when building a vector.
///
/// If this vector is built out of floating to integer conversions,
/// transform it to a vector built out of floating point values followed by a
/// single floating to integer conversion of the vector.
/// Namely (build_vector (fptosi $A), (fptosi $B), ...)
/// becomes (fptosi (build_vector ($A, $B, ...)))
SDValue PPCTargetLowering::
combineElementTruncationToVectorTruncation(SDNode *N,
                                           DAGCombinerInfo &DCI) const {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         "Should be called with a BUILD_VECTOR node");

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);

  SDValue FirstInput = N->getOperand(0);
  assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
         "The input operand must be an fp-to-int conversion.");

  // This combine happens after legalization so the fp_to_[su]i nodes are
  // already converted to PPCISD nodes.
  unsigned FirstConversion = FirstInput.getOperand(0).getOpcode();
  if (FirstConversion == PPCISD::FCTIDZ ||
      FirstConversion == PPCISD::FCTIDUZ ||
      FirstConversion == PPCISD::FCTIWZ ||
      FirstConversion == PPCISD::FCTIWUZ) {
    bool IsSplat = true;
    bool Is32Bit = FirstConversion == PPCISD::FCTIWZ ||
                   FirstConversion == PPCISD::FCTIWUZ;
    EVT SrcVT = FirstInput.getOperand(0).getValueType();
    SmallVector<SDValue, 4> Ops;
    EVT TargetVT = N->getValueType(0);
    for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
      SDValue NextOp = N->getOperand(i);
      if (NextOp.getOpcode() != PPCISD::MFVSR)
        return SDValue();
      unsigned NextConversion = NextOp.getOperand(0).getOpcode();
      if (NextConversion != FirstConversion)
        return SDValue();
      // If we are converting to 32-bit integers, we need to add an FP_ROUND.
      // This is not valid if the input was originally double precision. It is
      // also not profitable to do unless this is an extending load in which
      // case doing this combine will allow us to combine consecutive loads.
      if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0)))
        return SDValue();
      if (N->getOperand(i) != FirstInput)
        IsSplat = false;
    }

    // If this is a splat, we leave it as-is since there will be only a single
    // fp-to-int conversion followed by a splat of the integer. This is better
    // for 32-bit and smaller ints and neutral for 64-bit ints.
    if (IsSplat)
      return SDValue();

    // Now that we know we have the right type of node, get its operands.
    for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
      SDValue In = N->getOperand(i).getOperand(0);
      if (Is32Bit) {
        // For 32-bit values, we need to add an FP_ROUND node (if we made it
        // here, we know that all inputs are extending loads so this is safe).
        if (In.isUndef())
          Ops.push_back(DAG.getUNDEF(SrcVT));
        else {
          SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl,
                                      MVT::f32, In.getOperand(0),
                                      DAG.getIntPtrConstant(1, dl));
          Ops.push_back(Trunc);
        }
      } else
        Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
    }

    unsigned Opcode;
    if (FirstConversion == PPCISD::FCTIDZ ||
        FirstConversion == PPCISD::FCTIWZ)
      Opcode = ISD::FP_TO_SINT;
    else
      Opcode = ISD::FP_TO_UINT;

    EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;
    SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
    return DAG.getNode(Opcode, dl, TargetVT, BV);
  }
  return SDValue();
}

/// Reduce the number of loads when building a vector.
///
/// Building a vector out of multiple loads can be converted to a load
/// of the vector type if the loads are consecutive.
/// If the loads are consecutive but in descending order, a shuffle is added
/// at the end to reorder the vector.
static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         "Should be called with a BUILD_VECTOR node");

  SDLoc dl(N);

  // Return early for non-byte-sized types, as they can't be consecutive.
  if (!N->getValueType(0).getVectorElementType().isByteSized())
    return SDValue();

  bool InputsAreConsecutiveLoads = true;
  bool InputsAreReverseConsecutive = true;
  unsigned ElemSize = N->getValueType(0).getScalarType().getStoreSize();
  SDValue FirstInput = N->getOperand(0);
  bool IsRoundOfExtLoad = false;

  if (FirstInput.getOpcode() == ISD::FP_ROUND &&
      FirstInput.getOperand(0).getOpcode() == ISD::LOAD) {
    LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0));
    IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD;
  }
  // Not a build vector of (possibly fp_rounded) loads.
  if ((!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) ||
      N->getNumOperands() == 1)
    return SDValue();

  for (int i = 1, e = N->getNumOperands(); i < e; ++i) {
    // If any inputs are fp_round(extload), they all must be.
    if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND)
      return SDValue();

    SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) :
                                           N->getOperand(i);
    if (NextInput.getOpcode() != ISD::LOAD)
      return SDValue();

    SDValue PreviousInput = IsRoundOfExtLoad ?
      N->getOperand(i - 1).getOperand(0) : N->getOperand(i - 1);
    LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput);
    LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput);

    // If any inputs are fp_round(extload), they all must be.
    if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD)
      return SDValue();

    if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG))
      InputsAreConsecutiveLoads = false;
    if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG))
      InputsAreReverseConsecutive = false;

    // Exit early if the loads are neither consecutive nor reverse consecutive.
    if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
      return SDValue();
  }

  assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
         "The loads cannot be both consecutive and reverse consecutive.");

  SDValue FirstLoadOp =
    IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput;
  SDValue LastLoadOp =
    IsRoundOfExtLoad ?
                     N->getOperand(N->getNumOperands() - 1).getOperand(0) :
                     N->getOperand(N->getNumOperands() - 1);

  LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
  LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
  if (InputsAreConsecutiveLoads) {
    assert(LD1 && "Input needs to be a LoadSDNode.");
    return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
                       LD1->getBasePtr(), LD1->getPointerInfo(),
                       LD1->getAlignment());
  }
  if (InputsAreReverseConsecutive) {
    assert(LDL && "Input needs to be a LoadSDNode.");
    SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
                               LDL->getBasePtr(), LDL->getPointerInfo(),
                               LDL->getAlignment());
    SmallVector<int, 16> Ops;
    for (int i = N->getNumOperands() - 1; i >= 0; i--)
      Ops.push_back(i);

    return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
                                DAG.getUNDEF(N->getValueType(0)), Ops);
  }
  return SDValue();
}

// This function adds the vector_shuffle needed to get the elements of the
// vector extract into the correct position as specified by the CorrectElems
// encoding.
static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
                                      SDValue Input, uint64_t Elems,
                                      uint64_t CorrectElems) {
  SDLoc dl(N);

  unsigned NumElems = Input.getValueType().getVectorNumElements();
  SmallVector<int, 16> ShuffleMask(NumElems, -1);

  // Knowing the element indices being extracted from the original
  // vector and the order in which they're being inserted, just put
  // them at element indices required for the instruction.
  for (unsigned i = 0; i < N->getNumOperands(); i++) {
    if (DAG.getDataLayout().isLittleEndian())
      ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
    else
      ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
    CorrectElems = CorrectElems >> 8;
    Elems = Elems >> 8;
  }

  SDValue Shuffle =
    DAG.getVectorShuffle(Input.getValueType(), dl, Input,
                         DAG.getUNDEF(Input.getValueType()), ShuffleMask);

  EVT Ty = N->getValueType(0);
  SDValue BV = DAG.getNode(PPCISD::SExtVElems, dl, Ty, Shuffle);
  return BV;
}

// Look for build vector patterns where input operands come from sign
// extended vector_extract elements of specific indices. If the correct indices
// aren't used, add a vector shuffle to fix up the indices and create a new
// PPCISD::SExtVElems node which selects the vector sign extend instructions
// during instruction selection.
static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
  // This array encodes the indices that the vector sign extend instructions
  // extract from when extending from one type to another for both BE and LE.
  // The right nibble of each byte corresponds to the LE indices,
  // and the left nibble of each byte corresponds to the BE indices.
  // For example: 0x3074B8FC  byte->word
  //   For LE: the allowed indices are: 0x0,0x4,0x8,0xC
  //   For BE: the allowed indices are: 0x3,0x7,0xB,0xF
  // For example: 0x000070F8  byte->double word
  //   For LE: the allowed indices are: 0x0,0x8
  //   For BE: the allowed indices are: 0x7,0xF
  uint64_t TargetElems[] = {
      0x3074B8FC, // b->w
      0x000070F8, // b->d
      0x10325476, // h->w
      0x00003074, // h->d
      0x00001032, // w->d
  };

  uint64_t Elems = 0;
  int Index;
  SDValue Input;

  auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
    if (!Op)
      return false;
    if (Op.getOpcode() != ISD::SIGN_EXTEND &&
        Op.getOpcode() != ISD::SIGN_EXTEND_INREG)
      return false;

    // A SIGN_EXTEND_INREG might be fed by an ANY_EXTEND to produce a value
    // of the right width.
    SDValue Extract = Op.getOperand(0);
    if (Extract.getOpcode() == ISD::ANY_EXTEND)
      Extract = Extract.getOperand(0);
    if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return false;

    ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
    if (!ExtOp)
      return false;

    Index = ExtOp->getZExtValue();
    if (Input && Input != Extract.getOperand(0))
      return false;

    if (!Input)
      Input = Extract.getOperand(0);

    Elems = Elems << 8;
    Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
    Elems |= Index;

    return true;
  };

  // If the build vector operands aren't sign-extended vector extracts
  // of the same input vector, then return.
  for (unsigned i = 0; i < N->getNumOperands(); i++) {
    if (!isSExtOfVecExtract(N->getOperand(i))) {
      return SDValue();
    }
  }

  // If the vector extract indices are not correct, add the appropriate
  // vector_shuffle.
  int TgtElemArrayIdx;
  int InputSize = Input.getValueType().getScalarSizeInBits();
  int OutputSize = N->getValueType(0).getScalarSizeInBits();
  if (InputSize + OutputSize == 40)
    TgtElemArrayIdx = 0;
  else if (InputSize + OutputSize == 72)
    TgtElemArrayIdx = 1;
  else if (InputSize + OutputSize == 48)
    TgtElemArrayIdx = 2;
  else if (InputSize + OutputSize == 80)
    TgtElemArrayIdx = 3;
  else if (InputSize + OutputSize == 96)
    TgtElemArrayIdx = 4;
  else
    return SDValue();

  uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
  CorrectElems = DAG.getDataLayout().isLittleEndian()
                     ? CorrectElems & 0x0F0F0F0F0F0F0F0F
                     : CorrectElems & 0xF0F0F0F0F0F0F0F0;
  if (Elems != CorrectElems) {
    return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
  }

  // Regular lowering will catch cases where a shuffle is not needed.
  return SDValue();
}

SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         "Should be called with a BUILD_VECTOR node");

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);

  if (!Subtarget.hasVSX())
    return SDValue();

  // The target independent DAG combiner will leave a build_vector of
  // float-to-int conversions intact. We can generate MUCH better code for
  // a float-to-int conversion of a vector of floats.
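  //
  // For example (a sketch; the exact machine instructions selected depend on
  // the subtarget):
  //   (build_vector (fp_to_sint f64:$A), (fp_to_sint f64:$B))
  // is rewritten as
  //   (fp_to_sint (build_vector $A, $B))
  // so that a single vector conversion such as xvcvdpsxds can be selected
  // instead of two scalar conversions plus code to assemble the vector.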
13199 SDValue FirstInput = N->getOperand(0); 13200 if (FirstInput.getOpcode() == PPCISD::MFVSR) { 13201 SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI); 13202 if (Reduced) 13203 return Reduced; 13204 } 13205 13206 // If we're building a vector out of consecutive loads, just load that 13207 // vector type. 13208 SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG); 13209 if (Reduced) 13210 return Reduced; 13211 13212 // If we're building a vector out of extended elements from another vector, 13213 // we have P9 vector integer extend instructions. The code assumes legal 13214 // input types (i.e. it can't handle things like v4i16) so do not run before 13215 // legalization. 13216 if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) { 13217 Reduced = combineBVOfVecSExt(N, DAG); 13218 if (Reduced) 13219 return Reduced; 13220 } 13221 13222 13223 if (N->getValueType(0) != MVT::v2f64) 13224 return SDValue(); 13225 13226 // Looking for: 13227 // (build_vector ([su]int_to_fp (extractelt 0)), ([su]int_to_fp (extractelt 1))) 13228 if (FirstInput.getOpcode() != ISD::SINT_TO_FP && 13229 FirstInput.getOpcode() != ISD::UINT_TO_FP) 13230 return SDValue(); 13231 if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP && 13232 N->getOperand(1).getOpcode() != ISD::UINT_TO_FP) 13233 return SDValue(); 13234 if (FirstInput.getOpcode() != N->getOperand(1).getOpcode()) 13235 return SDValue(); 13236 13237 SDValue Ext1 = FirstInput.getOperand(0); 13238 SDValue Ext2 = N->getOperand(1).getOperand(0); 13239 if (Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT || 13240 Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT) 13241 return SDValue(); 13242 13243 ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1)); 13244 ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1)); 13245 if (!Ext1Op || !Ext2Op) 13246 return SDValue(); 13247 if (Ext1.getOperand(0).getValueType() != MVT::v4i32 || 13248 Ext1.getOperand(0) != Ext2.getOperand(0)) 13249 return SDValue(); 13250 13251 int FirstElem = Ext1Op->getZExtValue(); 13252 int SecondElem = Ext2Op->getZExtValue(); 13253 int SubvecIdx; 13254 if (FirstElem == 0 && SecondElem == 1) 13255 SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0; 13256 else if (FirstElem == 2 && SecondElem == 3) 13257 SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1; 13258 else 13259 return SDValue(); 13260 13261 SDValue SrcVec = Ext1.getOperand(0); 13262 auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ? 13263 PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP; 13264 return DAG.getNode(NodeType, dl, MVT::v2f64, 13265 SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl)); 13266 } 13267 13268 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N, 13269 DAGCombinerInfo &DCI) const { 13270 assert((N->getOpcode() == ISD::SINT_TO_FP || 13271 N->getOpcode() == ISD::UINT_TO_FP) && 13272 "Need an int -> FP conversion node here"); 13273 13274 if (useSoftFloat() || !Subtarget.has64BitSupport()) 13275 return SDValue(); 13276 13277 SelectionDAG &DAG = DCI.DAG; 13278 SDLoc dl(N); 13279 SDValue Op(N, 0); 13280 13281 // Don't handle ppc_fp128 here or conversions that are out-of-range capable 13282 // from the hardware.
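// Illustrative example of the sub-word load path handled below:
//   (f64 sint_to_fp (i16 load %p))
// becomes (FCFID (VEXTS (LXSIZX %p, 2))): the halfword is loaded directly
// into a VSR and sign-extended there, with no GPR round trip.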
13283 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) 13284 return SDValue(); 13285 if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) || 13286 Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64)) 13287 return SDValue(); 13288 13289 SDValue FirstOperand(Op.getOperand(0)); 13290 bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD && 13291 (FirstOperand.getValueType() == MVT::i8 || 13292 FirstOperand.getValueType() == MVT::i16); 13293 if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) { 13294 bool Signed = N->getOpcode() == ISD::SINT_TO_FP; 13295 bool DstDouble = Op.getValueType() == MVT::f64; 13296 unsigned ConvOp = Signed ? 13297 (DstDouble ? PPCISD::FCFID : PPCISD::FCFIDS) : 13298 (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS); 13299 SDValue WidthConst = 13300 DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2, 13301 dl, false); 13302 LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode()); 13303 SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst }; 13304 SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl, 13305 DAG.getVTList(MVT::f64, MVT::Other), 13306 Ops, MVT::i8, LDN->getMemOperand()); 13307 13308 // For signed conversion, we need to sign-extend the value in the VSR. 13309 if (Signed) { 13310 SDValue ExtOps[] = { Ld, WidthConst }; 13311 SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps); 13312 return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext); 13313 } else 13314 return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld); 13315 } 13316 13317 13318 // For i32 intermediate values, unfortunately, the conversion functions 13319 // leave the upper 32 bits of the value undefined. Within the set of 13320 // scalar instructions, we have no method for zero- or sign-extending the 13321 // value. Thus, we cannot handle i32 intermediate values here. 13322 if (Op.getOperand(0).getValueType() == MVT::i32) 13323 return SDValue(); 13324 13325 assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) && 13326 "UINT_TO_FP is supported only with FPCVT"); 13327 13328 // If we have FCFIDS, then use it when converting to single-precision. 13329 // Otherwise, convert to double-precision and then round. 13330 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) 13331 ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS 13332 : PPCISD::FCFIDS) 13333 : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU 13334 : PPCISD::FCFID); 13335 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) 13336 ? MVT::f32 13337 : MVT::f64; 13338 13339 // If we're converting from a float to an int and back to a float again, 13340 // then we don't need the store/load pair at all. 13341 if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT && 13342 Subtarget.hasFPCVT()) || 13343 (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) { 13344 SDValue Src = Op.getOperand(0).getOperand(0); 13345 if (Src.getValueType() == MVT::f32) { 13346 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 13347 DCI.AddToWorklist(Src.getNode()); 13348 } else if (Src.getValueType() != MVT::f64) { 13349 // Make sure that we don't pick up a ppc_fp128 source value. 13350 return SDValue(); 13351 } 13352 13353 unsigned FCTOp = 13354 Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ?
PPCISD::FCTIDZ : 13355 PPCISD::FCTIDUZ; 13356 13357 SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src); 13358 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp); 13359 13360 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) { 13361 FP = DAG.getNode(ISD::FP_ROUND, dl, 13362 MVT::f32, FP, DAG.getIntPtrConstant(0, dl)); 13363 DCI.AddToWorklist(FP.getNode()); 13364 } 13365 13366 return FP; 13367 } 13368 13369 return SDValue(); 13370 } 13371 13372 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for 13373 // builtins) into loads with swaps. 13374 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N, 13375 DAGCombinerInfo &DCI) const { 13376 SelectionDAG &DAG = DCI.DAG; 13377 SDLoc dl(N); 13378 SDValue Chain; 13379 SDValue Base; 13380 MachineMemOperand *MMO; 13381 13382 switch (N->getOpcode()) { 13383 default: 13384 llvm_unreachable("Unexpected opcode for little endian VSX load"); 13385 case ISD::LOAD: { 13386 LoadSDNode *LD = cast<LoadSDNode>(N); 13387 Chain = LD->getChain(); 13388 Base = LD->getBasePtr(); 13389 MMO = LD->getMemOperand(); 13390 // If the MMO suggests this isn't a load of a full vector, leave 13391 // things alone. For a built-in, we have to make the change for 13392 // correctness, so if there is a size problem that will be a bug. 13393 if (MMO->getSize() < 16) 13394 return SDValue(); 13395 break; 13396 } 13397 case ISD::INTRINSIC_W_CHAIN: { 13398 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N); 13399 Chain = Intrin->getChain(); 13400 // Similarly to the store case below, Intrin->getBasePtr() doesn't get 13401 // us what we want. Get operand 2 instead. 13402 Base = Intrin->getOperand(2); 13403 MMO = Intrin->getMemOperand(); 13404 break; 13405 } 13406 } 13407 13408 MVT VecTy = N->getValueType(0).getSimpleVT(); 13409 13410 // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is 13411 // aligned and the type is a vector with elements up to 4 bytes 13412 if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment()%16) 13413 && VecTy.getScalarSizeInBits() <= 32 ) { 13414 return SDValue(); 13415 } 13416 13417 SDValue LoadOps[] = { Chain, Base }; 13418 SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl, 13419 DAG.getVTList(MVT::v2f64, MVT::Other), 13420 LoadOps, MVT::v2f64, MMO); 13421 13422 DCI.AddToWorklist(Load.getNode()); 13423 Chain = Load.getValue(1); 13424 SDValue Swap = DAG.getNode( 13425 PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load); 13426 DCI.AddToWorklist(Swap.getNode()); 13427 13428 // Add a bitcast if the resulting load type doesn't match v2f64. 13429 if (VecTy != MVT::v2f64) { 13430 SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap); 13431 DCI.AddToWorklist(N.getNode()); 13432 // Package {bitcast value, swap's chain} to match Load's shape. 13433 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other), 13434 N, Swap.getValue(1)); 13435 } 13436 13437 return Swap; 13438 } 13439 13440 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for 13441 // builtins) into stores with swaps. 
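// For example (illustrative), on a little-endian subtarget a v4i32 store
// becomes
//   (STXVD2X (XXSWAPD (bitcast:v2f64 %val)), %ptr)
// so the doubleword-swapping store produces the correct memory image.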
13442 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N, 13443 DAGCombinerInfo &DCI) const { 13444 SelectionDAG &DAG = DCI.DAG; 13445 SDLoc dl(N); 13446 SDValue Chain; 13447 SDValue Base; 13448 unsigned SrcOpnd; 13449 MachineMemOperand *MMO; 13450 13451 switch (N->getOpcode()) { 13452 default: 13453 llvm_unreachable("Unexpected opcode for little endian VSX store"); 13454 case ISD::STORE: { 13455 StoreSDNode *ST = cast<StoreSDNode>(N); 13456 Chain = ST->getChain(); 13457 Base = ST->getBasePtr(); 13458 MMO = ST->getMemOperand(); 13459 SrcOpnd = 1; 13460 // If the MMO suggests this isn't a store of a full vector, leave 13461 // things alone. For a built-in, we have to make the change for 13462 // correctness, so if there is a size problem that will be a bug. 13463 if (MMO->getSize() < 16) 13464 return SDValue(); 13465 break; 13466 } 13467 case ISD::INTRINSIC_VOID: { 13468 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N); 13469 Chain = Intrin->getChain(); 13470 // Intrin->getBasePtr() oddly does not get what we want. 13471 Base = Intrin->getOperand(3); 13472 MMO = Intrin->getMemOperand(); 13473 SrcOpnd = 2; 13474 break; 13475 } 13476 } 13477 13478 SDValue Src = N->getOperand(SrcOpnd); 13479 MVT VecTy = Src.getValueType().getSimpleVT(); 13480 13481 // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the store is 13482 // aligned and the type is a vector with elements up to 4 bytes 13483 if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment()%16) 13484 && VecTy.getScalarSizeInBits() <= 32 ) { 13485 return SDValue(); 13486 } 13487 13488 // All stores are done as v2f64 with a possible bitcast. 13489 if (VecTy != MVT::v2f64) { 13490 Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src); 13491 DCI.AddToWorklist(Src.getNode()); 13492 } 13493 13494 SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl, 13495 DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src); 13496 DCI.AddToWorklist(Swap.getNode()); 13497 Chain = Swap.getValue(1); 13498 SDValue StoreOps[] = { Chain, Swap, Base }; 13499 SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl, 13500 DAG.getVTList(MVT::Other), 13501 StoreOps, VecTy, MMO); 13502 DCI.AddToWorklist(Store.getNode()); 13503 return Store; 13504 } 13505 13506 // Handle DAG combine for STORE (FP_TO_INT F). 13507 SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N, 13508 DAGCombinerInfo &DCI) const { 13509 13510 SelectionDAG &DAG = DCI.DAG; 13511 SDLoc dl(N); 13512 unsigned Opcode = N->getOperand(1).getOpcode(); 13513 13514 assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) 13515 && "Not a FP_TO_INT Instruction!"); 13516 13517 SDValue Val = N->getOperand(1).getOperand(0); 13518 EVT Op1VT = N->getOperand(1).getValueType(); 13519 EVT ResVT = Val.getValueType(); 13520 13521 // Floating point types smaller than 32 bits are not legal on Power. 13522 if (ResVT.getScalarSizeInBits() < 32) 13523 return SDValue(); 13524 13525 // Only perform combine for conversion to i64/i32 or power9 i16/i8.
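// Illustrative example: (store (i32 fp_to_sint f64 %f), %p) becomes
//   (ST_VSR_SCAL_INT chain, (FP_TO_SINT_IN_VSR %f), %p, 4)
// converting and storing straight from the VSR (stfiwx-style) without a
// trip through a GPR.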
13526 bool ValidTypeForStoreFltAsInt = 13527 (Op1VT == MVT::i32 || Op1VT == MVT::i64 || 13528 (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8))); 13529 13530 if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Altivec() || 13531 cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt) 13532 return SDValue(); 13533 13534 // Extend f32 values to f64. 13535 if (ResVT.getScalarSizeInBits() == 32) { 13536 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val); 13537 DCI.AddToWorklist(Val.getNode()); 13538 } 13539 13540 // Set signed or unsigned conversion opcode. 13541 unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT) ? 13542 PPCISD::FP_TO_SINT_IN_VSR : 13543 PPCISD::FP_TO_UINT_IN_VSR; 13544 13545 Val = DAG.getNode(ConvOpcode, 13546 dl, ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val); 13547 DCI.AddToWorklist(Val.getNode()); 13548 13549 // Set number of bytes being converted. 13550 unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8; 13551 SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2), 13552 DAG.getIntPtrConstant(ByteSize, dl, false), 13553 DAG.getValueType(Op1VT) }; 13554 13555 Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl, 13556 DAG.getVTList(MVT::Other), Ops, 13557 cast<StoreSDNode>(N)->getMemoryVT(), 13558 cast<StoreSDNode>(N)->getMemOperand()); 13559 13560 DCI.AddToWorklist(Val.getNode()); 13561 return Val; 13562 } 13563 13564 SDValue PPCTargetLowering::combineVReverseMemOP(ShuffleVectorSDNode *SVN, 13565 LSBaseSDNode *LSBase, 13566 DAGCombinerInfo &DCI) const { 13567 assert((ISD::isNormalLoad(LSBase) || ISD::isNormalStore(LSBase)) && 13568 "Not a reverse memop pattern!"); 13569 13570 auto IsElementReverse = [](const ShuffleVectorSDNode *SVN) -> bool { 13571 auto Mask = SVN->getMask(); 13572 int i = 0; 13573 auto I = Mask.rbegin(); 13574 auto E = Mask.rend(); 13575 13576 for (; I != E; ++I) { 13577 if (*I != i) 13578 return false; 13579 i++; 13580 } 13581 return true; 13582 }; 13583 13584 SelectionDAG &DAG = DCI.DAG; 13585 EVT VT = SVN->getValueType(0); 13586 13587 if (!isTypeLegal(VT) || !Subtarget.isLittleEndian() || !Subtarget.hasVSX()) 13588 return SDValue(); 13589 13590 // Before P9, we have the PPCVSXSwapRemoval pass to fix up the element order. 13591 // See the comment in PPCVSXSwapRemoval.cpp. 13592 // This combine conflicts with that optimization, so we don't do it pre-P9.
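// On P9, e.g. (illustrative):
//   (v16i8 vector_shuffle<15,14,...,1,0> (load %p), undef)
// becomes (LOAD_VEC_BE %p), a single byte-reversed (lxvb16x-style) load.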
13593 if (!Subtarget.hasP9Vector()) 13594 return SDValue(); 13595 13596 if(!IsElementReverse(SVN)) 13597 return SDValue(); 13598 13599 if (LSBase->getOpcode() == ISD::LOAD) { 13600 SDLoc dl(SVN); 13601 SDValue LoadOps[] = {LSBase->getChain(), LSBase->getBasePtr()}; 13602 return DAG.getMemIntrinsicNode( 13603 PPCISD::LOAD_VEC_BE, dl, DAG.getVTList(VT, MVT::Other), LoadOps, 13604 LSBase->getMemoryVT(), LSBase->getMemOperand()); 13605 } 13606 13607 if (LSBase->getOpcode() == ISD::STORE) { 13608 SDLoc dl(LSBase); 13609 SDValue StoreOps[] = {LSBase->getChain(), SVN->getOperand(0), 13610 LSBase->getBasePtr()}; 13611 return DAG.getMemIntrinsicNode( 13612 PPCISD::STORE_VEC_BE, dl, DAG.getVTList(MVT::Other), StoreOps, 13613 LSBase->getMemoryVT(), LSBase->getMemOperand()); 13614 } 13615 13616 llvm_unreachable("Expected a load or store node here"); 13617 } 13618 13619 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, 13620 DAGCombinerInfo &DCI) const { 13621 SelectionDAG &DAG = DCI.DAG; 13622 SDLoc dl(N); 13623 switch (N->getOpcode()) { 13624 default: break; 13625 case ISD::ADD: 13626 return combineADD(N, DCI); 13627 case ISD::SHL: 13628 return combineSHL(N, DCI); 13629 case ISD::SRA: 13630 return combineSRA(N, DCI); 13631 case ISD::SRL: 13632 return combineSRL(N, DCI); 13633 case ISD::MUL: 13634 return combineMUL(N, DCI); 13635 case PPCISD::SHL: 13636 if (isNullConstant(N->getOperand(0))) // 0 << V -> 0. 13637 return N->getOperand(0); 13638 break; 13639 case PPCISD::SRL: 13640 if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0. 13641 return N->getOperand(0); 13642 break; 13643 case PPCISD::SRA: 13644 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 13645 if (C->isNullValue() || // 0 >>s V -> 0. 13646 C->isAllOnesValue()) // -1 >>s V -> -1. 13647 return N->getOperand(0); 13648 } 13649 break; 13650 case ISD::SIGN_EXTEND: 13651 case ISD::ZERO_EXTEND: 13652 case ISD::ANY_EXTEND: 13653 return DAGCombineExtBoolTrunc(N, DCI); 13654 case ISD::TRUNCATE: 13655 return combineTRUNCATE(N, DCI); 13656 case ISD::SETCC: 13657 if (SDValue CSCC = combineSetCC(N, DCI)) 13658 return CSCC; 13659 LLVM_FALLTHROUGH; 13660 case ISD::SELECT_CC: 13661 return DAGCombineTruncBoolExt(N, DCI); 13662 case ISD::SINT_TO_FP: 13663 case ISD::UINT_TO_FP: 13664 return combineFPToIntToFP(N, DCI); 13665 case ISD::VECTOR_SHUFFLE: 13666 if (ISD::isNormalLoad(N->getOperand(0).getNode())) { 13667 LSBaseSDNode* LSBase = cast<LSBaseSDNode>(N->getOperand(0)); 13668 return combineVReverseMemOP(cast<ShuffleVectorSDNode>(N), LSBase, DCI); 13669 } 13670 break; 13671 case ISD::STORE: { 13672 13673 EVT Op1VT = N->getOperand(1).getValueType(); 13674 unsigned Opcode = N->getOperand(1).getOpcode(); 13675 13676 if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) { 13677 SDValue Val= combineStoreFPToInt(N, DCI); 13678 if (Val) 13679 return Val; 13680 } 13681 13682 if (Opcode == ISD::VECTOR_SHUFFLE && ISD::isNormalStore(N)) { 13683 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N->getOperand(1)); 13684 SDValue Val= combineVReverseMemOP(SVN, cast<LSBaseSDNode>(N), DCI); 13685 if (Val) 13686 return Val; 13687 } 13688 13689 // Turn STORE (BSWAP) -> sthbrx/stwbrx. 
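// E.g. (illustrative): (store (i32 bswap %x), %p) becomes
//   (STBRX %x, %p, i32), which selects to a single stwbrx.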
13690 if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP && 13691 N->getOperand(1).getNode()->hasOneUse() && 13692 (Op1VT == MVT::i32 || Op1VT == MVT::i16 || 13693 (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) { 13694 13695 // STBRX can only handle simple types, and it makes no sense to store fewer 13696 // than two bytes in byte-reversed order. 13697 EVT mVT = cast<StoreSDNode>(N)->getMemoryVT(); 13698 if (mVT.isExtended() || mVT.getSizeInBits() < 16) 13699 break; 13700 13701 SDValue BSwapOp = N->getOperand(1).getOperand(0); 13702 // Do an any-extend to 32-bits if this is a half-word input. 13703 if (BSwapOp.getValueType() == MVT::i16) 13704 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp); 13705 13706 // If the type of the BSWAP operand is wider than the stored memory width, 13707 // it needs to be shifted to the right before the STBRX. 13708 if (Op1VT.bitsGT(mVT)) { 13709 int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits(); 13710 BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp, 13711 DAG.getConstant(Shift, dl, MVT::i32)); 13712 // Need to truncate if this is a bswap of i64 stored as i32/i16. 13713 if (Op1VT == MVT::i64) 13714 BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp); 13715 } 13716 13717 SDValue Ops[] = { 13718 N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT) 13719 }; 13720 return 13721 DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other), 13722 Ops, cast<StoreSDNode>(N)->getMemoryVT(), 13723 cast<StoreSDNode>(N)->getMemOperand()); 13724 } 13725 13726 // STORE Constant:i32<0> -> STORE<trunc to i32> Constant:i64<0> 13727 // This can increase the chance of CSE for constant construction. 13728 if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() && 13729 isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) { 13730 // Need to sign-extend to 64 bits to handle negative values. 13731 EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT(); 13732 uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1), 13733 MemVT.getSizeInBits()); 13734 SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64); 13735 13736 // DAG.getTruncStore() can't be used here because it doesn't accept 13737 // the general (base + offset) addressing mode. 13738 // So we use UpdateNodeOperands and setTruncatingStore instead. 13739 DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2), 13740 N->getOperand(3)); 13741 cast<StoreSDNode>(N)->setTruncatingStore(true); 13742 return SDValue(N, 0); 13743 } 13744 13745 // For little endian, VSX stores require generating xxswapd/stxvd2x. 13746 // Not needed on ISA 3.0 based CPUs since we have a non-permuting store. 13747 if (Op1VT.isSimple()) { 13748 MVT StoreVT = Op1VT.getSimpleVT(); 13749 if (Subtarget.needsSwapsForVSXMemOps() && 13750 (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 || 13751 StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32)) 13752 return expandVSXStoreForLE(N, DCI); 13753 } 13754 break; 13755 } 13756 case ISD::LOAD: { 13757 LoadSDNode *LD = cast<LoadSDNode>(N); 13758 EVT VT = LD->getValueType(0); 13759 13760 // For little endian, VSX loads require generating lxvd2x/xxswapd. 13761 // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
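// E.g. (illustrative): on a pre-P9 little-endian CPU, a v4i32 load is
// rewritten by expandVSXLoadForLE into (bitcast (XXSWAPD (LXVD2X %p))).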
13762 if (VT.isSimple()) { 13763 MVT LoadVT = VT.getSimpleVT(); 13764 if (Subtarget.needsSwapsForVSXMemOps() && 13765 (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 || 13766 LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32)) 13767 return expandVSXLoadForLE(N, DCI); 13768 } 13769 13770 // We sometimes end up with a 64-bit integer load, from which we extract 13771 // two single-precision floating-point numbers. This happens with 13772 // std::complex<float>, and other similar structures, because of the way we 13773 // canonicalize structure copies. However, if we lack direct moves, 13774 // then the final bitcasts from the extracted integer values to the 13775 // floating-point numbers turn into store/load pairs. Even with direct moves, 13776 // just loading the two floating-point numbers is likely better. 13777 auto ReplaceTwoFloatLoad = [&]() { 13778 if (VT != MVT::i64) 13779 return false; 13780 13781 if (LD->getExtensionType() != ISD::NON_EXTLOAD || 13782 LD->isVolatile()) 13783 return false; 13784 13785 // We're looking for a sequence like this: 13786 // t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64 13787 // t16: i64 = srl t13, Constant:i32<32> 13788 // t17: i32 = truncate t16 13789 // t18: f32 = bitcast t17 13790 // t19: i32 = truncate t13 13791 // t20: f32 = bitcast t19 13792 13793 if (!LD->hasNUsesOfValue(2, 0)) 13794 return false; 13795 13796 auto UI = LD->use_begin(); 13797 while (UI.getUse().getResNo() != 0) ++UI; 13798 SDNode *Trunc = *UI++; 13799 while (UI.getUse().getResNo() != 0) ++UI; 13800 SDNode *RightShift = *UI; 13801 if (Trunc->getOpcode() != ISD::TRUNCATE) 13802 std::swap(Trunc, RightShift); 13803 13804 if (Trunc->getOpcode() != ISD::TRUNCATE || 13805 Trunc->getValueType(0) != MVT::i32 || 13806 !Trunc->hasOneUse()) 13807 return false; 13808 if (RightShift->getOpcode() != ISD::SRL || 13809 !isa<ConstantSDNode>(RightShift->getOperand(1)) || 13810 RightShift->getConstantOperandVal(1) != 32 || 13811 !RightShift->hasOneUse()) 13812 return false; 13813 13814 SDNode *Trunc2 = *RightShift->use_begin(); 13815 if (Trunc2->getOpcode() != ISD::TRUNCATE || 13816 Trunc2->getValueType(0) != MVT::i32 || 13817 !Trunc2->hasOneUse()) 13818 return false; 13819 13820 SDNode *Bitcast = *Trunc->use_begin(); 13821 SDNode *Bitcast2 = *Trunc2->use_begin(); 13822 13823 if (Bitcast->getOpcode() != ISD::BITCAST || 13824 Bitcast->getValueType(0) != MVT::f32) 13825 return false; 13826 if (Bitcast2->getOpcode() != ISD::BITCAST || 13827 Bitcast2->getValueType(0) != MVT::f32) 13828 return false; 13829 13830 if (Subtarget.isLittleEndian()) 13831 std::swap(Bitcast, Bitcast2); 13832 13833 // Bitcast has the second float (in memory-layout order) and Bitcast2 13834 // has the first one. 
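// (Illustrative: the i64 load is replaced below by two f32 loads, at %ptr
// and %ptr+4, that feed the two bitcast users directly.)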
13835 13836 SDValue BasePtr = LD->getBasePtr(); 13837 if (LD->isIndexed()) { 13838 assert(LD->getAddressingMode() == ISD::PRE_INC && 13839 "Non-pre-inc AM on PPC?"); 13840 BasePtr = 13841 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 13842 LD->getOffset()); 13843 } 13844 13845 auto MMOFlags = 13846 LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile; 13847 SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr, 13848 LD->getPointerInfo(), LD->getAlignment(), 13849 MMOFlags, LD->getAAInfo()); 13850 SDValue AddPtr = 13851 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), 13852 BasePtr, DAG.getIntPtrConstant(4, dl)); 13853 SDValue FloatLoad2 = DAG.getLoad( 13854 MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr, 13855 LD->getPointerInfo().getWithOffset(4), 13856 MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo()); 13857 13858 if (LD->isIndexed()) { 13859 // Note that DAGCombine should re-form any pre-increment load(s) from 13860 // what is produced here if that makes sense. 13861 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr); 13862 } 13863 13864 DCI.CombineTo(Bitcast2, FloatLoad); 13865 DCI.CombineTo(Bitcast, FloatLoad2); 13866 13867 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 2 : 1), 13868 SDValue(FloatLoad2.getNode(), 1)); 13869 return true; 13870 }; 13871 13872 if (ReplaceTwoFloatLoad()) 13873 return SDValue(N, 0); 13874 13875 EVT MemVT = LD->getMemoryVT(); 13876 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext()); 13877 unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty); 13878 Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext()); 13879 unsigned ScalarABIAlignment = DAG.getDataLayout().getABITypeAlignment(STy); 13880 if (LD->isUnindexed() && VT.isVector() && 13881 ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) && 13882 // P8 and later hardware should just use LOAD. 13883 !Subtarget.hasP8Vector() && (VT == MVT::v16i8 || VT == MVT::v8i16 || 13884 VT == MVT::v4i32 || VT == MVT::v4f32)) || 13885 (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) && 13886 LD->getAlignment() >= ScalarABIAlignment)) && 13887 LD->getAlignment() < ABIAlignment) { 13888 // This is a type-legal unaligned Altivec or QPX load. 13889 SDValue Chain = LD->getChain(); 13890 SDValue Ptr = LD->getBasePtr(); 13891 bool isLittleEndian = Subtarget.isLittleEndian(); 13892 13893 // This implements the loading of unaligned vectors as described in 13894 // the venerable Apple Velocity Engine overview. Specifically: 13895 // https://developer.apple.com/hardwaredrivers/ve/alignment.html 13896 // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html 13897 // 13898 // The general idea is to expand a sequence of one or more unaligned 13899 // loads into an alignment-based permutation-control instruction (lvsl 13900 // or lvsr), a series of regular vector loads (which always truncate 13901 // their input address to an aligned address), and a series of 13902 // permutations. The results of these permutations are the requested 13903 // loaded values. The trick is that the last "extra" load is not taken 13904 // from the address you might suspect (sizeof(vector) bytes after the 13905 // last requested load), but rather sizeof(vector) - 1 bytes after the 13906 // last requested vector. The point of this is to avoid a page fault if 13907 // the base address happened to be aligned. 
This works because if the 13908 // base address is aligned, then adding less than a full vector length 13909 // will cause the last vector in the sequence to be (re)loaded. 13910 // Otherwise, the next vector will be fetched as you might suspect was 13911 // necessary. 13912 13913 // We might be able to reuse the permutation generation from 13914 // a different base address offset from this one by an aligned amount. 13915 // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this 13916 // optimization later. 13917 Intrinsic::ID Intr, IntrLD, IntrPerm; 13918 MVT PermCntlTy, PermTy, LDTy; 13919 if (Subtarget.hasAltivec()) { 13920 Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr : 13921 Intrinsic::ppc_altivec_lvsl; 13922 IntrLD = Intrinsic::ppc_altivec_lvx; 13923 IntrPerm = Intrinsic::ppc_altivec_vperm; 13924 PermCntlTy = MVT::v16i8; 13925 PermTy = MVT::v4i32; 13926 LDTy = MVT::v4i32; 13927 } else { 13928 Intr = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld : 13929 Intrinsic::ppc_qpx_qvlpcls; 13930 IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd : 13931 Intrinsic::ppc_qpx_qvlfs; 13932 IntrPerm = Intrinsic::ppc_qpx_qvfperm; 13933 PermCntlTy = MVT::v4f64; 13934 PermTy = MVT::v4f64; 13935 LDTy = MemVT.getSimpleVT(); 13936 } 13937 13938 SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy); 13939 13940 // Create the new MMO for the new base load. It is like the original MMO, 13941 // but represents an area in memory almost twice the vector size centered 13942 // on the original address. If the address is unaligned, we might start 13943 // reading up to (sizeof(vector)-1) bytes below the address of the 13944 // original unaligned load. 13945 MachineFunction &MF = DAG.getMachineFunction(); 13946 MachineMemOperand *BaseMMO = 13947 MF.getMachineMemOperand(LD->getMemOperand(), 13948 -(long)MemVT.getStoreSize()+1, 13949 2*MemVT.getStoreSize()-1); 13950 13951 // Create the new base load. 13952 SDValue LDXIntID = 13953 DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout())); 13954 SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr }; 13955 SDValue BaseLoad = 13956 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 13957 DAG.getVTList(PermTy, MVT::Other), 13958 BaseLoadOps, LDTy, BaseMMO); 13959 13960 // Note that the value of IncOffset (which is provided to the next 13961 // load's pointer info offset value, and thus used to calculate the 13962 // alignment), and the value of IncValue (which is actually used to 13963 // increment the pointer value) are different! This is because we 13964 // require the next load to appear to be aligned, even though it 13965 // is actually offset from the base pointer by a lesser amount. 13966 int IncOffset = VT.getSizeInBits() / 8; 13967 int IncValue = IncOffset; 13968 13969 // Walk (both up and down) the chain looking for another load at the real 13970 // (aligned) offset (the alignment of the other load does not matter in 13971 // this case). If found, then do not use the offset reduction trick, as 13972 // that will prevent the loads from being later combined (as they would 13973 // otherwise be duplicates). 
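// (Illustrative: for a 16-byte vector with no consecutive load found,
// IncValue becomes 15, so the "extra" load cannot touch the next page
// when the base address happens to be aligned.)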
13974 if (!findConsecutiveLoad(LD, DAG)) 13975 --IncValue; 13976 13977 SDValue Increment = 13978 DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout())); 13979 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 13980 13981 MachineMemOperand *ExtraMMO = 13982 MF.getMachineMemOperand(LD->getMemOperand(), 13983 1, 2*MemVT.getStoreSize()-1); 13984 SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr }; 13985 SDValue ExtraLoad = 13986 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 13987 DAG.getVTList(PermTy, MVT::Other), 13988 ExtraLoadOps, LDTy, ExtraMMO); 13989 13990 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 13991 BaseLoad.getValue(1), ExtraLoad.getValue(1)); 13992 13993 // Because vperm has a big-endian bias, we must reverse the order 13994 // of the input vectors and complement the permute control vector 13995 // when generating little endian code. We have already handled the 13996 // latter by using lvsr instead of lvsl, so just reverse BaseLoad 13997 // and ExtraLoad here. 13998 SDValue Perm; 13999 if (isLittleEndian) 14000 Perm = BuildIntrinsicOp(IntrPerm, 14001 ExtraLoad, BaseLoad, PermCntl, DAG, dl); 14002 else 14003 Perm = BuildIntrinsicOp(IntrPerm, 14004 BaseLoad, ExtraLoad, PermCntl, DAG, dl); 14005 14006 if (VT != PermTy) 14007 Perm = Subtarget.hasAltivec() ? 14008 DAG.getNode(ISD::BITCAST, dl, VT, Perm) : 14009 DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX 14010 DAG.getTargetConstant(1, dl, MVT::i64)); 14011 // second argument is 1 because this rounding 14012 // is always exact. 14013 14014 // The output of the permutation is our loaded result, the TokenFactor is 14015 // our new chain. 14016 DCI.CombineTo(N, Perm, TF); 14017 return SDValue(N, 0); 14018 } 14019 } 14020 break; 14021 case ISD::INTRINSIC_WO_CHAIN: { 14022 bool isLittleEndian = Subtarget.isLittleEndian(); 14023 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 14024 Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr 14025 : Intrinsic::ppc_altivec_lvsl); 14026 if ((IID == Intr || 14027 IID == Intrinsic::ppc_qpx_qvlpcld || 14028 IID == Intrinsic::ppc_qpx_qvlpcls) && 14029 N->getOperand(1)->getOpcode() == ISD::ADD) { 14030 SDValue Add = N->getOperand(1); 14031 14032 int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ? 14033 5 /* 32 byte alignment */ : 4 /* 16 byte alignment */; 14034 14035 if (DAG.MaskedValueIsZero(Add->getOperand(1), 14036 APInt::getAllOnesValue(Bits /* alignment */) 14037 .zext(Add.getScalarValueSizeInBits()))) { 14038 SDNode *BasePtr = Add->getOperand(0).getNode(); 14039 for (SDNode::use_iterator UI = BasePtr->use_begin(), 14040 UE = BasePtr->use_end(); 14041 UI != UE; ++UI) { 14042 if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 14043 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) { 14044 // We've found another LVSL/LVSR, and this address is an aligned 14045 // multiple of that one. The results will be the same, so use the 14046 // one we've just found instead. 
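// (E.g., lvsl(%p) and lvsl(%p + 16) produce identical permute control
// vectors, since only the low four bits of the address matter.)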
14047 14048 return SDValue(*UI, 0); 14049 } 14050 } 14051 } 14052 14053 if (isa<ConstantSDNode>(Add->getOperand(1))) { 14054 SDNode *BasePtr = Add->getOperand(0).getNode(); 14055 for (SDNode::use_iterator UI = BasePtr->use_begin(), 14056 UE = BasePtr->use_end(); UI != UE; ++UI) { 14057 if (UI->getOpcode() == ISD::ADD && 14058 isa<ConstantSDNode>(UI->getOperand(1)) && 14059 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() - 14060 cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) % 14061 (1ULL << Bits) == 0) { 14062 SDNode *OtherAdd = *UI; 14063 for (SDNode::use_iterator VI = OtherAdd->use_begin(), 14064 VE = OtherAdd->use_end(); VI != VE; ++VI) { 14065 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 14066 cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) { 14067 return SDValue(*VI, 0); 14068 } 14069 } 14070 } 14071 } 14072 } 14073 } 14074 14075 // Combine vmaxsw/h/b(a, a's negation) into abs(a). 14076 // This exposes the vabsduw/h/b opportunity downstream. 14077 if (!DCI.isAfterLegalizeDAG() && Subtarget.hasP9Altivec() && 14078 (IID == Intrinsic::ppc_altivec_vmaxsw || 14079 IID == Intrinsic::ppc_altivec_vmaxsh || 14080 IID == Intrinsic::ppc_altivec_vmaxsb)) { 14081 SDValue V1 = N->getOperand(1); 14082 SDValue V2 = N->getOperand(2); 14083 if ((V1.getSimpleValueType() == MVT::v4i32 || 14084 V1.getSimpleValueType() == MVT::v8i16 || 14085 V1.getSimpleValueType() == MVT::v16i8) && 14086 V1.getSimpleValueType() == V2.getSimpleValueType()) { 14087 // (0-a, a) 14088 if (V1.getOpcode() == ISD::SUB && 14089 ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) && 14090 V1.getOperand(1) == V2) { 14091 return DAG.getNode(ISD::ABS, dl, V2.getValueType(), V2); 14092 } 14093 // (a, 0-a) 14094 if (V2.getOpcode() == ISD::SUB && 14095 ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) && 14096 V2.getOperand(1) == V1) { 14097 return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1); 14098 } 14099 // (x-y, y-x) 14100 if (V1.getOpcode() == ISD::SUB && V2.getOpcode() == ISD::SUB && 14101 V1.getOperand(0) == V2.getOperand(1) && 14102 V1.getOperand(1) == V2.getOperand(0)) { 14103 return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1); 14104 } 14105 } 14106 } 14107 } 14108 14109 break; 14110 case ISD::INTRINSIC_W_CHAIN: 14111 // For little endian, VSX loads require generating lxvd2x/xxswapd. 14112 // Not needed on ISA 3.0 based CPUs since we have a non-permuting load. 14113 if (Subtarget.needsSwapsForVSXMemOps()) { 14114 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 14115 default: 14116 break; 14117 case Intrinsic::ppc_vsx_lxvw4x: 14118 case Intrinsic::ppc_vsx_lxvd2x: 14119 return expandVSXLoadForLE(N, DCI); 14120 } 14121 } 14122 break; 14123 case ISD::INTRINSIC_VOID: 14124 // For little endian, VSX stores require generating xxswapd/stxvd2x. 14125 // Not needed on ISA 3.0 based CPUs since we have a non-permuting store. 14126 if (Subtarget.needsSwapsForVSXMemOps()) { 14127 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 14128 default: 14129 break; 14130 case Intrinsic::ppc_vsx_stxvw4x: 14131 case Intrinsic::ppc_vsx_stxvd2x: 14132 return expandVSXStoreForLE(N, DCI); 14133 } 14134 } 14135 break; 14136 case ISD::BSWAP: 14137 // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
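// E.g. (illustrative): (i32 bswap (i32 load %p)) becomes (LBRX %p, i32),
// a single lwbrx that byte-reverses as it loads.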
14138 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) && 14139 N->getOperand(0).hasOneUse() && 14140 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 || 14141 (Subtarget.hasLDBRX() && Subtarget.isPPC64() && 14142 N->getValueType(0) == MVT::i64))) { 14143 SDValue Load = N->getOperand(0); 14144 LoadSDNode *LD = cast<LoadSDNode>(Load); 14145 // Create the byte-swapping load. 14146 SDValue Ops[] = { 14147 LD->getChain(), // Chain 14148 LD->getBasePtr(), // Ptr 14149 DAG.getValueType(N->getValueType(0)) // VT 14150 }; 14151 SDValue BSLoad = 14152 DAG.getMemIntrinsicNode(PPCISD::LBRX, dl, 14153 DAG.getVTList(N->getValueType(0) == MVT::i64 ? 14154 MVT::i64 : MVT::i32, MVT::Other), 14155 Ops, LD->getMemoryVT(), LD->getMemOperand()); 14156 14157 // If this is an i16 load, insert the truncate. 14158 SDValue ResVal = BSLoad; 14159 if (N->getValueType(0) == MVT::i16) 14160 ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad); 14161 14162 // First, combine the bswap away. This makes the value produced by the 14163 // load dead. 14164 DCI.CombineTo(N, ResVal); 14165 14166 // Next, combine the load away, we give it a bogus result value but a real 14167 // chain result. The result value is dead because the bswap is dead. 14168 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1)); 14169 14170 // Return N so it doesn't get rechecked! 14171 return SDValue(N, 0); 14172 } 14173 break; 14174 case PPCISD::VCMP: 14175 // If a VCMPo node already exists with exactly the same operands as this 14176 // node, use its result instead of this node (VCMPo computes both a CR6 and 14177 // a normal output). 14178 // 14179 if (!N->getOperand(0).hasOneUse() && 14180 !N->getOperand(1).hasOneUse() && 14181 !N->getOperand(2).hasOneUse()) { 14182 14183 // Scan all of the users of the LHS, looking for VCMPo's that match. 14184 SDNode *VCMPoNode = nullptr; 14185 14186 SDNode *LHSN = N->getOperand(0).getNode(); 14187 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end(); 14188 UI != E; ++UI) 14189 if (UI->getOpcode() == PPCISD::VCMPo && 14190 UI->getOperand(1) == N->getOperand(1) && 14191 UI->getOperand(2) == N->getOperand(2) && 14192 UI->getOperand(0) == N->getOperand(0)) { 14193 VCMPoNode = *UI; 14194 break; 14195 } 14196 14197 // If there is no VCMPo node, or if the flag value has a single use, don't 14198 // transform this. 14199 if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1)) 14200 break; 14201 14202 // Look at the (necessarily single) use of the flag value. If it has a 14203 // chain, this transformation is more complex. Note that multiple things 14204 // could use the value result, which we should ignore. 14205 SDNode *FlagUser = nullptr; 14206 for (SDNode::use_iterator UI = VCMPoNode->use_begin(); 14207 FlagUser == nullptr; ++UI) { 14208 assert(UI != VCMPoNode->use_end() && "Didn't find user!"); 14209 SDNode *User = *UI; 14210 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) { 14211 if (User->getOperand(i) == SDValue(VCMPoNode, 1)) { 14212 FlagUser = User; 14213 break; 14214 } 14215 } 14216 } 14217 14218 // If the user is a MFOCRF instruction, we know this is safe. 14219 // Otherwise we give up for right now. 
14220 if (FlagUser->getOpcode() == PPCISD::MFOCRF) 14221 return SDValue(VCMPoNode, 0); 14222 } 14223 break; 14224 case ISD::BRCOND: { 14225 SDValue Cond = N->getOperand(1); 14226 SDValue Target = N->getOperand(2); 14227 14228 if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN && 14229 cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() == 14230 Intrinsic::loop_decrement) { 14231 14232 // We now need to make the intrinsic dead (it cannot be instruction 14233 // selected). 14234 DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0)); 14235 assert(Cond.getNode()->hasOneUse() && 14236 "Counter decrement has more than one use"); 14237 14238 return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other, 14239 N->getOperand(0), Target); 14240 } 14241 } 14242 break; 14243 case ISD::BR_CC: { 14244 // If this is a branch on an altivec predicate comparison, lower this so 14245 // that we don't have to do a MFOCRF: instead, branch directly on CR6. This 14246 // lowering is done pre-legalize, because the legalizer lowers the predicate 14247 // compare down to code that is difficult to reassemble. 14248 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 14249 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3); 14250 14251 // Sometimes the promoted value of the intrinsic is ANDed by some non-zero 14252 // value. If so, pass-through the AND to get to the intrinsic. 14253 if (LHS.getOpcode() == ISD::AND && 14254 LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN && 14255 cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() == 14256 Intrinsic::loop_decrement && 14257 isa<ConstantSDNode>(LHS.getOperand(1)) && 14258 !isNullConstant(LHS.getOperand(1))) 14259 LHS = LHS.getOperand(0); 14260 14261 if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN && 14262 cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() == 14263 Intrinsic::loop_decrement && 14264 isa<ConstantSDNode>(RHS)) { 14265 assert((CC == ISD::SETEQ || CC == ISD::SETNE) && 14266 "Counter decrement comparison is not EQ or NE"); 14267 14268 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 14269 bool isBDNZ = (CC == ISD::SETEQ && Val) || 14270 (CC == ISD::SETNE && !Val); 14271 14272 // We now need to make the intrinsic dead (it cannot be instruction 14273 // selected). 14274 DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0)); 14275 assert(LHS.getNode()->hasOneUse() && 14276 "Counter decrement has more than one use"); 14277 14278 return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other, 14279 N->getOperand(0), N->getOperand(4)); 14280 } 14281 14282 int CompareOpc; 14283 bool isDot; 14284 14285 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN && 14286 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) && 14287 getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) { 14288 assert(isDot && "Can't compare against a vector result!"); 14289 14290 // If this is a comparison against something other than 0/1, then we know 14291 // that the condition is never/always true. 14292 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 14293 if (Val != 0 && Val != 1) { 14294 if (CC == ISD::SETEQ) // Cond never true, remove branch. 14295 return N->getOperand(0); 14296 // Always !=, turn it into an unconditional branch. 14297 return DAG.getNode(ISD::BR, dl, MVT::Other, 14298 N->getOperand(0), N->getOperand(4)); 14299 } 14300 14301 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0); 14302 14303 // Create the PPCISD altivec 'dot' comparison node. 
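// (Illustrative: for a vcmpequw. the VCMPo node produces both the vector
// result and a glue value carrying CR6, which the COND_BRANCH built below
// consumes.)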
14304 SDValue Ops[] = { 14305 LHS.getOperand(2), // LHS of compare 14306 LHS.getOperand(3), // RHS of compare 14307 DAG.getConstant(CompareOpc, dl, MVT::i32) 14308 }; 14309 EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue }; 14310 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 14311 14312 // Unpack the result based on how the target uses it. 14313 PPC::Predicate CompOpc; 14314 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) { 14315 default: // Can't happen, don't crash on invalid number though. 14316 case 0: // Branch on the value of the EQ bit of CR6. 14317 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE; 14318 break; 14319 case 1: // Branch on the inverted value of the EQ bit of CR6. 14320 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ; 14321 break; 14322 case 2: // Branch on the value of the LT bit of CR6. 14323 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE; 14324 break; 14325 case 3: // Branch on the inverted value of the LT bit of CR6. 14326 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT; 14327 break; 14328 } 14329 14330 return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0), 14331 DAG.getConstant(CompOpc, dl, MVT::i32), 14332 DAG.getRegister(PPC::CR6, MVT::i32), 14333 N->getOperand(4), CompNode.getValue(1)); 14334 } 14335 break; 14336 } 14337 case ISD::BUILD_VECTOR: 14338 return DAGCombineBuildVector(N, DCI); 14339 case ISD::ABS: 14340 return combineABS(N, DCI); 14341 case ISD::VSELECT: 14342 return combineVSelect(N, DCI); 14343 } 14344 14345 return SDValue(); 14346 } 14347 14348 SDValue 14349 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, 14350 SelectionDAG &DAG, 14351 SmallVectorImpl<SDNode *> &Created) const { 14352 // fold (sdiv X, pow2) 14353 EVT VT = N->getValueType(0); 14354 if (VT == MVT::i64 && !Subtarget.isPPC64()) 14355 return SDValue(); 14356 if ((VT != MVT::i32 && VT != MVT::i64) || 14357 !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2())) 14358 return SDValue(); 14359 14360 SDLoc DL(N); 14361 SDValue N0 = N->getOperand(0); 14362 14363 bool IsNegPow2 = (-Divisor).isPowerOf2(); 14364 unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros(); 14365 SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT); 14366 14367 SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt); 14368 Created.push_back(Op.getNode()); 14369 14370 if (IsNegPow2) { 14371 Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op); 14372 Created.push_back(Op.getNode()); 14373 } 14374 14375 return Op; 14376 } 14377 14378 //===----------------------------------------------------------------------===// 14379 // Inline Assembly Support 14380 //===----------------------------------------------------------------------===// 14381 14382 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 14383 KnownBits &Known, 14384 const APInt &DemandedElts, 14385 const SelectionDAG &DAG, 14386 unsigned Depth) const { 14387 Known.resetAll(); 14388 switch (Op.getOpcode()) { 14389 default: break; 14390 case PPCISD::LBRX: { 14391 // lhbrx is known to have the top bits cleared out. 
14392 if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16) 14393 Known.Zero = 0xFFFF0000; 14394 break; 14395 } 14396 case ISD::INTRINSIC_WO_CHAIN: { 14397 switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) { 14398 default: break; 14399 case Intrinsic::ppc_altivec_vcmpbfp_p: 14400 case Intrinsic::ppc_altivec_vcmpeqfp_p: 14401 case Intrinsic::ppc_altivec_vcmpequb_p: 14402 case Intrinsic::ppc_altivec_vcmpequh_p: 14403 case Intrinsic::ppc_altivec_vcmpequw_p: 14404 case Intrinsic::ppc_altivec_vcmpequd_p: 14405 case Intrinsic::ppc_altivec_vcmpgefp_p: 14406 case Intrinsic::ppc_altivec_vcmpgtfp_p: 14407 case Intrinsic::ppc_altivec_vcmpgtsb_p: 14408 case Intrinsic::ppc_altivec_vcmpgtsh_p: 14409 case Intrinsic::ppc_altivec_vcmpgtsw_p: 14410 case Intrinsic::ppc_altivec_vcmpgtsd_p: 14411 case Intrinsic::ppc_altivec_vcmpgtub_p: 14412 case Intrinsic::ppc_altivec_vcmpgtuh_p: 14413 case Intrinsic::ppc_altivec_vcmpgtuw_p: 14414 case Intrinsic::ppc_altivec_vcmpgtud_p: 14415 Known.Zero = ~1U; // All bits but the low one are known to be zero. 14416 break; 14417 } 14418 } 14419 } 14420 } 14421 14422 Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { 14423 switch (Subtarget.getCPUDirective()) { 14424 default: break; 14425 case PPC::DIR_970: 14426 case PPC::DIR_PWR4: 14427 case PPC::DIR_PWR5: 14428 case PPC::DIR_PWR5X: 14429 case PPC::DIR_PWR6: 14430 case PPC::DIR_PWR6X: 14431 case PPC::DIR_PWR7: 14432 case PPC::DIR_PWR8: 14433 case PPC::DIR_PWR9: 14434 case PPC::DIR_PWR_FUTURE: { 14435 if (!ML) 14436 break; 14437 14438 if (!DisableInnermostLoopAlign32) { 14439 // If the nested loop is an innermost loop, prefer a 32-byte alignment, 14440 // so that we can decrease cache misses and branch-prediction misses. 14441 // Actual alignment of the loop will depend on the hotness check and other 14442 // logic in alignBlocks. 14443 if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty()) 14444 return Align(32); 14445 } 14446 14447 const PPCInstrInfo *TII = Subtarget.getInstrInfo(); 14448 14449 // For small loops (between 5 and 8 instructions), align to a 32-byte 14450 // boundary so that the entire loop fits in one instruction-cache line. 14451 uint64_t LoopSize = 0; 14452 for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I) 14453 for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) { 14454 LoopSize += TII->getInstSizeInBytes(*J); 14455 if (LoopSize > 32) 14456 break; 14457 } 14458 14459 if (LoopSize > 16 && LoopSize <= 32) 14460 return Align(32); 14461 14462 break; 14463 } 14464 } 14465 14466 return TargetLowering::getPrefLoopAlignment(ML); 14467 } 14468 14469 /// getConstraintType - Given a constraint, return the type of 14470 /// constraint it is for this target. 14471 PPCTargetLowering::ConstraintType 14472 PPCTargetLowering::getConstraintType(StringRef Constraint) const { 14473 if (Constraint.size() == 1) { 14474 switch (Constraint[0]) { 14475 default: break; 14476 case 'b': 14477 case 'r': 14478 case 'f': 14479 case 'd': 14480 case 'v': 14481 case 'y': 14482 return C_RegisterClass; 14483 case 'Z': 14484 // FIXME: While Z does indicate a memory constraint, it specifically 14485 // indicates an r+r address (used in conjunction with the 'y' modifier 14486 // in the replacement string). Currently, we're forcing the base 14487 // register to be r0 in the asm printer (which is interpreted as zero) 14488 // and forming the complete address in the second register. This is 14489 // suboptimal.
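// (Illustrative use, assuming the usual GCC-style pairing with the "y"
// output modifier:
//   asm("lwbrx %0, %y1" : "=r"(v) : "Z"(*p));
// where "%y1" prints the operand as an r+r address pair.)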
14490 return C_Memory; 14491 } 14492 } else if (Constraint == "wc") { // individual CR bits. 14493 return C_RegisterClass; 14494 } else if (Constraint == "wa" || Constraint == "wd" || 14495 Constraint == "wf" || Constraint == "ws" || 14496 Constraint == "wi" || Constraint == "ww") { 14497 return C_RegisterClass; // VSX registers. 14498 } 14499 return TargetLowering::getConstraintType(Constraint); 14500 } 14501 14502 /// Examine constraint type and operand type and determine a weight value. 14503 /// This object must already have been set up with the operand type 14504 /// and the current alternative constraint selected. 14505 TargetLowering::ConstraintWeight 14506 PPCTargetLowering::getSingleConstraintMatchWeight( 14507 AsmOperandInfo &info, const char *constraint) const { 14508 ConstraintWeight weight = CW_Invalid; 14509 Value *CallOperandVal = info.CallOperandVal; 14510 // If we don't have a value, we can't do a match, 14511 // but allow it at the lowest weight. 14512 if (!CallOperandVal) 14513 return CW_Default; 14514 Type *type = CallOperandVal->getType(); 14515 14516 // Look at the constraint type. 14517 if (StringRef(constraint) == "wc" && type->isIntegerTy(1)) 14518 return CW_Register; // an individual CR bit. 14519 else if ((StringRef(constraint) == "wa" || 14520 StringRef(constraint) == "wd" || 14521 StringRef(constraint) == "wf") && 14522 type->isVectorTy()) 14523 return CW_Register; 14524 else if (StringRef(constraint) == "wi" && type->isIntegerTy(64)) 14525 return CW_Register; // registers that just hold 64-bit integer data. 14526 else if (StringRef(constraint) == "ws" && type->isDoubleTy()) 14527 return CW_Register; 14528 else if (StringRef(constraint) == "ww" && type->isFloatTy()) 14529 return CW_Register; 14530 14531 switch (*constraint) { 14532 default: 14533 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 14534 break; 14535 case 'b': 14536 if (type->isIntegerTy()) 14537 weight = CW_Register; 14538 break; 14539 case 'f': 14540 if (type->isFloatTy()) 14541 weight = CW_Register; 14542 break; 14543 case 'd': 14544 if (type->isDoubleTy()) 14545 weight = CW_Register; 14546 break; 14547 case 'v': 14548 if (type->isVectorTy()) 14549 weight = CW_Register; 14550 break; 14551 case 'y': 14552 weight = CW_Register; 14553 break; 14554 case 'Z': 14555 weight = CW_Memory; 14556 break; 14557 } 14558 return weight; 14559 } 14560 14561 std::pair<unsigned, const TargetRegisterClass *> 14562 PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, 14563 StringRef Constraint, 14564 MVT VT) const { 14565 if (Constraint.size() == 1) { 14566 // GCC RS6000 Constraint Letters 14567 switch (Constraint[0]) { 14568 case 'b': // R1-R31 14569 if (VT == MVT::i64 && Subtarget.isPPC64()) 14570 return std::make_pair(0U, &PPC::G8RC_NOX0RegClass); 14571 return std::make_pair(0U, &PPC::GPRC_NOR0RegClass); 14572 case 'r': // R0-R31 14573 if (VT == MVT::i64 && Subtarget.isPPC64()) 14574 return std::make_pair(0U, &PPC::G8RCRegClass); 14575 return std::make_pair(0U, &PPC::GPRCRegClass); 14576 // 'd' and 'f' constraints are both defined to be "the floating point 14577 // registers", where one is for 32-bit and the other for 64-bit. We don't 14578 // really care overly much here so just give them all the same reg classes.
14579 case 'd': 14580 case 'f': 14581 if (Subtarget.hasSPE()) { 14582 if (VT == MVT::f32 || VT == MVT::i32) 14583 return std::make_pair(0U, &PPC::GPRCRegClass); 14584 if (VT == MVT::f64 || VT == MVT::i64) 14585 return std::make_pair(0U, &PPC::SPERCRegClass); 14586 } else { 14587 if (VT == MVT::f32 || VT == MVT::i32) 14588 return std::make_pair(0U, &PPC::F4RCRegClass); 14589 if (VT == MVT::f64 || VT == MVT::i64) 14590 return std::make_pair(0U, &PPC::F8RCRegClass); 14591 if (VT == MVT::v4f64 && Subtarget.hasQPX()) 14592 return std::make_pair(0U, &PPC::QFRCRegClass); 14593 if (VT == MVT::v4f32 && Subtarget.hasQPX()) 14594 return std::make_pair(0U, &PPC::QSRCRegClass); 14595 } 14596 break; 14597 case 'v': 14598 if (VT == MVT::v4f64 && Subtarget.hasQPX()) 14599 return std::make_pair(0U, &PPC::QFRCRegClass); 14600 if (VT == MVT::v4f32 && Subtarget.hasQPX()) 14601 return std::make_pair(0U, &PPC::QSRCRegClass); 14602 if (Subtarget.hasAltivec()) 14603 return std::make_pair(0U, &PPC::VRRCRegClass); 14604 break; 14605 case 'y': // crrc 14606 return std::make_pair(0U, &PPC::CRRCRegClass); 14607 } 14608 } else if (Constraint == "wc" && Subtarget.useCRBits()) { 14609 // An individual CR bit. 14610 return std::make_pair(0U, &PPC::CRBITRCRegClass); 14611 } else if ((Constraint == "wa" || Constraint == "wd" || 14612 Constraint == "wf" || Constraint == "wi") && 14613 Subtarget.hasVSX()) { 14614 return std::make_pair(0U, &PPC::VSRCRegClass); 14615 } else if ((Constraint == "ws" || Constraint == "ww") && Subtarget.hasVSX()) { 14616 if (VT == MVT::f32 && Subtarget.hasP8Vector()) 14617 return std::make_pair(0U, &PPC::VSSRCRegClass); 14618 else 14619 return std::make_pair(0U, &PPC::VSFRCRegClass); 14620 } 14621 14622 // If we name a VSX register, we can't defer to the base class because it 14623 // will not recognize the correct register (their names will be VSL{0-31} 14624 // and V{0-31} so they won't match). So we match them here. 14625 if (Constraint.size() > 3 && Constraint[1] == 'v' && Constraint[2] == 's') { 14626 int VSNum = atoi(Constraint.data() + 3); 14627 assert(VSNum >= 0 && VSNum <= 63 && 14628 "Attempted to access a vsr out of range"); 14629 if (VSNum < 32) 14630 return std::make_pair(PPC::VSL0 + VSNum, &PPC::VSRCRegClass); 14631 return std::make_pair(PPC::V0 + VSNum - 32, &PPC::VSRCRegClass); 14632 } 14633 std::pair<unsigned, const TargetRegisterClass *> R = 14634 TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 14635 14636 // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers 14637 // (which we call X[0-9]+). If a 64-bit value has been requested, and a 14638 // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent 14639 // register. 14640 // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use 14641 // the AsmName field from *RegisterInfo.td, then this would not be necessary. 14642 if (R.first && VT == MVT::i64 && Subtarget.isPPC64() && 14643 PPC::GPRCRegClass.contains(R.first)) 14644 return std::make_pair(TRI->getMatchingSuperReg(R.first, 14645 PPC::sub_32, &PPC::G8RCRegClass), 14646 &PPC::G8RCRegClass); 14647 14648 // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same. 14649 if (!R.second && StringRef("{cc}").equals_lower(Constraint)) { 14650 R.first = PPC::CR0; 14651 R.second = &PPC::CRRCRegClass; 14652 } 14653 14654 return R; 14655 } 14656 14657 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 14658 /// vector. If it is invalid, don't add anything to Ops. 
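// For example (illustrative): with the "I" constraint, a constant 42 is
// accepted as a signed 16-bit immediate and pushed onto Ops, while 100000
// fails isInt<16> and leaves Ops untouched.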

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;

  // Only support length 1 constraints.
  if (Constraint.length() > 1)
    return;

  char Letter = Constraint[0];
  switch (Letter) {
  default: break;
  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P': {
    ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
    if (!CST) return; // Must be an immediate to match.
    SDLoc dl(Op);
    int64_t Value = CST->getSExtValue();
    EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
                         // numbers are printed as such.
    switch (Letter) {
    default: llvm_unreachable("Unknown constraint letter!");
    case 'I': // "I" is a signed 16-bit constant.
      if (isInt<16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'J': // "J" is a constant with only the high-order 16 bits nonzero.
      if (isShiftedUInt<16, 16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'L': // "L" is a signed 16-bit constant shifted left 16 bits.
      if (isShiftedInt<16, 16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'K': // "K" is a constant with only the low-order 16 bits nonzero.
      if (isUInt<16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'M': // "M" is a constant that is greater than 31.
      if (Value > 31)
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'N': // "N" is a positive constant that is an exact power of two.
      if (Value > 0 && isPowerOf2_64(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'O': // "O" is the constant zero.
      if (Value == 0)
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'P': // "P" is a constant whose negation is a signed 16-bit constant.
      if (isInt<16>(-Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    }
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  // Handle standard constraint letters.
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
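
// Illustrative example (a sketch): under the 'I' constraint handled above,
// only immediates that fit addi's signed 16-bit field are accepted, so
//   asm("addi %0, %1, %2" : "=r"(d) : "r"(s), "I"(42));
// lowers 42 to a target constant, while "I"(0x12345) matches nothing and
// the operand is rejected.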

// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS,
                                              Instruction *I) const {
  // PPC does not allow r+i addressing modes for vectors!
  if (Ty->isVectorTy() && AM.BaseOffs != 0)
    return false;

  // PPC allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16) - 1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // PPC only supports r+r addressing:
  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    // No other scales are supported.
    return false;
  }

  return true;
}

SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // Make sure the function does not optimize away the store of the RA to
  // the stack.
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setLRStoreRequired();
  bool isPPC64 = Subtarget.isPPC64();
  auto PtrVT = getPointerTy(MF.getDataLayout());

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset =
        DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(),
                        dl, isPPC64 ? MVT::i64 : MVT::i32);
    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Just load the return address off the stack.
  SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
  return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
                     MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;

  // Naked functions never have a frame pointer, and so we use r1. For all
  // other functions, this decision must be delayed until during PEI.
  unsigned FrameReg;
  if (MF.getFunction().hasFnAttribute(Attribute::Naked))
    FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
  else
    FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;

  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
                                         PtrVT);
  while (Depth--)
    FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
                            FrameAddr, MachinePointerInfo());
  return FrameAddr;
}
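
// Illustrative consequence of LowerRETURNADDR above (assuming the usual
// Clang builtin): __builtin_return_address(0) loads from the LR save slot,
// while __builtin_return_address(1) first recovers the caller's frame via
// LowerFRAMEADDR and then loads from (frame + ReturnSaveOffset).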

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
Register PPCTargetLowering::getRegisterByName(const char *RegName, LLT VT,
                                              const MachineFunction &MF) const {
  bool isPPC64 = Subtarget.isPPC64();

  bool is64Bit = isPPC64 && VT == LLT::scalar(64);
  if (!is64Bit && VT != LLT::scalar(32))
    report_fatal_error("Invalid register global variable type");

  Register Reg = StringSwitch<Register>(RegName)
                     .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
                     .Case("r2", isPPC64 ? Register() : PPC::R2)
                     .Case("r13", (is64Bit ? PPC::X13 : PPC::R13))
                     .Default(Register());

  if (Reg)
    return Reg;
  report_fatal_error("Invalid register name global variable");
}

bool PPCTargetLowering::isAccessedAsGotIndirect(SDValue GA) const {
  // The 32-bit SVR4 ABI accesses everything as got-indirect.
  if (Subtarget.is32BitELFABI())
    return true;

  // AIX accesses everything indirectly through the TOC, which is similar to
  // the GOT.
  if (Subtarget.isAIXABI())
    return true;

  CodeModel::Model CModel = getTargetMachine().getCodeModel();
  // If it is small or large code model, module locals are accessed
  // indirectly by loading their address from .toc/.got.
  if (CModel == CodeModel::Small || CModel == CodeModel::Large)
    return true;

  // JumpTable and BlockAddress are accessed as got-indirect.
  if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA))
    return true;

  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA))
    return Subtarget.isGVIndirectSymbol(G->getGlobal());

  return false;
}

bool
PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The PowerPC target isn't yet aware of offsets.
  return false;
}
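
// Illustrative use of getRegisterByName above (assuming GNU named register
// globals): "register unsigned long sp asm("r1");" resolves to X1 in 64-bit
// mode and R1 in 32-bit mode; any other name (or r2 in 64-bit mode, where
// it is the TOC pointer) reports a fatal error.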

bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           MachineFunction &MF,
                                           unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::ppc_qpx_qvlfd:
  case Intrinsic::ppc_qpx_qvlfs:
  case Intrinsic::ppc_qpx_qvlfcd:
  case Intrinsic::ppc_qpx_qvlfcs:
  case Intrinsic::ppc_qpx_qvlfiwa:
  case Intrinsic::ppc_qpx_qvlfiwz:
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::ppc_altivec_lvebx:
  case Intrinsic::ppc_altivec_lvehx:
  case Intrinsic::ppc_altivec_lvewx:
  case Intrinsic::ppc_vsx_lxvd2x:
  case Intrinsic::ppc_vsx_lxvw4x: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_lvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_lvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_lvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_lxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfd:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfs:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcd:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcs:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = -VT.getStoreSize() + 1;
    Info.size = 2 * VT.getStoreSize() - 1;
    Info.align = Align(1);
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::ppc_qpx_qvlfda:
  case Intrinsic::ppc_qpx_qvlfsa:
  case Intrinsic::ppc_qpx_qvlfcda:
  case Intrinsic::ppc_qpx_qvlfcsa:
  case Intrinsic::ppc_qpx_qvlfiwaa:
  case Intrinsic::ppc_qpx_qvlfiwza: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_qpx_qvlfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = Align(1);
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfd:
  case Intrinsic::ppc_qpx_qvstfs:
  case Intrinsic::ppc_qpx_qvstfcd:
  case Intrinsic::ppc_qpx_qvstfcs:
  case Intrinsic::ppc_qpx_qvstfiw:
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
  case Intrinsic::ppc_altivec_stvebx:
  case Intrinsic::ppc_altivec_stvehx:
  case Intrinsic::ppc_altivec_stvewx:
  case Intrinsic::ppc_vsx_stxvd2x:
  case Intrinsic::ppc_vsx_stxvw4x: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_stvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_stvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_stvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfd:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfs:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcd:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcs:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = -VT.getStoreSize() + 1;
    Info.size = 2 * VT.getStoreSize() - 1;
    Info.align = Align(1);
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfda:
  case Intrinsic::ppc_qpx_qvstfsa:
  case Intrinsic::ppc_qpx_qvstfcda:
  case Intrinsic::ppc_qpx_qvstfcsa:
  case Intrinsic::ppc_qpx_qvstfiwa: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_qpx_qvstfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = Align(1);
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  default:
    break;
  }

  return false;
}
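
// Note on the memory-operand windows above: lvx/stvx and the non-"a" QPX
// forms ignore the low bits of the pointer, so the memory operand is
// described as the widest window the instruction might touch, i.e.
// offset = -(size - 1) with an extent of 2 * size - 1 bytes. The "a"
// (known-aligned) QPX variants can use the exact [0, size) range instead.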

/// getOptimalMemOpType - Returns the target-specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
/// lowering. If DstAlign is zero, the destination alignment can satisfy any
/// constraint. Similarly, if SrcAlign is zero, there is no need to check it
/// against an alignment requirement, probably because the source does not
/// need to be loaded. If 'IsMemset' is true, we are expanding a memset. If
/// 'ZeroMemset' is true, it is a memset of zero. 'MemcpyStrSrc' indicates
/// whether the memcpy source is constant so it does not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT PPCTargetLowering::getOptimalMemOpType(
    uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset,
    bool ZeroMemset, bool MemcpyStrSrc,
    const AttributeList &FuncAttributes) const {
  if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
    // When expanding a memset, require at least two QPX instructions to cover
    // the cost of loading the value to be stored from the constant pool.
    if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) &&
        (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) &&
        !FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) {
      return MVT::v4f64;
    }

    // We should use Altivec/VSX loads and stores when available. For unaligned
    // addresses, unaligned VSX loads are only fast starting with the P8.
    if (Subtarget.hasAltivec() && Size >= 16 &&
        (((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) ||
         ((IsMemset && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
      return MVT::v4i32;
  }

  if (Subtarget.isPPC64()) {
    return MVT::i64;
  }

  return MVT::i32;
}

/// Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  return !(BitSize == 0 || BitSize > 64);
}

bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Generally speaking, zexts are not free, but they are free when they can
  // be folded with other operations.
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  // FIXME: Add other cases...
  //  - 32-bit shifts with a zext to i64
  //  - zext after ctlz, bswap, etc.
  //  - zext after and by a constant mask

  return TargetLowering::isZExtFree(Val, VT2);
}
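
// Illustrative consequence of getOptimalMemOpType above: a 32-byte memcpy
// with 16-byte-aligned source and destination on an Altivec subtarget may
// be expanded as two v4i32 load/store pairs, while a PPC64 subtarget
// without Altivec falls back to four i64 pairs.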

bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
  assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
         "invalid fpext types");
  // Extending to float128 is not free.
  if (DestVT == MVT::f128)
    return false;
  return true;
}

bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                       unsigned,
                                                       unsigned,
                                                       MachineMemOperand::Flags,
                                                       bool *Fast) const {
  if (DisablePPCUnaligned)
    return false;

  // PowerPC supports unaligned memory access for simple non-vector types.
  // Although accessing unaligned addresses is not as efficient as accessing
  // aligned addresses, it is generally more efficient than manual expansion,
  // and generally only traps for software emulation when crossing page
  // boundaries.

  if (!VT.isSimple())
    return false;

  if (VT.isFloatingPoint() && !Subtarget.allowsUnalignedFPAccess())
    return false;

  if (VT.getSimpleVT().isVector()) {
    if (Subtarget.hasVSX()) {
      if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
          VT != MVT::v4f32 && VT != MVT::v4i32)
        return false;
    } else {
      return false;
    }
  }

  if (VT == MVT::ppcf128)
    return false;

  if (Fast)
    *Fast = true;

  return true;
}

bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                                   EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return (EnableQuadPrecision && Subtarget.hasP9Vector());
  default:
    break;
  }

  return false;
}

const MCPhysReg *
PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
  // LR is a callee-save register, but we must treat it as clobbered by any
  // call site. Hence we include LR in the scratch registers, which are in
  // turn added as implicit-defs for stackmaps and patchpoints. The same
  // reasoning applies to CTR, which is used by any indirect call.
  static const MCPhysReg ScratchRegs[] = {
    PPC::X12, PPC::LR8, PPC::CTR8, 0
  };

  return ScratchRegs;
}

unsigned PPCTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
}

unsigned PPCTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
}

bool
PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
    EVT VT, unsigned DefinedValues) const {
  if (VT == MVT::v2i64)
    return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves

  if (Subtarget.hasVSX() || Subtarget.hasQPX())
    return true;

  return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
}

Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
  if (DisableILPPref || Subtarget.enableMachineScheduler())
    return TargetLowering::getSchedulingPreference(N);

  return Sched::ILP;
}
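
// Illustrative effect of isFMAFasterThanFMulAndFAdd above: when FP
// contraction is permitted, (fadd (fmul a, b), c) on f64 is selected as a
// single fused multiply-add (fmadd, or xsmaddadp under VSX) instead of a
// separate fmul and fadd.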

// Create a fast isel object.
FastISel *
PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
                                  const TargetLibraryInfo *LibInfo) const {
  return PPC::createFastISel(FuncInfo, LibInfo);
}

void PPCTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  if (!Subtarget.isPPC64())
    return;

  // Update IsSplitCSR in PPCFunctionInfo.
  PPCFunctionInfo *PFI = Entry->getParent()->getInfo<PPCFunctionInfo>();
  PFI->setIsSplitCSR(true);
}

void PPCTargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return;

  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  for (const MCPhysReg *I = IStart; *I; ++I) {
    const TargetRegisterClass *RC = nullptr;
    if (PPC::G8RCRegClass.contains(*I))
      RC = &PPC::G8RCRegClass;
    else if (PPC::F8RCRegClass.contains(*I))
      RC = &PPC::F8RCRegClass;
    else if (PPC::CRRCRegClass.contains(*I))
      RC = &PPC::CRRCRegClass;
    else if (PPC::VRRCRegClass.contains(*I))
      RC = &PPC::VRRCRegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");

    Register NewVR = MRI->createVirtualRegister(RC);
    // Create copy from CSR to a virtual register.
    // FIXME: this currently does not emit CFI pseudo-instructions, it works
    // fine for CXX_FAST_TLS since the C++-style TLS access functions should
    // be nounwind. If we want to generalize this later, we may need to emit
    // CFI pseudo-instructions.
    assert(Entry->getParent()->getFunction().hasFnAttribute(
               Attribute::NoUnwind) &&
           "Function should be nounwind in insertCopiesSplitCSR!");
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
        .addReg(*I);

    // Insert the copy-back instructions right before the terminator.
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)
          .addReg(NewVR);
  }
}

// Override to enable LOAD_STACK_GUARD lowering on Linux.
bool PPCTargetLowering::useLoadStackGuardNode() const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::useLoadStackGuardNode();
  return true;
}

// Override to disable global variable loading on Linux.
void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::insertSSPDeclarations(M);
}
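
// Illustrative context for the split-CSR hooks above (used with the
// CXX_FAST_TLS calling convention): callee-saved registers are copied to
// virtual registers in the entry block and copied back before each return,
// letting the register allocator place the actual save/restore code off
// the hot path instead of emitting the usual prologue/epilogue spills.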

bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                     bool ForCodeSize) const {
  if (!VT.isSimple() || !Subtarget.hasVSX())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  default:
    // For FP types that are currently not supported by the PPC backend,
    // return false. Examples: f16, f80.
    return false;
  case MVT::f32:
  case MVT::f64:
  case MVT::ppcf128:
    return Imm.isPosZero();
  }
}

// For a vector shift operation op, fold
// (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
                                  SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();
  unsigned OpSizeInBits = VT.getScalarSizeInBits();
  unsigned Opcode = N->getOpcode();
  unsigned TargetOpcode;

  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected shift operation");
  case ISD::SHL:
    TargetOpcode = PPCISD::SHL;
    break;
  case ISD::SRL:
    TargetOpcode = PPCISD::SRL;
    break;
  case ISD::SRA:
    TargetOpcode = PPCISD::SRA;
    break;
  }

  if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
      N1->getOpcode() == ISD::AND)
    if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
      if (Mask->getZExtValue() == OpSizeInBits - 1)
        return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));

  return SDValue();
}
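
// Illustrative input for stripModuloOnShift (assuming v4i32 shifts are
// legal on the subtarget):
//   shl v4i32 %x, (and v4i32 %y, splat(31))
// folds to (PPCISD::SHL %x, %y): vslw already interprets each shift amount
// modulo 32, so the explicit mask is redundant.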

SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  SDValue N0 = N->getOperand(0);
  ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!Subtarget.isISA3_0() ||
      N0.getOpcode() != ISD::SIGN_EXTEND ||
      N0.getOperand(0).getValueType() != MVT::i32 ||
      CN1 == nullptr || N->getValueType(0) != MVT::i64)
    return SDValue();

  // We can't save an operation here if the value is already extended, and
  // the existing shift is easier to combine.
  SDValue ExtsSrc = N0.getOperand(0);
  if (ExtsSrc.getOpcode() == ISD::TRUNCATE &&
      ExtsSrc.getOperand(0).getOpcode() == ISD::AssertSext)
    return SDValue();

  SDLoc DL(N0);
  SDValue ShiftBy = SDValue(CN1, 0);
  // We want the shift amount to be i32 on the extswli, but the shift could
  // have an i64 shift amount.
  if (ShiftBy.getValueType() == MVT::i64)
    ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32);

  return DCI.DAG.getNode(PPCISD::EXTSWSLI, DL, MVT::i64, N0->getOperand(0),
                         ShiftBy);
}

SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}

SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}
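
// Illustrative combineSHL result (assuming an ISA 3.0 subtarget): for
//   (shl (sext i32 %a to i64), 3)
// the sign-extension and shift are merged into PPCISD::EXTSWSLI, which
// selects to a single extswsli instruction.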

// Transform (add X, (zext(setne Z, C))) -> (addze X, (addic (addi Z, -C), -1))
// Transform (add X, (zext(sete  Z, C))) -> (addze X, (subfic (addi Z, -C), 0))
// When C is zero, the expression (addi Z, -C) simplifies to Z.
// Requirement: -C is in [-32768, 32767], and X and Z are MVT::i64 types.
static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG,
                                 const PPCSubtarget &Subtarget) {
  if (!Subtarget.isPPC64())
    return SDValue();

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  auto isZextOfCompareWithConstant = [](SDValue Op) {
    if (Op.getOpcode() != ISD::ZERO_EXTEND || !Op.hasOneUse() ||
        Op.getValueType() != MVT::i64)
      return false;

    SDValue Cmp = Op.getOperand(0);
    if (Cmp.getOpcode() != ISD::SETCC || !Cmp.hasOneUse() ||
        Cmp.getOperand(0).getValueType() != MVT::i64)
      return false;

    if (auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1))) {
      int64_t NegConstant = 0 - Constant->getSExtValue();
      // Due to the limitations of the addi instruction,
      // -C is required to be in [-32768, 32767].
      return isInt<16>(NegConstant);
    }

    return false;
  };

  bool LHSHasPattern = isZextOfCompareWithConstant(LHS);
  bool RHSHasPattern = isZextOfCompareWithConstant(RHS);

  // If there is a pattern, canonicalize a zext operand to the RHS.
  if (LHSHasPattern && !RHSHasPattern)
    std::swap(LHS, RHS);
  else if (!LHSHasPattern && !RHSHasPattern)
    return SDValue();

  SDLoc DL(N);
  SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Glue);
  SDValue Cmp = RHS.getOperand(0);
  SDValue Z = Cmp.getOperand(0);
  auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1));

  assert(Constant && "Constant Should not be a null pointer.");
  int64_t NegConstant = 0 - Constant->getSExtValue();

  switch (cast<CondCodeSDNode>(Cmp.getOperand(2))->get()) {
  default: break;
  case ISD::SETNE: {
    //                           when C == 0
    //                             --> addze X, (addic Z, -1).carry
    //                           /
    // add X, (zext(setne Z, C))--
    //                           \    when -32768 <= -C <= 32767 && C != 0
    //                             --> addze X, (addic (addi Z, -C), -1).carry
    SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
                              DAG.getConstant(NegConstant, DL, MVT::i64));
    SDValue AddOrZ = NegConstant != 0 ? Add : Z;
    SDValue Addc = DAG.getNode(ISD::ADDC, DL,
                               DAG.getVTList(MVT::i64, MVT::Glue),
                               AddOrZ, DAG.getConstant(-1ULL, DL, MVT::i64));
    return DAG.getNode(ISD::ADDE, DL, VTs, LHS,
                       DAG.getConstant(0, DL, MVT::i64),
                       SDValue(Addc.getNode(), 1));
  }
  case ISD::SETEQ: {
    //                           when C == 0
    //                             --> addze X, (subfic Z, 0).carry
    //                           /
    // add X, (zext(sete  Z, C))--
    //                           \    when -32768 <= -C <= 32767 && C != 0
    //                             --> addze X, (subfic (addi Z, -C), 0).carry
    SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
                              DAG.getConstant(NegConstant, DL, MVT::i64));
    SDValue AddOrZ = NegConstant != 0 ? Add : Z;
    SDValue Subc = DAG.getNode(ISD::SUBC, DL,
                               DAG.getVTList(MVT::i64, MVT::Glue),
                               DAG.getConstant(0, DL, MVT::i64), AddOrZ);
    return DAG.getNode(ISD::ADDE, DL, VTs, LHS,
                       DAG.getConstant(0, DL, MVT::i64),
                       SDValue(Subc.getNode(), 1));
  }
  }

  return SDValue();
}

SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = combineADDToADDZE(N, DCI.DAG, Subtarget))
    return Value;

  return SDValue();
}
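
// Illustrative combineADDToADDZE rewrite on PPC64: for
//   add %x, (zext (setcc %z, 7, setne))
// the boolean is never materialized in a GPR; instead the sequence
//   addi %t, %z, -7 ; addic %t, %t, -1 ; addze %x
// carries the comparison result to %x through the CA bit.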

// Detect TRUNCATE operations on bitcasts of float128 values.
// What we are looking for here is the situation where we extract a subset
// of bits from a 128-bit float.
// This can be of two forms:
// 1) BITCAST of f128 feeding TRUNCATE
// 2) BITCAST of f128 feeding SRL (a shift) feeding TRUNCATE
// The reason this is required is because we do not have a legal i128 type
// and so we want to prevent having to store the f128 and then reload part
// of it.
SDValue PPCTargetLowering::combineTRUNCATE(SDNode *N,
                                           DAGCombinerInfo &DCI) const {
  // If we are using CRBits then try that first.
  if (Subtarget.useCRBits()) {
    // Check if CRBits did anything and return that if it did.
    if (SDValue CRTruncValue = DAGCombineTruncBoolExt(N, DCI))
      return CRTruncValue;
  }

  SDLoc dl(N);
  SDValue Op0 = N->getOperand(0);

  // Looking for a truncate of i128 to i64.
  if (Op0.getValueType() != MVT::i128 || N->getValueType(0) != MVT::i64)
    return SDValue();

  int EltToExtract = DCI.DAG.getDataLayout().isBigEndian() ? 1 : 0;

  // SRL feeding TRUNCATE.
  if (Op0.getOpcode() == ISD::SRL) {
    ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
    // The right shift has to be by 64 bits.
    if (!ConstNode || ConstNode->getZExtValue() != 64)
      return SDValue();

    // Switch the element number to extract.
    EltToExtract = EltToExtract ? 0 : 1;
    // Update Op0 past the SRL.
    Op0 = Op0.getOperand(0);
  }

  // BITCAST feeding a TRUNCATE possibly via SRL.
  if (Op0.getOpcode() == ISD::BITCAST &&
      Op0.getValueType() == MVT::i128 &&
      Op0.getOperand(0).getValueType() == MVT::f128) {
    SDValue Bitcast = DCI.DAG.getBitcast(MVT::v2i64, Op0.getOperand(0));
    return DCI.DAG.getNode(
        ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Bitcast,
        DCI.DAG.getTargetConstant(EltToExtract, dl, MVT::i32));
  }
  return SDValue();
}

SDValue PPCTargetLowering::combineMUL(SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  ConstantSDNode *ConstOpOrElement = isConstOrConstSplat(N->getOperand(1));
  if (!ConstOpOrElement)
    return SDValue();

  // An imul is usually smaller than the alternative sequence for a legal
  // type.
  if (DAG.getMachineFunction().getFunction().hasMinSize() &&
      isOperationLegal(ISD::MUL, N->getValueType(0)))
    return SDValue();

  auto IsProfitable = [this](bool IsNeg, bool IsAddOne, EVT VT) -> bool {
    switch (this->Subtarget.getCPUDirective()) {
    default:
      // TODO: enhance the condition for subtargets before pwr8
      return false;
    case PPC::DIR_PWR8:
      // type        mul     add    shl
      // scalar        4      1      1
      // vector        7      2      2
      return true;
    case PPC::DIR_PWR9:
    case PPC::DIR_PWR_FUTURE:
      // type        mul     add    shl
      // scalar        5      2      2
      // vector        7      2      2
      //
      // The cycle ratios of the related operations are shown in the table
      // above. Because mul is 5 (scalar) / 7 (vector) cycles and add/sub/shl
      // are all 2 for both scalar and vector types, the two-instruction
      // patterns (add/sub + shl, 4 cycles) are always profitable; but for
      // the three-instruction pattern
      // (mul x, -(2^N + 1)) => -(add (shl x, N), x), sub + add + shl is
      // 6 cycles, so we should only do it for vector types.
      return IsAddOne && IsNeg ? VT.isVector() : true;
    }
  };

  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  const APInt &MulAmt = ConstOpOrElement->getAPIntValue();
  bool IsNeg = MulAmt.isNegative();
  APInt MulAmtAbs = MulAmt.abs();

  if ((MulAmtAbs - 1).isPowerOf2()) {
    // (mul x, 2^N + 1) => (add (shl x, N), x)
    // (mul x, -(2^N + 1)) => -(add (shl x, N), x)

    if (!IsProfitable(IsNeg, true, VT))
      return SDValue();

    SDValue Op0 = N->getOperand(0);
    SDValue Op1 =
        DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                    DAG.getConstant((MulAmtAbs - 1).logBase2(), DL, VT));
    SDValue Res = DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);

    if (!IsNeg)
      return Res;

    return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
  } else if ((MulAmtAbs + 1).isPowerOf2()) {
    // (mul x, 2^N - 1) => (sub (shl x, N), x)
    // (mul x, -(2^N - 1)) => (sub x, (shl x, N))

    if (!IsProfitable(IsNeg, false, VT))
      return SDValue();

    SDValue Op0 = N->getOperand(0);
    SDValue Op1 =
        DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                    DAG.getConstant((MulAmtAbs + 1).logBase2(), DL, VT));

    if (!IsNeg)
      return DAG.getNode(ISD::SUB, DL, VT, Op1, Op0);
    else
      return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);

  } else {
    return SDValue();
  }
}
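
// Illustrative combineMUL expansions (when IsProfitable holds):
//   mul %x, 17  -> add (shl %x, 4), %x          ; 17 = 2^4 + 1
//   mul %x, 31  -> sub (shl %x, 5), %x          ; 31 = 2^5 - 1
//   mul %x, -31 -> sub %x, (shl %x, 5)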

bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  // Only duplicate to increase tail-calls for the 64-bit SysV ABIs.
  if (!Subtarget.is64BitELFABI())
    return false;

  // If not a tail call then no need to proceed.
  if (!CI->isTailCall())
    return false;

  // If sibling calls have been disabled and tail-calls aren't guaranteed
  // there is no reason to duplicate.
  auto &TM = getTargetMachine();
  if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
    return false;

  // Can't tail call a function called indirectly, or if it has variadic args.
  const Function *Callee = CI->getCalledFunction();
  if (!Callee || Callee->isVarArg())
    return false;

  // Make sure the callee and caller calling conventions are eligible for tco.
  const Function *Caller = CI->getParent()->getParent();
  if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
                                           CI->getCallingConv()))
    return false;

  // If the function is local then we have a good chance at tail-calling it.
  return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
}

bool PPCTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
  if (!Subtarget.hasVSX())
    return false;
  if (Subtarget.hasP9Vector() && VT == MVT::f128)
    return true;
  return VT == MVT::f32 || VT == MVT::f64 ||
         VT == MVT::v4f32 || VT == MVT::v2f64;
}

bool PPCTargetLowering::
isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
  const Value *Mask = AndI.getOperand(1);
  // If the mask is suitable for andi. or andis. we should sink the and.
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
    // Can't handle constants wider than 64 bits.
    if (CI->getBitWidth() > 64)
      return false;
    int64_t ConstVal = CI->getZExtValue();
    return isUInt<16>(ConstVal) ||
           (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
  }

  // For non-constant masks, we can always use the record-form and.
  return true;
}
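
// Illustrative isMaskAndCmp0FoldingBeneficial results for constant masks:
//   and %x, 0x0000FFFF -> true  (fits the andi. immediate)
//   and %x, 0xFFFF0000 -> true  (fits the andis. immediate)
//   and %x, 0x00FF00FF -> false (would need extra instructions, don't sink)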

// Transform (abs (sub (zext a), (zext b))) to (vabsd a b 0)
// Transform (abs (sub (zext a), (zext_invec b))) to (vabsd a b 0)
// Transform (abs (sub (zext_invec a), (zext_invec b))) to (vabsd a b 0)
// Transform (abs (sub (zext_invec a), (zext b))) to (vabsd a b 0)
// Transform (abs (sub a, b)) to (vabsd a b 1) if a & b are of type v4i32
SDValue PPCTargetLowering::combineABS(SDNode *N, DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::ABS) && "Need ABS node here");
  assert(Subtarget.hasP9Altivec() &&
         "Only combine this when P9 altivec supported!");
  EVT VT = N->getValueType(0);
  if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  if (N->getOperand(0).getOpcode() == ISD::SUB) {
    // Even for signed integers, if it's known to be positive (as signed
    // integer) due to zero-extended inputs.
    unsigned SubOpcd0 = N->getOperand(0)->getOperand(0).getOpcode();
    unsigned SubOpcd1 = N->getOperand(0)->getOperand(1).getOpcode();
    if ((SubOpcd0 == ISD::ZERO_EXTEND ||
         SubOpcd0 == ISD::ZERO_EXTEND_VECTOR_INREG) &&
        (SubOpcd1 == ISD::ZERO_EXTEND ||
         SubOpcd1 == ISD::ZERO_EXTEND_VECTOR_INREG)) {
      return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
                         N->getOperand(0)->getOperand(0),
                         N->getOperand(0)->getOperand(1),
                         DAG.getTargetConstant(0, dl, MVT::i32));
    }

    // For type v4i32, it can be optimized with xvnegsp + vabsduw.
    if (N->getOperand(0).getValueType() == MVT::v4i32 &&
        N->getOperand(0).hasOneUse()) {
      return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
                         N->getOperand(0)->getOperand(0),
                         N->getOperand(0)->getOperand(1),
                         DAG.getTargetConstant(1, dl, MVT::i32));
    }
  }

  return SDValue();
}

// For type v4i32/v8i16/v16i8, transform
// from (vselect (setcc a, b, setugt), (sub a, b), (sub b, a)) to (vabsd a, b)
// from (vselect (setcc a, b, setuge), (sub a, b), (sub b, a)) to (vabsd a, b)
// from (vselect (setcc a, b, setult), (sub b, a), (sub a, b)) to (vabsd a, b)
// from (vselect (setcc a, b, setule), (sub b, a), (sub a, b)) to (vabsd a, b)
SDValue PPCTargetLowering::combineVSelect(SDNode *N,
                                          DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::VSELECT) && "Need VSELECT node here");
  assert(Subtarget.hasP9Altivec() &&
         "Only combine this when P9 altivec supported!");

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Cond = N->getOperand(0);
  SDValue TrueOpnd = N->getOperand(1);
  SDValue FalseOpnd = N->getOperand(2);
  EVT VT = N->getOperand(1).getValueType();

  if (Cond.getOpcode() != ISD::SETCC || TrueOpnd.getOpcode() != ISD::SUB ||
      FalseOpnd.getOpcode() != ISD::SUB)
    return SDValue();

  // ABSD only available for type v4i32/v8i16/v16i8
  if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
    return SDValue();

  // At least to save one more dependent computation
  if (!(Cond.hasOneUse() || TrueOpnd.hasOneUse() || FalseOpnd.hasOneUse()))
    return SDValue();

  ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

  // Can only handle unsigned comparison here
  switch (CC) {
  default:
    return SDValue();
  case ISD::SETUGT:
  case ISD::SETUGE:
    break;
  case ISD::SETULT:
  case ISD::SETULE:
    std::swap(TrueOpnd, FalseOpnd);
    break;
  }

  SDValue CmpOpnd1 = Cond.getOperand(0);
  SDValue CmpOpnd2 = Cond.getOperand(1);

  // SETCC CmpOpnd1 CmpOpnd2 cond
  // TrueOpnd = CmpOpnd1 - CmpOpnd2
  // FalseOpnd = CmpOpnd2 - CmpOpnd1
  if (TrueOpnd.getOperand(0) == CmpOpnd1 &&
      TrueOpnd.getOperand(1) == CmpOpnd2 &&
      FalseOpnd.getOperand(0) == CmpOpnd2 &&
      FalseOpnd.getOperand(1) == CmpOpnd1) {
    return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(1).getValueType(),
                       CmpOpnd1, CmpOpnd2,
                       DAG.getTargetConstant(0, dl, MVT::i32));
  }

  return SDValue();
}
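
// Illustrative combineVSelect match (assuming a P9 Altivec subtarget): for
// v4i32 values a and b,
//   vselect (setcc a, b, setugt), (sub a, b), (sub b, a)
// is replaced with (PPCISD::VABSD a, b, 0), which selects to vabsduw.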