//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCCCState.h"
#include "PPCCallingConv.h"
#include "PPCFrameLowering.h"
#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCRegisterInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSymbolXCOFF.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "ppc-lowering"

static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
    cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
    cl::desc("disable setting the node scheduling preference to ILP on PPC"),
    cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
    cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableSCO("disable-ppc-sco",
    cl::desc("disable sibling call optimization on ppc"), cl::Hidden);

static cl::opt<bool> DisableInnermostLoopAlign32(
    "disable-ppc-innermost-loop-align32",
    cl::desc("don't always align innermost loop to 32 bytes on ppc"),
    cl::Hidden);

static cl::opt<bool> EnableQuadPrecision("enable-ppc-quad-precision",
    cl::desc("enable quad precision float support on ppc"), cl::Hidden);

static cl::opt<bool> UseAbsoluteJumpTables("ppc-use-absolute-jumptables",
    cl::desc("use absolute jump tables on ppc"), cl::Hidden);

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumSiblingCalls, "Number of sibling calls");

static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int);

static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl);

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
                                     const PPCSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4));

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  if (!useSoftFloat()) {
    if (hasSPE()) {
      addRegisterClass(MVT::f32, &PPC::GPRCRegClass);
      addRegisterClass(MVT::f64, &PPC::SPERCRegClass);
    } else {
      addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
      addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
    }
  }

  // Match BITREVERSE to a customized fast code sequence in the td file.
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);

  // Sub-word ATOMIC_CMP_SWAP needs to ensure that the input is zero-extended.
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
  }

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
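  // (These select to the update-form instructions such as lbzu/lwzu/stwu,
  // which write the computed effective address back into the base register.)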
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  if (!Subtarget.hasSPE()) {
    setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
  }

  // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::SINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
  // PPC (the libcall is not available).
  setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions unless we are on P9.
  // On P9 we may use a hardware instruction to compute the remainder.
  // The instructions are not legalized directly because in the cases where the
  // result of both the remainder and the division is required it is more
  // efficient to compute the remainder from the result of the division rather
  // than use the remainder instruction.
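  // (That is, a div/rem pair with the same operands can reuse the quotient,
  // rematerializing the remainder as a - (a / b) * b with a single divide.)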
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::SREM, MVT::i32, Custom);
    setOperationAction(ISD::UREM, MVT::i32, Custom);
    setOperationAction(ISD::SREM, MVT::i64, Custom);
    setOperationAction(ISD::UREM, MVT::i64, Custom);
  } else {
    setOperationAction(ISD::SREM, MVT::i32, Expand);
    setOperationAction(ISD::UREM, MVT::i32, Expand);
    setOperationAction(ISD::SREM, MVT::i64, Expand);
    setOperationAction(ISD::UREM, MVT::i64, Expand);
  }

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  if (Subtarget.hasSPE()) {
    setOperationAction(ISD::FMA, MVT::f64, Expand);
    setOperationAction(ISD::FMA, MVT::f32, Expand);
  } else {
    setOperationAction(ISD::FMA, MVT::f64, Legal);
    setOperationAction(ISD::FMA, MVT::f32, Legal);
  }

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP, but we can use the vector BSWAP instruction
  // xxbrd to speed up scalar BSWAP64.
  // CTPOP and CTTZ were introduced in P8 and P9, respectively.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  if (Subtarget.hasP9Vector())
    setOperationAction(ISD::BSWAP, MVT::i64, Custom);
  else
    setOperationAction(ISD::BSWAP, MVT::i64, Expand);
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::CTTZ, MVT::i32, Legal);
    setOperationAction(ISD::CTTZ, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTTZ, MVT::i32, Expand);
    setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  }

  if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have SELECT.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires a SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  if (Subtarget.hasSPE()) {
    // SPE has built-in conversions.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
  } else {
    // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

    // PowerPC does not have [U|S]INT_TO_FP.
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
  }

  if (Subtarget.hasDirectMove() && isPPC64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i64, Legal);
    setOperationAction(ISD::BITCAST, MVT::f64, Legal);
  } else {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
  }

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling but a light-weight setjmp/longjmp replacement to
  // support continuation, user-level threading, and so on. As a result, no
  // other SjLj exception interfaces are implemented; please don't build
  // your own exception handling based on them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
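  // (Clang's __builtin_setjmp/__builtin_longjmp reach these nodes through the
  // llvm.eh.sjlj.setjmp and llvm.eh.sjlj.longjmp intrinsics.)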
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  if (Subtarget.is64BitELFABI()) {
    // VAARG always uses double-word chunks, so promote anything smaller.
    setOperationAction(ISD::VAARG, MVT::i1, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i8, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i16, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i32, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
  } else if (Subtarget.is32BitELFABI()) {
    // VAARG is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VAARG, MVT::i64, Custom);
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  // VACOPY is custom lowered with the 32-bit SVR4 ABI.
  if (Subtarget.is32BitELFABI())
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
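  // (This covers the loop-decrement intrinsic used when loops are converted
  // to CTR-based bdnz/bdz form; its i1 result feeds the loop's exit branch.)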
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  // Comparisons that require checking two conditions.
  if (Subtarget.hasSPE()) {
    setCondCodeAction(ISD::SETO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::f64, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
  }
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget.has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    if (Subtarget.hasSPE())
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
    else
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
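    // (The *_PARTS nodes shift a value split across two registers of the
    // half-width type: they take the lo/hi parts plus the shift amount and
    // produce both result halves.)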
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget.hasVSX()) {
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
  }

  if (Subtarget.hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // For v2i64, these are only valid with P8Vector. This is corrected after
      // the loop.
      if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) {
        setOperationAction(ISD::SMAX, VT, Legal);
        setOperationAction(ISD::SMIN, VT, Legal);
        setOperationAction(ISD::UMAX, VT, Legal);
        setOperationAction(ISD::UMIN, VT, Legal);
      } else {
        setOperationAction(ISD::SMAX, VT, Expand);
        setOperationAction(ISD::SMIN, VT, Expand);
        setOperationAction(ISD::UMAX, VT, Expand);
        setOperationAction(ISD::UMIN, VT, Expand);
      }

      if (Subtarget.hasVSX()) {
        setOperationAction(ISD::FMAXNUM, VT, Legal);
        setOperationAction(ISD::FMINNUM, VT, Legal);
      }

      // Vector instructions introduced in P8.
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      } else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // Vector instructions introduced in P9.
      if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
        setOperationAction(ISD::CTTZ, VT, Legal);
      else
        setOperationAction(ISD::CTTZ, VT, Expand);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType(ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v4i32);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v4i32);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v4i32);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType(ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType(ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL, VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL, VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }
    if (!Subtarget.hasP8Vector()) {
      setOperationAction(ISD::SMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::SMIN, MVT::v2i64, Expand);
      setOperationAction(ISD::UMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::UMIN, MVT::v2i64, Expand);
    }

    for (auto VT : {MVT::v2i64, MVT::v4i32, MVT::v8i16, MVT::v16i8})
      setOperationAction(ISD::ABS, VT, Custom);

    // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    // Vector truncates to sub-word integers that fit in an Altivec/VSX
    // register are cheap, so handle them before they get expanded to scalar.
    setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);

    setOperationAction(ISD::AND, MVT::v4i32, Legal);
    setOperationAction(ISD::OR, MVT::v4i32, Legal);
    setOperationAction(ISD::XOR, MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    // Without hasP8Altivec set, v2i64 SMAX isn't available.
    // But ABS custom lowering requires SMAX support.
    if (!Subtarget.hasP8Altivec())
      setOperationAction(ISD::ABS, MVT::v2i64, Expand);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
      }
      if (Subtarget.hasDirectMove() && isPPC64) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
      }
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        // 128-bit shifts can be accomplished via 3 instructions for SHL and
        // SRL, but not for SRA because of the instructions available:
        // VS{RL} and VS{RL}O. However, due to direct-move costs, it's not
        // worth doing.
        setOperationAction(ISD::SHL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRA, MVT::v1i128, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      } else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Custom handling for partial vectors of integers converted to
      // floating point. We already have optimal handling for v2i32 through
      // the DAG combine, so those aren't necessary.
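      // (These narrow source types are not legal, so without custom lowering
      // the conversions would be scalarized one element at a time.)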
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);

      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v2f64, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Legal);

      if (Subtarget.hasDirectMove())
        setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }

    if (Subtarget.hasP9Vector()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

      // 128-bit shifts can be accomplished via 3 instructions for SHL and
      // SRL, but not for SRA because of the instructions available:
      // VS{RL} and VS{RL}O.
      setOperationAction(ISD::SHL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRA, MVT::v1i128, Expand);

      if (EnableQuadPrecision) {
        addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
        setOperationAction(ISD::FADD, MVT::f128, Legal);
        setOperationAction(ISD::FSUB, MVT::f128, Legal);
        setOperationAction(ISD::FDIV, MVT::f128, Legal);
        setOperationAction(ISD::FMUL, MVT::f128, Legal);
        setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
        // No extending loads to f128 on PPC.
        for (MVT FPT : MVT::fp_valuetypes())
          setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
        setOperationAction(ISD::FMA, MVT::f128, Legal);
        setCondCodeAction(ISD::SETULT, MVT::f128, Expand);
        setCondCodeAction(ISD::SETUGT, MVT::f128, Expand);
        setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand);
        setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
        setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
        setCondCodeAction(ISD::SETONE, MVT::f128, Expand);

        setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
        setOperationAction(ISD::FRINT, MVT::f128, Legal);
        setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
        setOperationAction(ISD::FCEIL, MVT::f128, Legal);
        setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
        setOperationAction(ISD::FROUND, MVT::f128, Legal);

        setOperationAction(ISD::SELECT, MVT::f128, Expand);
        setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
        setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
        setTruncStoreAction(MVT::f128, MVT::f64, Expand);
        setTruncStoreAction(MVT::f128, MVT::f32, Expand);
        setOperationAction(ISD::BITCAST, MVT::i128, Custom);
        // No implementation for these ops for PowerPC.
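        // (The Expand entries below become calls into the f128 libm routines
        // registered later in this constructor, e.g. sinf128 and fmodf128.)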
        setOperationAction(ISD::FSIN, MVT::f128, Expand);
        setOperationAction(ISD::FCOS, MVT::f128, Expand);
        setOperationAction(ISD::FPOW, MVT::f128, Expand);
        setOperationAction(ISD::FPOWI, MVT::f128, Expand);
        setOperationAction(ISD::FREM, MVT::f128, Expand);
      }
      setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
    }

    if (Subtarget.hasP9Altivec()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    }
  }

  if (Subtarget.hasQPX()) {
    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FREM, MVT::v4f64, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f64, Custom);
    setOperationAction(ISD::STORE, MVT::v4f64, Custom);

    setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f64, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f64, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f64, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f64, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f64, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f64, Expand);

    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);

    setOperationAction(ISD::FNEG, MVT::v4f64, Legal);
    setOperationAction(ISD::FABS, MVT::v4f64, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f64, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);

    addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FREM, MVT::v4f32, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f32, Custom);
    setOperationAction(ISD::STORE, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f32, Expand);

    setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
    setOperationAction(ISD::FABS, MVT::v4f32, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);

    setOperationAction(ISD::AND, MVT::v4i1, Legal);
    setOperationAction(ISD::OR, MVT::v4i1, Legal);
    setOperationAction(ISD::XOR, MVT::v4i1, Legal);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);

    setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
    setOperationAction(ISD::STORE, MVT::v4i1, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i1, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i1, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i1, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i1, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);

    addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);

    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);

    // These need to set FE_INEXACT, and so cannot be vectorized here.
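    // (Unlike nearbyint, rint must raise FE_INEXACT whenever its result
    // differs from the operand.)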
    setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);

    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    } else {
      setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);

      setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    }
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasAltivec()) {
    // Altivec instructions set fields to all zeros or all ones.
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  }

  if (!isPPC64) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::BUILD_VECTOR);
  if (Subtarget.hasFPCVT())
    setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  if (Subtarget.useCRBits())
    setTargetDAGCombine(ISD::BRCOND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);

  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  setTargetDAGCombine(ISD::TRUNCATE);
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine(ISD::TRUNCATE);
    setTargetDAGCombine(ISD::SETCC);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  // Use reciprocal estimates.
  if (TM.Options.UnsafeFPMath) {
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::FSQRT);
  }

  if (Subtarget.hasP9Altivec()) {
    setTargetDAGCombine(ISD::ABS);
    setTargetDAGCombine(ISD::VSELECT);
  }

  // Darwin long double math library functions have $LDBL128 appended.
  if (Subtarget.isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  if (EnableQuadPrecision) {
    setLibcallName(RTLIB::LOG_F128, "logf128");
    setLibcallName(RTLIB::LOG2_F128, "log2f128");
    setLibcallName(RTLIB::LOG10_F128, "log10f128");
    setLibcallName(RTLIB::EXP_F128, "expf128");
    setLibcallName(RTLIB::EXP2_F128, "exp2f128");
    setLibcallName(RTLIB::SIN_F128, "sinf128");
    setLibcallName(RTLIB::COS_F128, "cosf128");
    setLibcallName(RTLIB::POW_F128, "powf128");
    setLibcallName(RTLIB::FMIN_F128, "fminf128");
    setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
    setLibcallName(RTLIB::POWI_F128, "__powikf2");
    setLibcallName(RTLIB::REM_F128, "fmodf128");
  }

  // With 32 condition bits, we don't need to sink (and duplicate) compares
  // aggressively in CodeGenPrep.
  if (Subtarget.useCRBits()) {
    setHasMultipleConditionRegisters();
    setJumpIsExpensive();
  }

  setMinFunctionAlignment(Align(4));
  if (Subtarget.isDarwin())
    setPrefFunctionAlignment(Align(16));

  switch (Subtarget.getDarwinDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_A2:
  case PPC::DIR_E500:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9:
    setPrefLoopAlignment(Align(16));
    setPrefFunctionAlignment(Align(16));
    break;
  }

  if (Subtarget.enableMachineScheduler())
    setSchedulingPreference(Sched::Source);
  else
    setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties(STI.getRegisterInfo());

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getDarwinDirective() == PPC::DIR_E500mc ||
      Subtarget.getDarwinDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;
  } else if (Subtarget.getDarwinDirective() == PPC::DIR_A2) {
    // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of the function call, even when warm, can be
    // over one hundred cycles.
    MaxStoresPerMemset = 128;
    MaxStoresPerMemcpy = 128;
    MaxStoresPerMemmove = 128;
    MaxLoadsPerMemcmp = 128;
  } else {
    MaxLoadsPerMemcmp = 8;
    MaxLoadsPerMemcmpOptSize = 4;
  }
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
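/// For example, an aggregate containing a 128-bit vector member is raised to
/// 16-byte alignment once Altivec is available, and a 256-bit QPX vector
/// member raises it to 32 bytes.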
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
                             unsigned MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
      MaxAlign = 32;
    else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      unsigned EltAlign = 0;
      getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {
  // Darwin passes everything on a 4-byte boundary.
  if (Subtarget.isDarwin())
    return 4;

  // 16-byte and wider vectors are passed on a 16-byte boundary.
  // Everything else is passed on an 8-byte boundary on PPC64 and a 4-byte
  // boundary on PPC32.
  unsigned Align = Subtarget.isPPC64() ? 8 : 4;
  if (Subtarget.hasAltivec() || Subtarget.hasQPX())
    getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ? 32 : 16);
  return Align;
}

bool PPCTargetLowering::useSoftFloat() const {
  return Subtarget.useSoftFloat();
}

bool PPCTargetLowering::hasSPE() const {
  return Subtarget.hasSPE();
}

bool PPCTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
  return VT.isScalarInteger();
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((PPCISD::NodeType)Opcode) {
  case PPCISD::FIRST_NUMBER: break;
  case PPCISD::FSEL: return "PPCISD::FSEL";
  case PPCISD::XSMAXCDP: return "PPCISD::XSMAXCDP";
  case PPCISD::XSMINCDP: return "PPCISD::XSMINCDP";
  case PPCISD::FCFID: return "PPCISD::FCFID";
  case PPCISD::FCFIDU: return "PPCISD::FCFIDU";
  case PPCISD::FCFIDS: return "PPCISD::FCFIDS";
  case PPCISD::FCFIDUS: return "PPCISD::FCFIDUS";
  case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
  case PPCISD::FCTIDUZ: return "PPCISD::FCTIDUZ";
  case PPCISD::FCTIWUZ: return "PPCISD::FCTIWUZ";
  case PPCISD::FP_TO_UINT_IN_VSR:
    return "PPCISD::FP_TO_UINT_IN_VSR";
  case PPCISD::FP_TO_SINT_IN_VSR:
    return "PPCISD::FP_TO_SINT_IN_VSR";
  case PPCISD::FRE: return "PPCISD::FRE";
  case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE";
  case PPCISD::STFIWX: return "PPCISD::STFIWX";
  case PPCISD::VMADDFP: return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM: return "PPCISD::VPERM";
  case PPCISD::XXSPLT: return "PPCISD::XXSPLT";
  case PPCISD::VECINSERT: return "PPCISD::VECINSERT";
  case PPCISD::XXREVERSE: return "PPCISD::XXREVERSE";
  case PPCISD::XXPERMDI: return "PPCISD::XXPERMDI";
  case PPCISD::VECSHL: return "PPCISD::VECSHL";
  case PPCISD::CMPB: return "PPCISD::CMPB";
  case PPCISD::Hi: return "PPCISD::Hi";
  case PPCISD::Lo: return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY";
"PPCISD::ATOMIC_CMP_SWAP_8"; 1337 case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16"; 1338 case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC"; 1339 case PPCISD::DYNAREAOFFSET: return "PPCISD::DYNAREAOFFSET"; 1340 case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg"; 1341 case PPCISD::SRL: return "PPCISD::SRL"; 1342 case PPCISD::SRA: return "PPCISD::SRA"; 1343 case PPCISD::SHL: return "PPCISD::SHL"; 1344 case PPCISD::SRA_ADDZE: return "PPCISD::SRA_ADDZE"; 1345 case PPCISD::CALL: return "PPCISD::CALL"; 1346 case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP"; 1347 case PPCISD::MTCTR: return "PPCISD::MTCTR"; 1348 case PPCISD::BCTRL: return "PPCISD::BCTRL"; 1349 case PPCISD::BCTRL_LOAD_TOC: return "PPCISD::BCTRL_LOAD_TOC"; 1350 case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG"; 1351 case PPCISD::READ_TIME_BASE: return "PPCISD::READ_TIME_BASE"; 1352 case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP"; 1353 case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP"; 1354 case PPCISD::MFOCRF: return "PPCISD::MFOCRF"; 1355 case PPCISD::MFVSR: return "PPCISD::MFVSR"; 1356 case PPCISD::MTVSRA: return "PPCISD::MTVSRA"; 1357 case PPCISD::MTVSRZ: return "PPCISD::MTVSRZ"; 1358 case PPCISD::SINT_VEC_TO_FP: return "PPCISD::SINT_VEC_TO_FP"; 1359 case PPCISD::UINT_VEC_TO_FP: return "PPCISD::UINT_VEC_TO_FP"; 1360 case PPCISD::ANDIo_1_EQ_BIT: return "PPCISD::ANDIo_1_EQ_BIT"; 1361 case PPCISD::ANDIo_1_GT_BIT: return "PPCISD::ANDIo_1_GT_BIT"; 1362 case PPCISD::VCMP: return "PPCISD::VCMP"; 1363 case PPCISD::VCMPo: return "PPCISD::VCMPo"; 1364 case PPCISD::LBRX: return "PPCISD::LBRX"; 1365 case PPCISD::STBRX: return "PPCISD::STBRX"; 1366 case PPCISD::LFIWAX: return "PPCISD::LFIWAX"; 1367 case PPCISD::LFIWZX: return "PPCISD::LFIWZX"; 1368 case PPCISD::LXSIZX: return "PPCISD::LXSIZX"; 1369 case PPCISD::STXSIX: return "PPCISD::STXSIX"; 1370 case PPCISD::VEXTS: return "PPCISD::VEXTS"; 1371 case PPCISD::SExtVElems: return "PPCISD::SExtVElems"; 1372 case PPCISD::LXVD2X: return "PPCISD::LXVD2X"; 1373 case PPCISD::STXVD2X: return "PPCISD::STXVD2X"; 1374 case PPCISD::LOAD_VEC_BE: return "PPCISD::LOAD_VEC_BE"; 1375 case PPCISD::STORE_VEC_BE: return "PPCISD::STORE_VEC_BE"; 1376 case PPCISD::ST_VSR_SCAL_INT: 1377 return "PPCISD::ST_VSR_SCAL_INT"; 1378 case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH"; 1379 case PPCISD::BDNZ: return "PPCISD::BDNZ"; 1380 case PPCISD::BDZ: return "PPCISD::BDZ"; 1381 case PPCISD::MFFS: return "PPCISD::MFFS"; 1382 case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ"; 1383 case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN"; 1384 case PPCISD::CR6SET: return "PPCISD::CR6SET"; 1385 case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET"; 1386 case PPCISD::PPC32_GOT: return "PPCISD::PPC32_GOT"; 1387 case PPCISD::PPC32_PICGOT: return "PPCISD::PPC32_PICGOT"; 1388 case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA"; 1389 case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L"; 1390 case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS"; 1391 case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA"; 1392 case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L"; 1393 case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR"; 1394 case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR"; 1395 case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA"; 1396 case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L"; 1397 case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR"; 1398 case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR"; 1399 
case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA"; 1400 case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L"; 1401 case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT"; 1402 case PPCISD::SC: return "PPCISD::SC"; 1403 case PPCISD::CLRBHRB: return "PPCISD::CLRBHRB"; 1404 case PPCISD::MFBHRBE: return "PPCISD::MFBHRBE"; 1405 case PPCISD::RFEBB: return "PPCISD::RFEBB"; 1406 case PPCISD::XXSWAPD: return "PPCISD::XXSWAPD"; 1407 case PPCISD::SWAP_NO_CHAIN: return "PPCISD::SWAP_NO_CHAIN"; 1408 case PPCISD::VABSD: return "PPCISD::VABSD"; 1409 case PPCISD::QVFPERM: return "PPCISD::QVFPERM"; 1410 case PPCISD::QVGPCI: return "PPCISD::QVGPCI"; 1411 case PPCISD::QVALIGNI: return "PPCISD::QVALIGNI"; 1412 case PPCISD::QVESPLATI: return "PPCISD::QVESPLATI"; 1413 case PPCISD::QBFLT: return "PPCISD::QBFLT"; 1414 case PPCISD::QVLFSb: return "PPCISD::QVLFSb"; 1415 case PPCISD::BUILD_FP128: return "PPCISD::BUILD_FP128"; 1416 case PPCISD::BUILD_SPE64: return "PPCISD::BUILD_SPE64"; 1417 case PPCISD::EXTRACT_SPE: return "PPCISD::EXTRACT_SPE"; 1418 case PPCISD::EXTSWSLI: return "PPCISD::EXTSWSLI"; 1419 case PPCISD::LD_VSX_LH: return "PPCISD::LD_VSX_LH"; 1420 case PPCISD::FP_EXTEND_HALF: return "PPCISD::FP_EXTEND_HALF"; 1421 case PPCISD::LD_SPLAT: return "PPCISD::LD_SPLAT"; 1422 } 1423 return nullptr; 1424 } 1425 1426 EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C, 1427 EVT VT) const { 1428 if (!VT.isVector()) 1429 return Subtarget.useCRBits() ? MVT::i1 : MVT::i32; 1430 1431 if (Subtarget.hasQPX()) 1432 return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements()); 1433 1434 return VT.changeVectorElementTypeToInteger(); 1435 } 1436 1437 bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const { 1438 assert(VT.isFloatingPoint() && "Non-floating-point FMA?"); 1439 return true; 1440 } 1441 1442 //===----------------------------------------------------------------------===// 1443 // Node matching predicates, for use by the tblgen matching code. 1444 //===----------------------------------------------------------------------===// 1445 1446 /// isFloatingPointZero - Return true if this is 0.0 or -0.0. 1447 static bool isFloatingPointZero(SDValue Op) { 1448 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 1449 return CFP->getValueAPF().isZero(); 1450 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { 1451 // Maybe this has already been legalized into the constant pool? 1452 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1))) 1453 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 1454 return CFP->getValueAPF().isZero(); 1455 } 1456 return false; 1457 } 1458 1459 /// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return 1460 /// true if Op is undef or if it matches the specified value. 1461 static bool isConstantOrUndef(int Op, int Val) { 1462 return Op < 0 || Op == Val; 1463 } 1464 1465 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a 1466 /// VPKUHUM instruction. 1467 /// The ShuffleKind distinguishes between big-endian operations with 1468 /// two different inputs (0), either-endian operations with two identical 1469 /// inputs (1), and little-endian operations with two different inputs (2). 1470 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td). 
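///
/// For example, with ShuffleKind 0 on a big-endian target the expected mask
/// is <1,3,5,...,31> (the low-order byte of each halfword across both
/// inputs), while with ShuffleKind 2 on a little-endian target it is the
/// even-numbered bytes <0,2,4,...,30>.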
1471 bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, 1472 SelectionDAG &DAG) { 1473 bool IsLE = DAG.getDataLayout().isLittleEndian(); 1474 if (ShuffleKind == 0) { 1475 if (IsLE) 1476 return false; 1477 for (unsigned i = 0; i != 16; ++i) 1478 if (!isConstantOrUndef(N->getMaskElt(i), i*2+1)) 1479 return false; 1480 } else if (ShuffleKind == 2) { 1481 if (!IsLE) 1482 return false; 1483 for (unsigned i = 0; i != 16; ++i) 1484 if (!isConstantOrUndef(N->getMaskElt(i), i*2)) 1485 return false; 1486 } else if (ShuffleKind == 1) { 1487 unsigned j = IsLE ? 0 : 1; 1488 for (unsigned i = 0; i != 8; ++i) 1489 if (!isConstantOrUndef(N->getMaskElt(i), i*2+j) || 1490 !isConstantOrUndef(N->getMaskElt(i+8), i*2+j)) 1491 return false; 1492 } 1493 return true; 1494 } 1495 1496 /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a 1497 /// VPKUWUM instruction. 1498 /// The ShuffleKind distinguishes between big-endian operations with 1499 /// two different inputs (0), either-endian operations with two identical 1500 /// inputs (1), and little-endian operations with two different inputs (2). 1501 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td). 1502 bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, 1503 SelectionDAG &DAG) { 1504 bool IsLE = DAG.getDataLayout().isLittleEndian(); 1505 if (ShuffleKind == 0) { 1506 if (IsLE) 1507 return false; 1508 for (unsigned i = 0; i != 16; i += 2) 1509 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+2) || 1510 !isConstantOrUndef(N->getMaskElt(i+1), i*2+3)) 1511 return false; 1512 } else if (ShuffleKind == 2) { 1513 if (!IsLE) 1514 return false; 1515 for (unsigned i = 0; i != 16; i += 2) 1516 if (!isConstantOrUndef(N->getMaskElt(i ), i*2) || 1517 !isConstantOrUndef(N->getMaskElt(i+1), i*2+1)) 1518 return false; 1519 } else if (ShuffleKind == 1) { 1520 unsigned j = IsLE ? 0 : 2; 1521 for (unsigned i = 0; i != 8; i += 2) 1522 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+j) || 1523 !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) || 1524 !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) || 1525 !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1)) 1526 return false; 1527 } 1528 return true; 1529 } 1530 1531 /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a 1532 /// VPKUDUM instruction, AND the VPKUDUM instruction exists for the 1533 /// current subtarget. 1534 /// 1535 /// The ShuffleKind distinguishes between big-endian operations with 1536 /// two different inputs (0), either-endian operations with two identical 1537 /// inputs (1), and little-endian operations with two different inputs (2). 1538 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td). 
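///
/// For example, with ShuffleKind 0 on a big-endian target the expected mask
/// is <4,5,6,7, 12,13,14,15, 20,21,22,23, 28,29,30,31>, i.e. the low-order
/// word of each doubleword across both inputs.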
1539 bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, 1540 SelectionDAG &DAG) { 1541 const PPCSubtarget& Subtarget = 1542 static_cast<const PPCSubtarget&>(DAG.getSubtarget()); 1543 if (!Subtarget.hasP8Vector()) 1544 return false; 1545 1546 bool IsLE = DAG.getDataLayout().isLittleEndian(); 1547 if (ShuffleKind == 0) { 1548 if (IsLE) 1549 return false; 1550 for (unsigned i = 0; i != 16; i += 4) 1551 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+4) || 1552 !isConstantOrUndef(N->getMaskElt(i+1), i*2+5) || 1553 !isConstantOrUndef(N->getMaskElt(i+2), i*2+6) || 1554 !isConstantOrUndef(N->getMaskElt(i+3), i*2+7)) 1555 return false; 1556 } else if (ShuffleKind == 2) { 1557 if (!IsLE) 1558 return false; 1559 for (unsigned i = 0; i != 16; i += 4) 1560 if (!isConstantOrUndef(N->getMaskElt(i ), i*2) || 1561 !isConstantOrUndef(N->getMaskElt(i+1), i*2+1) || 1562 !isConstantOrUndef(N->getMaskElt(i+2), i*2+2) || 1563 !isConstantOrUndef(N->getMaskElt(i+3), i*2+3)) 1564 return false; 1565 } else if (ShuffleKind == 1) { 1566 unsigned j = IsLE ? 0 : 4; 1567 for (unsigned i = 0; i != 8; i += 4) 1568 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+j) || 1569 !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) || 1570 !isConstantOrUndef(N->getMaskElt(i+2), i*2+j+2) || 1571 !isConstantOrUndef(N->getMaskElt(i+3), i*2+j+3) || 1572 !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) || 1573 !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1) || 1574 !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) || 1575 !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3)) 1576 return false; 1577 } 1578 return true; 1579 } 1580 1581 /// isVMerge - Common function, used to match vmrg* shuffles. 1582 /// 1583 static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize, 1584 unsigned LHSStart, unsigned RHSStart) { 1585 if (N->getValueType(0) != MVT::v16i8) 1586 return false; 1587 assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) && 1588 "Unsupported merge size!"); 1589 1590 for (unsigned i = 0; i != 8/UnitSize; ++i) // Step over units 1591 for (unsigned j = 0; j != UnitSize; ++j) { // Step over bytes within unit 1592 if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j), 1593 LHSStart+j+i*UnitSize) || 1594 !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j), 1595 RHSStart+j+i*UnitSize)) 1596 return false; 1597 } 1598 return true; 1599 } 1600 1601 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for 1602 /// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes). 1603 /// The ShuffleKind distinguishes between big-endian merges with two 1604 /// different inputs (0), either-endian merges with two identical inputs (1), 1605 /// and little-endian merges with two different inputs (2). For the latter, 1606 /// the input operands are swapped (see PPCInstrAltivec.td). 
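///
/// For example, a big-endian vmrglw with two different inputs (ShuffleKind 0,
/// UnitSize 4) expects the mask <8,9,10,11, 24,25,26,27, 12,13,14,15,
/// 28,29,30,31>, interleaving the low halves of the two inputs word by word.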
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  }
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  }
}

/**
 * Common function used to match vmrgew and vmrgow shuffles.
 *
 * The indexOffset determines whether to look for even or odd words in
 * the shuffle mask. This is based on the endianness of the target machine.
 *   - Little Endian:
 *     - Use offset of 0 to check for odd elements
 *     - Use offset of 4 to check for even elements
 *   - Big Endian:
 *     - Use offset of 0 to check for even elements
 *     - Use offset of 4 to check for odd elements
 * A detailed description of the vector element ordering for little endian and
 * big endian can be found at
 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
 * Targeting your applications - what little endian and big endian IBM XL C/C++
 * compiler differences mean to you
 *
 * The mask to the shuffle vector instruction specifies the indices of the
 * elements from the two input vectors to place in the result. The elements are
 * numbered in array-access order, starting with the first vector. These
 * vectors are always of type v16i8, thus each vector will contain 16
 * byte-sized elements. More information on the shuffle_vector instruction can
 * be found in the Language Reference at
 * http://llvm.org/docs/LangRef.html#shufflevector-instruction.
 *
 * The RHSStartValue indicates whether the same input vectors are used (unary)
 * or two different input vectors are used, based on the following:
 *   - If the instruction uses the same vector for both inputs, the range of
 *     the indices will be 0 to 15. In this case, the RHSStart value passed
 *     should be 0.
 *   - If the instruction has two different vectors then the range of the
 *     indices will be 0 to 31.
 *     In this case, the RHSStart value passed should be 16 (indices 0-15
 *     specify elements in the first vector while indices 16 to 31 specify
 *     elements in the second vector).
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] IndexOffset Specifies whether to look for even or odd elements
 * \param[in] RHSStartValue Specifies the starting index for the right-hand
 *            input vector to the shuffle_vector instruction
 * \return true iff this shuffle vector represents an even or odd word merge
 */
static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
                     unsigned RHSStartValue) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;

  for (unsigned i = 0; i < 2; ++i)
    for (unsigned j = 0; j < 4; ++j)
      if (!isConstantOrUndef(N->getMaskElt(i*4+j),
                             i*RHSStartValue+j+IndexOffset) ||
          !isConstantOrUndef(N->getMaskElt(i*4+j+8),
                             i*RHSStartValue+j+IndexOffset+8))
        return false;
  return true;
}

/**
 * Determine if the specified shuffle mask is suitable for the vmrgew or
 * vmrgow instructions.
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
 * \param[in] ShuffleKind Identify the type of merge:
 *   - 0 = big-endian merge with two different inputs;
 *   - 1 = either-endian merge with two identical inputs;
 *   - 2 = little-endian merge with two different inputs (inputs are swapped
 *     for little-endian merges).
 * \param[in] DAG The current SelectionDAG
 * \return true iff this shuffle mask is suitable for the vmrgew or vmrgow
 *         instruction selected by CheckEven
 */
bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                              unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    unsigned indexOffset = CheckEven ? 4 : 0;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  } else {
    unsigned indexOffset = CheckEven ? 0 : 4;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 0) // Normal
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  }
}

/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
/// The ShuffleKind distinguishes between big-endian operations with two
/// different inputs (0), either-endian operations with two identical inputs
/// (1), and little-endian operations with two different inputs (2). For the
/// latter, the input operands are swapped (see PPCInstrAltivec.td).
int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
                             SelectionDAG &DAG) {
  if (N->getValueType(0) != MVT::v16i8)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 16) return -1; // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
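  // (Illustrative: with ShuffleKind 0 on a big-endian target, the mask
  // <5,6,7,...,20> has its first defined element 5 at position 0, so the
  // shift amount is 5 and every later element must continue the run
  // 6, 7, 8, ... for the match to succeed.)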
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;

  ShiftAmt -= i;
  bool isLE = DAG.getDataLayout().isLittleEndian();

  if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
        return -1;
  } else if (ShuffleKind == 1) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
        return -1;
  } else
    return -1;

  if (isLE)
    ShiftAmt = 16 - ShiftAmt;

  return ShiftAmt;
}

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// one of the splat operations (VSPLTB/VSPLTH/VSPLTW/XXSPLTW/LXVDSX/etc.).
bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
  assert(N->getValueType(0) == MVT::v16i8 && isPowerOf2_32(EltSize) &&
         EltSize <= 8 && "Can only handle 1,2,4,8 byte element sizes");

  // The consecutive indices need to specify an element, not part of two
  // different elements. So abandon ship early if this isn't the case.
  if (N->getMaskElt(0) % EltSize != 0)
    return false;

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = N->getMaskElt(0);

  // FIXME: Handle UNDEF elements too!
  if (ElementBase >= 16)
    return false;

  // Check that the indices are consecutive, in the case of a multi-byte
  // element splatted with a v16i8 mask.
  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
      return false;

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
        return false;
  }
  return true;
}

/// Check that the mask is shuffling N byte elements. Within each N byte
/// element of the mask, the indices could be either in increasing or
/// decreasing order as long as they are consecutive.
/// \param[in] N the shuffle vector SD Node to analyze
/// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/
///            Word/DoubleWord/QuadWord).
/// \param[in] StepLen the step between consecutive indices within an element:
///            1 if the indices are increasing, -1 if they are decreasing.
/// \return true iff the mask is shuffling N byte elements.
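///
/// For example, with Width 4 and StepLen 1 the mask <4,5,6,7, 0,1,2,3, ...>
/// qualifies (each word's bytes ascend from a multiple of 4), while with
/// StepLen -1 a byte-reversing mask such as <3,2,1,0, 7,6,5,4, ...>
/// qualifies instead.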
static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
                                   int StepLen) {
  assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
         "Unexpected element width.");
  assert((StepLen == 1 || StepLen == -1) && "Unexpected step length.");

  unsigned NumOfElem = 16 / Width;
  unsigned MaskVal[16]; // Width is never greater than 16
  for (unsigned i = 0; i < NumOfElem; ++i) {
    MaskVal[0] = N->getMaskElt(i * Width);
    if ((StepLen == 1) && (MaskVal[0] % Width)) {
      return false;
    } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
      return false;
    }

    for (unsigned j = 1; j < Width; ++j) {
      MaskVal[j] = N->getMaskElt(i * Width + j);
      if (MaskVal[j] != MaskVal[j-1] + StepLen) {
        return false;
      }
    }
  }

  return true;
}

bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                          unsigned &InsertAtByte, bool &Swap, bool IsLE) {
  if (!isNByteElemShuffleMask(N, 4, 1))
    return false;

  // Now we look at mask elements 0,4,8,12.
  unsigned M0 = N->getMaskElt(0) / 4;
  unsigned M1 = N->getMaskElt(4) / 4;
  unsigned M2 = N->getMaskElt(8) / 4;
  unsigned M3 = N->getMaskElt(12) / 4;
  unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
  unsigned BigEndianShifts[] = { 3, 0, 1, 2 };

  // Below, let H and L be arbitrary elements of the shuffle mask
  // where H is in the range [4,7] and L is in the range [0,3].
  // H, 1, 2, 3 or L, 5, 6, 7
  if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
      (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
    InsertAtByte = IsLE ? 12 : 0;
    Swap = M0 < 4;
    return true;
  }
  // 0, H, 2, 3 or 4, L, 6, 7
  if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
      (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
    InsertAtByte = IsLE ? 8 : 4;
    Swap = M1 < 4;
    return true;
  }
  // 0, 1, H, 3 or 4, 5, L, 7
  if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
      (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
    InsertAtByte = IsLE ? 4 : 8;
    Swap = M2 < 4;
    return true;
  }
  // 0, 1, 2, H or 4, 5, 6, L
  if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
      (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
    ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
    InsertAtByte = IsLE ? 0 : 12;
    Swap = M3 < 4;
    return true;
  }

  // If both vector operands for the shuffle are the same vector, the mask will
  // contain only elements from the first one and the second one will be undef.
  if (N->getOperand(1).isUndef()) {
    ShiftElts = 0;
    Swap = true;
    unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
    if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 12 : 0;
      return true;
    }
    if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 8 : 4;
      return true;
    }
    if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
      InsertAtByte = IsLE ? 4 : 8;
      return true;
    }
    if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
      InsertAtByte = IsLE ? 0 : 12;
      return true;
    }
  }

  return false;
}

bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                               bool &Swap, bool IsLE) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
  // Ensure each byte index of the word is consecutive.
  if (!isNByteElemShuffleMask(N, 4, 1))
    return false;

  // Now we look at mask elements 0,4,8,12, which are the beginning of words.
  unsigned M0 = N->getMaskElt(0) / 4;
  unsigned M1 = N->getMaskElt(4) / 4;
  unsigned M2 = N->getMaskElt(8) / 4;
  unsigned M3 = N->getMaskElt(12) / 4;

  // If both vector operands for the shuffle are the same vector, the mask will
  // contain only elements from the first one and the second one will be undef.
  if (N->getOperand(1).isUndef()) {
    assert(M0 < 4 && "Indexing into an undef vector?");
    if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
      return false;

    ShiftElts = IsLE ? (4 - M0) % 4 : M0;
    Swap = false;
    return true;
  }

  // Ensure each word index of the ShuffleVector Mask is consecutive.
  if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)
    return false;

  if (IsLE) {
    if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
      // Input vectors don't need to be swapped if the leading element
      // of the result is one of the 3 left elements of the second vector
      // (or if there is no shift to be done at all).
      Swap = false;
      ShiftElts = (8 - M0) % 8;
    } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
      // Input vectors need to be swapped if the leading element
      // of the result is one of the 3 left elements of the first vector
      // (or if we're shifting by 4 - thereby simply swapping the vectors).
      Swap = true;
      ShiftElts = (4 - M0) % 4;
    }

    return true;
  } else { // BE
    if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
      // Input vectors don't need to be swapped if the leading element
      // of the result is one of the 4 elements of the first vector.
      Swap = false;
      ShiftElts = M0;
    } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
      // Input vectors need to be swapped if the leading element
      // of the result is one of the 4 elements of the right vector.
      Swap = true;
      ShiftElts = M0 - 4;
    }

    return true;
  }
}

static bool isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  if (!isNByteElemShuffleMask(N, Width, -1))
    return false;

  for (int i = 0; i < 16; i += Width)
    if (N->getMaskElt(i) != i + Width - 1)
      return false;

  return true;
}

bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 2);
}

bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 4);
}

bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 8);
}

bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 16);
}

/// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap
/// if the inputs to the instruction should be swapped and set \p DM to the
/// value for the immediate.
/// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI
/// AND element 0 of the result comes from the first input (LE) or second input
/// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered.
/// \return true iff the given mask of shuffle node \p N is an XXPERMDI shuffle
/// mask.
bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
                                bool &Swap, bool IsLE) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  // Ensure each byte index of the double word is consecutive.
  if (!isNByteElemShuffleMask(N, 8, 1))
    return false;

  unsigned M0 = N->getMaskElt(0) / 8;
  unsigned M1 = N->getMaskElt(8) / 8;
  assert(((M0 | M1) < 4) && "A mask element out of bounds?");

  // If both vector operands for the shuffle are the same vector, the mask will
  // contain only elements from the first one and the second one will be undef.
  if (N->getOperand(1).isUndef()) {
    if ((M0 | M1) < 2) {
      DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
      Swap = false;
      return true;
    } else
      return false;
  }

  if (IsLE) {
    if (M0 > 1 && M1 < 2) {
      Swap = false;
    } else if (M0 < 2 && M1 > 1) {
      M0 = (M0 + 2) % 4;
      M1 = (M1 + 2) % 4;
      Swap = true;
    } else
      return false;

    // Note: if control flow comes here that means Swap is already set above
    DM = (((~M1) & 1) << 1) + ((~M0) & 1);
    return true;
  } else { // BE
    if (M0 < 2 && M1 > 1) {
      Swap = false;
    } else if (M0 > 1 && M1 < 2) {
      M0 = (M0 + 2) % 4;
      M1 = (M1 + 2) % 4;
      Swap = true;
    } else
      return false;

    // Note: if control flow comes here that means Swap is already set above
    DM = (M0 << 1) + (M1 & 1);
    return true;
  }
}

/// getSplatIdxForPPCMnemonics - Return the splat index as a value that is
/// appropriate for PPC mnemonics (which have a big-endian bias, i.e.
/// elements are counted from the left of the vector register).
unsigned PPC::getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
                                         SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  assert(isSplatShuffleMask(SVOp, EltSize));
  if (DAG.getDataLayout().isLittleEndian())
    return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
  else
    return SVOp->getMaskElt(0) / EltSize;
}

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted. The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDValue OpVal(nullptr, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize; // Number of BV entries per spltval.
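    // (Illustrative: a v8i16 build_vector queried with ByteSize == 4 has
    // EltSize == 2, so Multiple == 2 and adjacent pairs of i16 entries must
    // agree to form each logical i32 element of the splat.)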
    SDValue UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the corresponding elements in the buildvector agree
    // across chunks.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).isUndef()) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();

      if (!UniquedVals[i&(Multiple-1)].getNode())
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDValue(); // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk. See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1. If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (!UniquedVals[i].getNode()) continue; // Must have been undefs.

      LeadingZero &= isNullConstant(UniquedVals[i]);
      LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(0, SDLoc(N), MVT::i32); // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
      if (Val < 16) // 0,0,0,4 -> vspltisw(4)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }
    if (LeadingOnes) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
      if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }

    return SDValue();
  }

  // Check to see if this buildvec has a single non-undef value in its
  // elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).isUndef()) continue;
    if (!OpVal.getNode())
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDValue();
  }

  if (!OpVal.getNode()) return SDValue(); // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = EltSize;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getZExtValue();
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat. The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDValue();

  // If the element value is larger than the splat value, check if it consists
  // of a repeated bit pattern of size ByteSize.
  if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
    return SDValue();

  // Properly sign extend the value.
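  // (Illustrative: Value == 0xFFFE with ByteSize == 2 sign-extends to -2,
  // which fits the 5-bit field checked below and so can be materialized
  // with "vspltish -2".)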
  int MaskVal = SignExtend32(Value, ByteSize * 8);

  // If this is zero, don't match; zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDValue();

  // Finally, if this value fits in a 5-bit sext field, return it.
  if (SignExtend32<5>(MaskVal) == MaskVal)
    return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
  return SDValue();
}

/// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift
/// amount, otherwise return -1.
int PPC::isQVALIGNIShuffleMask(SDNode *N) {
  EVT VT = N->getValueType(0);
  if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 4) return -1; // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  // Check the rest of the elements to see if they are consecutive.
  for (++i; i != 4; ++i)
    if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
      return -1;

  return ShiftAmt;
}

//===----------------------------------------------------------------------===//
// Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value. If so, this returns true and the
/// immediate.
bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
  if (!isa<ConstantSDNode>(N))
    return false;

  Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}
bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
  return isIntS16Immediate(Op.getNode(), Imm);
}

/// SelectAddressEVXRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base,
                                               SDValue &Index,
                                               SelectionDAG &DAG) const {
  for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
       UI != E; ++UI) {
    if (MemSDNode *Memop = dyn_cast<MemSDNode>(*UI)) {
      if (Memop->getMemoryVT() == MVT::f64) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }
  return false;
}

/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation. Returns false if it
/// can be more efficiently represented as [r+imm]. If \p EncodingAlignment is
/// non-zero and N can be represented by a base register plus a signed 16-bit
/// displacement, make a more precise judgment by checking
/// (displacement % \p EncodingAlignment).
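///
/// For example, (add X, 40) with EncodingAlignment 4 is rejected here since
/// 40 is a multiple of 4 and the [r+imm] form is preferable, whereas
/// (add X, 42) falls through to [r+r] because a displacement of 42 cannot
/// satisfy the required alignment (as with DS-form loads such as ld/std).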
bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
                                            SDValue &Index, SelectionDAG &DAG,
                                            unsigned EncodingAlignment) const {
  int16_t imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    // Check for any SPE load/store (f64), which cannot handle a 16-bit
    // offset; SPE load/store instructions only handle 8-bit offsets.
    if (hasSPE() && SelectAddressEVXRegReg(N, Base, Index, DAG))
      return true;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!EncodingAlignment || !(imm % EncodingAlignment)))
      return false; // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false; // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!EncodingAlignment || !(imm % EncodingAlignment)))
      return false; // r+i: fold the immediate if we can.

    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are
    // provably disjoint.
    KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));

    if (LHSKnown.Zero.getBoolValue()) {
      KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // carry.
      if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}

// If we happen to be doing an i64 load or store into a stack slot that has
// less than a 4-byte alignment, then the frame-index elimination may need to
// use an indexed load or store instruction (because the offset may not be a
// multiple of 4). The extra register needed to hold the offset comes from the
// register scavenger, and it is possible that the scavenger will need to use
// an emergency spill slot. As a result, we need to make sure that a spill slot
// is allocated when doing an i64 load/store into a less-than-4-byte-aligned
// stack slot.
static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
  // FIXME: This does not handle the LWA case.
  if (VT != MVT::i64)
    return;

  // NOTE: We'll exclude negative FIs here, which come from argument
  // lowering, because there are no known test cases triggering this problem
  // using packed structures (or similar). We can remove this exclusion if
  // we find such a test case. The reason why this is so test-case driven is
  // because this entire 'fixup' is only to prevent crashes (from the
  // register scavenger) on not-really-valid inputs. For example, if we have:
  //   %a = alloca i1
  //   %b = bitcast i1* %a to i64*
  //   store i64 %c, i64* %b
  // then the store should really be marked as 'align 1', but is not. If it
  // were marked as 'align 1' then the indexed form would have been
  // instruction-selected initially, and the problem this 'fixup' is preventing
  // won't happen regardless.
2353 if (FrameIdx < 0) 2354 return; 2355 2356 MachineFunction &MF = DAG.getMachineFunction(); 2357 MachineFrameInfo &MFI = MF.getFrameInfo(); 2358 2359 unsigned Align = MFI.getObjectAlignment(FrameIdx); 2360 if (Align >= 4) 2361 return; 2362 2363 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2364 FuncInfo->setHasNonRISpills(); 2365 } 2366 2367 /// Returns true if the address N can be represented by a base register plus 2368 /// a signed 16-bit displacement [r+imm], and if it is not better 2369 /// represented as reg+reg. If \p EncodingAlignment is non-zero, only accept 2370 /// displacements that are multiples of that value. 2371 bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp, 2372 SDValue &Base, 2373 SelectionDAG &DAG, 2374 unsigned EncodingAlignment) const { 2375 // FIXME dl should come from parent load or store, not from address 2376 SDLoc dl(N); 2377 // If this can be more profitably realized as r+r, fail. 2378 if (SelectAddressRegReg(N, Disp, Base, DAG, EncodingAlignment)) 2379 return false; 2380 2381 if (N.getOpcode() == ISD::ADD) { 2382 int16_t imm = 0; 2383 if (isIntS16Immediate(N.getOperand(1), imm) && 2384 (!EncodingAlignment || (imm % EncodingAlignment) == 0)) { 2385 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 2386 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 2387 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 2388 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 2389 } else { 2390 Base = N.getOperand(0); 2391 } 2392 return true; // [r+i] 2393 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) { 2394 // Match LOAD (ADD (X, Lo(G))). 2395 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue() 2396 && "Cannot handle constant offsets yet!"); 2397 Disp = N.getOperand(1).getOperand(0); // The global address. 2398 assert(Disp.getOpcode() == ISD::TargetGlobalAddress || 2399 Disp.getOpcode() == ISD::TargetGlobalTLSAddress || 2400 Disp.getOpcode() == ISD::TargetConstantPool || 2401 Disp.getOpcode() == ISD::TargetJumpTable); 2402 Base = N.getOperand(0); 2403 return true; // [&g+r] 2404 } 2405 } else if (N.getOpcode() == ISD::OR) { 2406 int16_t imm = 0; 2407 if (isIntS16Immediate(N.getOperand(1), imm) && 2408 (!EncodingAlignment || (imm % EncodingAlignment) == 0)) { 2409 // If this is an or of disjoint bitfields, we can codegen this as an add 2410 // (for better address arithmetic) if the LHS and RHS of the OR are 2411 // provably disjoint. 2412 KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0)); 2413 2414 if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) { 2415 // If all of the bits are known zero on the LHS or RHS, the add won't 2416 // carry. 2417 if (FrameIndexSDNode *FI = 2418 dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 2419 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 2420 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 2421 } else { 2422 Base = N.getOperand(0); 2423 } 2424 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 2425 return true; 2426 } 2427 } 2428 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { 2429 // Loading from a constant address. 2430 2431 // If this address fits entirely in a 16-bit sext immediate field, codegen 2432 // this as "d, 0" 2433 int16_t Imm; 2434 if (isIntS16Immediate(CN, Imm) && 2435 (!EncodingAlignment || (Imm % EncodingAlignment) == 0)) { 2436 Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0)); 2437 Base = DAG.getRegister(Subtarget.isPPC64() ? 
                             PPC::ZERO8 : PPC::ZERO,
                             CN->getValueType(0));
      return true;
    }

    // Handle 32-bit sext immediates with LIS + addr mode.
    if ((CN->getValueType(0) == MVT::i32 ||
         (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
        (!EncodingAlignment || (CN->getZExtValue() % EncodingAlignment) == 0)) {
      int Addr = (int)CN->getZExtValue();

      // Otherwise, break this down into an LIS + disp.
      Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);

      Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
                                   MVT::i32);
      unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
      Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
    fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
  } else
    Base = N;
  return true; // [r+0]
}

/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
                                                SDValue &Index,
                                                SelectionDAG &DAG) const {
  // Check to see if we can easily represent this as an [r+r] address. This
  // will fail if it thinks that the address is more profitably represented as
  // reg+imm, e.g. where imm = 0.
  if (SelectAddressRegReg(N, Base, Index, DAG))
    return true;

  // If the address is the result of an add, we will utilize the fact that the
  // address calculation includes an implicit add. However, we can reduce
  // register pressure if we do not materialize a constant just for use as the
  // index register. We only get rid of the add if it is not an add of a
  // value and a 16-bit signed constant and both have a single use.
  int16_t imm = 0;
  if (N.getOpcode() == ISD::ADD &&
      (!isIntS16Immediate(N.getOperand(1), imm) ||
       !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

  // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                         N.getValueType());
  Index = N;
  return true;
}

/// Returns true if we should use a direct load into vector instruction
/// (such as lxsd or lfd), instead of a load into gpr + direct move sequence.
static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget& ST) {
  // If there are any uses other than scalar_to_vector, then we should keep
  // the load as a scalar load -> direct move pattern, to prevent multiple
  // loads.
  LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
  if (!LD)
    return false;

  EVT MemVT = LD->getMemoryVT();
  if (!MemVT.isSimple())
    return false;
  switch (MemVT.getSimpleVT().SimpleTy) {
  case MVT::i64:
    break;
  case MVT::i32:
    if (!ST.hasP8Vector())
      return false;
    break;
  case MVT::i16:
  case MVT::i8:
    if (!ST.hasP9Vector())
      return false;
    break;
  default:
    return false;
  }

  SDValue LoadedVal(N, 0);
  if (!LoadedVal.hasOneUse())
    return false;

  for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end();
       UI != UE; ++UI)
    if (UI.getUse().get().getResNo() == 0 &&
        UI->getOpcode() != ISD::SCALAR_TO_VECTOR)
      return false;

  return true;
}

/// getPreIndexedAddressParts - Returns true by value; sets the base pointer,
/// the offset pointer, and the addressing mode by reference if the node's
/// address can be legally represented as a pre-indexed load / store address.
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  if (DisablePPCPreinc) return false;

  bool isLoad = true;
  SDValue Ptr;
  EVT VT;
  unsigned Alignment;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    Alignment = LD->getAlignment();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
    Alignment = ST->getAlignment();
    isLoad = false;
  } else
    return false;

  // Do not generate pre-inc forms for specific loads that feed
  // scalar_to_vector instructions, because we can instead fold these into a
  // more efficient instruction (such as LXSD).
  if (isLoad && usePartialVectorLoads(N, Subtarget)) {
    return false;
  }

  // PowerPC doesn't have preinc load/store instructions for vectors (except
  // for QPX, which does have preinc r+r forms).
  if (VT.isVector()) {
    if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) {
      return false;
    } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) {
      AM = ISD::PRE_INC;
      return true;
    }
  }

  if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
    // Common code will reject creating a pre-inc form if the base pointer
    // is a frame index, or if N is a store and the base pointer is either
    // the same as or a predecessor of the value being stored. Check for
    // those situations here, and try with swapped Base/Offset instead.
    bool Swap = false;

    if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
      Swap = true;
    else if (!isLoad) {
      SDValue Val = cast<StoreSDNode>(N)->getValue();
      if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
        Swap = true;
    }

    if (Swap)
      std::swap(Base, Offset);

    AM = ISD::PRE_INC;
    return true;
  }

  // LDU/STU can only handle immediates that are a multiple of 4.
  if (VT != MVT::i64) {
    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 0))
      return false;
  } else {
    // LDU/STU need an address with at least 4-byte alignment.
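    // (Illustrative: the DS instruction format encodes the displacement in
    // 14 bits that are implicitly shifted left by 2, so an offset of 4 is
    // encodable while an offset of 6 is not.)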
2614 if (Alignment < 4) 2615 return false; 2616 2617 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 4)) 2618 return false; 2619 } 2620 2621 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 2622 // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of 2623 // sext i32 to i64 when addr mode is r+i. 2624 if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 && 2625 LD->getExtensionType() == ISD::SEXTLOAD && 2626 isa<ConstantSDNode>(Offset)) 2627 return false; 2628 } 2629 2630 AM = ISD::PRE_INC; 2631 return true; 2632 } 2633 2634 //===----------------------------------------------------------------------===// 2635 // LowerOperation implementation 2636 //===----------------------------------------------------------------------===// 2637 2638 /// Return true if we should reference labels using a PICBase, set the HiOpFlags 2639 /// and LoOpFlags to the target MO flags. 2640 static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget, 2641 unsigned &HiOpFlags, unsigned &LoOpFlags, 2642 const GlobalValue *GV = nullptr) { 2643 HiOpFlags = PPCII::MO_HA; 2644 LoOpFlags = PPCII::MO_LO; 2645 2646 // Don't use the pic base if not in PIC relocation model. 2647 if (IsPIC) { 2648 HiOpFlags |= PPCII::MO_PIC_FLAG; 2649 LoOpFlags |= PPCII::MO_PIC_FLAG; 2650 } 2651 2652 // If this is a reference to a global value that requires a non-lazy-ptr, make 2653 // sure that instruction lowering adds it. 2654 if (GV && Subtarget.hasLazyResolverStub(GV)) { 2655 HiOpFlags |= PPCII::MO_NLP_FLAG; 2656 LoOpFlags |= PPCII::MO_NLP_FLAG; 2657 2658 if (GV->hasHiddenVisibility()) { 2659 HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 2660 LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 2661 } 2662 } 2663 } 2664 2665 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC, 2666 SelectionDAG &DAG) { 2667 SDLoc DL(HiPart); 2668 EVT PtrVT = HiPart.getValueType(); 2669 SDValue Zero = DAG.getConstant(0, DL, PtrVT); 2670 2671 SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero); 2672 SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero); 2673 2674 // With PIC, the first instruction is actually "GR+hi(&G)". 2675 if (isPIC) 2676 Hi = DAG.getNode(ISD::ADD, DL, PtrVT, 2677 DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi); 2678 2679 // Generate non-pic code that has direct accesses to the constant pool. 2680 // The address of the global is just (hi(&g)+lo(&g)). 2681 return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo); 2682 } 2683 2684 static void setUsesTOCBasePtr(MachineFunction &MF) { 2685 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2686 FuncInfo->setUsesTOCBasePtr(); 2687 } 2688 2689 static void setUsesTOCBasePtr(SelectionDAG &DAG) { 2690 setUsesTOCBasePtr(DAG.getMachineFunction()); 2691 } 2692 2693 SDValue PPCTargetLowering::getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, 2694 SDValue GA) const { 2695 const bool Is64Bit = Subtarget.isPPC64(); 2696 EVT VT = Is64Bit ? MVT::i64 : MVT::i32; 2697 SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT) 2698 : Subtarget.isAIXABI() 2699 ? 
DAG.getRegister(PPC::R2, VT) 2700 : DAG.getNode(PPCISD::GlobalBaseReg, dl, VT); 2701 SDValue Ops[] = { GA, Reg }; 2702 return DAG.getMemIntrinsicNode( 2703 PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT, 2704 MachinePointerInfo::getGOT(DAG.getMachineFunction()), 0, 2705 MachineMemOperand::MOLoad); 2706 } 2707 2708 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op, 2709 SelectionDAG &DAG) const { 2710 EVT PtrVT = Op.getValueType(); 2711 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 2712 const Constant *C = CP->getConstVal(); 2713 2714 // 64-bit SVR4 ABI and AIX ABI code are always position-independent. 2715 // The actual address of the GlobalValue is stored in the TOC. 2716 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { 2717 setUsesTOCBasePtr(DAG); 2718 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0); 2719 return getTOCEntry(DAG, SDLoc(CP), GA); 2720 } 2721 2722 unsigned MOHiFlag, MOLoFlag; 2723 bool IsPIC = isPositionIndependent(); 2724 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2725 2726 if (IsPIC && Subtarget.isSVR4ABI()) { 2727 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 2728 PPCII::MO_PIC_FLAG); 2729 return getTOCEntry(DAG, SDLoc(CP), GA); 2730 } 2731 2732 SDValue CPIHi = 2733 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag); 2734 SDValue CPILo = 2735 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag); 2736 return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG); 2737 } 2738 2739 // For 64-bit PowerPC, prefer the more compact relative encodings. 2740 // This trades 32 bits per jump table entry for one or two instructions 2741 // on the jump site. 2742 unsigned PPCTargetLowering::getJumpTableEncoding() const { 2743 if (isJumpTableRelative()) 2744 return MachineJumpTableInfo::EK_LabelDifference32; 2745 2746 return TargetLowering::getJumpTableEncoding(); 2747 } 2748 2749 bool PPCTargetLowering::isJumpTableRelative() const { 2750 if (UseAbsoluteJumpTables) 2751 return false; 2752 if (Subtarget.isPPC64()) 2753 return true; 2754 return TargetLowering::isJumpTableRelative(); 2755 } 2756 2757 SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table, 2758 SelectionDAG &DAG) const { 2759 if (!Subtarget.isPPC64()) 2760 return TargetLowering::getPICJumpTableRelocBase(Table, DAG); 2761 2762 switch (getTargetMachine().getCodeModel()) { 2763 case CodeModel::Small: 2764 case CodeModel::Medium: 2765 return TargetLowering::getPICJumpTableRelocBase(Table, DAG); 2766 default: 2767 return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(), 2768 getPointerTy(DAG.getDataLayout())); 2769 } 2770 } 2771 2772 const MCExpr * 2773 PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF, 2774 unsigned JTI, 2775 MCContext &Ctx) const { 2776 if (!Subtarget.isPPC64()) 2777 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); 2778 2779 switch (getTargetMachine().getCodeModel()) { 2780 case CodeModel::Small: 2781 case CodeModel::Medium: 2782 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); 2783 default: 2784 return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx); 2785 } 2786 } 2787 2788 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 2789 EVT PtrVT = Op.getValueType(); 2790 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 2791 2792 // 64-bit SVR4 ABI and AIX ABI code are always position-independent. 2793 // The actual address of the GlobalValue is stored in the TOC. 
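  // (Illustrative: on 64-bit ELF this typically becomes a TOC-relative
  // access such as "addis r3, r2, .LC0@toc@ha; ld r3, .LC0@toc@l(r3)".)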
2794 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { 2795 setUsesTOCBasePtr(DAG); 2796 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 2797 return getTOCEntry(DAG, SDLoc(JT), GA); 2798 } 2799 2800 unsigned MOHiFlag, MOLoFlag; 2801 bool IsPIC = isPositionIndependent(); 2802 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2803 2804 if (IsPIC && Subtarget.isSVR4ABI()) { 2805 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, 2806 PPCII::MO_PIC_FLAG); 2807 return getTOCEntry(DAG, SDLoc(GA), GA); 2808 } 2809 2810 SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag); 2811 SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag); 2812 return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG); 2813 } 2814 2815 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op, 2816 SelectionDAG &DAG) const { 2817 EVT PtrVT = Op.getValueType(); 2818 BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op); 2819 const BlockAddress *BA = BASDN->getBlockAddress(); 2820 2821 // 64-bit SVR4 ABI and AIX ABI code are always position-independent. 2822 // The actual BlockAddress is stored in the TOC. 2823 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { 2824 setUsesTOCBasePtr(DAG); 2825 SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()); 2826 return getTOCEntry(DAG, SDLoc(BASDN), GA); 2827 } 2828 2829 // 32-bit position-independent ELF stores the BlockAddress in the .got. 2830 if (Subtarget.is32BitELFABI() && isPositionIndependent()) 2831 return getTOCEntry( 2832 DAG, SDLoc(BASDN), 2833 DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset())); 2834 2835 unsigned MOHiFlag, MOLoFlag; 2836 bool IsPIC = isPositionIndependent(); 2837 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2838 SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag); 2839 SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag); 2840 return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG); 2841 } 2842 2843 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, 2844 SelectionDAG &DAG) const { 2845 // FIXME: TLS addresses currently use medium model code sequences, 2846 // which is the most useful form. Eventually support for small and 2847 // large models could be added if users need it, at the cost of 2848 // additional complexity. 2849 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2850 if (DAG.getTarget().useEmulatedTLS()) 2851 return LowerToTLSEmulatedModel(GA, DAG); 2852 2853 SDLoc dl(GA); 2854 const GlobalValue *GV = GA->getGlobal(); 2855 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2856 bool is64bit = Subtarget.isPPC64(); 2857 const Module *M = DAG.getMachineFunction().getFunction().getParent(); 2858 PICLevel::Level picLevel = M->getPICLevel(); 2859 2860 const TargetMachine &TM = getTargetMachine(); 2861 TLSModel::Model Model = TM.getTLSModel(GV); 2862 2863 if (Model == TLSModel::LocalExec) { 2864 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2865 PPCII::MO_TPREL_HA); 2866 SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2867 PPCII::MO_TPREL_LO); 2868 SDValue TLSReg = is64bit ? 
DAG.getRegister(PPC::X13, MVT::i64) 2869 : DAG.getRegister(PPC::R2, MVT::i32); 2870 2871 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg); 2872 return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi); 2873 } 2874 2875 if (Model == TLSModel::InitialExec) { 2876 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2877 SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2878 PPCII::MO_TLS); 2879 SDValue GOTPtr; 2880 if (is64bit) { 2881 setUsesTOCBasePtr(DAG); 2882 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2883 GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, 2884 PtrVT, GOTReg, TGA); 2885 } else { 2886 if (!TM.isPositionIndependent()) 2887 GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT); 2888 else if (picLevel == PICLevel::SmallPIC) 2889 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2890 else 2891 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2892 } 2893 SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, 2894 PtrVT, TGA, GOTPtr); 2895 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS); 2896 } 2897 2898 if (Model == TLSModel::GeneralDynamic) { 2899 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2900 SDValue GOTPtr; 2901 if (is64bit) { 2902 setUsesTOCBasePtr(DAG); 2903 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2904 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT, 2905 GOTReg, TGA); 2906 } else { 2907 if (picLevel == PICLevel::SmallPIC) 2908 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2909 else 2910 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2911 } 2912 return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT, 2913 GOTPtr, TGA, TGA); 2914 } 2915 2916 if (Model == TLSModel::LocalDynamic) { 2917 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2918 SDValue GOTPtr; 2919 if (is64bit) { 2920 setUsesTOCBasePtr(DAG); 2921 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2922 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT, 2923 GOTReg, TGA); 2924 } else { 2925 if (picLevel == PICLevel::SmallPIC) 2926 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2927 else 2928 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2929 } 2930 SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl, 2931 PtrVT, GOTPtr, TGA, TGA); 2932 SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl, 2933 PtrVT, TLSAddr, TGA); 2934 return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA); 2935 } 2936 2937 llvm_unreachable("Unknown TLS model!"); 2938 } 2939 2940 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op, 2941 SelectionDAG &DAG) const { 2942 EVT PtrVT = Op.getValueType(); 2943 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); 2944 SDLoc DL(GSDN); 2945 const GlobalValue *GV = GSDN->getGlobal(); 2946 2947 // 64-bit SVR4 ABI & AIX ABI code is always position-independent. 2948 // The actual address of the GlobalValue is stored in the TOC. 
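// (Sketch of the non-TOC path at the end of this function: the Hi/Lo pair
// materializes the address roughly as
//   lis  3, sym@ha
//   addi 3, 3, sym@l
// with relocation flags chosen by getLabelAccessInfo; illustrative only.)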
2949 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { 2950 setUsesTOCBasePtr(DAG); 2951 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset()); 2952 return getTOCEntry(DAG, DL, GA); 2953 } 2954 2955 unsigned MOHiFlag, MOLoFlag; 2956 bool IsPIC = isPositionIndependent(); 2957 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV); 2958 2959 if (IsPIC && Subtarget.isSVR4ABI()) { 2960 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 2961 GSDN->getOffset(), 2962 PPCII::MO_PIC_FLAG); 2963 return getTOCEntry(DAG, DL, GA); 2964 } 2965 2966 SDValue GAHi = 2967 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag); 2968 SDValue GALo = 2969 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag); 2970 2971 SDValue Ptr = LowerLabelRef(GAHi, GALo, IsPIC, DAG); 2972 2973 // If the global reference is actually to a non-lazy-pointer, we have to do an 2974 // extra load to get the address of the global. 2975 if (MOHiFlag & PPCII::MO_NLP_FLAG) 2976 Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo()); 2977 return Ptr; 2978 } 2979 2980 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 2981 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 2982 SDLoc dl(Op); 2983 2984 if (Op.getValueType() == MVT::v2i64) { 2985 // When the operands themselves are v2i64 values, we need to do something 2986 // special because VSX has no underlying comparison operations for these. 2987 if (Op.getOperand(0).getValueType() == MVT::v2i64) { 2988 // Equality can be handled by casting to the legal type for Altivec 2989 // comparisons, everything else needs to be expanded. 2990 if (CC == ISD::SETEQ || CC == ISD::SETNE) { 2991 return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, 2992 DAG.getSetCC(dl, MVT::v4i32, 2993 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)), 2994 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)), 2995 CC)); 2996 } 2997 2998 return SDValue(); 2999 } 3000 3001 // We handle most of these in the usual way. 3002 return Op; 3003 } 3004 3005 // If we're comparing for equality to zero, expose the fact that this is 3006 // implemented as a ctlz/srl pair on ppc, so that the dag combiner can 3007 // fold the new nodes. 3008 if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG)) 3009 return V; 3010 3011 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 3012 // Leave comparisons against 0 and -1 alone for now, since they're usually 3013 // optimized. FIXME: revisit this when we can custom lower all setcc 3014 // optimizations. 3015 if (C->isAllOnesValue() || C->isNullValue()) 3016 return SDValue(); 3017 } 3018 3019 // If we have an integer seteq/setne, turn it into a compare against zero 3020 // by xor'ing the rhs with the lhs, which is faster than setting a 3021 // condition register, reading it back out, and masking the correct bit. The 3022 // normal approach here uses sub to do this instead of xor. Using xor exposes 3023 // the result to other bit-twiddling opportunities. 
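// For example (sketch): (seteq a, b) is rewritten below as
// (seteq (xor a, b), 0), which later combines can turn into a branch-free
// sequence like
//   xor    3, 3, 4
//   cntlzw 3, 3
//   srwi   3, 3, 5
// (illustrative 32-bit output; cntlzw yields 32 exactly when the xor is
// zero, so the shift produces the i1 result).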
3024 EVT LHSVT = Op.getOperand(0).getValueType(); 3025 if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) { 3026 EVT VT = Op.getValueType(); 3027 SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0), 3028 Op.getOperand(1)); 3029 return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC); 3030 } 3031 return SDValue(); 3032 } 3033 3034 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { 3035 SDNode *Node = Op.getNode(); 3036 EVT VT = Node->getValueType(0); 3037 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3038 SDValue InChain = Node->getOperand(0); 3039 SDValue VAListPtr = Node->getOperand(1); 3040 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 3041 SDLoc dl(Node); 3042 3043 assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only"); 3044 3045 // gpr_index 3046 SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 3047 VAListPtr, MachinePointerInfo(SV), MVT::i8); 3048 InChain = GprIndex.getValue(1); 3049 3050 if (VT == MVT::i64) { 3051 // Check whether GprIndex is odd; i64 arguments must start in an even GPR 3052 SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex, 3053 DAG.getConstant(1, dl, MVT::i32)); 3054 SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd, 3055 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE); 3056 SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex, 3057 DAG.getConstant(1, dl, MVT::i32)); 3058 // Align GprIndex to be even if it isn't 3059 GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne, 3060 GprIndex); 3061 } 3062 3063 // fpr index is 1 byte after gpr 3064 SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 3065 DAG.getConstant(1, dl, MVT::i32)); 3066 3067 // fpr 3068 SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 3069 FprPtr, MachinePointerInfo(SV), MVT::i8); 3070 InChain = FprIndex.getValue(1); 3071 3072 SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 3073 DAG.getConstant(8, dl, MVT::i32)); 3074 3075 SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 3076 DAG.getConstant(4, dl, MVT::i32)); 3077 3078 // areas 3079 SDValue OverflowArea = 3080 DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo()); 3081 InChain = OverflowArea.getValue(1); 3082 3083 SDValue RegSaveArea = 3084 DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo()); 3085 InChain = RegSaveArea.getValue(1); 3086 3087 // Select overflow_area if index >= 8 3088 SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex, 3089 DAG.getConstant(8, dl, MVT::i32), ISD::SETLT); 3090 3091 // adjustment constant gpr_index * 4/8 3092 SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32, 3093 VT.isInteger() ? GprIndex : FprIndex, 3094 DAG.getConstant(VT.isInteger() ? 4 : 8, dl, 3095 MVT::i32)); 3096 3097 // OurReg = RegSaveArea + RegConstant 3098 SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea, 3099 RegConstant); 3100 3101 // Floating types are 32 bytes into RegSaveArea 3102 if (VT.isFloatingPoint()) 3103 OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg, 3104 DAG.getConstant(32, dl, MVT::i32)); 3105 3106 // increase {f,g}pr_index by 1 (or 2 if VT is i64) 3107 SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32, 3108 VT.isInteger() ? GprIndex : FprIndex, 3109 DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl, 3110 MVT::i32)); 3111 3112 InChain = DAG.getTruncStore(InChain, dl, IndexPlus1, 3113 VT.isInteger() ?
VAListPtr : FprPtr, 3114 MachinePointerInfo(SV), MVT::i8); 3115 3116 // determine if we should load from reg_save_area or overflow_area 3117 SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea); 3118 3119 // increase overflow_area by 4/8 if gpr/fpr >= 8 3120 SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea, 3121 DAG.getConstant(VT.isInteger() ? 4 : 8, 3122 dl, MVT::i32)); 3123 3124 OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea, 3125 OverflowAreaPlusN); 3126 3127 InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr, 3128 MachinePointerInfo(), MVT::i32); 3129 3130 return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo()); 3131 } 3132 3133 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const { 3134 assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only"); 3135 3136 // We have to copy the entire va_list struct: 3137 // 2*sizeof(char) + 2 bytes of padding + 2*sizeof(char*) = 12 bytes 3138 return DAG.getMemcpy(Op.getOperand(0), Op, 3139 Op.getOperand(1), Op.getOperand(2), 3140 DAG.getConstant(12, SDLoc(Op), MVT::i32), 8, false, true, 3141 false, MachinePointerInfo(), MachinePointerInfo()); 3142 } 3143 3144 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op, 3145 SelectionDAG &DAG) const { 3146 return Op.getOperand(0); 3147 } 3148 3149 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, 3150 SelectionDAG &DAG) const { 3151 SDValue Chain = Op.getOperand(0); 3152 SDValue Trmp = Op.getOperand(1); // trampoline 3153 SDValue FPtr = Op.getOperand(2); // nested function 3154 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 3155 SDLoc dl(Op); 3156 3157 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3158 bool isPPC64 = (PtrVT == MVT::i64); 3159 Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext()); 3160 3161 TargetLowering::ArgListTy Args; 3162 TargetLowering::ArgListEntry Entry; 3163 3164 Entry.Ty = IntPtrTy; 3165 Entry.Node = Trmp; Args.push_back(Entry); 3166 3167 // TrampSize == (isPPC64 ? 48 : 40); 3168 Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl, 3169 isPPC64 ? MVT::i64 : MVT::i32); 3170 Args.push_back(Entry); 3171 3172 Entry.Node = FPtr; Args.push_back(Entry); 3173 Entry.Node = Nest; Args.push_back(Entry); 3174 3175 // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg) 3176 TargetLowering::CallLoweringInfo CLI(DAG); 3177 CLI.setDebugLoc(dl).setChain(Chain).setLibCallee( 3178 CallingConv::C, Type::getVoidTy(*DAG.getContext()), 3179 DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args)); 3180 3181 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 3182 return CallResult.second; 3183 } 3184 3185 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { 3186 MachineFunction &MF = DAG.getMachineFunction(); 3187 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3188 EVT PtrVT = getPointerTy(MF.getDataLayout()); 3189 3190 SDLoc dl(Op); 3191 3192 if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) { 3193 // vastart just stores the address of the VarArgsFrameIndex slot into the 3194 // memory location argument. 3195 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 3196 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 3197 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 3198 MachinePointerInfo(SV)); 3199 } 3200 3201 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
3202 // We suppose the given va_list is already allocated. 3203 // 3204 // typedef struct { 3205 // char gpr; /* index into the array of 8 GPRs 3206 // * stored in the register save area 3207 // * gpr=0 corresponds to r3, 3208 // * gpr=1 to r4, etc. 3209 // */ 3210 // char fpr; /* index into the array of 8 FPRs 3211 // * stored in the register save area 3212 // * fpr=0 corresponds to f1, 3213 // * fpr=1 to f2, etc. 3214 // */ 3215 // char *overflow_arg_area; 3216 // /* location on stack that holds 3217 // * the next overflow argument 3218 // */ 3219 // char *reg_save_area; 3220 // /* where r3:r10 and f1:f8 (if saved) 3221 // * are stored 3222 // */ 3223 // } va_list[1]; 3224 3225 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32); 3226 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32); 3227 SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(), 3228 PtrVT); 3229 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 3230 PtrVT); 3231 3232 uint64_t FrameOffset = PtrVT.getSizeInBits()/8; 3233 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT); 3234 3235 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; 3236 SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT); 3237 3238 uint64_t FPROffset = 1; 3239 SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT); 3240 3241 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 3242 3243 // Store first byte : number of int regs 3244 SDValue firstStore = 3245 DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1), 3246 MachinePointerInfo(SV), MVT::i8); 3247 uint64_t nextOffset = FPROffset; 3248 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1), 3249 ConstFPROffset); 3250 3251 // Store second byte : number of float regs 3252 SDValue secondStore = 3253 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, 3254 MachinePointerInfo(SV, nextOffset), MVT::i8); 3255 nextOffset += StackOffset; 3256 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset); 3257 3258 // Store second word : arguments given on stack 3259 SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, 3260 MachinePointerInfo(SV, nextOffset)); 3261 nextOffset += FrameOffset; 3262 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset); 3263 3264 // Store third word : arguments given in registers 3265 return DAG.getStore(thirdStore, dl, FR, nextPtr, 3266 MachinePointerInfo(SV, nextOffset)); 3267 } 3268 3269 /// FPR - The set of FP registers that should be allocated for arguments 3270 /// on Darwin and AIX. 3271 static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, 3272 PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10, 3273 PPC::F11, PPC::F12, PPC::F13}; 3274 3275 /// QFPR - The set of QPX registers that should be allocated for arguments. 3276 static const MCPhysReg QFPR[] = { 3277 PPC::QF1, PPC::QF2, PPC::QF3, PPC::QF4, PPC::QF5, PPC::QF6, PPC::QF7, 3278 PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13}; 3279 3280 /// CalculateStackSlotSize - Calculates the size reserved for this argument on 3281 /// the stack. 3282 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, 3283 unsigned PtrByteSize) { 3284 unsigned ArgSize = ArgVT.getStoreSize(); 3285 if (Flags.isByVal()) 3286 ArgSize = Flags.getByValSize(); 3287 3288 // Round up to multiples of the pointer size, except for array members, 3289 // which are always packed. 
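// For example, with PtrByteSize == 8 a 5-byte byval argument is rounded up
// to an 8-byte slot below, while a 5-byte array member marked
// isInConsecutiveRegs() keeps its packed 5-byte size.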
3290 if (!Flags.isInConsecutiveRegs()) 3291 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3292 3293 return ArgSize; 3294 } 3295 3296 /// CalculateStackSlotAlignment - Calculates the alignment of this argument 3297 /// on the stack. 3298 static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT, 3299 ISD::ArgFlagsTy Flags, 3300 unsigned PtrByteSize) { 3301 unsigned Align = PtrByteSize; 3302 3303 // Altivec parameters are padded to a 16 byte boundary. 3304 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 3305 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 3306 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 3307 ArgVT == MVT::v1i128 || ArgVT == MVT::f128) 3308 Align = 16; 3309 // QPX vector types stored in double-precision are padded to a 32 byte 3310 // boundary. 3311 else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1) 3312 Align = 32; 3313 3314 // ByVal parameters are aligned as requested. 3315 if (Flags.isByVal()) { 3316 unsigned BVAlign = Flags.getByValAlign(); 3317 if (BVAlign > PtrByteSize) { 3318 if (BVAlign % PtrByteSize != 0) 3319 llvm_unreachable( 3320 "ByVal alignment is not a multiple of the pointer size"); 3321 3322 Align = BVAlign; 3323 } 3324 } 3325 3326 // Array members are always packed to their original alignment. 3327 if (Flags.isInConsecutiveRegs()) { 3328 // If the array member was split into multiple registers, the first 3329 // needs to be aligned to the size of the full type. (Except for 3330 // ppcf128, which is only aligned as its f64 components.) 3331 if (Flags.isSplit() && OrigVT != MVT::ppcf128) 3332 Align = OrigVT.getStoreSize(); 3333 else 3334 Align = ArgVT.getStoreSize(); 3335 } 3336 3337 return Align; 3338 } 3339 3340 /// CalculateStackSlotUsed - Return whether this argument will use its 3341 /// stack slot (instead of being passed in registers). ArgOffset, 3342 /// AvailableFPRs, and AvailableVRs must hold the current argument 3343 /// position, and will be updated to account for this argument. 3344 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, 3345 ISD::ArgFlagsTy Flags, 3346 unsigned PtrByteSize, 3347 unsigned LinkageSize, 3348 unsigned ParamAreaSize, 3349 unsigned &ArgOffset, 3350 unsigned &AvailableFPRs, 3351 unsigned &AvailableVRs, bool HasQPX) { 3352 bool UseMemory = false; 3353 3354 // Respect alignment of argument on the stack. 3355 unsigned Align = 3356 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 3357 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 3358 // If there's no space left in the argument save area, we must 3359 // use memory (this check also catches zero-sized arguments). 3360 if (ArgOffset >= LinkageSize + ParamAreaSize) 3361 UseMemory = true; 3362 3363 // Allocate argument on the stack. 3364 ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 3365 if (Flags.isInConsecutiveRegsLast()) 3366 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3367 // If we overran the argument save area, we must use memory 3368 // (this check catches arguments passed partially in memory) 3369 if (ArgOffset > LinkageSize + ParamAreaSize) 3370 UseMemory = true; 3371 3372 // However, if the argument is actually passed in an FPR or a VR, 3373 // we don't use memory after all. 3374 if (!Flags.isByVal()) { 3375 if (ArgVT == MVT::f32 || ArgVT == MVT::f64 || 3376 // QPX registers overlap with the scalar FP registers. 
3377 (HasQPX && (ArgVT == MVT::v4f32 || 3378 ArgVT == MVT::v4f64 || 3379 ArgVT == MVT::v4i1))) 3380 if (AvailableFPRs > 0) { 3381 --AvailableFPRs; 3382 return false; 3383 } 3384 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 3385 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 3386 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 3387 ArgVT == MVT::v1i128 || ArgVT == MVT::f128) 3388 if (AvailableVRs > 0) { 3389 --AvailableVRs; 3390 return false; 3391 } 3392 } 3393 3394 return UseMemory; 3395 } 3396 3397 /// EnsureStackAlignment - Round stack frame size up from NumBytes to 3398 /// ensure minimum alignment required for target. 3399 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering, 3400 unsigned NumBytes) { 3401 unsigned TargetAlign = Lowering->getStackAlignment(); 3402 unsigned AlignMask = TargetAlign - 1; 3403 NumBytes = (NumBytes + AlignMask) & ~AlignMask; 3404 return NumBytes; 3405 } 3406 3407 SDValue PPCTargetLowering::LowerFormalArguments( 3408 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3409 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3410 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3411 if (Subtarget.is64BitELFABI()) 3412 return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG, 3413 InVals); 3414 else if (Subtarget.is32BitELFABI()) 3415 return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG, 3416 InVals); 3417 3418 // FIXME: We are using this for both AIX and Darwin. We should add appropriate 3419 // AIX testing, and rename it appropriately. 3420 return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, dl, DAG, 3421 InVals); 3422 } 3423 3424 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4( 3425 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3426 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3427 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3428 3429 // 32-bit SVR4 ABI Stack Frame Layout: 3430 // +-----------------------------------+ 3431 // +--> | Back chain | 3432 // | +-----------------------------------+ 3433 // | | Floating-point register save area | 3434 // | +-----------------------------------+ 3435 // | | General register save area | 3436 // | +-----------------------------------+ 3437 // | | CR save word | 3438 // | +-----------------------------------+ 3439 // | | VRSAVE save word | 3440 // | +-----------------------------------+ 3441 // | | Alignment padding | 3442 // | +-----------------------------------+ 3443 // | | Vector register save area | 3444 // | +-----------------------------------+ 3445 // | | Local variable space | 3446 // | +-----------------------------------+ 3447 // | | Parameter list area | 3448 // | +-----------------------------------+ 3449 // | | LR save word | 3450 // | +-----------------------------------+ 3451 // SP--> +--- | Back chain | 3452 // +-----------------------------------+ 3453 // 3454 // Specifications: 3455 // System V Application Binary Interface PowerPC Processor Supplement 3456 // AltiVec Technology Programming Interface Manual 3457 3458 MachineFunction &MF = DAG.getMachineFunction(); 3459 MachineFrameInfo &MFI = MF.getFrameInfo(); 3460 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3461 3462 EVT PtrVT = getPointerTy(MF.getDataLayout()); 3463 // Potential tail calls could cause overwriting of argument stack slots. 
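// E.g. under -tailcallopt with the fast calling convention, a tail call's
// outgoing arguments may be written into this frame's incoming argument
// slots, so the fixed objects created below must not be marked immutable.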
3464 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 3465 (CallConv == CallingConv::Fast)); 3466 unsigned PtrByteSize = 4; 3467 3468 // Assign locations to all of the incoming arguments. 3469 SmallVector<CCValAssign, 16> ArgLocs; 3470 PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 3471 *DAG.getContext()); 3472 3473 // Reserve space for the linkage area on the stack. 3474 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3475 CCInfo.AllocateStack(LinkageSize, PtrByteSize); 3476 if (useSoftFloat()) 3477 CCInfo.PreAnalyzeFormalArguments(Ins); 3478 3479 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4); 3480 CCInfo.clearWasPPCF128(); 3481 3482 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 3483 CCValAssign &VA = ArgLocs[i]; 3484 3485 // Arguments stored in registers. 3486 if (VA.isRegLoc()) { 3487 const TargetRegisterClass *RC; 3488 EVT ValVT = VA.getValVT(); 3489 3490 switch (ValVT.getSimpleVT().SimpleTy) { 3491 default: 3492 llvm_unreachable("ValVT not supported by formal arguments Lowering"); 3493 case MVT::i1: 3494 case MVT::i32: 3495 RC = &PPC::GPRCRegClass; 3496 break; 3497 case MVT::f32: 3498 if (Subtarget.hasP8Vector()) 3499 RC = &PPC::VSSRCRegClass; 3500 else if (Subtarget.hasSPE()) 3501 RC = &PPC::GPRCRegClass; 3502 else 3503 RC = &PPC::F4RCRegClass; 3504 break; 3505 case MVT::f64: 3506 if (Subtarget.hasVSX()) 3507 RC = &PPC::VSFRCRegClass; 3508 else if (Subtarget.hasSPE()) 3509 // SPE passes doubles in GPR pairs. 3510 RC = &PPC::GPRCRegClass; 3511 else 3512 RC = &PPC::F8RCRegClass; 3513 break; 3514 case MVT::v16i8: 3515 case MVT::v8i16: 3516 case MVT::v4i32: 3517 RC = &PPC::VRRCRegClass; 3518 break; 3519 case MVT::v4f32: 3520 RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass; 3521 break; 3522 case MVT::v2f64: 3523 case MVT::v2i64: 3524 RC = &PPC::VRRCRegClass; 3525 break; 3526 case MVT::v4f64: 3527 RC = &PPC::QFRCRegClass; 3528 break; 3529 case MVT::v4i1: 3530 RC = &PPC::QBRCRegClass; 3531 break; 3532 } 3533 3534 SDValue ArgValue; 3535 // Transform the arguments stored in physical registers into 3536 // virtual ones. 3537 if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) { 3538 assert(i + 1 < e && "No second half of double precision argument"); 3539 unsigned RegLo = MF.addLiveIn(VA.getLocReg(), RC); 3540 unsigned RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC); 3541 SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32); 3542 SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32); 3543 if (!Subtarget.isLittleEndian()) 3544 std::swap (ArgValueLo, ArgValueHi); 3545 ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo, 3546 ArgValueHi); 3547 } else { 3548 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 3549 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, 3550 ValVT == MVT::i1 ? MVT::i32 : ValVT); 3551 if (ValVT == MVT::i1) 3552 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue); 3553 } 3554 3555 InVals.push_back(ArgValue); 3556 } else { 3557 // Argument stored in memory. 3558 assert(VA.isMemLoc()); 3559 3560 // Get the extended size of the argument type in stack 3561 unsigned ArgSize = VA.getLocVT().getStoreSize(); 3562 // Get the actual size of the argument type 3563 unsigned ObjSize = VA.getValVT().getStoreSize(); 3564 unsigned ArgOffset = VA.getLocMemOffset(); 3565 // Stack objects in PPC32 are right justified. 
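// For example, an i8 argument occupies a 4-byte slot (ArgSize == 4,
// ObjSize == 1) and lives in the slot's last byte, so the offset is
// advanced by 3 below before the load is created.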
3566 ArgOffset += ArgSize - ObjSize; 3567 int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, isImmutable); 3568 3569 // Create load nodes to retrieve arguments from the stack. 3570 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3571 InVals.push_back( 3572 DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo())); 3573 } 3574 } 3575 3576 // Assign locations to all of the incoming aggregate by value arguments. 3577 // Aggregates passed by value are stored in the local variable space of the 3578 // caller's stack frame, right above the parameter list area. 3579 SmallVector<CCValAssign, 16> ByValArgLocs; 3580 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), 3581 ByValArgLocs, *DAG.getContext()); 3582 3583 // Reserve stack space for the allocations in CCInfo. 3584 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 3585 3586 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal); 3587 3588 // Area that is at least reserved in the caller of this function. 3589 unsigned MinReservedArea = CCByValInfo.getNextStackOffset(); 3590 MinReservedArea = std::max(MinReservedArea, LinkageSize); 3591 3592 // Set the size that is at least reserved in caller of this function. Tail 3593 // call optimized function's reserved stack space needs to be aligned so that 3594 // taking the difference between two stack areas will result in an aligned 3595 // stack. 3596 MinReservedArea = 3597 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 3598 FuncInfo->setMinReservedArea(MinReservedArea); 3599 3600 SmallVector<SDValue, 8> MemOps; 3601 3602 // If the function takes variable number of arguments, make a frame index for 3603 // the start of the first vararg value... for expansion of llvm.va_start. 3604 if (isVarArg) { 3605 static const MCPhysReg GPArgRegs[] = { 3606 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 3607 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 3608 }; 3609 const unsigned NumGPArgRegs = array_lengthof(GPArgRegs); 3610 3611 static const MCPhysReg FPArgRegs[] = { 3612 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 3613 PPC::F8 3614 }; 3615 unsigned NumFPArgRegs = array_lengthof(FPArgRegs); 3616 3617 if (useSoftFloat() || hasSPE()) 3618 NumFPArgRegs = 0; 3619 3620 FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs)); 3621 FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs)); 3622 3623 // Make room for NumGPArgRegs and NumFPArgRegs. 3624 int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 + 3625 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8; 3626 3627 FuncInfo->setVarArgsStackOffset( 3628 MFI.CreateFixedObject(PtrVT.getSizeInBits()/8, 3629 CCInfo.getNextStackOffset(), true)); 3630 3631 FuncInfo->setVarArgsFrameIndex(MFI.CreateStackObject(Depth, 8, false)); 3632 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 3633 3634 // The fixed integer arguments of a variadic function are stored to the 3635 // VarArgsFrameIndex on the stack so that they may be loaded by 3636 // dereferencing the result of va_next. 3637 for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) { 3638 // Get an existing live-in vreg, or add a new one. 
3639 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]); 3640 if (!VReg) 3641 VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass); 3642 3643 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3644 SDValue Store = 3645 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 3646 MemOps.push_back(Store); 3647 // Increment the address by four for the next argument to store 3648 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT); 3649 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3650 } 3651 3652 // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6 3653 // is set. 3654 // The double arguments are stored to the VarArgsFrameIndex 3655 // on the stack. 3656 for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) { 3657 // Get an existing live-in vreg, or add a new one. 3658 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]); 3659 if (!VReg) 3660 VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass); 3661 3662 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64); 3663 SDValue Store = 3664 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 3665 MemOps.push_back(Store); 3666 // Increment the address by eight for the next argument to store 3667 SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl, 3668 PtrVT); 3669 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3670 } 3671 } 3672 3673 if (!MemOps.empty()) 3674 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 3675 3676 return Chain; 3677 } 3678 3679 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3680 // value to MVT::i64 and then truncate to the correct register size. 3681 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, 3682 EVT ObjectVT, SelectionDAG &DAG, 3683 SDValue ArgVal, 3684 const SDLoc &dl) const { 3685 if (Flags.isSExt()) 3686 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal, 3687 DAG.getValueType(ObjectVT)); 3688 else if (Flags.isZExt()) 3689 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal, 3690 DAG.getValueType(ObjectVT)); 3691 3692 return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal); 3693 } 3694 3695 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4( 3696 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3697 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3698 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3699 // TODO: add description of PPC stack frame format, or at least some docs. 3700 // 3701 bool isELFv2ABI = Subtarget.isELFv2ABI(); 3702 bool isLittleEndian = Subtarget.isLittleEndian(); 3703 MachineFunction &MF = DAG.getMachineFunction(); 3704 MachineFrameInfo &MFI = MF.getFrameInfo(); 3705 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3706 3707 assert(!(CallConv == CallingConv::Fast && isVarArg) && 3708 "fastcc not supported on varargs functions"); 3709 3710 EVT PtrVT = getPointerTy(MF.getDataLayout()); 3711 // Potential tail calls could cause overwriting of argument stack slots. 
3712 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 3713 (CallConv == CallingConv::Fast)); 3714 unsigned PtrByteSize = 8; 3715 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3716 3717 static const MCPhysReg GPR[] = { 3718 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 3719 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 3720 }; 3721 static const MCPhysReg VR[] = { 3722 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 3723 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 3724 }; 3725 3726 const unsigned Num_GPR_Regs = array_lengthof(GPR); 3727 const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13; 3728 const unsigned Num_VR_Regs = array_lengthof(VR); 3729 const unsigned Num_QFPR_Regs = Num_FPR_Regs; 3730 3731 // Do a first pass over the arguments to determine whether the ABI 3732 // guarantees that our caller has allocated the parameter save area 3733 // on its stack frame. In the ELFv1 ABI, this is always the case; 3734 // in the ELFv2 ABI, it is true if this is a vararg function or if 3735 // any parameter is located in a stack slot. 3736 3737 bool HasParameterArea = !isELFv2ABI || isVarArg; 3738 unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize; 3739 unsigned NumBytes = LinkageSize; 3740 unsigned AvailableFPRs = Num_FPR_Regs; 3741 unsigned AvailableVRs = Num_VR_Regs; 3742 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 3743 if (Ins[i].Flags.isNest()) 3744 continue; 3745 3746 if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags, 3747 PtrByteSize, LinkageSize, ParamAreaSize, 3748 NumBytes, AvailableFPRs, AvailableVRs, 3749 Subtarget.hasQPX())) 3750 HasParameterArea = true; 3751 } 3752 3753 // Add DAG nodes to load the arguments or copy them out of registers. On 3754 // entry to a function on PPC, the arguments start after the linkage area, 3755 // although the first ones are often in registers. 3756 3757 unsigned ArgOffset = LinkageSize; 3758 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 3759 unsigned &QFPR_idx = FPR_idx; 3760 SmallVector<SDValue, 8> MemOps; 3761 Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin(); 3762 unsigned CurArgIdx = 0; 3763 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 3764 SDValue ArgVal; 3765 bool needsLoad = false; 3766 EVT ObjectVT = Ins[ArgNo].VT; 3767 EVT OrigVT = Ins[ArgNo].ArgVT; 3768 unsigned ObjSize = ObjectVT.getStoreSize(); 3769 unsigned ArgSize = ObjSize; 3770 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 3771 if (Ins[ArgNo].isOrigArg()) { 3772 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); 3773 CurArgIdx = Ins[ArgNo].getOrigArgIndex(); 3774 } 3775 // We re-align the argument offset for each argument, except when using the 3776 // fast calling convention, when we need to make sure we do that only when 3777 // we'll actually use a stack slot. 3778 unsigned CurArgOffset, Align; 3779 auto ComputeArgOffset = [&]() { 3780 /* Respect alignment of argument on the stack. */ 3781 Align = CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize); 3782 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 3783 CurArgOffset = ArgOffset; 3784 }; 3785 3786 if (CallConv != CallingConv::Fast) { 3787 ComputeArgOffset(); 3788 3789 /* Compute GPR index associated with argument offset. */ 3790 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 3791 GPR_idx = std::min(GPR_idx, Num_GPR_Regs); 3792 } 3793 3794 // FIXME the codegen can be much improved in some cases. 3795 // We do not have to keep everything in memory. 
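// Example (sketch): a 12-byte byval rounds up to ArgSize == 16 below, i.e.
// two doublewords; any pieces that arrived in GPRs are spilled next to each
// other so the aggregate is contiguous in memory.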
3796 if (Flags.isByVal()) { 3797 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit"); 3798 3799 if (CallConv == CallingConv::Fast) 3800 ComputeArgOffset(); 3801 3802 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 3803 ObjSize = Flags.getByValSize(); 3804 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3805 // Empty aggregate parameters do not take up registers. Examples: 3806 // struct { } a; 3807 // union { } b; 3808 // int c[0]; 3809 // etc. However, we have to provide a place-holder in InVals, so 3810 // pretend we have an 8-byte item at the current address for that 3811 // purpose. 3812 if (!ObjSize) { 3813 int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true); 3814 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3815 InVals.push_back(FIN); 3816 continue; 3817 } 3818 3819 // Create a stack object covering all stack doublewords occupied 3820 // by the argument. If the argument is (fully or partially) on 3821 // the stack, or if the argument is fully in registers but the 3822 // caller has allocated the parameter save anyway, we can refer 3823 // directly to the caller's stack frame. Otherwise, create a 3824 // local copy in our own frame. 3825 int FI; 3826 if (HasParameterArea || 3827 ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize) 3828 FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true); 3829 else 3830 FI = MFI.CreateStackObject(ArgSize, Align, false); 3831 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3832 3833 // Handle aggregates smaller than 8 bytes. 3834 if (ObjSize < PtrByteSize) { 3835 // The value of the object is its address, which differs from the 3836 // address of the enclosing doubleword on big-endian systems. 3837 SDValue Arg = FIN; 3838 if (!isLittleEndian) { 3839 SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT); 3840 Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff); 3841 } 3842 InVals.push_back(Arg); 3843 3844 if (GPR_idx != Num_GPR_Regs) { 3845 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3846 FuncInfo->addLiveInAttr(VReg, Flags); 3847 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3848 SDValue Store; 3849 3850 if (ObjSize==1 || ObjSize==2 || ObjSize==4) { 3851 EVT ObjType = (ObjSize == 1 ? MVT::i8 : 3852 (ObjSize == 2 ? MVT::i16 : MVT::i32)); 3853 Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg, 3854 MachinePointerInfo(&*FuncArg), ObjType); 3855 } else { 3856 // For sizes that don't fit a truncating store (3, 5, 6, 7), 3857 // store the whole register as-is to the parameter save area 3858 // slot. 3859 Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 3860 MachinePointerInfo(&*FuncArg)); 3861 } 3862 3863 MemOps.push_back(Store); 3864 } 3865 // Whether we copied from a register or not, advance the offset 3866 // into the parameter save area by a full doubleword. 3867 ArgOffset += PtrByteSize; 3868 continue; 3869 } 3870 3871 // The value of the object is its address, which is the address of 3872 // its first stack doubleword. 3873 InVals.push_back(FIN); 3874 3875 // Store whatever pieces of the object are in registers to memory. 
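// E.g. with ArgSize == 24 and only two GPRs left, the loop below stores the
// doublewords at j == 0 and j == 8 from registers; the remaining doubleword
// was already placed in memory by the caller.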
3876 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 3877 if (GPR_idx == Num_GPR_Regs) 3878 break; 3879 3880 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3881 FuncInfo->addLiveInAttr(VReg, Flags); 3882 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3883 SDValue Addr = FIN; 3884 if (j) { 3885 SDValue Off = DAG.getConstant(j, dl, PtrVT); 3886 Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off); 3887 } 3888 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr, 3889 MachinePointerInfo(&*FuncArg, j)); 3890 MemOps.push_back(Store); 3891 ++GPR_idx; 3892 } 3893 ArgOffset += ArgSize; 3894 continue; 3895 } 3896 3897 switch (ObjectVT.getSimpleVT().SimpleTy) { 3898 default: llvm_unreachable("Unhandled argument type!"); 3899 case MVT::i1: 3900 case MVT::i32: 3901 case MVT::i64: 3902 if (Flags.isNest()) { 3903 // The 'nest' parameter, if any, is passed in R11. 3904 unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass); 3905 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3906 3907 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3908 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3909 3910 break; 3911 } 3912 3913 // These can be scalar arguments or elements of an integer array type 3914 // passed directly. Clang may use those instead of "byval" aggregate 3915 // types to avoid forcing arguments to memory unnecessarily. 3916 if (GPR_idx != Num_GPR_Regs) { 3917 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3918 FuncInfo->addLiveInAttr(VReg, Flags); 3919 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3920 3921 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3922 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3923 // value to MVT::i64 and then truncate to the correct register size. 3924 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3925 } else { 3926 if (CallConv == CallingConv::Fast) 3927 ComputeArgOffset(); 3928 3929 needsLoad = true; 3930 ArgSize = PtrByteSize; 3931 } 3932 if (CallConv != CallingConv::Fast || needsLoad) 3933 ArgOffset += 8; 3934 break; 3935 3936 case MVT::f32: 3937 case MVT::f64: 3938 // These can be scalar arguments or elements of a float array type 3939 // passed directly. The latter are used to implement ELFv2 homogeneous 3940 // float aggregates. 3941 if (FPR_idx != Num_FPR_Regs) { 3942 unsigned VReg; 3943 3944 if (ObjectVT == MVT::f32) 3945 VReg = MF.addLiveIn(FPR[FPR_idx], 3946 Subtarget.hasP8Vector() 3947 ? &PPC::VSSRCRegClass 3948 : &PPC::F4RCRegClass); 3949 else 3950 VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX() 3951 ? &PPC::VSFRCRegClass 3952 : &PPC::F8RCRegClass); 3953 3954 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3955 ++FPR_idx; 3956 } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) { 3957 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8 3958 // once we support fp <-> gpr moves. 3959 3960 // This can only ever happen in the presence of f32 array types, 3961 // since otherwise we never run out of FPRs before running out 3962 // of GPRs. 3963 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3964 FuncInfo->addLiveInAttr(VReg, Flags); 3965 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3966 3967 if (ObjectVT == MVT::f32) { 3968 if ((ArgOffset % PtrByteSize) == (isLittleEndian ?
4 : 0)) 3969 ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal, 3970 DAG.getConstant(32, dl, MVT::i32)); 3971 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal); 3972 } 3973 3974 ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal); 3975 } else { 3976 if (CallConv == CallingConv::Fast) 3977 ComputeArgOffset(); 3978 3979 needsLoad = true; 3980 } 3981 3982 // When passing an array of floats, the array occupies consecutive 3983 // space in the argument area; only round up to the next doubleword 3984 // at the end of the array. Otherwise, each float takes 8 bytes. 3985 if (CallConv != CallingConv::Fast || needsLoad) { 3986 ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize; 3987 ArgOffset += ArgSize; 3988 if (Flags.isInConsecutiveRegsLast()) 3989 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3990 } 3991 break; 3992 case MVT::v4f32: 3993 case MVT::v4i32: 3994 case MVT::v8i16: 3995 case MVT::v16i8: 3996 case MVT::v2f64: 3997 case MVT::v2i64: 3998 case MVT::v1i128: 3999 case MVT::f128: 4000 if (!Subtarget.hasQPX()) { 4001 // These can be scalar arguments or elements of a vector array type 4002 // passed directly. The latter are used to implement ELFv2 homogeneous 4003 // vector aggregates. 4004 if (VR_idx != Num_VR_Regs) { 4005 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 4006 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 4007 ++VR_idx; 4008 } else { 4009 if (CallConv == CallingConv::Fast) 4010 ComputeArgOffset(); 4011 needsLoad = true; 4012 } 4013 if (CallConv != CallingConv::Fast || needsLoad) 4014 ArgOffset += 16; 4015 break; 4016 } // not QPX 4017 4018 assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 && 4019 "Invalid QPX parameter type"); 4020 LLVM_FALLTHROUGH; 4021 4022 case MVT::v4f64: 4023 case MVT::v4i1: 4024 // QPX vectors are treated like their scalar floating-point subregisters 4025 // (except that they're larger). 4026 unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32; 4027 if (QFPR_idx != Num_QFPR_Regs) { 4028 const TargetRegisterClass *RC; 4029 switch (ObjectVT.getSimpleVT().SimpleTy) { 4030 case MVT::v4f64: RC = &PPC::QFRCRegClass; break; 4031 case MVT::v4f32: RC = &PPC::QSRCRegClass; break; 4032 default: RC = &PPC::QBRCRegClass; break; 4033 } 4034 4035 unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC); 4036 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 4037 ++QFPR_idx; 4038 } else { 4039 if (CallConv == CallingConv::Fast) 4040 ComputeArgOffset(); 4041 needsLoad = true; 4042 } 4043 if (CallConv != CallingConv::Fast || needsLoad) 4044 ArgOffset += Sz; 4045 break; 4046 } 4047 4048 // We need to load the argument to a virtual register if we determined 4049 // above that we ran out of physical registers of the appropriate type. 4050 if (needsLoad) { 4051 if (ObjSize < ArgSize && !isLittleEndian) 4052 CurArgOffset += ArgSize - ObjSize; 4053 int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable); 4054 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4055 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo()); 4056 } 4057 4058 InVals.push_back(ArgVal); 4059 } 4060 4061 // Area that is at least reserved in the caller of this function. 4062 unsigned MinReservedArea; 4063 if (HasParameterArea) 4064 MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize); 4065 else 4066 MinReservedArea = LinkageSize; 4067 4068 // Set the size that is at least reserved in caller of this function.
Tail 4069 // call optimized functions' reserved stack space needs to be aligned so that 4070 // taking the difference between two stack areas will result in an aligned 4071 // stack. 4072 MinReservedArea = 4073 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 4074 FuncInfo->setMinReservedArea(MinReservedArea); 4075 4076 // If the function takes variable number of arguments, make a frame index for 4077 // the start of the first vararg value... for expansion of llvm.va_start. 4078 if (isVarArg) { 4079 int Depth = ArgOffset; 4080 4081 FuncInfo->setVarArgsFrameIndex( 4082 MFI.CreateFixedObject(PtrByteSize, Depth, true)); 4083 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 4084 4085 // If this function is vararg, store any remaining integer argument regs 4086 // to their spots on the stack so that they may be loaded by dereferencing 4087 // the result of va_next. 4088 for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 4089 GPR_idx < Num_GPR_Regs; ++GPR_idx) { 4090 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 4091 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 4092 SDValue Store = 4093 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 4094 MemOps.push_back(Store); 4095 // Increment the address by eight for the next argument to store 4096 SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT); 4097 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 4098 } 4099 } 4100 4101 if (!MemOps.empty()) 4102 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 4103 4104 return Chain; 4105 } 4106 4107 SDValue PPCTargetLowering::LowerFormalArguments_Darwin( 4108 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 4109 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 4110 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 4111 // TODO: add description of PPC stack frame format, or at least some docs. 4112 // 4113 MachineFunction &MF = DAG.getMachineFunction(); 4114 MachineFrameInfo &MFI = MF.getFrameInfo(); 4115 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 4116 4117 EVT PtrVT = getPointerTy(MF.getDataLayout()); 4118 bool isPPC64 = PtrVT == MVT::i64; 4119 // Potential tail calls could cause overwriting of argument stack slots. 4120 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 4121 (CallConv == CallingConv::Fast)); 4122 unsigned PtrByteSize = isPPC64 ? 8 : 4; 4123 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 4124 unsigned ArgOffset = LinkageSize; 4125 // Area that is at least reserved in caller of this function. 4126 unsigned MinReservedArea = ArgOffset; 4127 4128 static const MCPhysReg GPR_32[] = { // 32-bit registers. 4129 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 4130 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 4131 }; 4132 static const MCPhysReg GPR_64[] = { // 64-bit registers. 4133 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 4134 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 4135 }; 4136 static const MCPhysReg VR[] = { 4137 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 4138 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 4139 }; 4140 4141 const unsigned Num_GPR_Regs = array_lengthof(GPR_32); 4142 const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13; 4143 const unsigned Num_VR_Regs = array_lengthof(VR); 4144 4145 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 4146 4147 const MCPhysReg *GPR = isPPC64 ?
GPR_64 : GPR_32; 4148 4149 // In 32-bit non-varargs functions, the stack space for vectors is after the 4150 // stack space for non-vectors. We do not use this space unless we have 4151 // too many vectors to fit in registers, something that only occurs in 4152 // constructed examples:), but we have to walk the arglist to figure 4153 // that out...for the pathological case, compute VecArgOffset as the 4154 // start of the vector parameter area. Computing VecArgOffset is the 4155 // entire point of the following loop. 4156 unsigned VecArgOffset = ArgOffset; 4157 if (!isVarArg && !isPPC64) { 4158 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; 4159 ++ArgNo) { 4160 EVT ObjectVT = Ins[ArgNo].VT; 4161 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 4162 4163 if (Flags.isByVal()) { 4164 // ObjSize is the true size, ArgSize rounded up to multiple of regs. 4165 unsigned ObjSize = Flags.getByValSize(); 4166 unsigned ArgSize = 4167 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 4168 VecArgOffset += ArgSize; 4169 continue; 4170 } 4171 4172 switch(ObjectVT.getSimpleVT().SimpleTy) { 4173 default: llvm_unreachable("Unhandled argument type!"); 4174 case MVT::i1: 4175 case MVT::i32: 4176 case MVT::f32: 4177 VecArgOffset += 4; 4178 break; 4179 case MVT::i64: // PPC64 4180 case MVT::f64: 4181 // FIXME: We are guaranteed to be !isPPC64 at this point. 4182 // Does MVT::i64 apply? 4183 VecArgOffset += 8; 4184 break; 4185 case MVT::v4f32: 4186 case MVT::v4i32: 4187 case MVT::v8i16: 4188 case MVT::v16i8: 4189 // Nothing to do, we're only looking at Nonvector args here. 4190 break; 4191 } 4192 } 4193 } 4194 // We've found where the vector parameter area in memory is. Skip the 4195 // first 12 parameters; these don't use that memory. 4196 VecArgOffset = ((VecArgOffset+15)/16)*16; 4197 VecArgOffset += 12*16; 4198 4199 // Add DAG nodes to load the arguments or copy them out of registers. On 4200 // entry to a function on PPC, the arguments start after the linkage area, 4201 // although the first ones are often in registers. 4202 4203 SmallVector<SDValue, 8> MemOps; 4204 unsigned nAltivecParamsAtEnd = 0; 4205 Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin(); 4206 unsigned CurArgIdx = 0; 4207 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 4208 SDValue ArgVal; 4209 bool needsLoad = false; 4210 EVT ObjectVT = Ins[ArgNo].VT; 4211 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 4212 unsigned ArgSize = ObjSize; 4213 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 4214 if (Ins[ArgNo].isOrigArg()) { 4215 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); 4216 CurArgIdx = Ins[ArgNo].getOrigArgIndex(); 4217 } 4218 unsigned CurArgOffset = ArgOffset; 4219 4220 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary. 4221 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 || 4222 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) { 4223 if (isVarArg || isPPC64) { 4224 MinReservedArea = ((MinReservedArea+15)/16)*16; 4225 MinReservedArea += CalculateStackSlotSize(ObjectVT, 4226 Flags, 4227 PtrByteSize); 4228 } else nAltivecParamsAtEnd++; 4229 } else 4230 // Calculate min reserved area. 4231 MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT, 4232 Flags, 4233 PtrByteSize); 4234 4235 // FIXME the codegen can be much improved in some cases. 4236 // We do not have to keep everything in memory. 
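// Example (sketch, 32-bit Darwin): a 2-byte byval is right justified in its
// word, so CurArgOffset is advanced by (4 - ObjSize) == 2 below before its
// address is pushed to InVals.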
4237 if (Flags.isByVal()) { 4238 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit"); 4239 4240 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 4241 ObjSize = Flags.getByValSize(); 4242 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 4243 // Objects of size 1 and 2 are right justified, everything else is 4244 // left justified. This means the memory address is adjusted forwards. 4245 if (ObjSize==1 || ObjSize==2) { 4246 CurArgOffset = CurArgOffset + (4 - ObjSize); 4247 } 4248 // The value of the object is its address. 4249 int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, false, true); 4250 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4251 InVals.push_back(FIN); 4252 if (ObjSize==1 || ObjSize==2) { 4253 if (GPR_idx != Num_GPR_Regs) { 4254 unsigned VReg; 4255 if (isPPC64) 4256 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 4257 else 4258 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 4259 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 4260 EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16; 4261 SDValue Store = 4262 DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, 4263 MachinePointerInfo(&*FuncArg), ObjType); 4264 MemOps.push_back(Store); 4265 ++GPR_idx; 4266 } 4267 4268 ArgOffset += PtrByteSize; 4269 4270 continue; 4271 } 4272 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 4273 // Store whatever pieces of the object are in registers 4274 // to memory. ArgOffset will be the address of the beginning 4275 // of the object. 4276 if (GPR_idx != Num_GPR_Regs) { 4277 unsigned VReg; 4278 if (isPPC64) 4279 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 4280 else 4281 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 4282 int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true); 4283 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4284 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 4285 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 4286 MachinePointerInfo(&*FuncArg, j)); 4287 MemOps.push_back(Store); 4288 ++GPR_idx; 4289 ArgOffset += PtrByteSize; 4290 } else { 4291 ArgOffset += ArgSize - (ArgOffset-CurArgOffset); 4292 break; 4293 } 4294 } 4295 continue; 4296 } 4297 4298 switch (ObjectVT.getSimpleVT().SimpleTy) { 4299 default: llvm_unreachable("Unhandled argument type!"); 4300 case MVT::i1: 4301 case MVT::i32: 4302 if (!isPPC64) { 4303 if (GPR_idx != Num_GPR_Regs) { 4304 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 4305 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 4306 4307 if (ObjectVT == MVT::i1) 4308 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal); 4309 4310 ++GPR_idx; 4311 } else { 4312 needsLoad = true; 4313 ArgSize = PtrByteSize; 4314 } 4315 // All int arguments reserve stack space in the Darwin ABI. 4316 ArgOffset += PtrByteSize; 4317 break; 4318 } 4319 LLVM_FALLTHROUGH; 4320 case MVT::i64: // PPC64 4321 if (GPR_idx != Num_GPR_Regs) { 4322 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 4323 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 4324 4325 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 4326 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 4327 // value to MVT::i64 and then truncate to the correct register size. 4328 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 4329 4330 ++GPR_idx; 4331 } else { 4332 needsLoad = true; 4333 ArgSize = PtrByteSize; 4334 } 4335 // All int arguments reserve stack space in the Darwin ABI. 
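// (A full doubleword is reserved here even for values promoted from i32.)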
4336 ArgOffset += 8; 4337 break; 4338 4339 case MVT::f32: 4340 case MVT::f64: 4341 // Every 4 bytes of argument space consumes one of the GPRs available for 4342 // argument passing. 4343 if (GPR_idx != Num_GPR_Regs) { 4344 ++GPR_idx; 4345 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64) 4346 ++GPR_idx; 4347 } 4348 if (FPR_idx != Num_FPR_Regs) { 4349 unsigned VReg; 4350 4351 if (ObjectVT == MVT::f32) 4352 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass); 4353 else 4354 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass); 4355 4356 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 4357 ++FPR_idx; 4358 } else { 4359 needsLoad = true; 4360 } 4361 4362 // All FP arguments reserve stack space in the Darwin ABI. 4363 ArgOffset += isPPC64 ? 8 : ObjSize; 4364 break; 4365 case MVT::v4f32: 4366 case MVT::v4i32: 4367 case MVT::v8i16: 4368 case MVT::v16i8: 4369 // Note that vector arguments in registers don't reserve stack space, 4370 // except in varargs functions. 4371 if (VR_idx != Num_VR_Regs) { 4372 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 4373 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 4374 if (isVarArg) { 4375 while ((ArgOffset % 16) != 0) { 4376 ArgOffset += PtrByteSize; 4377 if (GPR_idx != Num_GPR_Regs) 4378 GPR_idx++; 4379 } 4380 ArgOffset += 16; 4381 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64? 4382 } 4383 ++VR_idx; 4384 } else { 4385 if (!isVarArg && !isPPC64) { 4386 // Vectors go after all the nonvectors. 4387 CurArgOffset = VecArgOffset; 4388 VecArgOffset += 16; 4389 } else { 4390 // Vectors are aligned. 4391 ArgOffset = ((ArgOffset+15)/16)*16; 4392 CurArgOffset = ArgOffset; 4393 ArgOffset += 16; 4394 } 4395 needsLoad = true; 4396 } 4397 break; 4398 } 4399 4400 // We need to load the argument to a virtual register if we determined above 4401 // that we ran out of physical registers of the appropriate type. 4402 if (needsLoad) { 4403 int FI = MFI.CreateFixedObject(ObjSize, 4404 CurArgOffset + (ArgSize - ObjSize), 4405 isImmutable); 4406 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4407 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo()); 4408 } 4409 4410 InVals.push_back(ArgVal); 4411 } 4412 4413 // Allow for Altivec parameters at the end, if needed. 4414 if (nAltivecParamsAtEnd) { 4415 MinReservedArea = ((MinReservedArea+15)/16)*16; 4416 MinReservedArea += 16*nAltivecParamsAtEnd; 4417 } 4418 4419 // Area that is at least reserved in the caller of this function. 4420 MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize); 4421 4422 // Set the size that is at least reserved in caller of this function. Tail 4423 // call optimized functions' reserved stack space needs to be aligned so that 4424 // taking the difference between two stack areas will result in an aligned 4425 // stack. 4426 MinReservedArea = 4427 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 4428 FuncInfo->setMinReservedArea(MinReservedArea); 4429 4430 // If the function takes variable number of arguments, make a frame index for 4431 // the start of the first vararg value... for expansion of llvm.va_start. 
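// A sketch of the layout the code below produces (assuming PPC32,
// PtrByteSize == 4, and two argument GPRs left over): the remaining
// registers are spilled contiguously from Depth so va_arg can walk them
// linearly:
//   VarArgsFrameIndex -> [ GPR_idx     ]  Depth + 0
//                        [ GPR_idx + 1 ]  Depth + 4
//                        [ caller-stored overflow arguments follow ... ]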
4432   if (isVarArg) {
4433     int Depth = ArgOffset;
4434
4435     FuncInfo->setVarArgsFrameIndex(
4436       MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
4437                             Depth, true));
4438     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4439
4440     // If this function is vararg, store any remaining integer argument regs
4441     // to their spots on the stack so that they may be loaded by dereferencing
4442     // the result of va_next.
4443     for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
4444       unsigned VReg;
4445
4446       if (isPPC64)
4447         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4448       else
4449         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4450
4451       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4452       SDValue Store =
4453           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4454       MemOps.push_back(Store);
4455       // Increment the address by four for the next argument to store
4456       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
4457       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4458     }
4459   }
4460
4461   if (!MemOps.empty())
4462     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4463
4464   return Chain;
4465 }
4466
4467 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
4468 /// adjusted to accommodate the arguments for the tailcall.
4469 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
4470                                    unsigned ParamSize) {
4471
4472   if (!isTailCall) return 0;
4473
4474   PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
4475   unsigned CallerMinReservedArea = FI->getMinReservedArea();
4476   int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
4477   // Remember only if the new adjustment is bigger.
4478   if (SPDiff < FI->getTailCallSPDelta())
4479     FI->setTailCallSPDelta(SPDiff);
4480
4481   return SPDiff;
4482 }
4483
4484 static bool isFunctionGlobalAddress(SDValue Callee);
4485
4486 static bool
4487 callsShareTOCBase(const Function *Caller, SDValue Callee,
4488                   const TargetMachine &TM) {
4489   // Callee is either a GlobalAddress or an ExternalSymbol. ExternalSymbols
4490   // don't have enough information to determine if the caller and callee share
4491   // the same TOC base, so we have to pessimistically assume they don't for
4492   // correctness.
4493   GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4494   if (!G)
4495     return false;
4496
4497   const GlobalValue *GV = G->getGlobal();
4498   // The medium and large code models are expected to provide a sufficiently
4499   // large TOC to provide all data addressing needs of a module with a
4500   // single TOC. Since each module will be addressed with a single TOC, we
4501   // only need to check that caller and callee don't cross dso boundaries.
4502   if (CodeModel::Medium == TM.getCodeModel() ||
4503       CodeModel::Large == TM.getCodeModel())
4504     return TM.shouldAssumeDSOLocal(*Caller->getParent(), GV);
4505
4506   // Otherwise we need to ensure callee and caller are in the same section,
4507   // since the linker may allocate multiple TOCs, and we don't know which
4508   // sections will belong to the same TOC base.
4509
4510   if (!GV->isStrongDefinitionForLinker())
4511     return false;
4512
4513   // Any explicitly-specified sections and section prefixes must also match.
4514   // Also, if we're using -ffunction-sections, then each function is always in
4515   // a different section (the same is true for COMDAT functions).
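// For example (illustrative only): with -ffunction-sections, or with a
// callee declared as
//   __attribute__((section("hot"))) void callee();
// while the caller lives in the default text section, the checks below fail
// and we conservatively assume the two functions may get different TOC bases.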
4516   if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
4517       GV->getSection() != Caller->getSection())
4518     return false;
4519   if (const auto *F = dyn_cast<Function>(GV)) {
4520     if (F->getSectionPrefix() != Caller->getSectionPrefix())
4521       return false;
4522   }
4523
4524   // If the callee might be interposed, then we can't assume the ultimate call
4525   // target will be in the same section. Even in cases where we can assume that
4526   // interposition won't happen, in any case where the linker might insert a
4527   // stub to allow for interposition, we must generate code as though
4528   // interposition might occur. To understand why this matters, consider a
4529   // situation where: a -> b -> c where the arrows indicate calls. b and c are
4530   // in the same section, but a is in a different module (i.e. has a different
4531   // TOC base pointer). If the linker allows for interposition between b and c,
4532   // then it will generate a stub for the call edge between b and c which will
4533   // save the TOC pointer into the designated stack slot allocated by b. If we
4534   // return true here, and therefore allow a tail call between b and c, that
4535   // stack slot won't exist and the b -> c stub will end up saving b's TOC base
4536   // pointer into the stack slot allocated by a (where the a -> b stub saved
4537   // a's TOC base pointer). If we're not considering a tail call, but rather,
4538   // whether a nop is needed after the call instruction in b, because the linker
4539   // will insert a stub, the linker might complain about a missing nop if we
4540   // omit it (although many linkers don't complain in this case).
4541   if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
4542     return false;
4543
4544   return true;
4545 }
4546
4547 static bool
4548 needStackSlotPassParameters(const PPCSubtarget &Subtarget,
4549                             const SmallVectorImpl<ISD::OutputArg> &Outs) {
4550   assert(Subtarget.is64BitELFABI());
4551
4552   const unsigned PtrByteSize = 8;
4553   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4554
4555   static const MCPhysReg GPR[] = {
4556     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4557     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4558   };
4559   static const MCPhysReg VR[] = {
4560     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4561     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4562   };
4563
4564   const unsigned NumGPRs = array_lengthof(GPR);
4565   const unsigned NumFPRs = 13;
4566   const unsigned NumVRs = array_lengthof(VR);
4567   const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
4568
4569   unsigned NumBytes = LinkageSize;
4570   unsigned AvailableFPRs = NumFPRs;
4571   unsigned AvailableVRs = NumVRs;
4572
4573   for (const ISD::OutputArg& Param : Outs) {
4574     if (Param.Flags.isNest()) continue;
4575
4576     if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags,
4577                                PtrByteSize, LinkageSize, ParamAreaSize,
4578                                NumBytes, AvailableFPRs, AvailableVRs,
4579                                Subtarget.hasQPX()))
4580       return true;
4581   }
4582   return false;
4583 }
4584
4585 static bool
4586 hasSameArgumentList(const Function *CallerFn, ImmutableCallSite CS) {
4587   if (CS.arg_size() != CallerFn->arg_size())
4588     return false;
4589
4590   ImmutableCallSite::arg_iterator CalleeArgIter = CS.arg_begin();
4591   ImmutableCallSite::arg_iterator CalleeArgEnd = CS.arg_end();
4592   Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();
4593
4594   for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
4595     const Value* CalleeArg = *CalleeArgIter;
4596     const Value* CallerArg = &(*CallerArgIter);
4597     if (CalleeArg == CallerArg)
4598       continue;
4599
4600     // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
4601     //        tail call @callee([4 x i64] undef, [4 x i64] %b)
4602     //      }
4603     // 1st argument of callee is undef and has the same type as the caller's.
4604     if (CalleeArg->getType() == CallerArg->getType() &&
4605         isa<UndefValue>(CalleeArg))
4606       continue;
4607
4608     return false;
4609   }
4610
4611   return true;
4612 }
4613
4614 // Returns true if TCO is possible between the caller's and callee's
4615 // calling conventions.
4616 static bool
4617 areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
4618                                     CallingConv::ID CalleeCC) {
4619   // Tail calls are possible with fastcc and ccc.
4620   auto isTailCallableCC = [] (CallingConv::ID CC){
4621     return CC == CallingConv::C || CC == CallingConv::Fast;
4622   };
4623   if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
4624     return false;
4625
4626   // We can safely tail call both fastcc and ccc callees from a c calling
4627   // convention caller. If the caller is fastcc, we may have less stack space
4628   // than a non-fastcc caller with the same signature so disable tail-calls in
4629   // that case.
4630   return CallerCC == CallingConv::C || CallerCC == CalleeCC;
4631 }
4632
4633 bool
4634 PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
4635                                     SDValue Callee,
4636                                     CallingConv::ID CalleeCC,
4637                                     ImmutableCallSite CS,
4638                                     bool isVarArg,
4639                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
4640                                     const SmallVectorImpl<ISD::InputArg> &Ins,
4641                                     SelectionDAG& DAG) const {
4642   bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
4643
4644   if (DisableSCO && !TailCallOpt) return false;
4645
4646   // Variadic argument functions are not supported.
4647   if (isVarArg) return false;
4648
4649   auto &Caller = DAG.getMachineFunction().getFunction();
4650   // Check that the calling conventions are compatible for tco.
4651   if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
4652     return false;
4653
4654   // A caller with any byval parameter is not supported.
4655   if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
4656     return false;
4657
4658   // A callee with any byval parameter is not supported either.
4659   // Note: This is a quick work around, because in some cases, e.g.
4660   // caller's stack size > callee's stack size, we are still able to apply
4661   // sibling call optimization. For example, gcc is able to do SCO for caller1
4662   // in the following example, but not for caller2.
4663   //   struct test {
4664   //     long int a;
4665   //     char ary[56];
4666   //   } gTest;
4667   //   __attribute__((noinline)) int callee(struct test v, struct test *b) {
4668   //     b->a = v.a;
4669   //     return 0;
4670   //   }
4671   //   void caller1(struct test a, struct test c, struct test *b) {
4672   //     callee(gTest, b); }
4673   //   void caller2(struct test *b) { callee(gTest, b); }
4674   if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
4675     return false;
4676
4677   // If callee and caller use different calling conventions, we cannot pass
4678   // parameters on stack since offsets for the parameter area may be different.
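// (Illustrative) A fastcc caller may not have reserved the full parameter
// save area that a ccc callee would expect to address relative to the
// caller's stack pointer, so reusing the caller's frame for the callee's
// stack-passed arguments could read or write the wrong slots.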
4679   if (Caller.getCallingConv() != CalleeCC &&
4680       needStackSlotPassParameters(Subtarget, Outs))
4681     return false;
4682
4683   // No TCO/SCO on indirect call because the caller has to restore its TOC.
4684   if (!isFunctionGlobalAddress(Callee) &&
4685       !isa<ExternalSymbolSDNode>(Callee))
4686     return false;
4687
4688   // If the caller and callee potentially have different TOC bases then we
4689   // cannot tail call since we need to restore the TOC pointer after the call.
4690   // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
4691   if (!callsShareTOCBase(&Caller, Callee, getTargetMachine()))
4692     return false;
4693
4694   // TCO allows altering callee ABI, so we don't have to check further.
4695   if (CalleeCC == CallingConv::Fast && TailCallOpt)
4696     return true;
4697
4698   if (DisableSCO) return false;
4699
4700   // If the callee uses the same argument list as the caller, we can apply
4701   // SCO in this case. If it does not, we need to check whether the callee
4702   // needs stack slots for passing arguments.
4703   if (!hasSameArgumentList(&Caller, CS) &&
4704       needStackSlotPassParameters(Subtarget, Outs)) {
4705     return false;
4706   }
4707
4708   return true;
4709 }
4710
4711 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
4712 /// for tail call optimization. Targets which want to do tail call
4713 /// optimization should implement this function.
4714 bool
4715 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
4716                                                      CallingConv::ID CalleeCC,
4717                                                      bool isVarArg,
4718                                       const SmallVectorImpl<ISD::InputArg> &Ins,
4719                                                      SelectionDAG& DAG) const {
4720   if (!getTargetMachine().Options.GuaranteedTailCallOpt)
4721     return false;
4722
4723   // Variable argument functions are not supported.
4724   if (isVarArg)
4725     return false;
4726
4727   MachineFunction &MF = DAG.getMachineFunction();
4728   CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
4729   if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
4730     // Functions containing by val parameters are not supported.
4731     for (unsigned i = 0; i != Ins.size(); i++) {
4732       ISD::ArgFlagsTy Flags = Ins[i].Flags;
4733       if (Flags.isByVal()) return false;
4734     }
4735
4736     // Non-PIC/GOT tail calls are supported.
4737     if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
4738       return true;
4739
4740     // At the moment we can only do local tail calls (in same module, hidden
4741     // or protected) if we are generating PIC.
4742     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
4743       return G->getGlobal()->hasHiddenVisibility()
4744           || G->getGlobal()->hasProtectedVisibility();
4745   }
4746
4747   return false;
4748 }
4749
4750 /// isBLACompatibleAddress - Return the immediate to use if the specified
4751 /// 32-bit value is representable in the immediate field of a BxA instruction.
4752 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
4753   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
4754   if (!C) return nullptr;
4755
4756   int Addr = C->getZExtValue();
4757   if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
4758       SignExtend32<26>(Addr) != Addr)
4759     return nullptr;  // Top 6 bits have to be sext of immediate.
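// For illustration: the target must be a word-aligned absolute address whose
// byte value fits in a signed 26-bit field. E.g. 0x01FFFFFC passes both
// checks (and 0x01FFFFFC >> 2 is what ends up in the instruction's LI
// field), while 0x02000000 fails the sign-extension check and 0x01FFFFFE
// fails the alignment check.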
4760 4761 return DAG 4762 .getConstant( 4763 (int)C->getZExtValue() >> 2, SDLoc(Op), 4764 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout())) 4765 .getNode(); 4766 } 4767 4768 namespace { 4769 4770 struct TailCallArgumentInfo { 4771 SDValue Arg; 4772 SDValue FrameIdxOp; 4773 int FrameIdx = 0; 4774 4775 TailCallArgumentInfo() = default; 4776 }; 4777 4778 } // end anonymous namespace 4779 4780 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot. 4781 static void StoreTailCallArgumentsToStackSlot( 4782 SelectionDAG &DAG, SDValue Chain, 4783 const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs, 4784 SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) { 4785 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) { 4786 SDValue Arg = TailCallArgs[i].Arg; 4787 SDValue FIN = TailCallArgs[i].FrameIdxOp; 4788 int FI = TailCallArgs[i].FrameIdx; 4789 // Store relative to framepointer. 4790 MemOpChains.push_back(DAG.getStore( 4791 Chain, dl, Arg, FIN, 4792 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI))); 4793 } 4794 } 4795 4796 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to 4797 /// the appropriate stack slot for the tail call optimized function call. 4798 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain, 4799 SDValue OldRetAddr, SDValue OldFP, 4800 int SPDiff, const SDLoc &dl) { 4801 if (SPDiff) { 4802 // Calculate the new stack slot for the return address. 4803 MachineFunction &MF = DAG.getMachineFunction(); 4804 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); 4805 const PPCFrameLowering *FL = Subtarget.getFrameLowering(); 4806 bool isPPC64 = Subtarget.isPPC64(); 4807 int SlotSize = isPPC64 ? 8 : 4; 4808 int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset(); 4809 int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize, 4810 NewRetAddrLoc, true); 4811 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 4812 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT); 4813 Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx, 4814 MachinePointerInfo::getFixedStack(MF, NewRetAddr)); 4815 4816 // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack 4817 // slot as the FP is never overwritten. 4818 if (Subtarget.isDarwinABI()) { 4819 int NewFPLoc = SPDiff + FL->getFramePointerSaveOffset(); 4820 int NewFPIdx = MF.getFrameInfo().CreateFixedObject(SlotSize, NewFPLoc, 4821 true); 4822 SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT); 4823 Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx, 4824 MachinePointerInfo::getFixedStack( 4825 DAG.getMachineFunction(), NewFPIdx)); 4826 } 4827 } 4828 return Chain; 4829 } 4830 4831 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate 4832 /// the position of the argument. 4833 static void 4834 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, 4835 SDValue Arg, int SPDiff, unsigned ArgOffset, 4836 SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) { 4837 int Offset = ArgOffset + SPDiff; 4838 uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8; 4839 int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true); 4840 EVT VT = isPPC64 ? 
MVT::i64 : MVT::i32;
4841   SDValue FIN = DAG.getFrameIndex(FI, VT);
4842   TailCallArgumentInfo Info;
4843   Info.Arg = Arg;
4844   Info.FrameIdxOp = FIN;
4845   Info.FrameIdx = FI;
4846   TailCallArguments.push_back(Info);
4847 }
4848
4849 /// EmitTailCallLoadFPAndRetAddr - Emit loads from the frame pointer and
4850 /// return address stack slots. Returns the chain as result and the loaded
4851 /// values in LROpOut/FPOpOut. Used when tail calling.
4852 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
4853     SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
4854     SDValue &FPOpOut, const SDLoc &dl) const {
4855   if (SPDiff) {
4856     // Load the LR and FP stack slot for later adjusting.
4857     EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
4858     LROpOut = getReturnAddrFrameIndex(DAG);
4859     LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
4860     Chain = SDValue(LROpOut.getNode(), 1);
4861
4862     // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack
4863     // slot as the FP is never overwritten.
4864     if (Subtarget.isDarwinABI()) {
4865       FPOpOut = getFramePointerFrameIndex(DAG);
4866       FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo());
4867       Chain = SDValue(FPOpOut.getNode(), 1);
4868     }
4869   }
4870   return Chain;
4871 }
4872
4873 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
4874 /// by "Src" to address "Dst" of size "Size". Alignment information is
4875 /// specified by the specific parameter attribute. The copy will be passed as
4876 /// a byval function parameter.
4877 /// Sometimes what we are copying is the end of a larger object, the part that
4878 /// does not fit in registers.
4879 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
4880                                          SDValue Chain, ISD::ArgFlagsTy Flags,
4881                                          SelectionDAG &DAG, const SDLoc &dl) {
4882   SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
4883   return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
4884                        false, false, false, MachinePointerInfo(),
4885                        MachinePointerInfo());
4886 }
4887
4888 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
4889 /// tail calls.
4890 static void LowerMemOpCallTo(
4891     SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
4892     SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
4893     bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
4894     SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
4895   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4896   if (!isTailCall) {
4897     if (isVector) {
4898       SDValue StackPtr;
4899       if (isPPC64)
4900         StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
4901       else
4902         StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
4903       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
4904                            DAG.getConstant(ArgOffset, dl, PtrVT));
4905     }
4906     MemOpChains.push_back(
4907         DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
4908     // Calculate and remember argument location.
4909 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset, 4910 TailCallArguments); 4911 } 4912 4913 static void 4914 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, 4915 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp, 4916 SDValue FPOp, 4917 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) { 4918 // Emit a sequence of copyto/copyfrom virtual registers for arguments that 4919 // might overwrite each other in case of tail call optimization. 4920 SmallVector<SDValue, 8> MemOpChains2; 4921 // Do not flag preceding copytoreg stuff together with the following stuff. 4922 InFlag = SDValue(); 4923 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, 4924 MemOpChains2, dl); 4925 if (!MemOpChains2.empty()) 4926 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2); 4927 4928 // Store the return address to the appropriate stack slot. 4929 Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl); 4930 4931 // Emit callseq_end just before tailcall node. 4932 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 4933 DAG.getIntPtrConstant(0, dl, true), InFlag, dl); 4934 InFlag = Chain.getValue(1); 4935 } 4936 4937 // Is this global address that of a function that can be called by name? (as 4938 // opposed to something that must hold a descriptor for an indirect call). 4939 static bool isFunctionGlobalAddress(SDValue Callee) { 4940 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 4941 if (Callee.getOpcode() == ISD::GlobalTLSAddress || 4942 Callee.getOpcode() == ISD::TargetGlobalTLSAddress) 4943 return false; 4944 4945 return G->getGlobal()->getValueType()->isFunctionTy(); 4946 } 4947 4948 return false; 4949 } 4950 4951 static unsigned 4952 PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, SDValue &Chain, 4953 SDValue CallSeqStart, const SDLoc &dl, int SPDiff, bool isTailCall, 4954 bool isPatchPoint, bool hasNest, 4955 SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass, 4956 SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys, 4957 ImmutableCallSite CS, const PPCSubtarget &Subtarget) { 4958 bool isPPC64 = Subtarget.isPPC64(); 4959 bool isSVR4ABI = Subtarget.isSVR4ABI(); 4960 bool is64BitELFv1ABI = isPPC64 && isSVR4ABI && !Subtarget.isELFv2ABI(); 4961 bool isAIXABI = Subtarget.isAIXABI(); 4962 4963 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4964 NodeTys.push_back(MVT::Other); // Returns a chain 4965 NodeTys.push_back(MVT::Glue); // Returns a flag for retval copy to use. 4966 4967 unsigned CallOpc = PPCISD::CALL; 4968 4969 bool needIndirectCall = true; 4970 if (!isSVR4ABI || !isPPC64) 4971 if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) { 4972 // If this is an absolute destination address, use the munged value. 4973 Callee = SDValue(Dest, 0); 4974 needIndirectCall = false; 4975 } 4976 4977 // PC-relative references to external symbols should go through $stub, unless 4978 // we're building with the leopard linker or later, which automatically 4979 // synthesizes these stubs. 
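// On ELF the analogous decision is made below: a callee that is not known
// to be DSO-local is called through the PLT on 32-bit targets (MO_PLT),
// e.g. (illustrative)
//   bl foo@plt
// 64-bit ELF relies on the TOC machinery handled later in this function
// instead.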
4980 const TargetMachine &TM = DAG.getTarget(); 4981 const Module *Mod = DAG.getMachineFunction().getFunction().getParent(); 4982 const GlobalValue *GV = nullptr; 4983 if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) 4984 GV = G->getGlobal(); 4985 bool Local = TM.shouldAssumeDSOLocal(*Mod, GV); 4986 bool UsePlt = !Local && Subtarget.isTargetELF() && !isPPC64; 4987 4988 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, 4989 // every direct call is) turn it into a TargetGlobalAddress / 4990 // TargetExternalSymbol node so that legalize doesn't hack it. 4991 if (isFunctionGlobalAddress(Callee)) { 4992 GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee); 4993 4994 // A call to a TLS address is actually an indirect call to a 4995 // thread-specific pointer. 4996 unsigned OpFlags = 0; 4997 if (UsePlt) 4998 OpFlags = PPCII::MO_PLT; 4999 5000 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, 5001 Callee.getValueType(), 0, OpFlags); 5002 needIndirectCall = false; 5003 } 5004 5005 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 5006 unsigned char OpFlags = 0; 5007 5008 if (UsePlt) 5009 OpFlags = PPCII::MO_PLT; 5010 5011 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(), 5012 OpFlags); 5013 needIndirectCall = false; 5014 } 5015 5016 if (isPatchPoint) { 5017 // We'll form an invalid direct call when lowering a patchpoint; the full 5018 // sequence for an indirect call is complicated, and many of the 5019 // instructions introduced might have side effects (and, thus, can't be 5020 // removed later). The call itself will be removed as soon as the 5021 // argument/return lowering is complete, so the fact that it has the wrong 5022 // kind of operands should not really matter. 5023 needIndirectCall = false; 5024 } 5025 5026 if (needIndirectCall) { 5027 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair 5028 // to do the call, we can't use PPCISD::CALL. 5029 SDValue MTCTROps[] = {Chain, Callee, InFlag}; 5030 5031 if (is64BitELFv1ABI) { 5032 // Function pointers in the 64-bit SVR4 ABI do not point to the function 5033 // entry point, but to the function descriptor (the function entry point 5034 // address is part of the function descriptor though). 5035 // The function descriptor is a three doubleword structure with the 5036 // following fields: function entry point, TOC base address and 5037 // environment pointer. 5038 // Thus for a call through a function pointer, the following actions need 5039 // to be performed: 5040 // 1. Save the TOC of the caller in the TOC save area of its stack 5041 // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()). 5042 // 2. Load the address of the function entry point from the function 5043 // descriptor. 5044 // 3. Load the TOC of the callee from the function descriptor into r2. 5045 // 4. Load the environment pointer from the function descriptor into 5046 // r11. 5047 // 5. Branch to the function entry point address. 5048 // 6. On return of the callee, the TOC of the caller needs to be 5049 // restored (this is done in FinishCall()). 5050 // 5051 // The loads are scheduled at the beginning of the call sequence, and the 5052 // register copies are flagged together to ensure that no other 5053 // operations can be scheduled in between. E.g. 
without flagging the 5054 // copies together, a TOC access in the caller could be scheduled between 5055 // the assignment of the callee TOC and the branch to the callee, which 5056 // results in the TOC access going through the TOC of the callee instead 5057 // of going through the TOC of the caller, which leads to incorrect code. 5058 5059 // Load the address of the function entry point from the function 5060 // descriptor. 5061 SDValue LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-1); 5062 if (LDChain.getValueType() == MVT::Glue) 5063 LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-2); 5064 5065 auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors() 5066 ? (MachineMemOperand::MODereferenceable | 5067 MachineMemOperand::MOInvariant) 5068 : MachineMemOperand::MONone; 5069 5070 MachinePointerInfo MPI(CS ? CS.getCalledValue() : nullptr); 5071 SDValue LoadFuncPtr = DAG.getLoad(MVT::i64, dl, LDChain, Callee, MPI, 5072 /* Alignment = */ 8, MMOFlags); 5073 5074 // Load environment pointer into r11. 5075 SDValue PtrOff = DAG.getIntPtrConstant(16, dl); 5076 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff); 5077 SDValue LoadEnvPtr = 5078 DAG.getLoad(MVT::i64, dl, LDChain, AddPtr, MPI.getWithOffset(16), 5079 /* Alignment = */ 8, MMOFlags); 5080 5081 SDValue TOCOff = DAG.getIntPtrConstant(8, dl); 5082 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff); 5083 SDValue TOCPtr = 5084 DAG.getLoad(MVT::i64, dl, LDChain, AddTOC, MPI.getWithOffset(8), 5085 /* Alignment = */ 8, MMOFlags); 5086 5087 setUsesTOCBasePtr(DAG); 5088 SDValue TOCVal = DAG.getCopyToReg(Chain, dl, PPC::X2, TOCPtr, 5089 InFlag); 5090 Chain = TOCVal.getValue(0); 5091 InFlag = TOCVal.getValue(1); 5092 5093 // If the function call has an explicit 'nest' parameter, it takes the 5094 // place of the environment pointer. 5095 if (!hasNest) { 5096 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr, 5097 InFlag); 5098 5099 Chain = EnvVal.getValue(0); 5100 InFlag = EnvVal.getValue(1); 5101 } 5102 5103 MTCTROps[0] = Chain; 5104 MTCTROps[1] = LoadFuncPtr; 5105 MTCTROps[2] = InFlag; 5106 } 5107 5108 Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, 5109 makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2)); 5110 InFlag = Chain.getValue(1); 5111 5112 NodeTys.clear(); 5113 NodeTys.push_back(MVT::Other); 5114 NodeTys.push_back(MVT::Glue); 5115 Ops.push_back(Chain); 5116 CallOpc = PPCISD::BCTRL; 5117 Callee.setNode(nullptr); 5118 // Add use of X11 (holding environment pointer) 5119 if (is64BitELFv1ABI && !hasNest) 5120 Ops.push_back(DAG.getRegister(PPC::X11, PtrVT)); 5121 // Add CTR register as callee so a bctr can be emitted later. 5122 if (isTailCall) 5123 Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT)); 5124 } 5125 5126 // If this is a direct call, pass the chain and the callee. 5127 if (Callee.getNode()) { 5128 Ops.push_back(Chain); 5129 Ops.push_back(Callee); 5130 } 5131 // If this is a tail call add stack pointer delta. 5132 if (isTailCall) 5133 Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32)); 5134 5135 // Add argument registers to the end of the list so that they are known live 5136 // into the call. 5137 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 5138 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 5139 RegsToPass[i].second.getValueType())); 5140 5141 // All calls, in the AIX ABI and 64-bit ELF ABIs, need the TOC register 5142 // live into the call. 
5143 // We do need to reserve R2/X2 to appease the verifier for the PATCHPOINT. 5144 if ((isSVR4ABI && isPPC64) || isAIXABI) { 5145 setUsesTOCBasePtr(DAG); 5146 5147 // We cannot add R2/X2 as an operand here for PATCHPOINT, because there is 5148 // no way to mark dependencies as implicit here. 5149 // We will add the R2/X2 dependency in EmitInstrWithCustomInserter. 5150 if (!isPatchPoint) 5151 Ops.push_back(DAG.getRegister(isPPC64 ? PPC::X2 5152 : PPC::R2, PtrVT)); 5153 } 5154 5155 return CallOpc; 5156 } 5157 5158 SDValue PPCTargetLowering::LowerCallResult( 5159 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, 5160 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 5161 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 5162 SmallVector<CCValAssign, 16> RVLocs; 5163 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 5164 *DAG.getContext()); 5165 5166 CCRetInfo.AnalyzeCallResult( 5167 Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold) 5168 ? RetCC_PPC_Cold 5169 : RetCC_PPC); 5170 5171 // Copy all of the result registers out of their specified physreg. 5172 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 5173 CCValAssign &VA = RVLocs[i]; 5174 assert(VA.isRegLoc() && "Can only return in registers!"); 5175 5176 SDValue Val; 5177 5178 if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) { 5179 SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, 5180 InFlag); 5181 Chain = Lo.getValue(1); 5182 InFlag = Lo.getValue(2); 5183 VA = RVLocs[++i]; // skip ahead to next loc 5184 SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, 5185 InFlag); 5186 Chain = Hi.getValue(1); 5187 InFlag = Hi.getValue(2); 5188 if (!Subtarget.isLittleEndian()) 5189 std::swap (Lo, Hi); 5190 Val = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, Lo, Hi); 5191 } else { 5192 Val = DAG.getCopyFromReg(Chain, dl, 5193 VA.getLocReg(), VA.getLocVT(), InFlag); 5194 Chain = Val.getValue(1); 5195 InFlag = Val.getValue(2); 5196 } 5197 5198 switch (VA.getLocInfo()) { 5199 default: llvm_unreachable("Unknown loc info!"); 5200 case CCValAssign::Full: break; 5201 case CCValAssign::AExt: 5202 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 5203 break; 5204 case CCValAssign::ZExt: 5205 Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val, 5206 DAG.getValueType(VA.getValVT())); 5207 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 5208 break; 5209 case CCValAssign::SExt: 5210 Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val, 5211 DAG.getValueType(VA.getValVT())); 5212 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 5213 break; 5214 } 5215 5216 InVals.push_back(Val); 5217 } 5218 5219 return Chain; 5220 } 5221 5222 SDValue PPCTargetLowering::FinishCall( 5223 CallingConv::ID CallConv, const SDLoc &dl, bool isTailCall, bool isVarArg, 5224 bool isPatchPoint, bool hasNest, SelectionDAG &DAG, 5225 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue InFlag, 5226 SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff, 5227 unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins, 5228 SmallVectorImpl<SDValue> &InVals, ImmutableCallSite CS) const { 5229 std::vector<EVT> NodeTys; 5230 SmallVector<SDValue, 8> Ops; 5231 unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, CallSeqStart, dl, 5232 SPDiff, isTailCall, isPatchPoint, hasNest, 5233 RegsToPass, Ops, NodeTys, CS, Subtarget); 5234 5235 // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls 5236 if (isVarArg && 
Subtarget.isSVR4ABI() && !Subtarget.isPPC64())
5237     Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));
5238
5239   // When performing tail call optimization the callee pops its arguments off
5240   // the stack. Account for this here so these bytes can be pushed back on in
5241   // PPCFrameLowering::eliminateCallFramePseudoInstr.
5242   int BytesCalleePops =
5243     (CallConv == CallingConv::Fast &&
5244      getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0;
5245
5246   // Add a register mask operand representing the call-preserved registers.
5247   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
5248   const uint32_t *Mask =
5249       TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
5250   assert(Mask && "Missing call preserved mask for calling convention");
5251   Ops.push_back(DAG.getRegisterMask(Mask));
5252
5253   if (InFlag.getNode())
5254     Ops.push_back(InFlag);
5255
5256   // Emit tail call.
5257   if (isTailCall) {
5258     assert(((Callee.getOpcode() == ISD::Register &&
5259              cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
5260             Callee.getOpcode() == ISD::TargetExternalSymbol ||
5261             Callee.getOpcode() == ISD::TargetGlobalAddress ||
5262             isa<ConstantSDNode>(Callee)) &&
5263            "Expecting a global address, external symbol, absolute value or register");
5264
5265     DAG.getMachineFunction().getFrameInfo().setHasTailCall();
5266     return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, Ops);
5267   }
5268
5269   // Add a NOP immediately after the branch instruction when using the 64-bit
5270   // SVR4 or the AIX ABI.
5271   // At link time, if caller and callee are in different modules and thus
5272   // have different TOCs, the call will be replaced with a call to a stub
5273   // function which saves the current TOC, loads the TOC of the callee and
5274   // branches to the callee. The NOP will be replaced with a load instruction
5275   // which restores the TOC of the caller from the TOC save slot of the current
5276   // stack frame. If caller and callee belong to the same module (and have the
5277   // same TOC), the NOP will remain unchanged, or become some other NOP.
5278
5279   MachineFunction &MF = DAG.getMachineFunction();
5280   EVT PtrVT = getPointerTy(DAG.getDataLayout());
5281   if (!isTailCall && !isPatchPoint &&
5282       ((Subtarget.isSVR4ABI() && Subtarget.isPPC64()) ||
5283        Subtarget.isAIXABI())) {
5284     if (CallOpc == PPCISD::BCTRL) {
5285       if (Subtarget.isAIXABI())
5286         report_fatal_error("Indirect call on AIX is not implemented.");
5287
5288       // This is a call through a function pointer.
5289       // Restore the caller TOC from the save area into R2.
5290       // See PrepareCall() for more information about calls through function
5291       // pointers in the 64-bit SVR4 ABI.
5292       // We are using a target-specific load with r2 hard coded, because the
5293       // result of a target-independent load would never go directly into r2,
5294       // since r2 is a reserved register (which prevents the register allocator
5295       // from allocating it), resulting in an additional register being
5296       // allocated and an unnecessary move instruction being generated.
5297       CallOpc = PPCISD::BCTRL_LOAD_TOC;
5298
5299       SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT);
5300       unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
5301       SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
5302       SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff);
5303
5304       // The address needs to go after the chain input but before the flag (or
5305       // any other variadic arguments).
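// I.e. after the insertion below the operand list starts with
//   { Chain, AddTOC, ... }
// with any glue still last, which is where BCTRL_LOAD_TOC expects to find
// the address of the TOC save slot.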
5306 Ops.insert(std::next(Ops.begin()), AddTOC); 5307 } else if (CallOpc == PPCISD::CALL && 5308 !callsShareTOCBase(&MF.getFunction(), Callee, DAG.getTarget())) { 5309 // Otherwise insert NOP for non-local calls. 5310 CallOpc = PPCISD::CALL_NOP; 5311 } 5312 } 5313 5314 if (Subtarget.isAIXABI() && isFunctionGlobalAddress(Callee)) { 5315 // On AIX, direct function calls reference the symbol for the function's 5316 // entry point, which is named by inserting a "." before the function's 5317 // C-linkage name. 5318 GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee); 5319 auto &Context = DAG.getMachineFunction().getMMI().getContext(); 5320 MCSymbol *S = Context.getOrCreateSymbol(Twine(".") + 5321 Twine(G->getGlobal()->getName())); 5322 Callee = DAG.getMCSymbol(S, PtrVT); 5323 // Replace the GlobalAddressSDNode Callee with the MCSymbolSDNode. 5324 Ops[1] = Callee; 5325 } 5326 5327 Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops); 5328 InFlag = Chain.getValue(1); 5329 5330 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 5331 DAG.getIntPtrConstant(BytesCalleePops, dl, true), 5332 InFlag, dl); 5333 if (!Ins.empty()) 5334 InFlag = Chain.getValue(1); 5335 5336 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, 5337 Ins, dl, DAG, InVals); 5338 } 5339 5340 SDValue 5341 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 5342 SmallVectorImpl<SDValue> &InVals) const { 5343 SelectionDAG &DAG = CLI.DAG; 5344 SDLoc &dl = CLI.DL; 5345 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 5346 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 5347 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 5348 SDValue Chain = CLI.Chain; 5349 SDValue Callee = CLI.Callee; 5350 bool &isTailCall = CLI.IsTailCall; 5351 CallingConv::ID CallConv = CLI.CallConv; 5352 bool isVarArg = CLI.IsVarArg; 5353 bool isPatchPoint = CLI.IsPatchPoint; 5354 ImmutableCallSite CS = CLI.CS; 5355 5356 if (isTailCall) { 5357 if (Subtarget.useLongCalls() && !(CS && CS.isMustTailCall())) 5358 isTailCall = false; 5359 else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) 5360 isTailCall = 5361 IsEligibleForTailCallOptimization_64SVR4(Callee, CallConv, CS, 5362 isVarArg, Outs, Ins, DAG); 5363 else 5364 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, 5365 Ins, DAG); 5366 if (isTailCall) { 5367 ++NumTailCalls; 5368 if (!getTargetMachine().Options.GuaranteedTailCallOpt) 5369 ++NumSiblingCalls; 5370 5371 assert(isa<GlobalAddressSDNode>(Callee) && 5372 "Callee should be an llvm::Function object."); 5373 LLVM_DEBUG( 5374 const GlobalValue *GV = 5375 cast<GlobalAddressSDNode>(Callee)->getGlobal(); 5376 const unsigned Width = 5377 80 - strlen("TCO caller: ") - strlen(", callee linkage: 0, 0"); 5378 dbgs() << "TCO caller: " 5379 << left_justify(DAG.getMachineFunction().getName(), Width) 5380 << ", callee linkage: " << GV->getVisibility() << ", " 5381 << GV->getLinkage() << "\n"); 5382 } 5383 } 5384 5385 if (!isTailCall && CS && CS.isMustTailCall()) 5386 report_fatal_error("failed to perform tail call elimination on a call " 5387 "site marked musttail"); 5388 5389 // When long calls (i.e. indirect calls) are always used, calls are always 5390 // made via function pointer. If we have a function name, first translate it 5391 // into a pointer. 
5392   if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
5393       !isTailCall)
5394     Callee = LowerGlobalAddress(Callee, DAG);
5395
5396   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
5397     return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
5398                             isTailCall, isPatchPoint, Outs, OutVals, Ins,
5399                             dl, DAG, InVals, CS);
5400
5401   if (Subtarget.isSVR4ABI())
5402     return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg,
5403                             isTailCall, isPatchPoint, Outs, OutVals, Ins,
5404                             dl, DAG, InVals, CS);
5405
5406   if (Subtarget.isAIXABI())
5407     return LowerCall_AIX(Chain, Callee, CallConv, isVarArg,
5408                          isTailCall, isPatchPoint, Outs, OutVals, Ins,
5409                          dl, DAG, InVals, CS);
5410
5411   return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
5412                           isTailCall, isPatchPoint, Outs, OutVals, Ins,
5413                           dl, DAG, InVals, CS);
5414 }
5415
5416 SDValue PPCTargetLowering::LowerCall_32SVR4(
5417     SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
5418     bool isTailCall, bool isPatchPoint,
5419     const SmallVectorImpl<ISD::OutputArg> &Outs,
5420     const SmallVectorImpl<SDValue> &OutVals,
5421     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5422     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5423     ImmutableCallSite CS) const {
5424   // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
5425   // of the 32-bit SVR4 ABI stack frame layout.
5426
5427   assert((CallConv == CallingConv::C ||
5428           CallConv == CallingConv::Cold ||
5429           CallConv == CallingConv::Fast) && "Unknown calling convention!");
5430
5431   unsigned PtrByteSize = 4;
5432
5433   MachineFunction &MF = DAG.getMachineFunction();
5434
5435   // Mark this function as potentially containing a function that contains a
5436   // tail call. As a consequence the frame pointer will be used for dynamic
5437   // stack allocation and for restoring the caller's stack pointer in this
5438   // function's epilogue. This is done because the tail-called function might
5439   // overwrite the value in this function's (MF) stack pointer stack slot 0(SP).
5440   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5441       CallConv == CallingConv::Fast)
5442     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5443
5444   // Count how many bytes are to be pushed on the stack, including the linkage
5445   // area, parameter list area and the part of the local variable space which
5446   // contains copies of aggregates which are passed by value.
5447
5448   // Assign locations to all of the outgoing arguments.
5449   SmallVector<CCValAssign, 16> ArgLocs;
5450   PPCCCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
5451
5452   // Reserve space for the linkage area on the stack.
5453   CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
5454                        PtrByteSize);
5455   if (useSoftFloat())
5456     CCInfo.PreAnalyzeCallOperands(Outs);
5457
5458   if (isVarArg) {
5459     // Handle fixed and variable vector arguments differently.
5460     // Fixed vector arguments go into registers as long as registers are
5461     // available. Variable vector arguments always go into memory.
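// E.g. for a call such as (illustrative C, AltiVec syntax)
//   void f(vector int fixed, ...);
//   f(a, b);   // 'a' is a fixed argument, 'b' is variadic
// 'a' may be assigned a VR by CC_PPC32_SVR4 below, while 'b' is forced to
// memory by CC_PPC32_SVR4_VarArg so the callee's va_arg can find it.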
5462 unsigned NumArgs = Outs.size(); 5463 5464 for (unsigned i = 0; i != NumArgs; ++i) { 5465 MVT ArgVT = Outs[i].VT; 5466 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 5467 bool Result; 5468 5469 if (Outs[i].IsFixed) { 5470 Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, 5471 CCInfo); 5472 } else { 5473 Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full, 5474 ArgFlags, CCInfo); 5475 } 5476 5477 if (Result) { 5478 #ifndef NDEBUG 5479 errs() << "Call operand #" << i << " has unhandled type " 5480 << EVT(ArgVT).getEVTString() << "\n"; 5481 #endif 5482 llvm_unreachable(nullptr); 5483 } 5484 } 5485 } else { 5486 // All arguments are treated the same. 5487 CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4); 5488 } 5489 CCInfo.clearWasPPCF128(); 5490 5491 // Assign locations to all of the outgoing aggregate by value arguments. 5492 SmallVector<CCValAssign, 16> ByValArgLocs; 5493 CCState CCByValInfo(CallConv, isVarArg, MF, ByValArgLocs, *DAG.getContext()); 5494 5495 // Reserve stack space for the allocations in CCInfo. 5496 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 5497 5498 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal); 5499 5500 // Size of the linkage area, parameter list area and the part of the local 5501 // space variable where copies of aggregates which are passed by value are 5502 // stored. 5503 unsigned NumBytes = CCByValInfo.getNextStackOffset(); 5504 5505 // Calculate by how many bytes the stack has to be adjusted in case of tail 5506 // call optimization. 5507 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 5508 5509 // Adjust the stack pointer for the new arguments... 5510 // These operations are automatically eliminated by the prolog/epilog pass 5511 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl); 5512 SDValue CallSeqStart = Chain; 5513 5514 // Load the return address and frame pointer so it can be moved somewhere else 5515 // later. 5516 SDValue LROp, FPOp; 5517 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl); 5518 5519 // Set up a copy of the stack pointer for use loading and storing any 5520 // arguments that may not fit in the registers available for argument 5521 // passing. 5522 SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 5523 5524 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 5525 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 5526 SmallVector<SDValue, 8> MemOpChains; 5527 5528 bool seenFloatArg = false; 5529 // Walk the register/memloc assignments, inserting copies/loads. 5530 // i - Tracks the index into the list of registers allocated for the call 5531 // RealArgIdx - Tracks the index into the list of actual function arguments 5532 // j - Tracks the index into the list of byval arguments 5533 for (unsigned i = 0, RealArgIdx = 0, j = 0, e = ArgLocs.size(); 5534 i != e; 5535 ++i, ++RealArgIdx) { 5536 CCValAssign &VA = ArgLocs[i]; 5537 SDValue Arg = OutVals[RealArgIdx]; 5538 ISD::ArgFlagsTy Flags = Outs[RealArgIdx].Flags; 5539 5540 if (Flags.isByVal()) { 5541 // Argument is an aggregate which is passed by value, thus we need to 5542 // create a copy of it in the local variable space of the current stack 5543 // frame (which is the stack frame of the caller) and pass the address of 5544 // this copy to the callee. 
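// In C terms (illustrative): for
//   struct S s; g(s);
// the caller materializes a temporary copy of 's' in its own frame, and the
// callee only ever sees the address of that copy, so writes in the callee
// cannot clobber the caller's 's'.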
5545       assert((j < ByValArgLocs.size()) && "Index out of bounds!");
5546       CCValAssign &ByValVA = ByValArgLocs[j++];
5547       assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
5548
5549       // Memory reserved in the local variable space of the caller's stack frame.
5550       unsigned LocMemOffset = ByValVA.getLocMemOffset();
5551
5552       SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5553       PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5554                            StackPtr, PtrOff);
5555
5556       // Create a copy of the argument in the local area of the current
5557       // stack frame.
5558       SDValue MemcpyCall =
5559         CreateCopyOfByValArgument(Arg, PtrOff,
5560                                   CallSeqStart.getNode()->getOperand(0),
5561                                   Flags, DAG, dl);
5562
5563       // This must go outside the CALLSEQ_START..END.
5564       SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0,
5565                                                      SDLoc(MemcpyCall));
5566       DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5567                              NewCallSeqStart.getNode());
5568       Chain = CallSeqStart = NewCallSeqStart;
5569
5570       // Pass the address of the aggregate copy on the stack either in a
5571       // physical register or in the parameter list area of the current stack
5572       // frame to the callee.
5573       Arg = PtrOff;
5574     }
5575
5576     // When useCRBits() is true, there can be i1 arguments.
5577     // It is because getRegisterType(MVT::i1) => MVT::i1,
5578     // and for other integer types getRegisterType() => MVT::i32.
5579     // Extend i1 and ensure callee will get i32.
5580     if (Arg.getValueType() == MVT::i1)
5581       Arg = DAG.getNode(Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
5582                         dl, MVT::i32, Arg);
5583
5584     if (VA.isRegLoc()) {
5585       seenFloatArg |= VA.getLocVT().isFloatingPoint();
5586       // Put argument in a physical register.
5587       if (Subtarget.hasSPE() && Arg.getValueType() == MVT::f64) {
5588         bool IsLE = Subtarget.isLittleEndian();
5589         SDValue SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5590                         DAG.getIntPtrConstant(IsLE ? 0 : 1, dl));
5591         RegsToPass.push_back(std::make_pair(VA.getLocReg(), SVal.getValue(0)));
5592         SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5593                            DAG.getIntPtrConstant(IsLE ? 1 : 0, dl));
5594         RegsToPass.push_back(std::make_pair(ArgLocs[++i].getLocReg(),
5595                                             SVal.getValue(0)));
5596       } else
5597         RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
5598     } else {
5599       // Put argument in the parameter list area of the current stack frame.
5600       assert(VA.isMemLoc());
5601       unsigned LocMemOffset = VA.getLocMemOffset();
5602
5603       if (!isTailCall) {
5604         SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5605         PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5606                              StackPtr, PtrOff);
5607
5608         MemOpChains.push_back(
5609             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
5610       } else {
5611         // Calculate and remember argument location.
5612         CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
5613                                  TailCallArguments);
5614       }
5615     }
5616   }
5617
5618   if (!MemOpChains.empty())
5619     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
5620
5621   // Build a sequence of copy-to-reg nodes chained together with token chain
5622   // and flag operands which copy the outgoing args into the appropriate regs.
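// Sketch of the glued sequence this produces (two register arguments
// assumed):
//   t1: ch,glue = CopyToReg t0, R3, a
//   t2: ch,glue = CopyToReg t1, R4, b, t1:1
//   call ..., t2:1
// The glue values keep the copies and the call adjacent during scheduling.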
5623   SDValue InFlag;
5624   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
5625     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
5626                              RegsToPass[i].second, InFlag);
5627     InFlag = Chain.getValue(1);
5628   }
5629
5630   // Set CR bit 6 to true if this is a vararg call with floating args passed in
5631   // registers.
5632   if (isVarArg) {
5633     SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
5634     SDValue Ops[] = { Chain, InFlag };
5635
5636     Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
5637                         dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));
5638
5639     InFlag = Chain.getValue(1);
5640   }
5641
5642   if (isTailCall)
5643     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
5644                     TailCallArguments);
5645
5646   return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
5647                     /* unused except on PPC64 ELFv1 */ false, DAG,
5648                     RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
5649                     NumBytes, Ins, InVals, CS);
5650 }
5651
5652 // Copy an argument into memory, being careful to do this outside the
5653 // call sequence for the call to which the argument belongs.
5654 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
5655     SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
5656     SelectionDAG &DAG, const SDLoc &dl) const {
5657   SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
5658                         CallSeqStart.getNode()->getOperand(0),
5659                         Flags, DAG, dl);
5660   // The MEMCPY must go outside the CALLSEQ_START..END.
5661   int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
5662   SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
5663                                                  SDLoc(MemcpyCall));
5664   DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5665                          NewCallSeqStart.getNode());
5666   return NewCallSeqStart;
5667 }
5668
5669 SDValue PPCTargetLowering::LowerCall_64SVR4(
5670     SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
5671     bool isTailCall, bool isPatchPoint,
5672     const SmallVectorImpl<ISD::OutputArg> &Outs,
5673     const SmallVectorImpl<SDValue> &OutVals,
5674     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5675     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5676     ImmutableCallSite CS) const {
5677   bool isELFv2ABI = Subtarget.isELFv2ABI();
5678   bool isLittleEndian = Subtarget.isLittleEndian();
5679   unsigned NumOps = Outs.size();
5680   bool hasNest = false;
5681   bool IsSibCall = false;
5682
5683   EVT PtrVT = getPointerTy(DAG.getDataLayout());
5684   unsigned PtrByteSize = 8;
5685
5686   MachineFunction &MF = DAG.getMachineFunction();
5687
5688   if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
5689     IsSibCall = true;
5690
5691   // Mark this function as potentially containing a function that contains a
5692   // tail call. As a consequence the frame pointer will be used for dynamic
5693   // stack allocation and for restoring the caller's stack pointer in this
5694   // function's epilogue. This is done because the tail-called function might
5695   // overwrite the value in this function's (MF) stack pointer stack slot 0(SP).
5696   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5697       CallConv == CallingConv::Fast)
5698     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5699
5700   assert(!(CallConv == CallingConv::Fast && isVarArg) &&
5701          "fastcc not supported on varargs functions");
5702
5703   // Count how many bytes are to be pushed on the stack, including the linkage
5704   // area, and parameter passing area.
On ELFv1, the linkage area is 48 bytes 5705 // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage 5706 // area is 32 bytes reserved space for [SP][CR][LR][TOC]. 5707 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 5708 unsigned NumBytes = LinkageSize; 5709 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 5710 unsigned &QFPR_idx = FPR_idx; 5711 5712 static const MCPhysReg GPR[] = { 5713 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 5714 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 5715 }; 5716 static const MCPhysReg VR[] = { 5717 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 5718 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 5719 }; 5720 5721 const unsigned NumGPRs = array_lengthof(GPR); 5722 const unsigned NumFPRs = useSoftFloat() ? 0 : 13; 5723 const unsigned NumVRs = array_lengthof(VR); 5724 const unsigned NumQFPRs = NumFPRs; 5725 5726 // On ELFv2, we can avoid allocating the parameter area if all the arguments 5727 // can be passed to the callee in registers. 5728 // For the fast calling convention, there is another check below. 5729 // Note: We should keep consistent with LowerFormalArguments_64SVR4() 5730 bool HasParameterArea = !isELFv2ABI || isVarArg || CallConv == CallingConv::Fast; 5731 if (!HasParameterArea) { 5732 unsigned ParamAreaSize = NumGPRs * PtrByteSize; 5733 unsigned AvailableFPRs = NumFPRs; 5734 unsigned AvailableVRs = NumVRs; 5735 unsigned NumBytesTmp = NumBytes; 5736 for (unsigned i = 0; i != NumOps; ++i) { 5737 if (Outs[i].Flags.isNest()) continue; 5738 if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags, 5739 PtrByteSize, LinkageSize, ParamAreaSize, 5740 NumBytesTmp, AvailableFPRs, AvailableVRs, 5741 Subtarget.hasQPX())) 5742 HasParameterArea = true; 5743 } 5744 } 5745 5746 // When using the fast calling convention, we don't provide backing for 5747 // arguments that will be in registers. 5748 unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0; 5749 5750 // Avoid allocating parameter area for fastcc functions if all the arguments 5751 // can be passed in the registers. 5752 if (CallConv == CallingConv::Fast) 5753 HasParameterArea = false; 5754 5755 // Add up all the space actually used. 5756 for (unsigned i = 0; i != NumOps; ++i) { 5757 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5758 EVT ArgVT = Outs[i].VT; 5759 EVT OrigVT = Outs[i].ArgVT; 5760 5761 if (Flags.isNest()) 5762 continue; 5763 5764 if (CallConv == CallingConv::Fast) { 5765 if (Flags.isByVal()) { 5766 NumGPRsUsed += (Flags.getByValSize()+7)/8; 5767 if (NumGPRsUsed > NumGPRs) 5768 HasParameterArea = true; 5769 } else { 5770 switch (ArgVT.getSimpleVT().SimpleTy) { 5771 default: llvm_unreachable("Unexpected ValueType for argument!"); 5772 case MVT::i1: 5773 case MVT::i32: 5774 case MVT::i64: 5775 if (++NumGPRsUsed <= NumGPRs) 5776 continue; 5777 break; 5778 case MVT::v4i32: 5779 case MVT::v8i16: 5780 case MVT::v16i8: 5781 case MVT::v2f64: 5782 case MVT::v2i64: 5783 case MVT::v1i128: 5784 case MVT::f128: 5785 if (++NumVRsUsed <= NumVRs) 5786 continue; 5787 break; 5788 case MVT::v4f32: 5789 // When using QPX, this is handled like a FP register, otherwise, it 5790 // is an Altivec register. 
          if (Subtarget.hasQPX()) {
            if (++NumFPRsUsed <= NumFPRs)
              continue;
          } else {
            if (++NumVRsUsed <= NumVRs)
              continue;
          }
          break;
        case MVT::f32:
        case MVT::f64:
        case MVT::v4f64: // QPX
        case MVT::v4i1:  // QPX
          if (++NumFPRsUsed <= NumFPRs)
            continue;
          break;
        }
        HasParameterArea = true;
      }
    }

    /* Respect alignment of argument on the stack. */
    unsigned Align =
      CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
    NumBytes = ((NumBytes + Align - 1) / Align) * Align;

    NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
    if (Flags.isInConsecutiveRegsLast())
      NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
  }

  unsigned NumBytesActuallyUsed = NumBytes;

  // In the old ELFv1 ABI, the prolog code of the callee may store up to 8
  // GPR argument registers to the stack, allowing va_start to index over
  // them in memory if it is varargs.
  // Because we cannot tell if this is needed on the caller side, we have to
  // conservatively assume that it is needed. As such, make sure we have at
  // least enough stack space for the caller to store the 8 GPRs.
  // In the ELFv2 ABI, we allocate the parameter area iff a callee
  // really requires memory operands, e.g. a vararg function.
  if (HasParameterArea)
    NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
  else
    NumBytes = LinkageSize;

  // Tail call needs the stack to be aligned.
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);

  int SPDiff = 0;

  // Calculate by how many bytes the stack has to be adjusted in case of tail
  // call optimization.
  if (!IsSibCall)
    SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);

  // To protect arguments on the stack from being clobbered in a tail call,
  // force all the loads to happen before doing any other lowering.
  if (isTailCall)
    Chain = DAG.getStackArgumentTokenFactor(Chain);

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
  SDValue CallSeqStart = Chain;

  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
  SDValue LROp, FPOp;
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);

  // Figure out which arguments are going to go in registers, and which in
  // memory. Also, if this is a vararg function, floating point operations
  // must be stored to our stack, and loaded into integer regs as well, if
  // any integer regs are available for argument passing.
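  // Illustrative sketch (not taken from the ABI document): for an ELFv2
  // callee such as
  //   void callee(long a, double b, vector int c);
  // each argument fits in a register (a -> X3, b -> F1, c -> V2), so the
  // loop below emits only register copies and never touches the parameter
  // save area. For a variadic callee, the float and vector arguments would
  // additionally be staged through stack slots starting at
  // ArgOffset == LinkageSize.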
  unsigned ArgOffset = LinkageSize;

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<TailCallArgumentInfo, 8> TailCallArguments;

  SmallVector<SDValue, 8> MemOpChains;
  for (unsigned i = 0; i != NumOps; ++i) {
    SDValue Arg = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    EVT ArgVT = Outs[i].VT;
    EVT OrigVT = Outs[i].ArgVT;

    // PtrOff will be used to store the current argument to the stack if a
    // register cannot be found for it.
    SDValue PtrOff;

    // We re-align the argument offset for each argument, except when using the
    // fast calling convention, when we need to make sure we do that only when
    // we'll actually use a stack slot.
    auto ComputePtrOff = [&]() {
      /* Respect alignment of argument on the stack. */
      unsigned Align =
        CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
      ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;

      PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());

      PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
    };

    if (CallConv != CallingConv::Fast) {
      ComputePtrOff();

      /* Compute GPR index associated with argument offset. */
      GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
      GPR_idx = std::min(GPR_idx, NumGPRs);
    }

    // Promote integers to 64-bit values.
    if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
      // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
      unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
      Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
    }

    // FIXME: memcpy is used way more than necessary. Correctness first.
    // Note: "by value" is code for passing a structure by value, not
    // basic types.
    if (Flags.isByVal()) {
      // Note: Size includes alignment padding, so
      //   struct x { short a; char b; }
      // will have Size = 4. With #pragma pack(1), it will have Size = 3.
      // These are the proper values we need for right-justifying the
      // aggregate in a parameter register.
      unsigned Size = Flags.getByValSize();

      // An empty aggregate parameter takes up no storage and no
      // registers.
      if (Size == 0)
        continue;

      if (CallConv == CallingConv::Fast)
        ComputePtrOff();

      // All aggregates smaller than 8 bytes must be passed right-justified.
      if (Size==1 || Size==2 || Size==4) {
        EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
        if (GPR_idx != NumGPRs) {
          SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
                                        MachinePointerInfo(), VT);
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));

          ArgOffset += PtrByteSize;
          continue;
        }
      }

      if (GPR_idx == NumGPRs && Size < 8) {
        SDValue AddPtr = PtrOff;
        if (!isLittleEndian) {
          SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
                                          PtrOff.getValueType());
          AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
        }
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
                                                          CallSeqStart,
                                                          Flags, DAG, dl);
        ArgOffset += PtrByteSize;
        continue;
      }
      // Copy entire object into memory. There are cases where gcc-generated
      // code assumes it is there, even if it could be put entirely into
      // registers. (This is not what the doc says.)

      // FIXME: The above statement is likely due to a misunderstanding of the
      // documents. All arguments must be copied into the parameter area BY
      // THE CALLEE in the event that the callee takes the address of any
      // formal argument. That has not yet been implemented. However, it is
      // reasonable to use the stack area as a staging area for the register
      // load.

      // Skip this for small aggregates, as we will use the same slot for a
      // right-justified copy, below.
      if (Size >= 8)
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
                                                          CallSeqStart,
                                                          Flags, DAG, dl);

      // When a register is available, pass a small aggregate right-justified.
      if (Size < 8 && GPR_idx != NumGPRs) {
        // The easiest way to get this right-justified in a register
        // is to copy the structure into the rightmost portion of a
        // local variable slot, then load the whole slot into the
        // register.
        // FIXME: The memcpy seems to produce pretty awful code for
        // small aggregates, particularly for packed ones.
        // FIXME: It would be preferable to use the slot in the
        // parameter save area instead of a new local variable.
        SDValue AddPtr = PtrOff;
        if (!isLittleEndian) {
          SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType());
          AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
        }
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
                                                          CallSeqStart,
                                                          Flags, DAG, dl);

        // Load the slot into the register.
        SDValue Load =
            DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo());
        MemOpChains.push_back(Load.getValue(1));
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));

        // Done with this argument.
        ArgOffset += PtrByteSize;
        continue;
      }

      // For aggregates larger than PtrByteSize, copy the pieces of the
      // object that fit into registers from the parameter save area.
      for (unsigned j=0; j<Size; j+=PtrByteSize) {
        SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
        SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
        if (GPR_idx != NumGPRs) {
          SDValue Load =
              DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
          break;
        }
      }
      continue;
    }

    switch (Arg.getSimpleValueType().SimpleTy) {
    default: llvm_unreachable("Unexpected ValueType for argument!");
    case MVT::i1:
    case MVT::i32:
    case MVT::i64:
      if (Flags.isNest()) {
        // The 'nest' parameter, if any, is passed in R11.
        RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
        hasNest = true;
        break;
      }

      // These can be scalar arguments or elements of an integer array type
      // passed directly. Clang may use those instead of "byval" aggregate
      // types to avoid forcing arguments to memory unnecessarily.
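      // For example (an illustrative case, not from this file), a
      // homogeneous aggregate such as
      //   struct S { long a, b, c; };
      // may reach here as three consecutive i64 arguments marked
      // InConsecutiveRegs, which land in GPRs below exactly as three
      // independent scalars would.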
      if (GPR_idx != NumGPRs) {
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
      } else {
        if (CallConv == CallingConv::Fast)
          ComputePtrOff();

        assert(HasParameterArea &&
               "Parameter area must exist to pass an argument in memory.");
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         true, isTailCall, false, MemOpChains,
                         TailCallArguments, dl);
        if (CallConv == CallingConv::Fast)
          ArgOffset += PtrByteSize;
      }
      if (CallConv != CallingConv::Fast)
        ArgOffset += PtrByteSize;
      break;
    case MVT::f32:
    case MVT::f64: {
      // These can be scalar arguments or elements of a float array type
      // passed directly. The latter are used to implement ELFv2 homogeneous
      // float aggregates.

      // Named arguments go into FPRs first, and once they overflow, the
      // remaining arguments go into GPRs and then the parameter save area.
      // Unnamed arguments for vararg functions always go to GPRs and
      // then the parameter save area. For now, put all arguments to vararg
      // routines always in both locations (FPR *and* GPR or stack slot).
      bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs;
      bool NeededLoad = false;

      // First load the argument into the next available FPR.
      if (FPR_idx != NumFPRs)
        RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));

      // Next, load the argument into GPR or stack slot if needed.
      if (!NeedGPROrStack)
        ;
      else if (GPR_idx != NumGPRs && CallConv != CallingConv::Fast) {
        // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
        // once we support fp <-> gpr moves.

        // In the non-vararg case, this can only ever happen in the
        // presence of f32 array types, since otherwise we never run
        // out of FPRs before running out of GPRs.
        SDValue ArgVal;

        // Double values are always passed in a single GPR.
        if (Arg.getValueType() != MVT::f32) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);

        // Non-array float values are extended and passed in a GPR.
        } else if (!Flags.isInConsecutiveRegs()) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);

        // If we have an array of floats, we collect every odd element
        // together with its predecessor into one GPR.
        } else if (ArgOffset % PtrByteSize != 0) {
          SDValue Lo, Hi;
          Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
          Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          if (!isLittleEndian)
            std::swap(Lo, Hi);
          ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);

        // The final element, if even, goes into the first half of a GPR.
        } else if (Flags.isInConsecutiveRegsLast()) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
          if (!isLittleEndian)
            ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
                                 DAG.getConstant(32, dl, MVT::i32));

        // Non-final even elements are skipped; they will be handled
        // together with the subsequent argument on the next go-around.
        } else
          ArgVal = SDValue();

        if (ArgVal.getNode())
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
      } else {
        if (CallConv == CallingConv::Fast)
          ComputePtrOff();

        // Single-precision floating-point values are mapped to the
        // second (rightmost) word of the stack doubleword.
        if (Arg.getValueType() == MVT::f32 &&
            !isLittleEndian && !Flags.isInConsecutiveRegs()) {
          SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
          PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
        }

        assert(HasParameterArea &&
               "Parameter area must exist to pass an argument in memory.");
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         true, isTailCall, false, MemOpChains,
                         TailCallArguments, dl);

        NeededLoad = true;
      }
      // When passing an array of floats, the array occupies consecutive
      // space in the argument area; only round up to the next doubleword
      // at the end of the array. Otherwise, each float takes 8 bytes.
      if (CallConv != CallingConv::Fast || NeededLoad) {
        ArgOffset += (Arg.getValueType() == MVT::f32 &&
                      Flags.isInConsecutiveRegs()) ? 4 : 8;
        if (Flags.isInConsecutiveRegsLast())
          ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      }
      break;
    }
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
    case MVT::v2f64:
    case MVT::v2i64:
    case MVT::v1i128:
    case MVT::f128:
      if (!Subtarget.hasQPX()) {
        // These can be scalar arguments or elements of a vector array type
        // passed directly. The latter are used to implement ELFv2 homogeneous
        // vector aggregates.

        // For a varargs call, named arguments go into VRs or on the stack as
        // usual; unnamed arguments always go to the stack or the corresponding
        // GPRs when within range. For now, we always put the value in both
        // locations (or even all three).
        if (isVarArg) {
          assert(HasParameterArea &&
                 "Parameter area must exist if we have a varargs call.");
          // We could elide this store in the case where the object fits
          // entirely in R registers. Maybe later.
          SDValue Store =
              DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
          MemOpChains.push_back(Store);
          if (VR_idx != NumVRs) {
            SDValue Load =
                DAG.getLoad(MVT::v4f32, dl, Store, PtrOff,
                            MachinePointerInfo());
            MemOpChains.push_back(Load.getValue(1));
            RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
          }
          ArgOffset += 16;
          for (unsigned i=0; i<16; i+=PtrByteSize) {
            if (GPR_idx == NumGPRs)
              break;
            SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
                                     DAG.getConstant(i, dl, PtrVT));
            SDValue Load =
                DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
            MemOpChains.push_back(Load.getValue(1));
            RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          }
          break;
        }

        // Non-varargs Altivec params go into VRs or on the stack.
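        // E.g. (illustrative) an ELFv2 homogeneous vector aggregate
        //   struct V { vector int x, y; };
        // arrives here as two v4i32 values; the first free VRs are used
        // (V2 and V3 for the first such argument), and only once all 12 VRs
        // are exhausted does the value fall through to LowerMemOpCallTo.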
        if (VR_idx != NumVRs) {
          RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
        } else {
          if (CallConv == CallingConv::Fast)
            ComputePtrOff();

          assert(HasParameterArea &&
                 "Parameter area must exist to pass an argument in memory.");
          LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                           true, isTailCall, true, MemOpChains,
                           TailCallArguments, dl);
          if (CallConv == CallingConv::Fast)
            ArgOffset += 16;
        }

        if (CallConv != CallingConv::Fast)
          ArgOffset += 16;
        break;
      } // not QPX

      assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 &&
             "Invalid QPX parameter type");

      LLVM_FALLTHROUGH;
    case MVT::v4f64:
    case MVT::v4i1: {
      bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32;
      if (isVarArg) {
        assert(HasParameterArea &&
               "Parameter area must exist if we have a varargs call.");
        // We could elide this store in the case where the object fits
        // entirely in R registers. Maybe later.
        SDValue Store =
            DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
        MemOpChains.push_back(Store);
        if (QFPR_idx != NumQFPRs) {
          SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, Store,
                                     PtrOff, MachinePointerInfo());
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load));
        }
        ArgOffset += (IsF32 ? 16 : 32);
        for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) {
          if (GPR_idx == NumGPRs)
            break;
          SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
                                   DAG.getConstant(i, dl, PtrVT));
          SDValue Load =
              DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
        }
        break;
      }

      // Non-varargs QPX params go into registers or on the stack.
      if (QFPR_idx != NumQFPRs) {
        RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg));
      } else {
        if (CallConv == CallingConv::Fast)
          ComputePtrOff();

        assert(HasParameterArea &&
               "Parameter area must exist to pass an argument in memory.");
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         true, isTailCall, true, MemOpChains,
                         TailCallArguments, dl);
        if (CallConv == CallingConv::Fast)
          ArgOffset += (IsF32 ? 16 : 32);
      }

      if (CallConv != CallingConv::Fast)
        ArgOffset += (IsF32 ? 16 : 32);
      break;
    }
    }
  }

  assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
         "mismatch in size of parameter area");
  (void)NumBytesActuallyUsed;

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Check if this is an indirect call (MTCTR/BCTRL).
  // See PrepareCall() for more information about calls through function
  // pointers in the 64-bit SVR4 ABI.
  if (!isTailCall && !isPatchPoint &&
      !isFunctionGlobalAddress(Callee) &&
      !isa<ExternalSymbolSDNode>(Callee)) {
    // Load r2 into a virtual register and store it to the TOC save area.
    setUsesTOCBasePtr(DAG);
    SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
    // TOC save area offset.
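    // getTOCSaveOffset() returns the linkage-area slot reserved for the TOC
    // pointer: 40(SP) under ELFv1 and 24(SP) under ELFv2 (offsets shown for
    // illustration; the frame lowering code is authoritative).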
    unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
    SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
    SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
    Chain = DAG.getStore(
        Val.getValue(1), dl, Val, AddPtr,
        MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
    // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
    // This does not mean the MTCTR instruction must use R12; it's easier
    // to model this as an extra parameter, so do that.
    if (isELFv2ABI && !isPatchPoint)
      RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
  }

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  if (isTailCall && !IsSibCall)
    PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
                    TailCallArguments);

  return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, hasNest,
                    DAG, RegsToPass, InFlag, Chain, CallSeqStart, Callee,
                    SPDiff, NumBytes, Ins, InVals, CS);
}

SDValue PPCTargetLowering::LowerCall_Darwin(
    SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
    bool isTailCall, bool isPatchPoint,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    ImmutableCallSite CS) const {
  unsigned NumOps = Outs.size();

  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;
  unsigned PtrByteSize = isPPC64 ? 8 : 4;

  MachineFunction &MF = DAG.getMachineFunction();

  // Mark this function as potentially containing a function that contains a
  // tail call. As a consequence the frame pointer will be used for dynamic
  // stack allocation and for restoring the caller's stack pointer in this
  // function's epilogue. This is done because a tail-called function might
  // overwrite the value in this function's (MF) stack pointer stack slot
  // 0(SP).
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area, and parameter passing area. We start with 24/48 bytes, which is
  // prereserved space for [SP][CR][LR][3 x unused].
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  unsigned NumBytes = LinkageSize;

  // Add up all the space actually used.
  // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
  // they all go in registers, but we must reserve stack space for them for
  // possible use by the callee. In varargs or 64-bit calls, parameters are
  // assigned stack space in order, with padding so Altivec parameters are
  // 16-byte aligned.
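  // As a sketch (illustrative only): a 32-bit non-varargs call to
  //   f(int a, vector int v)
  // reserves 4 bytes for a in the ordered area, then rounds NumBytes up to a
  // 16-byte boundary and adds 16 bytes for v at the end via the
  // nAltivecParamsAtEnd accounting below.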
  unsigned nAltivecParamsAtEnd = 0;
  for (unsigned i = 0; i != NumOps; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    EVT ArgVT = Outs[i].VT;
    // Varargs Altivec parameters are padded to a 16 byte boundary.
    if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
        ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
        ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
      if (!isVarArg && !isPPC64) {
        // Non-varargs Altivec parameters go after all the non-Altivec
        // parameters; handle those later so we know how much padding we need.
        nAltivecParamsAtEnd++;
        continue;
      }
      // Varargs and 64-bit Altivec parameters are padded to a 16 byte
      // boundary.
      NumBytes = ((NumBytes+15)/16)*16;
    }
    NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
  }

  // Allow for Altivec parameters at the end, if needed.
  if (nAltivecParamsAtEnd) {
    NumBytes = ((NumBytes+15)/16)*16;
    NumBytes += 16*nAltivecParamsAtEnd;
  }

  // The prolog code of the callee may store up to 8 GPR argument registers to
  // the stack, allowing va_start to index over them in memory if it is
  // varargs.
  // Because we cannot tell if this is needed on the caller side, we have to
  // conservatively assume that it is needed. As such, make sure we have at
  // least enough stack space for the caller to store the 8 GPRs.
  NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);

  // Tail call needs the stack to be aligned.
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);

  // Calculate by how many bytes the stack has to be adjusted in case of tail
  // call optimization.
  int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);

  // To protect arguments on the stack from being clobbered in a tail call,
  // force all the loads to happen before doing any other lowering.
  if (isTailCall)
    Chain = DAG.getStackArgumentTokenFactor(Chain);

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
  SDValue CallSeqStart = Chain;

  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
  SDValue LROp, FPOp;
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr;
  if (isPPC64)
    StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
  else
    StackPtr = DAG.getRegister(PPC::R1, MVT::i32);

  // Figure out which arguments are going to go in registers, and which in
  // memory. Also, if this is a vararg function, floating point operations
  // must be stored to our stack, and loaded into integer regs as well, if
  // any integer regs are available for argument passing.
  unsigned ArgOffset = LinkageSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;

  static const MCPhysReg GPR_32[] = { // 32-bit registers.
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  static const MCPhysReg GPR_64[] = { // 64-bit registers.
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };
  const unsigned NumGPRs = array_lengthof(GPR_32);
  const unsigned NumFPRs = 13;
  const unsigned NumVRs = array_lengthof(VR);

  const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<TailCallArgumentInfo, 8> TailCallArguments;

  SmallVector<SDValue, 8> MemOpChains;
  for (unsigned i = 0; i != NumOps; ++i) {
    SDValue Arg = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;

    // PtrOff will be used to store the current argument to the stack if a
    // register cannot be found for it.
    SDValue PtrOff;

    PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());

    PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);

    // On PPC64, promote integers to 64-bit values.
    if (isPPC64 && Arg.getValueType() == MVT::i32) {
      // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
      unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
      Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
    }

    // FIXME: memcpy is used way more than necessary. Correctness first.
    // Note: "by value" is code for passing a structure by value, not
    // basic types.
    if (Flags.isByVal()) {
      unsigned Size = Flags.getByValSize();
      // Very small objects are passed right-justified. Everything else is
      // passed left-justified.
      if (Size==1 || Size==2) {
        EVT VT = (Size==1) ? MVT::i8 : MVT::i16;
        if (GPR_idx != NumGPRs) {
          SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
                                        MachinePointerInfo(), VT);
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));

          ArgOffset += PtrByteSize;
        } else {
          SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
                                          PtrOff.getValueType());
          SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
          Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
                                                            CallSeqStart,
                                                            Flags, DAG, dl);
          ArgOffset += PtrByteSize;
        }
        continue;
      }
      // Copy entire object into memory. There are cases where gcc-generated
      // code assumes it is there, even if it could be put entirely into
      // registers. (This is not what the doc says.)
      Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
                                                        CallSeqStart,
                                                        Flags, DAG, dl);

      // For small aggregates (Darwin only) and aggregates >= PtrByteSize,
      // copy the pieces of the object that fit into registers from the
      // parameter save area.
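      // E.g. (illustrative) a 12-byte struct on 32-bit Darwin is split into
      // three 4-byte pieces by the loop below; with GPRs available they are
      // reloaded into three consecutive argument registers, otherwise
      // ArgOffset simply skips over the remainder of the object in the save
      // area.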
      for (unsigned j=0; j<Size; j+=PtrByteSize) {
        SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
        SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
        if (GPR_idx != NumGPRs) {
          SDValue Load =
              DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
          break;
        }
      }
      continue;
    }

    switch (Arg.getSimpleValueType().SimpleTy) {
    default: llvm_unreachable("Unexpected ValueType for argument!");
    case MVT::i1:
    case MVT::i32:
    case MVT::i64:
      if (GPR_idx != NumGPRs) {
        if (Arg.getValueType() == MVT::i1)
          Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg);

        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
      } else {
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         isPPC64, isTailCall, false, MemOpChains,
                         TailCallArguments, dl);
      }
      ArgOffset += PtrByteSize;
      break;
    case MVT::f32:
    case MVT::f64:
      if (FPR_idx != NumFPRs) {
        RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));

        if (isVarArg) {
          SDValue Store =
              DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
          MemOpChains.push_back(Store);

          // Float varargs are always shadowed in available integer registers.
          if (GPR_idx != NumGPRs) {
            SDValue Load =
                DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
            MemOpChains.push_back(Load.getValue(1));
            RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          }
          if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
            SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
            PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
            SDValue Load =
                DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
            MemOpChains.push_back(Load.getValue(1));
            RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          }
        } else {
          // If we have any FPRs remaining, we may also have GPRs remaining.
          // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
          // GPRs.
          if (GPR_idx != NumGPRs)
            ++GPR_idx;
          if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
              !isPPC64)  // PPC64 has 64-bit GPR's obviously :)
            ++GPR_idx;
        }
      } else
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         isPPC64, isTailCall, false, MemOpChains,
                         TailCallArguments, dl);
      if (isPPC64)
        ArgOffset += 8;
      else
        ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
      break;
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
      if (isVarArg) {
        // These go aligned on the stack, or in the corresponding R registers
        // when within range. The Darwin PPC ABI doc claims they also go in
        // V registers; in fact gcc does this only for arguments that are
        // prototyped, not for those that match the ellipsis (...). We do it
        // for all arguments, which seems to work.
        while (ArgOffset % 16 != 0) {
          ArgOffset += PtrByteSize;
          if (GPR_idx != NumGPRs)
            GPR_idx++;
        }
        // We could elide this store in the case where the object fits
        // entirely in R registers. Maybe later.
        PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
                             DAG.getConstant(ArgOffset, dl, PtrVT));
        SDValue Store =
            DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
        MemOpChains.push_back(Store);
        if (VR_idx != NumVRs) {
          SDValue Load =
              DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
        }
        ArgOffset += 16;
        for (unsigned i=0; i<16; i+=PtrByteSize) {
          if (GPR_idx == NumGPRs)
            break;
          SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
                                   DAG.getConstant(i, dl, PtrVT));
          SDValue Load =
              DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
        }
        break;
      }

      // Non-varargs Altivec params generally go in registers, but have
      // stack space allocated at the end.
      if (VR_idx != NumVRs) {
        // Doesn't have GPR space allocated.
        RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
      } else if (nAltivecParamsAtEnd==0) {
        // We are emitting Altivec params in order.
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         isPPC64, isTailCall, true, MemOpChains,
                         TailCallArguments, dl);
        ArgOffset += 16;
      }
      break;
    }
  }
  // If all Altivec parameters fit in registers, as they usually do,
  // they get stack space following the non-Altivec parameters. We
  // don't track this here because nobody below needs it.
  // If there are more Altivec parameters than fit in registers, emit
  // the stores here.
  if (!isVarArg && nAltivecParamsAtEnd > NumVRs) {
    unsigned j = 0;
    // Offset is aligned; skip 1st 12 params which go in V registers.
    ArgOffset = ((ArgOffset+15)/16)*16;
    ArgOffset += 12*16;
    for (unsigned i = 0; i != NumOps; ++i) {
      SDValue Arg = OutVals[i];
      EVT ArgType = Outs[i].VT;
      if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
          ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
        if (++j > NumVRs) {
          SDValue PtrOff;
          // We are emitting Altivec params in order.
          LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                           isPPC64, isTailCall, true, MemOpChains,
                           TailCallArguments, dl);
          ArgOffset += 16;
        }
      }
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // On Darwin, R12 must contain the address of an indirect callee. This does
  // not mean the MTCTR instruction must use R12; it's easier to model this as
  // an extra parameter, so do that.
  if (!isTailCall &&
      !isFunctionGlobalAddress(Callee) &&
      !isa<ExternalSymbolSDNode>(Callee) &&
      !isBLACompatibleAddress(Callee, DAG))
    RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 :
                                                   PPC::R12), Callee));

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
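  // Each CopyToReg consumes the glue produced by the previous one, which
  // keeps the register copies contiguous in the final schedule. Sketch of
  // the resulting chain (illustrative):
  //   ch, glue = CopyToReg ch, R3, arg0
  //   ch, glue = CopyToReg ch, R4, arg1, glue
  //   ...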
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  if (isTailCall)
    PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
                    TailCallArguments);

  return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
                    /* unused except on PPC64 ELFv1 */ false, DAG,
                    RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
                    NumBytes, Ins, InVals, CS);
}

static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
                   CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                   CCState &State) {

  if (ValVT == MVT::f128)
    report_fatal_error("f128 is unimplemented on AIX.");

  if (ArgFlags.isByVal())
    report_fatal_error("Passing structure by value is unimplemented.");

  if (ArgFlags.isSRet())
    report_fatal_error("Struct return arguments are unimplemented.");

  if (ArgFlags.isNest())
    report_fatal_error("Nest arguments are unimplemented.");

  const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>(
      State.getMachineFunction().getSubtarget());
  const bool IsPPC64 = Subtarget.isPPC64();
  const unsigned PtrByteSize = IsPPC64 ? 8 : 4;

  static const MCPhysReg GPR_32[] = { // 32-bit registers.
      PPC::R3, PPC::R4, PPC::R5, PPC::R6,
      PPC::R7, PPC::R8, PPC::R9, PPC::R10};
  static const MCPhysReg GPR_64[] = { // 64-bit registers.
      PPC::X3, PPC::X4, PPC::X5, PPC::X6,
      PPC::X7, PPC::X8, PPC::X9, PPC::X10};

  // Arguments always reserve parameter save area.
  switch (ValVT.SimpleTy) {
  default:
    report_fatal_error("Unhandled value type for argument.");
  case MVT::i64:
    // i64 arguments should have been split to i32 for PPC32.
    assert(IsPPC64 && "PPC32 should have split i64 values.");
    LLVM_FALLTHROUGH;
  case MVT::i1:
  case MVT::i32:
    State.AllocateStack(PtrByteSize, PtrByteSize);
    if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) {
      MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
      // Promote integers if needed.
      if (ValVT.getSizeInBits() < RegVT.getSizeInBits())
        LocInfo = ArgFlags.isSExt() ? CCValAssign::LocInfo::SExt
                                    : CCValAssign::LocInfo::ZExt;
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
    } else
      report_fatal_error("Handling of placing parameters on the stack is "
                         "unimplemented!");
    return false;

  case MVT::f32:
  case MVT::f64: {
    // The parameter save area (PSA) is reserved even if the float is passed
    // in an FPR.
    const unsigned StoreSize = LocVT.getStoreSize();
    // Floats are always 4-byte aligned in the PSA on AIX.
    // This includes f64 in 64-bit mode for ABI compatibility.
    State.AllocateStack(IsPPC64 ? 8 : StoreSize, 4);
    if (unsigned Reg = State.AllocateReg(FPR))
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, MVT::f64, LocInfo));
    else
      report_fatal_error("Handling of placing parameters on the stack is "
                         "unimplemented!");

    // f32 reserves 1 GPR in both PPC32 and PPC64.
    // f64 reserves 2 GPRs in PPC32 and 1 GPR in PPC64.
    for (unsigned i = 0; i < StoreSize; i += PtrByteSize)
      State.AllocateReg(IsPPC64 ?
                                  GPR_64 : GPR_32);
    return false;
  }
  }
}

SDValue PPCTargetLowering::LowerCall_AIX(
    SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
    bool isTailCall, bool isPatchPoint,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    ImmutableCallSite CS) const {

  assert((CallConv == CallingConv::C ||
          CallConv == CallingConv::Cold ||
          CallConv == CallingConv::Fast) && "Unexpected calling convention!");

  if (isVarArg || isPatchPoint)
    report_fatal_error("This call type is unimplemented on AIX.");

  if (!isFunctionGlobalAddress(Callee) && !isa<ExternalSymbolSDNode>(Callee))
    report_fatal_error("Handling of indirect call is unimplemented!");

  const PPCSubtarget &Subtarget =
      static_cast<const PPCSubtarget &>(DAG.getSubtarget());
  if (Subtarget.hasQPX())
    report_fatal_error("QPX is not supported on AIX.");
  if (Subtarget.hasAltivec())
    report_fatal_error("Altivec support is unimplemented on AIX.");

  MachineFunction &MF = DAG.getMachineFunction();
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());

  // Reserve space for the linkage save area (LSA) on the stack.
  // In both PPC32 and PPC64 there are 6 reserved slots in the LSA:
  //   [SP][CR][LR][2 x reserved][TOC].
  // The LSA is 24 bytes (6 x 4) in PPC32 and 48 bytes (6 x 8) in PPC64.
  const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  const unsigned PtrByteSize = Subtarget.isPPC64() ? 8 : 4;
  CCInfo.AllocateStack(LinkageSize, PtrByteSize);
  CCInfo.AnalyzeCallOperands(Outs, CC_AIX);

  // The prolog code of the callee may store up to 8 GPR argument registers to
  // the stack, allowing va_start to index over them in memory if the callee
  // is variadic.
  // Because we cannot tell if this is needed on the caller side, we have to
  // conservatively assume that it is needed. As such, make sure we have at
  // least enough stack space for the caller to store the 8 GPRs.
  const unsigned MinParameterSaveAreaSize = 8 * PtrByteSize;
  const unsigned NumBytes = LinkageSize + MinParameterSaveAreaSize;

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
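  // CALLSEQ_START/CALLSEQ_END normally become the stack-pointer adjustment
  // around the call; for example (illustrative), with a fixed-size frame the
  // prolog/epilog inserter folds the outgoing-argument area into the
  // function's own frame allocation, leaving no separate adjustment here.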
  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
  SDValue CallSeqStart = Chain;

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;

  for (CCValAssign &VA : ArgLocs) {
    SDValue Arg = OutVals[VA.getValNo()];

    switch (VA.getLocInfo()) {
    default: report_fatal_error("Unexpected argument extension type.");
    case CCValAssign::Full: break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc())
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));

    if (VA.isMemLoc())
      report_fatal_error("Handling of placing parameters on the stack is "
                         "unimplemented!");
  }

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (auto Reg : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, InFlag);
    InFlag = Chain.getValue(1);
  }

  const int SPDiff = 0;
  return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
                    /* unused except on PPC64 ELFv1 */ false, DAG, RegsToPass,
                    InFlag, Chain, CallSeqStart, Callee, SPDiff, NumBytes, Ins,
                    InVals, CS);
}

bool
PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
                                  MachineFunction &MF, bool isVarArg,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(
      Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
                ? RetCC_PPC_Cold
                : RetCC_PPC);
}

SDValue
PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &dl, SelectionDAG &DAG) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeReturn(Outs,
                       (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
                           ? RetCC_PPC_Cold
                           : RetCC_PPC);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0, RealResIdx = 0; i != RVLocs.size(); ++i, ++RealResIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = OutVals[RealResIdx];

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    }
    if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
      bool isLittleEndian = Subtarget.isLittleEndian();
      // Legalize ret f64 -> ret 2 x i32.
      SDValue SVal =
          DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
                      DAG.getIntPtrConstant(isLittleEndian ?
                                            0 : 1, dl));
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
      RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
      SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
                         DAG.getIntPtrConstant(isLittleEndian ? 1 : 0, dl));
      Flag = Chain.getValue(1);
      VA = RVLocs[++i]; // skip ahead to next loc
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
    } else
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const MCPhysReg *I =
      TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
  if (I) {
    for (; *I; ++I) {
      if (PPC::G8RCRegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::i64));
      else if (PPC::F8RCRegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
      else if (PPC::CRRCRegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::i1));
      else if (PPC::VRRCRegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::Other));
      else
        llvm_unreachable("Unexpected register class in CSRsViaCopy!");
    }
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
}

SDValue
PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc dl(Op);

  // Get the correct type for integers.
  EVT IntVT = Op.getValueType();

  // Get the inputs.
  SDValue Chain = Op.getOperand(0);
  SDValue FPSIdx = getFramePointerFrameIndex(DAG);
  // Build a DYNAREAOFFSET node.
  SDValue Ops[2] = {Chain, FPSIdx};
  SDVTList VTs = DAG.getVTList(IntVT);
  return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
}

SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
                                             SelectionDAG &DAG) const {
  // When we pop the dynamic allocation we need to restore the SP link.
  SDLoc dl(Op);

  // Get the correct type for pointers.
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // Construct the stack pointer operand.
  bool isPPC64 = Subtarget.isPPC64();
  unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
  SDValue StackPtr = DAG.getRegister(SP, PtrVT);

  // Get the operands for the STACKRESTORE.
  SDValue Chain = Op.getOperand(0);
  SDValue SaveSP = Op.getOperand(1);

  // Load the old link SP.
  SDValue LoadLinkSP =
      DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());

  // Restore the stack pointer.
  Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);

  // Store the old link SP.
  return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
}

SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool isPPC64 = Subtarget.isPPC64();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Get the current return address save index.
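  // The slot created below is a fixed object at getReturnSaveOffset(), the
  // link-register save word in the caller's frame (16(SP) for 64-bit ELF and
  // 4(SP) for 32-bit SVR4; offsets shown for illustration, PPCFrameLowering
  // is authoritative).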
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  int RASI = FI->getReturnAddrSaveIndex();

  // If the return address save index hasn't been defined yet.
  if (!RASI) {
    // Find out the fixed offset of the return address save area.
    int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
    // Allocate the frame index for the return address save area.
    RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
    // Save the result.
    FI->setReturnAddrSaveIndex(RASI);
  }
  return DAG.getFrameIndex(RASI, PtrVT);
}

SDValue
PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool isPPC64 = Subtarget.isPPC64();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Get the current frame pointer save index. The users of this index will be
  // primarily DYNALLOC instructions.
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  int FPSI = FI->getFramePointerSaveIndex();

  // If the frame pointer save index hasn't been defined yet.
  if (!FPSI) {
    // Find out the fixed offset of the frame pointer save area.
    int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
    // Allocate the frame index for the frame pointer save area.
    FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
    // Save the result.
    FI->setFramePointerSaveIndex(FPSI);
  }
  return DAG.getFrameIndex(FPSI, PtrVT);
}

SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                                   SelectionDAG &DAG) const {
  // Get the inputs.
  SDValue Chain = Op.getOperand(0);
  SDValue Size = Op.getOperand(1);
  SDLoc dl(Op);

  // Get the correct type for pointers.
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  // Negate the size.
  SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
                                DAG.getConstant(0, dl, PtrVT), Size);
  // Construct a node for the frame pointer save index.
  SDValue FPSIdx = getFramePointerFrameIndex(DAG);
  // Build a DYNALLOC node.
  SDValue Ops[3] = { Chain, NegSize, FPSIdx };
  SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
  return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
}

SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
                                             SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  bool isPPC64 = Subtarget.isPPC64();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false);
  return DAG.getFrameIndex(FI, PtrVT);
}

SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
                     DAG.getVTList(MVT::i32, MVT::Other),
                     Op.getOperand(0), Op.getOperand(1));
}

SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
                     Op.getOperand(0), Op.getOperand(1));
}

SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  if (Op.getValueType().isVector())
    return LowerVectorLoad(Op, DAG);

  assert(Op.getValueType() == MVT::i1 &&
         "Custom lowering only for i1 loads");

  // First, load 8 bits into 32 bits, then truncate to 1 bit.
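  // Roughly (illustrative DAG, types elided):
  //   v, ch = ExtLoad<i8> ch, ptr   ; typically selected as an lbz
  //   r     = truncate v to i1
  // so the i1 value is materialized from the low bits of the loaded byte.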

  SDLoc dl(Op);
  LoadSDNode *LD = cast<LoadSDNode>(Op);

  SDValue Chain = LD->getChain();
  SDValue BasePtr = LD->getBasePtr();
  MachineMemOperand *MMO = LD->getMemOperand();

  SDValue NewLD =
      DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()),
                     Chain, BasePtr, MVT::i8, MMO);
  SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);

  SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
  return DAG.getMergeValues(Ops, dl);
}

SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  if (Op.getOperand(1).getValueType().isVector())
    return LowerVectorStore(Op, DAG);

  assert(Op.getOperand(1).getValueType() == MVT::i1 &&
         "Custom lowering only for i1 stores");

  // First, zero extend to 32 bits, then use a truncating store to 8 bits.

  SDLoc dl(Op);
  StoreSDNode *ST = cast<StoreSDNode>(Op);

  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  SDValue Value = ST->getValue();
  MachineMemOperand *MMO = ST->getMemOperand();

  Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()),
                      Value);
  return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
}

// FIXME: Remove this once the ANDI glue bug is fixed:
SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getValueType() == MVT::i1 &&
         "Custom lowering only for i1 results");

  SDLoc DL(Op);
  return DAG.getNode(PPCISD::ANDIo_1_GT_BIT, DL, MVT::i1,
                     Op.getOperand(0));
}

SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op,
                                               SelectionDAG &DAG) const {

  // Implements a vector truncate that fits in a vector register as a shuffle.
  // We want to legalize vector truncates down to where the source fits in
  // a vector register (and the target is therefore smaller than a vector
  // register). At that point legalization will try to custom lower the
  // sub-legal result and get here, where we can contain the truncate as a
  // single target operation.

  // For example a trunc <2 x i16> to <2 x i8> could be visualized as follows:
  //   <MSB1|LSB1, MSB2|LSB2> to <LSB1, LSB2>
  //
  // We will implement it for big-endian ordering as this (where u denotes
  // undefined):
  //   <MSB1|LSB1, MSB2|LSB2, uu, uu, uu, uu, uu, uu> to
  //   <LSB1, LSB2, u, u, u, u, u, u, u, u, u, u, u, u, u, u>
  //
  // The same operation in little-endian ordering will be:
  //   <uu, uu, uu, uu, uu, uu, LSB2|MSB2, LSB1|MSB1> to
  //   <u, u, u, u, u, u, u, u, u, u, u, u, u, u, LSB2, LSB1>

  assert(Op.getValueType().isVector() && "Vector type expected.");

  SDLoc DL(Op);
  SDValue N1 = Op.getOperand(0);
  unsigned SrcSize = N1.getValueType().getSizeInBits();
  assert(SrcSize <= 128 && "Source must fit in an Altivec/VSX vector");
  SDValue WideSrc = SrcSize == 128 ? N1 : widenVec(DAG, N1, DL);

  EVT TrgVT = Op.getValueType();
  unsigned TrgNumElts = TrgVT.getVectorNumElements();
  EVT EltVT = TrgVT.getVectorElementType();
  unsigned WideNumElts = 128 / EltVT.getSizeInBits();
  EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);

  // First list the elements we want to keep.
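  // Worked example (illustrative): for a v2i16 -> v2i8 truncate, SizeMult is
  // 2 and WideNumElts is 16, so the kept-element indices are {0, 2} on
  // little-endian and {1, 3} on big-endian; the remaining 14 mask slots point
  // into the UNDEF second shuffle operand.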
  unsigned SizeMult = SrcSize / TrgVT.getSizeInBits();
  SmallVector<int, 16> ShuffV;
  if (Subtarget.isLittleEndian())
    for (unsigned i = 0; i < TrgNumElts; ++i)
      ShuffV.push_back(i * SizeMult);
  else
    for (unsigned i = 1; i <= TrgNumElts; ++i)
      ShuffV.push_back(i * SizeMult - 1);

  // Populate the remaining elements with undefs.
  for (unsigned i = TrgNumElts; i < WideNumElts; ++i)
    // ShuffV.push_back(i + WideNumElts);
    ShuffV.push_back(WideNumElts + 1);

  SDValue Conv = DAG.getNode(ISD::BITCAST, DL, WideVT, WideSrc);
  return DAG.getVectorShuffle(WideVT, DL, Conv, DAG.getUNDEF(WideVT), ShuffV);
}

/// LowerSELECT_CC - Lower floating point select_cc's into an fsel instruction
/// when possible.
SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  // Not FP? Not an fsel.
  if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
      !Op.getOperand(2).getValueType().isFloatingPoint())
    return Op;

  bool HasNoInfs = DAG.getTarget().Options.NoInfsFPMath;
  bool HasNoNaNs = DAG.getTarget().Options.NoNaNsFPMath;
  // We might be able to do better than this under some circumstances, but in
  // general, fsel-based lowering of select is a finite-math-only optimization.
  // For more information, see section F.3 of the 2.06 ISA specification.
  // With ISA 3.0, we have xsmaxcdp/xsmincdp which are OK to emit even in the
  // presence of infinities.
  if (!Subtarget.hasP9Vector() && (!HasNoInfs || !HasNoNaNs))
    return Op;
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();

  EVT ResVT = Op.getValueType();
  EVT CmpVT = Op.getOperand(0).getValueType();
  SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
  SDValue TV = Op.getOperand(2), FV = Op.getOperand(3);
  SDLoc dl(Op);

  if (Subtarget.hasP9Vector() && LHS == TV && RHS == FV) {
    switch (CC) {
    default:
      // Not a min/max but with finite math, we may still be able to use fsel.
      if (HasNoInfs && HasNoNaNs)
        break;
      return Op;
    case ISD::SETOGT:
    case ISD::SETGT:
      return DAG.getNode(PPCISD::XSMAXCDP, dl, Op.getValueType(), LHS, RHS);
    case ISD::SETOLT:
    case ISD::SETLT:
      return DAG.getNode(PPCISD::XSMINCDP, dl, Op.getValueType(), LHS, RHS);
    }
  }

  // TODO: Propagate flags from the select rather than global settings.
  SDNodeFlags Flags;
  Flags.setNoInfs(true);
  Flags.setNoNaNs(true);

  // If the RHS of the comparison is a 0.0, we don't need to do the
  // subtraction at all.
  SDValue Sel1;
  if (isFloatingPointZero(RHS))
    switch (CC) {
    default: break; // SETUO etc aren't handled by fsel.
    case ISD::SETNE:
      std::swap(TV, FV);
      LLVM_FALLTHROUGH;
    case ISD::SETEQ:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
      Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
      if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
        Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
      return DAG.getNode(PPCISD::FSEL, dl, ResVT,
                         DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
    case ISD::SETULT:
    case ISD::SETLT:
      std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
      LLVM_FALLTHROUGH;
    case ISD::SETOGE:
    case ISD::SETGE:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
      return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
    case ISD::SETUGT:
    case ISD::SETGT:
      std::swap(TV, FV);  // fsel is natively setge, swap operands for setgt
      LLVM_FALLTHROUGH;
    case ISD::SETOLE:
    case ISD::SETLE:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
      return DAG.getNode(PPCISD::FSEL, dl, ResVT,
                         DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
    }

  SDValue Cmp;
  switch (CC) {
  default: break;       // SETUO etc aren't handled by fsel.
  case ISD::SETNE:
    std::swap(TV, FV);
    LLVM_FALLTHROUGH;
  case ISD::SETEQ:
    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
    if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
    return DAG.getNode(PPCISD::FSEL, dl, ResVT,
                       DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
  case ISD::SETULT:
  case ISD::SETLT:
    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
  case ISD::SETOGE:
  case ISD::SETGE:
    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
  case ISD::SETUGT:
  case ISD::SETGT:
    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
  case ISD::SETOLE:
  case ISD::SETLE:
    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
  }
  return Op;
}

void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
                                               SelectionDAG &DAG,
                                               const SDLoc &dl) const {
  assert(Op.getOperand(0).getValueType().isFloatingPoint());
  SDValue Src = Op.getOperand(0);
  if (Src.getValueType() ==
      MVT::f32)
    Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);

  SDValue Tmp;
  switch (Op.getSimpleValueType().SimpleTy) {
  default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
  case MVT::i32:
    Tmp = DAG.getNode(
        Op.getOpcode() == ISD::FP_TO_SINT
            ? PPCISD::FCTIWZ
            : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ),
        dl, MVT::f64, Src);
    break;
  case MVT::i64:
    assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) &&
           "i64 FP_TO_UINT is supported only with FPCVT");
    Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
                                                        PPCISD::FCTIDUZ,
                      dl, MVT::f64, Src);
    break;
  }

  // Convert the FP value to an int value through memory.
  bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() &&
                  (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT());
  SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64);
  int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
  MachinePointerInfo MPI =
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);

  // Emit a store to the stack slot.
  SDValue Chain;
  if (i32Stack) {
    MachineFunction &MF = DAG.getMachineFunction();
    MachineMemOperand *MMO =
        MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4);
    SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr };
    Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
                                    DAG.getVTList(MVT::Other), Ops, MVT::i32,
                                    MMO);
  } else
    Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI);

  // Result is a load from the stack slot.  If loading 4 bytes, make sure to
  // add in a bias on big endian.
  if (Op.getValueType() == MVT::i32 && !i32Stack) {
    FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
                        DAG.getConstant(4, dl, FIPtr.getValueType()));
    MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4);
  }

  RLI.Chain = Chain;
  RLI.Ptr = FIPtr;
  RLI.MPI = MPI;
}

/// Custom lowers floating point to integer conversions to use
/// the direct move instructions available in ISA 2.07 to avoid the
/// need for load/store combinations.
SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op,
                                                    SelectionDAG &DAG,
                                                    const SDLoc &dl) const {
  assert(Op.getOperand(0).getValueType().isFloatingPoint());
  SDValue Src = Op.getOperand(0);

  if (Src.getValueType() == MVT::f32)
    Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);

  SDValue Tmp;
  switch (Op.getSimpleValueType().SimpleTy) {
  default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
  case MVT::i32:
    Tmp = DAG.getNode(
        Op.getOpcode() == ISD::FP_TO_SINT
            ? PPCISD::FCTIWZ
            : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ),
        dl, MVT::f64, Src);
    Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp);
    break;
  case MVT::i64:
    assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) &&
           "i64 FP_TO_UINT is supported only with FPCVT");
    Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ?
                          PPCISD::FCTIDZ :
                          PPCISD::FCTIDUZ,
                      dl, MVT::f64, Src);
    Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp);
    break;
  }
  return Tmp;
}

SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
                                          const SDLoc &dl) const {

  // FP to INT conversions are legal for f128.
  if (EnableQuadPrecision && (Op->getOperand(0).getValueType() == MVT::f128))
    return Op;

  // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
  // PPC (the libcall is not available).
  if (Op.getOperand(0).getValueType() == MVT::ppcf128) {
    if (Op.getValueType() == MVT::i32) {
      if (Op.getOpcode() == ISD::FP_TO_SINT) {
        SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
                                 MVT::f64, Op.getOperand(0),
                                 DAG.getIntPtrConstant(0, dl));
        SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
                                 MVT::f64, Op.getOperand(0),
                                 DAG.getIntPtrConstant(1, dl));

        // Add the two halves of the long double in round-to-zero mode.
        SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi);

        // Now use a smaller FP_TO_SINT.
        return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res);
      }
      if (Op.getOpcode() == ISD::FP_TO_UINT) {
        const uint64_t TwoE31[] = {0x41e0000000000000LL, 0};
        APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31));
        SDValue Tmp = DAG.getConstantFP(APF, dl, MVT::ppcf128);
        // X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X
        // FIXME: generated code sucks.
        // TODO: Are there fast-math-flags to propagate to this FSUB?
        SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128,
                                   Op.getOperand(0), Tmp);
        True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True);
        True = DAG.getNode(ISD::ADD, dl, MVT::i32, True,
                           DAG.getConstant(0x80000000, dl, MVT::i32));
        SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32,
                                    Op.getOperand(0));
        return DAG.getSelectCC(dl, Op.getOperand(0), Tmp, True, False,
                               ISD::SETGE);
      }
    }

    return SDValue();
  }

  if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
    return LowerFP_TO_INTDirectMove(Op, DAG, dl);

  ReuseLoadInfo RLI;
  LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);

  return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI,
                     RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
}

// We're trying to insert a regular store, S, and then a load, L. If the
// incoming value, O, is a load, we might just be able to have our load use the
// address used by O. However, we don't know if anything else will store to
// that address before we can load from it. To prevent this situation, we need
// to insert our load, L, into the chain as a peer of O. To do this, we give L
// the same chain operand as O, we create a token factor from the chain results
// of O and L, and we replace all uses of O's chain result with that token
// factor (see spliceIntoChain below for this last part).
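// Schematically, for an incoming load O and our new load L:
//   before: O's chain result feeds the later users directly
//   after:  L reuses O's input chain, and every former user of O's chain
//           result instead uses TokenFactor(O.chain, L.chain)
// so anything that was ordered after O is now ordered after L as well.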
bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
                                            ReuseLoadInfo &RLI,
                                            SelectionDAG &DAG,
                                            ISD::LoadExtType ET) const {
  SDLoc dl(Op);
  if (ET == ISD::NON_EXTLOAD &&
      (Op.getOpcode() == ISD::FP_TO_UINT ||
       Op.getOpcode() == ISD::FP_TO_SINT) &&
      isOperationLegalOrCustom(Op.getOpcode(),
                               Op.getOperand(0).getValueType())) {

    LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
    return true;
  }

  LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
  if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
      LD->isNonTemporal())
    return false;
  if (LD->getMemoryVT() != MemVT)
    return false;

  RLI.Ptr = LD->getBasePtr();
  if (LD->isIndexed() && !LD->getOffset().isUndef()) {
    assert(LD->getAddressingMode() == ISD::PRE_INC &&
           "Non-pre-inc AM on PPC?");
    RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
                          LD->getOffset());
  }

  RLI.Chain = LD->getChain();
  RLI.MPI = LD->getPointerInfo();
  RLI.IsDereferenceable = LD->isDereferenceable();
  RLI.IsInvariant = LD->isInvariant();
  RLI.Alignment = LD->getAlignment();
  RLI.AAInfo = LD->getAAInfo();
  RLI.Ranges = LD->getRanges();

  RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
  return true;
}

// Given the head of the old chain, ResChain, insert a token factor containing
// it and NewResChain, and make users of ResChain now be users of that token
// factor.
// TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead.
void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
                                        SDValue NewResChain,
                                        SelectionDAG &DAG) const {
  if (!ResChain)
    return;

  SDLoc dl(NewResChain);

  SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                           NewResChain, DAG.getUNDEF(MVT::Other));
  assert(TF.getNode() != NewResChain.getNode() &&
         "A new TF really is required here");

  DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
  DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
}

/// Analyze the profitability of a direct move. If the loaded integer value
/// has no integer uses (it is only converted to floating point), prefer a
/// plain float load over an integer load plus a direct move.
bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
  SDNode *Origin = Op.getOperand(0).getNode();
  if (Origin->getOpcode() != ISD::LOAD)
    return true;

  // If there is no LXSIBZX/LXSIHZX (e.g. on Power8),
  // prefer direct move if the memory size is 1 or 2 bytes.
  MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand();
  if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2)
    return true;

  for (SDNode::use_iterator UI = Origin->use_begin(),
                            UE = Origin->use_end();
       UI != UE; ++UI) {

    // Only look at the users of the loaded value.
    if (UI.getUse().get().getResNo() != 0)
      continue;

    if (UI->getOpcode() != ISD::SINT_TO_FP &&
        UI->getOpcode() != ISD::UINT_TO_FP)
      return true;
  }

  return false;
}

/// Custom lowers integer to floating point conversions to use
/// the direct move instructions available in ISA 2.07 to avoid the
/// need for load/store combinations.
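/// As a rough sketch (the exact instructions depend on the subtarget), a
/// signed i32 -> f64 conversion becomes a direct move plus a convert, e.g.
///   mtvsrwa vsX, rA    (MTVSRA: GPR -> VSR, sign-extending)
///   xscvsxddp fD, vsX  (FCFID: signed integer -> double)
/// instead of a store to a stack slot, an lfiwax reload, and an fcfid.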
SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
                                                    SelectionDAG &DAG,
                                                    const SDLoc &dl) const {
  assert((Op.getValueType() == MVT::f32 ||
          Op.getValueType() == MVT::f64) &&
         "Invalid floating point type as target of conversion");
  assert(Subtarget.hasFPCVT() &&
         "Int to FP conversions with direct moves require FPCVT");
  SDValue FP;
  SDValue Src = Op.getOperand(0);
  bool SinglePrec = Op.getValueType() == MVT::f32;
  bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
  bool Signed = Op.getOpcode() == ISD::SINT_TO_FP;
  unsigned ConvOp = Signed ? (SinglePrec ? PPCISD::FCFIDS : PPCISD::FCFID) :
                             (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU);

  if (WordInt) {
    FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ,
                     dl, MVT::f64, Src);
    FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
  } else {
    FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src);
    FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
  }

  return FP;
}

static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl) {

  EVT VecVT = Vec.getValueType();
  assert(VecVT.isVector() && "Expected a vector type.");
  assert(VecVT.getSizeInBits() < 128 && "Vector is already full width.");

  EVT EltVT = VecVT.getVectorElementType();
  unsigned WideNumElts = 128 / EltVT.getSizeInBits();
  EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);

  unsigned NumConcat = WideNumElts / VecVT.getVectorNumElements();
  SmallVector<SDValue, 16> Ops(NumConcat);
  Ops[0] = Vec;
  SDValue UndefVec = DAG.getUNDEF(VecVT);
  for (unsigned i = 1; i < NumConcat; ++i)
    Ops[i] = UndefVec;

  return DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Ops);
}

SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
                                                const SDLoc &dl) const {

  unsigned Opc = Op.getOpcode();
  assert((Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP) &&
         "Unexpected conversion type");
  assert((Op.getValueType() == MVT::v2f64 || Op.getValueType() == MVT::v4f32) &&
         "Supports conversions to v2f64/v4f32 only.");

  bool SignedConv = Opc == ISD::SINT_TO_FP;
  bool FourEltRes = Op.getValueType() == MVT::v4f32;

  SDValue Wide = widenVec(DAG, Op.getOperand(0), dl);
  EVT WideVT = Wide.getValueType();
  unsigned WideNumElts = WideVT.getVectorNumElements();
  MVT IntermediateVT = FourEltRes ? MVT::v4i32 : MVT::v2i64;

  SmallVector<int, 16> ShuffV;
  for (unsigned i = 0; i < WideNumElts; ++i)
    ShuffV.push_back(i + WideNumElts);

  int Stride = FourEltRes ? WideNumElts / 4 : WideNumElts / 2;
  int SaveElts = FourEltRes ? 4 : 2;
  if (Subtarget.isLittleEndian())
    for (int i = 0; i < SaveElts; i++)
      ShuffV[i * Stride] = i;
  else
    for (int i = 1; i <= SaveElts; i++)
      ShuffV[i * Stride - 1] = i - 1;

  SDValue ShuffleSrc2 =
      SignedConv ? DAG.getUNDEF(WideVT) : DAG.getConstant(0, dl, WideVT);
  SDValue Arrange = DAG.getVectorShuffle(WideVT, dl, Wide, ShuffleSrc2, ShuffV);
  unsigned ExtendOp =
      SignedConv ?
                 (unsigned)PPCISD::SExtVElems : (unsigned)ISD::BITCAST;

  SDValue Extend;
  if (!Subtarget.hasP9Altivec() && SignedConv) {
    Arrange = DAG.getBitcast(IntermediateVT, Arrange);
    Extend = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, IntermediateVT, Arrange,
                         DAG.getValueType(Op.getOperand(0).getValueType()));
  } else
    Extend = DAG.getNode(ExtendOp, dl, IntermediateVT, Arrange);

  return DAG.getNode(Opc, dl, Op.getValueType(), Extend);
}

SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc dl(Op);

  EVT InVT = Op.getOperand(0).getValueType();
  EVT OutVT = Op.getValueType();
  if (OutVT.isVector() && OutVT.isFloatingPoint() &&
      isOperationCustom(Op.getOpcode(), InVT))
    return LowerINT_TO_FPVector(Op, DAG, dl);

  // Conversions to f128 are legal.
  if (EnableQuadPrecision && (Op.getValueType() == MVT::f128))
    return Op;

  if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) {
    if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64)
      return SDValue();

    SDValue Value = Op.getOperand(0);
    // The values are now known to be -1 (false) or 1 (true). To convert this
    // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by
    // 0.5). This can be done with an fma and the 0.5 constant:
    //   (V+1.0)*0.5 = 0.5*V+0.5
    Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);

    SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);

    Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);

    if (Op.getValueType() != MVT::v4f64)
      Value = DAG.getNode(ISD::FP_ROUND, dl,
                          Op.getValueType(), Value,
                          DAG.getIntPtrConstant(1, dl));
    return Value;
  }

  // Don't handle ppc_fp128 here; let it be lowered to a libcall.
  if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
    return SDValue();

  if (Op.getOperand(0).getValueType() == MVT::i1)
    return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0),
                       DAG.getConstantFP(1.0, dl, Op.getValueType()),
                       DAG.getConstantFP(0.0, dl, Op.getValueType()));

  // If we have direct moves, we can do the conversion entirely in registers
  // and skip the store/load. Without FPCVT, however, we can't do most
  // conversions this way.
  if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
      Subtarget.isPPC64() && Subtarget.hasFPCVT())
    return LowerINT_TO_FPDirectMove(Op, DAG, dl);

  assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
         "UINT_TO_FP is supported only with FPCVT");

  // If we have FCFIDS, then use it when converting to single-precision.
  // Otherwise, convert to double-precision and then round.
  unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                       ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
                                                            : PPCISD::FCFIDS)
                       : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
                                                            : PPCISD::FCFID);
  MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                  ? MVT::f32
                  : MVT::f64;

  if (Op.getOperand(0).getValueType() == MVT::i64) {
    SDValue SINT = Op.getOperand(0);
    // When converting to single-precision, we actually need to convert
    // to double-precision first and then round to single-precision.
    // To avoid double-rounding effects during that operation, we have
    // to prepare the input operand.
    // Bits that might be truncated when
    // converting to double-precision are replaced by a bit that won't
    // be lost at this stage, but is below the single-precision rounding
    // position.
    //
    // However, if -enable-unsafe-fp-math is in effect, accept double
    // rounding to avoid the extra overhead.
    if (Op.getValueType() == MVT::f32 &&
        !Subtarget.hasFPCVT() &&
        !DAG.getTarget().Options.UnsafeFPMath) {

      // Twiddle input to make sure the low 11 bits are zero.  (If this
      // is the case, we are guaranteed the value will fit into the 53 bit
      // mantissa of an IEEE double-precision value without rounding.)
      // If any of those low 11 bits were not zero originally, make sure
      // bit 12 (value 2048) is set instead, so that the final rounding
      // to single-precision gets the correct result.
      SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64,
                                  SINT, DAG.getConstant(2047, dl, MVT::i64));
      Round = DAG.getNode(ISD::ADD, dl, MVT::i64,
                          Round, DAG.getConstant(2047, dl, MVT::i64));
      Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT);
      Round = DAG.getNode(ISD::AND, dl, MVT::i64,
                          Round, DAG.getConstant(-2048, dl, MVT::i64));

      // However, we cannot use that value unconditionally: if the magnitude
      // of the input value is small, the bit-twiddling we did above might
      // end up visibly changing the output. Fortunately, in that case, we
      // don't need to twiddle bits since the original input will convert
      // exactly to double-precision floating-point already. Therefore,
      // construct a conditional to use the original value if the top 11
      // bits are all sign-bit copies, and use the rounded value computed
      // above otherwise.
      SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64,
                                 SINT, DAG.getConstant(53, dl, MVT::i32));
      Cond = DAG.getNode(ISD::ADD, dl, MVT::i64,
                         Cond, DAG.getConstant(1, dl, MVT::i64));
      Cond = DAG.getSetCC(dl, MVT::i32,
                          Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT);

      SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT);
    }

    ReuseLoadInfo RLI;
    SDValue Bits;

    MachineFunction &MF = DAG.getMachineFunction();
    if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
      Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI,
                         RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
      spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
    } else if (Subtarget.hasLFIWAX() &&
               canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) {
      MachineMemOperand *MMO =
          MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
                                  RLI.Alignment, RLI.AAInfo, RLI.Ranges);
      SDValue Ops[] = { RLI.Chain, RLI.Ptr };
      Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl,
                                     DAG.getVTList(MVT::f64, MVT::Other),
                                     Ops, MVT::i32, MMO);
      spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
    } else if (Subtarget.hasFPCVT() &&
               canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) {
      MachineMemOperand *MMO =
          MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
                                  RLI.Alignment, RLI.AAInfo, RLI.Ranges);
      SDValue Ops[] = { RLI.Chain, RLI.Ptr };
      Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl,
                                     DAG.getVTList(MVT::f64, MVT::Other),
                                     Ops, MVT::i32, MMO);
      spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
    } else if (((Subtarget.hasLFIWAX() &&
                 SINT.getOpcode() == ISD::SIGN_EXTEND) ||
                (Subtarget.hasFPCVT() &&
                 SINT.getOpcode() == ISD::ZERO_EXTEND)) &&
               SINT.getOperand(0).getValueType() == MVT::i32) {
      MachineFrameInfo &MFI = MF.getFrameInfo();
      EVT PtrVT = getPointerTy(DAG.getDataLayout());

      int FrameIdx = MFI.CreateStackObject(4, 4, false);
      SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

      SDValue Store =
          DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx,
                       MachinePointerInfo::getFixedStack(
                           DAG.getMachineFunction(), FrameIdx));

      assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
             "Expected an i32 store");

      RLI.Ptr = FIdx;
      RLI.Chain = Store;
      RLI.MPI =
          MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
      RLI.Alignment = 4;

      MachineMemOperand *MMO =
          MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
                                  RLI.Alignment, RLI.AAInfo, RLI.Ranges);
      SDValue Ops[] = { RLI.Chain, RLI.Ptr };
      Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ?
                                     PPCISD::LFIWZX : PPCISD::LFIWAX,
                                     dl, DAG.getVTList(MVT::f64, MVT::Other),
                                     Ops, MVT::i32, MMO);
    } else
      Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);

    SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits);

    if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
      FP = DAG.getNode(ISD::FP_ROUND, dl,
                       MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
    return FP;
  }

  assert(Op.getOperand(0).getValueType() == MVT::i32 &&
         "Unhandled INT_TO_FP type in custom expander!");
  // Since we only generate this in 64-bit mode, we can take advantage of
  // 64-bit registers.  In particular, sign extend the input value into the
  // 64-bit register with extsw, store the WHOLE 64-bit value into the stack
  // then lfd it and fcfid it.
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  SDValue Ld;
  if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
    ReuseLoadInfo RLI;
    bool ReusingLoad;
    if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI,
                                            DAG))) {
      int FrameIdx = MFI.CreateStackObject(4, 4, false);
      SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

      SDValue Store =
          DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
                       MachinePointerInfo::getFixedStack(
                           DAG.getMachineFunction(), FrameIdx));

      assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
             "Expected an i32 store");

      RLI.Ptr = FIdx;
      RLI.Chain = Store;
      RLI.MPI =
          MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
      RLI.Alignment = 4;
    }

    MachineMemOperand *MMO =
        MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
                                RLI.Alignment, RLI.AAInfo, RLI.Ranges);
    SDValue Ops[] = { RLI.Chain, RLI.Ptr };
    Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ?
                                 PPCISD::LFIWZX : PPCISD::LFIWAX,
                                 dl, DAG.getVTList(MVT::f64, MVT::Other),
                                 Ops, MVT::i32, MMO);
    if (ReusingLoad)
      spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
  } else {
    assert(Subtarget.isPPC64() &&
           "i32->FP without LFIWAX supported only on PPC64");

    int FrameIdx = MFI.CreateStackObject(8, 8, false);
    SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

    SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64,
                                Op.getOperand(0));

    // STD the extended value into the stack slot.
    SDValue Store = DAG.getStore(
        DAG.getEntryNode(), dl, Ext64, FIdx,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));

    // Load the value as a double.
    Ld = DAG.getLoad(
        MVT::f64, dl, Store, FIdx,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
  }

  // FCFID it and return it.
  SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
  if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
    FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
                     DAG.getIntPtrConstant(0, dl));
  return FP;
}

SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc dl(Op);
  /*
   The rounding mode is in bits 30:31 of FPSCR, and has the following
   settings:
     00 Round to nearest
     01 Round to 0
     10 Round to +inf
     11 Round to -inf

  FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined
     0 Round to 0
     1 Round to nearest
     2 Round to +inf
     3 Round to -inf

  To perform the conversion, we do:
    ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
  */

  MachineFunction &MF = DAG.getMachineFunction();
  EVT VT = Op.getValueType();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Save FP Control Word to register
  EVT NodeTys[] = {
    MVT::f64,    // return register
    MVT::Glue    // unused in this context
  };
  SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None);

  // Save FP register to stack slot
  int SSFI = MF.getFrameInfo().CreateStackObject(8, 8, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain, StackSlot,
                               MachinePointerInfo());

  // Load FP Control Word from low 32 bits of stack slot.
  SDValue Four = DAG.getConstant(4, dl, PtrVT);
  SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
  SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo());

  // Transform as necessary
  SDValue CWD1 =
    DAG.getNode(ISD::AND, dl, MVT::i32,
                CWD, DAG.getConstant(3, dl, MVT::i32));
  SDValue CWD2 =
    DAG.getNode(ISD::SRL, dl, MVT::i32,
                DAG.getNode(ISD::AND, dl, MVT::i32,
                            DAG.getNode(ISD::XOR, dl, MVT::i32,
                                        CWD, DAG.getConstant(3, dl, MVT::i32)),
                            DAG.getConstant(3, dl, MVT::i32)),
                DAG.getConstant(1, dl, MVT::i32));

  SDValue RetVal =
    DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);

  return DAG.getNode((VT.getSizeInBits() < 16 ?
                      ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
}

SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  unsigned BitWidth = VT.getSizeInBits();
  SDLoc dl(Op);
  assert(Op.getNumOperands() == 3 &&
         VT == Op.getOperand(1).getValueType() &&
         "Unexpected SHL!");

  // Expand into a bunch of logical ops.  Note that these ops
  // depend on the PPC behavior for oversized shift amounts.
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Amt = Op.getOperand(2);
  EVT AmtVT = Amt.getValueType();

  SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
                             DAG.getConstant(BitWidth, dl, AmtVT), Amt);
  SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
  SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
  SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
  SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
                             DAG.getConstant(-BitWidth, dl, AmtVT));
  SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
  SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
  SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
  SDValue OutOps[] = { OutLo, OutHi };
  return DAG.getMergeValues(OutOps, dl);
}

SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned BitWidth = VT.getSizeInBits();
  assert(Op.getNumOperands() == 3 &&
         VT == Op.getOperand(1).getValueType() &&
         "Unexpected SRL!");

  // Expand into a bunch of logical ops.  Note that these ops
  // depend on the PPC behavior for oversized shift amounts.
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Amt = Op.getOperand(2);
  EVT AmtVT = Amt.getValueType();

  SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
                             DAG.getConstant(BitWidth, dl, AmtVT), Amt);
  SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
  SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
  SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
  SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
                             DAG.getConstant(-BitWidth, dl, AmtVT));
  SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
  SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
  SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
  SDValue OutOps[] = { OutLo, OutHi };
  return DAG.getMergeValues(OutOps, dl);
}

SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  unsigned BitWidth = VT.getSizeInBits();
  assert(Op.getNumOperands() == 3 &&
         VT == Op.getOperand(1).getValueType() &&
         "Unexpected SRA!");

  // Expand into a bunch of logical ops, followed by a select_cc.
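  // For Amt <= BitWidth this computes
  //   OutLo = (Lo >>u Amt) | (Hi << (BitWidth - Amt)), OutHi = Hi >>s Amt,
  // relying on the PPC shift ops returning 0 once the amount reaches the
  // type width. For Amt > BitWidth the select_cc on (Amt - BitWidth) instead
  // picks OutLo = Hi >>s (Amt - BitWidth); OutHi remains correct because
  // sraw/srad produce a sign-fill for oversized shift amounts.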
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Amt = Op.getOperand(2);
  EVT AmtVT = Amt.getValueType();

  SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
                             DAG.getConstant(BitWidth, dl, AmtVT), Amt);
  SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
  SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
  SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
  SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
                             DAG.getConstant(-BitWidth, dl, AmtVT));
  SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
  SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
  SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT),
                                  Tmp4, Tmp6, ISD::SETLE);
  SDValue OutOps[] = { OutLo, OutHi };
  return DAG.getMergeValues(OutOps, dl);
}

//===----------------------------------------------------------------------===//
// Vector related lowering.
//

/// BuildSplatI - Build a canonical splati of Val with an element size of
/// SplatSize.  Cast the result to VT.
static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT,
                           SelectionDAG &DAG, const SDLoc &dl) {
  assert(Val >= -16 && Val <= 15 && "vsplti is out of range!");

  static const MVT VTys[] = { // canonical VT to use for each size.
    MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
  };

  EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];

  // Force vspltis[hw] -1 to vspltisb -1 to canonicalize.
  if (Val == -1)
    SplatSize = 1;

  EVT CanonicalVT = VTys[SplatSize-1];

  // Build a canonical splat for this value.
  return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT));
}

/// BuildIntrinsicOp - Return a unary operator intrinsic node with the
/// specified intrinsic ID.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG,
                                const SDLoc &dl, EVT DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = Op.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
                     DAG.getConstant(IID, dl, MVT::i32), Op);
}

/// BuildIntrinsicOp - Return a binary operator intrinsic node with the
/// specified intrinsic ID.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
                                SelectionDAG &DAG, const SDLoc &dl,
                                EVT DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = LHS.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
                     DAG.getConstant(IID, dl, MVT::i32), LHS, RHS);
}

/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
/// specified intrinsic ID.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
                                SDValue Op2, SelectionDAG &DAG, const SDLoc &dl,
                                EVT DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = Op0.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
                     DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
}

/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
/// amount.  The result has the specified value type.
static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT,
                           SelectionDAG &DAG, const SDLoc &dl) {
  // Force LHS/RHS to be the right type.
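  // (vsldoi treats LHS:RHS as a single 32-byte value and extracts the 16
  // bytes starting at byte Amt, so e.g. Amt == 4 yields
  // <LHS[4..15], RHS[0..3]>.)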
  LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
  RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);

  int Ops[16];
  for (unsigned i = 0; i != 16; ++i)
    Ops[i] = i + Amt;
  SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
  return DAG.getNode(ISD::BITCAST, dl, VT, T);
}

/// Do we have an efficient pattern in a .td file for this node?
///
/// \param V - pointer to the BuildVectorSDNode being matched
/// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves?
///
/// There are some patterns where it is beneficial to keep a BUILD_VECTOR
/// node as a BUILD_VECTOR node rather than expanding it. The patterns where
/// the opposite is true (expansion is beneficial) are:
/// - The node builds a vector out of integers that are not 32 or 64-bits
/// - The node builds a vector out of constants
/// - The node is a "load-and-splat"
/// In all other cases, we will choose to keep the BUILD_VECTOR.
static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V,
                                            bool HasDirectMove,
                                            bool HasP8Vector) {
  EVT VecVT = V->getValueType(0);
  bool RightType = VecVT == MVT::v2f64 ||
    (HasP8Vector && VecVT == MVT::v4f32) ||
    (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32));
  if (!RightType)
    return false;

  bool IsSplat = true;
  bool IsLoad = false;
  SDValue Op0 = V->getOperand(0);

  // This function is called in a block that confirms the node is not a
  // constant splat. So a constant BUILD_VECTOR here means the vector is
  // built out of different constants.
  if (V->isConstant())
    return false;
  for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
    if (V->getOperand(i).isUndef())
      return false;
    // We want to expand nodes that represent load-and-splat even if the
    // loaded value is a floating point truncation or conversion to int.
    if (V->getOperand(i).getOpcode() == ISD::LOAD ||
        (V->getOperand(i).getOpcode() == ISD::FP_ROUND &&
         V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
        (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT &&
         V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
        (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT &&
         V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD))
      IsLoad = true;
    // If the operands are different or the input is not a load and has more
    // uses than just this BV node, then it isn't a splat.
    if (V->getOperand(i) != Op0 ||
        (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode())))
      IsSplat = false;
  }
  return !(IsSplat && IsLoad);
}

// Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128.
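// With direct moves available, BUILD_FP128 keeps both i64 halves in registers
// (e.g. it can select to mtvsrdd on ISA 3.0) rather than assembling the f128
// through a stack slot.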
SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {

  SDLoc dl(Op);
  SDValue Op0 = Op->getOperand(0);

  if (!EnableQuadPrecision ||
      (Op.getValueType() != MVT::f128) ||
      (Op0.getOpcode() != ISD::BUILD_PAIR) ||
      (Op0.getOperand(0).getValueType() != MVT::i64) ||
      (Op0.getOperand(1).getValueType() != MVT::i64))
    return SDValue();

  return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0),
                     Op0.getOperand(1));
}

static const SDValue *getNormalLoadInput(const SDValue &Op) {
  const SDValue *InputLoad = &Op;
  if (InputLoad->getOpcode() == ISD::BITCAST)
    InputLoad = &InputLoad->getOperand(0);
  if (InputLoad->getOpcode() == ISD::SCALAR_TO_VECTOR)
    InputLoad = &InputLoad->getOperand(0);
  if (InputLoad->getOpcode() != ISD::LOAD)
    return nullptr;
  LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
  return ISD::isNormalLoad(LD) ? InputLoad : nullptr;
}

// If this is a case we can't handle, return null and let the default
// expansion code take care of it.  If we CAN select this case, and if it
// selects to a single instruction, return Op.  Otherwise, if we can codegen
// this case more efficiently than a constant pool load, lower it to the
// sequence of ops that should be used.
SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDLoc dl(Op);
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
  assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");

  if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) {
    // We first build an i32 vector, load it into a QPX register,
    // then convert it to a floating-point vector and compare it
    // to a zero vector to get the boolean result.
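    // (When every defined element is a constant, the code below skips all of
    // that and materializes the mask with a single constant-pool load; QPX
    // registers encode false as -1.0 and true as +1.0.)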
    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    int FrameIdx = MFI.CreateStackObject(16, 16, false);
    MachinePointerInfo PtrInfo =
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

    assert(BVN->getNumOperands() == 4 &&
           "BUILD_VECTOR for v4i1 does not have 4 operands");

    bool IsConst = true;
    for (unsigned i = 0; i < 4; ++i) {
      if (BVN->getOperand(i).isUndef()) continue;
      if (!isa<ConstantSDNode>(BVN->getOperand(i))) {
        IsConst = false;
        break;
      }
    }

    if (IsConst) {
      Constant *One =
          ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0);
      Constant *NegOne =
          ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0);

      Constant *CV[4];
      for (unsigned i = 0; i < 4; ++i) {
        if (BVN->getOperand(i).isUndef())
          CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext()));
        else if (isNullConstant(BVN->getOperand(i)))
          CV[i] = NegOne;
        else
          CV[i] = One;
      }

      Constant *CP = ConstantVector::get(CV);
      SDValue CPIdx = DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()),
                                          16 /* alignment */);

      SDValue Ops[] = {DAG.getEntryNode(), CPIdx};
      SDVTList VTs = DAG.getVTList({MVT::v4i1, /*chain*/ MVT::Other});
      return DAG.getMemIntrinsicNode(
          PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
    }

    SmallVector<SDValue, 4> Stores;
    for (unsigned i = 0; i < 4; ++i) {
      if (BVN->getOperand(i).isUndef()) continue;

      unsigned Offset = 4*i;
      SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
      Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);

      unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize();
      if (StoreSize > 4) {
        Stores.push_back(
            DAG.getTruncStore(DAG.getEntryNode(), dl, BVN->getOperand(i), Idx,
                              PtrInfo.getWithOffset(Offset), MVT::i32));
      } else {
        SDValue StoreValue = BVN->getOperand(i);
        if (StoreSize < 4)
          StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue);

        Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, StoreValue, Idx,
                                      PtrInfo.getWithOffset(Offset)));
      }
    }

    SDValue StoreChain;
    if (!Stores.empty())
      StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
    else
      StoreChain = DAG.getEntryNode();

    // Now load from v4i32 into the QPX register; this will extend it to
    // v4i64 but not yet convert it to a floating point. Nevertheless, this
    // is typed as v4f64 because the QPX register integer states are not
    // explicitly represented.

    SDValue Ops[] = {StoreChain,
                     DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32),
                     FIdx};
    SDVTList VTs = DAG.getVTList({MVT::v4f64, /*chain*/ MVT::Other});

    SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN,
                                                 dl, VTs, Ops, MVT::v4i32,
                                                 PtrInfo);
    LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
                             DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl,
                                             MVT::i32),
                             LoadedVect);

    SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::v4f64);

    return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ);
  }

  // All other QPX vectors are handled by generic code.
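  // (Returning an empty SDValue from this custom hook tells the legalizer to
  // fall back to its default expansion for the node.)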
  if (Subtarget.hasQPX())
    return SDValue();

  // Check if this is a splat of a constant value.
  APInt APSplatBits, APSplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (! BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
                             HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
      SplatBitSize > 32) {

    const SDValue *InputLoad = getNormalLoadInput(Op.getOperand(0));
    // Handle load-and-splat patterns as we have instructions that will do this
    // in one go.
    if (InputLoad && DAG.isSplatValue(Op, true)) {
      LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);

      // We have handling for 4 and 8 byte elements.
      unsigned ElementSize = LD->getMemoryVT().getScalarSizeInBits();

      // Checking for a single use of this load, we have to check for vector
      // width (128 bits) / ElementSize uses (since each operand of the
      // BUILD_VECTOR is a separate use of the value).
      if (InputLoad->getNode()->hasNUsesOfValue(128 / ElementSize, 0) &&
          ((Subtarget.hasVSX() && ElementSize == 64) ||
           (Subtarget.hasP9Vector() && ElementSize == 32))) {
        SDValue Ops[] = {
          LD->getChain(),    // Chain
          LD->getBasePtr(),  // Ptr
          DAG.getValueType(Op.getValueType())  // VT
        };
        return
          DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl,
                                  DAG.getVTList(Op.getValueType(), MVT::Other),
                                  Ops, LD->getMemoryVT(), LD->getMemOperand());
      }
    }

    // BUILD_VECTOR nodes that are not constant splats of up to 32-bits can be
    // lowered to VSX instructions under certain conditions.
    // Without VSX, there is no pattern more efficient than expanding the node.
    if (Subtarget.hasVSX() &&
        haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
                                        Subtarget.hasP8Vector()))
      return Op;
    return SDValue();
  }

  unsigned SplatBits = APSplatBits.getZExtValue();
  unsigned SplatUndef = APSplatUndef.getZExtValue();
  unsigned SplatSize = SplatBitSize / 8;

  // First, handle single instruction cases.

  // All zeros?
  if (SplatBits == 0) {
    // Canonicalize all zero vectors to be v4i32.
    if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
      SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
      Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
    }
    return Op;
  }

  // We have XXSPLTIB for constant splats one byte wide.
  if (Subtarget.hasP9Vector() && SplatSize == 1) {
    // This is a splat of 1-byte elements with some elements potentially undef.
    // Rather than trying to match undef in the SDAG patterns, ensure that all
    // elements are the same constant.
    if (HasAnyUndefs || ISD::isBuildVectorAllOnes(BVN)) {
      SmallVector<SDValue, 16> Ops(16, DAG.getConstant(SplatBits,
                                                       dl, MVT::i32));
      SDValue NewBV = DAG.getBuildVector(MVT::v16i8, dl, Ops);
      if (Op.getValueType() != MVT::v16i8)
        return DAG.getBitcast(Op.getValueType(), NewBV);
      return NewBV;
    }

    // BuildVectorSDNode::isConstantSplat() is actually pretty smart. It'll
    // detect that constant splats like v8i16: 0xABAB are really just splats
    // of a 1-byte constant. In this case, we need to convert the node to a
    // splat of v16i8 and a bitcast.
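    // For example, a v8i16 splat of 0xABAB is reported with SplatBitSize == 8
    // and SplatBits == 0xAB; splatting 0xAB as v16i8 (which matches XXSPLTIB)
    // and bitcasting back to v8i16 yields the same value.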
    if (Op.getValueType() != MVT::v16i8)
      return DAG.getBitcast(Op.getValueType(),
                            DAG.getConstant(SplatBits, dl, MVT::v16i8));

    return Op;
  }

  // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
  int32_t SextVal = (int32_t(SplatBits << (32-SplatBitSize)) >>
                     (32-SplatBitSize));
  if (SextVal >= -16 && SextVal <= 15)
    return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl);

  // Two instruction sequences.

  // If this value is in the range [-32,30] and is even, use:
  //     VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
  // If this value is in the range [17,31] and is odd, use:
  //     VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
  // If this value is in the range [-31,-17] and is odd, use:
  //     VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
  // Note the last two are three-instruction sequences.
  if (SextVal >= -32 && SextVal <= 31) {
    // To avoid having these optimizations undone by constant folding,
    // we convert to a pseudo that will be expanded later into one of
    // the above forms.
    SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
    EVT VT = (SplatSize == 1 ? MVT::v16i8 :
              (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
    SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
    SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
    if (VT == Op.getValueType())
      return RetVal;
    else
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
  }

  // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
  // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
  // for fneg/fabs.
  if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
    // Make -1 and vspltisw -1:
    SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);

    // Make the VSLW intrinsic, computing 0x8000_0000.
    SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
                                   OnesV, DAG, dl);

    // xor by OnesV to invert it.
    Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
    return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
  }

  // Check to see if this is a wide variety of vsplti*, binop self cases.
  static const signed char SplatCsts[] = {
    -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
    -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
  };

  for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
    // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
    // cases which are ambiguous (e.g. formation of 0x8000_0000).
    int i = SplatCsts[idx];

    // Figure out what shift amount will be used by altivec if shifted by i in
    // this splat size.
    unsigned TypeShiftAmt = i & (SplatBitSize-1);

    // vsplti + shl self.
    if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
        Intrinsic::ppc_altivec_vslw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + srl self.
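    // (e.g. a v4i32 splat of 0x000000FF: vspltisw -8 produces 0xFFFFFFF8 in
    // each word, and vsrw of that by itself shifts right by
    // 0xFFFFFFF8 & 31 == 24 bits, leaving 0xFF.)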
    if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
        Intrinsic::ppc_altivec_vsrw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + sra self. (The compare uses an arithmetic shift here; a
    // logical shift would duplicate the srl condition above and never fire.)
    if (SextVal == (i >> TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
        Intrinsic::ppc_altivec_vsraw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + rol self.
    if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
                         ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
        Intrinsic::ppc_altivec_vrlw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // t = vsplti c, result = vsldoi t, t, 1
    if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
      unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
      return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
    }
    // t = vsplti c, result = vsldoi t, t, 2
    if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
      unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
      return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
    }
    // t = vsplti c, result = vsldoi t, t, 3
    if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
      unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
      return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
    }
  }

  return SDValue();
}

/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle.
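/// Each PFEntry packs the operation into bits [29:26] and the perfect-shuffle
/// table indices of its two operands into bits [25:13] and [12:0]. An index
/// encodes a four-element mask as four base-9 digits, so the two OP_COPY
/// identities checked below are <0,1,2,3> == 102 and <4,5,6,7> == 3382.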
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
                                      SDValue RHS, SelectionDAG &DAG,
                                      const SDLoc &dl) {
  unsigned OpNum = (PFEntry >> 26) & 0x0F;
  unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
  unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);

  enum {
    OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
    OP_VMRGHW,
    OP_VMRGLW,
    OP_VSPLTISW0,
    OP_VSPLTISW1,
    OP_VSPLTISW2,
    OP_VSPLTISW3,
    OP_VSLDOI4,
    OP_VSLDOI8,
    OP_VSLDOI12
  };

  if (OpNum == OP_COPY) {
    if (LHSID == (1*9+2)*9+3) return LHS;
    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
    return RHS;
  }

  SDValue OpLHS, OpRHS;
  OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
  OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);

  int ShufIdxs[16];
  switch (OpNum) {
  default: llvm_unreachable("Unknown i32 permute!");
  case OP_VMRGHW:
    ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
    ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
    ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
    ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
    break;
  case OP_VMRGLW:
    ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
    ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
    ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
    ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
    break;
  case OP_VSPLTISW0:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+0;
    break;
  case OP_VSPLTISW1:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+4;
    break;
  case OP_VSPLTISW2:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+8;
    break;
  case OP_VSPLTISW3:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+12;
    break;
  case OP_VSLDOI4:
    return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
  case OP_VSLDOI8:
    return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
  case OP_VSLDOI12:
    return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
  }
  EVT VT = OpLHS.getValueType();
  OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
  OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
  SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
  return DAG.getNode(ISD::BITCAST, dl, VT, T);
}

/// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
/// by the VINSERTB instruction introduced in ISA 3.0, else just return default
/// SDValue.
SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
                                           SelectionDAG &DAG) const {
  const unsigned BytesInVector = 16;
  bool IsLE = Subtarget.isLittleEndian();
  SDLoc dl(N);
  SDValue V1 = N->getOperand(0);
  SDValue V2 = N->getOperand(1);
  unsigned ShiftElts = 0, InsertAtByte = 0;
  bool Swap = false;

  // Shifts required to get the byte we want at element 7.
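  // (Indexed by the byte's current position; e.g. on little-endian a source
  // byte sitting at position 3 needs LittleEndianShifts[3] == 5 element
  // shifts to reach the insertion point.)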
  unsigned LittleEndianShifts[] = {8, 7, 6, 5, 4, 3, 2, 1,
                                   0, 15, 14, 13, 12, 11, 10, 9};
  unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
                                1, 2, 3, 4, 5, 6, 7, 8};

  ArrayRef<int> Mask = N->getMask();
  int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};

  // For each mask element, find out if we're just inserting something
  // from V2 into V1 or vice versa.
  // Possible permutations inserting an element from V2 into V1:
  //   X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
  //   0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
  //   ...
  //   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
  // Inserting from V1 into V2 will be similar, except mask range will be
  // [16,31].

  bool FoundCandidate = false;
  // If both vector operands for the shuffle are the same vector, the mask
  // will contain only elements from the first one and the second one will be
  // undef.
  unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
  // Go through the mask of bytes to find an element that's being moved
  // from one vector to the other.
  for (unsigned i = 0; i < BytesInVector; ++i) {
    unsigned CurrentElement = Mask[i];
    // If 2nd operand is undefined, we should only look for element 7 in the
    // Mask.
    if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
      continue;

    bool OtherElementsInOrder = true;
    // Examine the other elements in the Mask to see if they're in original
    // order.
    for (unsigned j = 0; j < BytesInVector; ++j) {
      if (j == i)
        continue;
      // If CurrentElement is from V1 [0,15], then we expect the rest of the
      // Mask to be from V2 [16,31] and vice versa, unless the 2nd operand is
      // undefined, in which case we always pick from the 1st operand.
      int MaskOffset =
          (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
      if (Mask[j] != OriginalOrder[j] + MaskOffset) {
        OtherElementsInOrder = false;
        break;
      }
    }
    // If other elements are in original order, we record the number of shifts
    // we need to get the element we want into element 7. Also record which byte
    // in the vector we should insert into.
    if (OtherElementsInOrder) {
      // If 2nd operand is undefined, we assume no shifts and no swapping.
      if (V2.isUndef()) {
        ShiftElts = 0;
        Swap = false;
      } else {
        // Only the low 4 bits of CurrentElement are needed for the shift
        // table; whether the byte comes from V1 or V2 is handled by Swap.
        ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
                         : BigEndianShifts[CurrentElement & 0xF];
        Swap = CurrentElement < BytesInVector;
      }
      InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
      FoundCandidate = true;
      break;
    }
  }

  if (!FoundCandidate)
    return SDValue();

  // Candidate found, construct the proper SDAG sequence with VINSERTB,
  // optionally with VECSHL if shift is required.
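  // In effect the result is VECINSERT(V1, VECSHL(V2, V2, ShiftElts),
  // InsertAtByte), with the VECSHL omitted when no rotation is needed and V2
  // tied to V1 when the second shuffle operand was undef.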
  if (Swap)
    std::swap(V1, V2);
  if (V2.isUndef())
    V2 = V1;
  if (ShiftElts) {
    SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
                              DAG.getConstant(ShiftElts, dl, MVT::i32));
    return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl,
                       DAG.getConstant(InsertAtByte, dl, MVT::i32));
  }
  return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2,
                     DAG.getConstant(InsertAtByte, dl, MVT::i32));
}

/// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled
/// by the VINSERTH instruction introduced in ISA 3.0, else just return default
/// SDValue.
SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
                                           SelectionDAG &DAG) const {
  const unsigned NumHalfWords = 8;
  const unsigned BytesInVector = NumHalfWords * 2;
  // Check that the shuffle is on half-words.
  if (!isNByteElemShuffleMask(N, 2, 1))
    return SDValue();

  bool IsLE = Subtarget.isLittleEndian();
  SDLoc dl(N);
  SDValue V1 = N->getOperand(0);
  SDValue V2 = N->getOperand(1);
  unsigned ShiftElts = 0, InsertAtByte = 0;
  bool Swap = false;

  // Shifts required to get the half-word we want at element 3.
  unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
  unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};

  uint32_t Mask = 0;
  uint32_t OriginalOrderLow = 0x1234567;
  uint32_t OriginalOrderHigh = 0x89ABCDEF;
  // Now we look at mask elements 0,2,4,6,8,10,12,14. Pack the mask into a
  // 32-bit space, since we only need a 4-bit nibble per element.
  for (unsigned i = 0; i < NumHalfWords; ++i) {
    unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
    Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift);
  }

  // For each mask element, find out if we're just inserting something
  // from V2 into V1 or vice versa. Possible permutations inserting an element
  // from V2 into V1:
  //   X, 1, 2, 3, 4, 5, 6, 7
  //   0, X, 2, 3, 4, 5, 6, 7
  //   0, 1, X, 3, 4, 5, 6, 7
  //   0, 1, 2, X, 4, 5, 6, 7
  //   0, 1, 2, 3, X, 5, 6, 7
  //   0, 1, 2, 3, 4, X, 6, 7
  //   0, 1, 2, 3, 4, 5, X, 7
  //   0, 1, 2, 3, 4, 5, 6, X
  // Inserting from V1 into V2 will be similar, except mask range will be
  // [8,15].

  bool FoundCandidate = false;
  // Go through the mask of half-words to find an element that's being moved
  // from one vector to the other.
  for (unsigned i = 0; i < NumHalfWords; ++i) {
    unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
    uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF;
    uint32_t MaskOtherElts = ~(0xF << MaskShift);
    uint32_t TargetOrder = 0x0;

    // If both vector operands for the shuffle are the same vector, the mask
    // will contain only elements from the first one and the second one will
    // be undef.
    if (V2.isUndef()) {
      ShiftElts = 0;
      unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
      TargetOrder = OriginalOrderLow;
      Swap = false;
      // Skip if this isn't the correct element or the mask of the other
      // elements doesn't match our expected order.
      if (MaskOneElt == VINSERTHSrcElem &&
          (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
        InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
        FoundCandidate = true;
        break;
      }
    } else { // If both operands are defined.
      // Target order is [8,15] if the current mask element is in [0,7].
      TargetOrder =
          (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
      // Skip if the mask of the other elements doesn't match our expected
      // order.
      if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
        // We only need the low 3 bits for the number of shifts.
        ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
                         : BigEndianShifts[MaskOneElt & 0x7];
        InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
        Swap = MaskOneElt < NumHalfWords;
        FoundCandidate = true;
        break;
      }
    }
  }

  if (!FoundCandidate)
    return SDValue();

  // Candidate found, construct the proper SDAG sequence with VINSERTH,
  // optionally with VECSHL if shift is required.
  if (Swap)
    std::swap(V1, V2);
  if (V2.isUndef())
    V2 = V1;
  SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
  if (ShiftElts) {
    // Double ShiftElts because we're left shifting on the v16i8 type.
    SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
                              DAG.getConstant(2 * ShiftElts, dl, MVT::i32));
    SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl);
    SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
                              DAG.getConstant(InsertAtByte, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
  }
  SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
  SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
                            DAG.getConstant(InsertAtByte, dl, MVT::i32));
  return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
}

/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this
/// is a shuffle we can handle in a single instruction, return it. Otherwise,
/// return the code it can be lowered into. Worst case, it can always be
/// lowered into a vperm.
SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  EVT VT = Op.getValueType();
  bool isLittleEndian = Subtarget.isLittleEndian();

  unsigned ShiftElts, InsertAtByte;
  bool Swap = false;

  // If this is a load-and-splat, we can do that with a single instruction
  // in some cases. However if the load has multiple uses, we don't want to
  // combine it because that will just produce multiple loads.
  const SDValue *InputLoad = getNormalLoadInput(V1);
  if (InputLoad && Subtarget.hasVSX() && V2.isUndef() &&
      (PPC::isSplatShuffleMask(SVOp, 4) || PPC::isSplatShuffleMask(SVOp, 8)) &&
      InputLoad->hasOneUse()) {
    bool IsFourByte = PPC::isSplatShuffleMask(SVOp, 4);
    int SplatIdx =
        PPC::getSplatIdxForPPCMnemonics(SVOp, IsFourByte ? 4 : 8, DAG);

    LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
    // For 4-byte load-and-splat, we need Power9.
    if ((IsFourByte && Subtarget.hasP9Vector()) || !IsFourByte) {
      uint64_t Offset = 0;
      if (IsFourByte)
        Offset = isLittleEndian ? (3 - SplatIdx) * 4 : SplatIdx * 4;
      else
        Offset = isLittleEndian ?
(1 - SplatIdx) * 8 : SplatIdx * 8; 8916 SDValue BasePtr = LD->getBasePtr(); 8917 if (Offset != 0) 8918 BasePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()), 8919 BasePtr, DAG.getIntPtrConstant(Offset, dl)); 8920 SDValue Ops[] = { 8921 LD->getChain(), // Chain 8922 BasePtr, // BasePtr 8923 DAG.getValueType(Op.getValueType()) // VT 8924 }; 8925 SDVTList VTL = 8926 DAG.getVTList(IsFourByte ? MVT::v4i32 : MVT::v2i64, MVT::Other); 8927 SDValue LdSplt = 8928 DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl, VTL, 8929 Ops, LD->getMemoryVT(), LD->getMemOperand()); 8930 if (LdSplt.getValueType() != SVOp->getValueType(0)) 8931 LdSplt = DAG.getBitcast(SVOp->getValueType(0), LdSplt); 8932 return LdSplt; 8933 } 8934 } 8935 if (Subtarget.hasP9Vector() && 8936 PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap, 8937 isLittleEndian)) { 8938 if (Swap) 8939 std::swap(V1, V2); 8940 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 8941 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2); 8942 if (ShiftElts) { 8943 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2, 8944 DAG.getConstant(ShiftElts, dl, MVT::i32)); 8945 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl, 8946 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 8947 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 8948 } 8949 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2, 8950 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 8951 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 8952 } 8953 8954 if (Subtarget.hasP9Altivec()) { 8955 SDValue NewISDNode; 8956 if ((NewISDNode = lowerToVINSERTH(SVOp, DAG))) 8957 return NewISDNode; 8958 8959 if ((NewISDNode = lowerToVINSERTB(SVOp, DAG))) 8960 return NewISDNode; 8961 } 8962 8963 if (Subtarget.hasVSX() && 8964 PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) { 8965 if (Swap) 8966 std::swap(V1, V2); 8967 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 8968 SDValue Conv2 = 8969 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2); 8970 8971 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2, 8972 DAG.getConstant(ShiftElts, dl, MVT::i32)); 8973 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl); 8974 } 8975 8976 if (Subtarget.hasVSX() && 8977 PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) { 8978 if (Swap) 8979 std::swap(V1, V2); 8980 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1); 8981 SDValue Conv2 = 8982 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? 
V1 : V2); 8983 8984 SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2, 8985 DAG.getConstant(ShiftElts, dl, MVT::i32)); 8986 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI); 8987 } 8988 8989 if (Subtarget.hasP9Vector()) { 8990 if (PPC::isXXBRHShuffleMask(SVOp)) { 8991 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 8992 SDValue ReveHWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v8i16, Conv); 8993 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord); 8994 } else if (PPC::isXXBRWShuffleMask(SVOp)) { 8995 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 8996 SDValue ReveWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v4i32, Conv); 8997 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord); 8998 } else if (PPC::isXXBRDShuffleMask(SVOp)) { 8999 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1); 9000 SDValue ReveDWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v2i64, Conv); 9001 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord); 9002 } else if (PPC::isXXBRQShuffleMask(SVOp)) { 9003 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1); 9004 SDValue ReveQWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v1i128, Conv); 9005 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord); 9006 } 9007 } 9008 9009 if (Subtarget.hasVSX()) { 9010 if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) { 9011 int SplatIdx = PPC::getSplatIdxForPPCMnemonics(SVOp, 4, DAG); 9012 9013 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 9014 SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv, 9015 DAG.getConstant(SplatIdx, dl, MVT::i32)); 9016 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat); 9017 } 9018 9019 // Left shifts of 8 bytes are actually swaps. Convert accordingly. 9020 if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) { 9021 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1); 9022 SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv); 9023 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap); 9024 } 9025 } 9026 9027 if (Subtarget.hasQPX()) { 9028 if (VT.getVectorNumElements() != 4) 9029 return SDValue(); 9030 9031 if (V2.isUndef()) V2 = V1; 9032 9033 int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp); 9034 if (AlignIdx != -1) { 9035 return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2, 9036 DAG.getConstant(AlignIdx, dl, MVT::i32)); 9037 } else if (SVOp->isSplat()) { 9038 int SplatIdx = SVOp->getSplatIndex(); 9039 if (SplatIdx >= 4) { 9040 std::swap(V1, V2); 9041 SplatIdx -= 4; 9042 } 9043 9044 return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1, 9045 DAG.getConstant(SplatIdx, dl, MVT::i32)); 9046 } 9047 9048 // Lower this into a qvgpci/qvfperm pair. 9049 9050 // Compute the qvgpci literal 9051 unsigned idx = 0; 9052 for (unsigned i = 0; i < 4; ++i) { 9053 int m = SVOp->getMaskElt(i); 9054 unsigned mm = m >= 0 ? (unsigned) m : i; 9055 idx |= mm << (3-i)*3; 9056 } 9057 9058 SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64, 9059 DAG.getConstant(idx, dl, MVT::i32)); 9060 return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3); 9061 } 9062 9063 // Cases that are handled by instructions that take permute immediates 9064 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 9065 // selected by the instruction selector. 
9066 if (V2.isUndef()) { 9067 if (PPC::isSplatShuffleMask(SVOp, 1) || 9068 PPC::isSplatShuffleMask(SVOp, 2) || 9069 PPC::isSplatShuffleMask(SVOp, 4) || 9070 PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) || 9071 PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) || 9072 PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 || 9073 PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) || 9074 PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) || 9075 PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) || 9076 PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) || 9077 PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) || 9078 PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) || 9079 (Subtarget.hasP8Altivec() && ( 9080 PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) || 9081 PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) || 9082 PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) { 9083 return Op; 9084 } 9085 } 9086 9087 // Altivec has a variety of "shuffle immediates" that take two vector inputs 9088 // and produce a fixed permutation. If any of these match, do not lower to 9089 // VPERM. 9090 unsigned int ShuffleKind = isLittleEndian ? 2 : 0; 9091 if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) || 9092 PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) || 9093 PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 || 9094 PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) || 9095 PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) || 9096 PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) || 9097 PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) || 9098 PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) || 9099 PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) || 9100 (Subtarget.hasP8Altivec() && ( 9101 PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) || 9102 PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) || 9103 PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG)))) 9104 return Op; 9105 9106 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 9107 // perfect shuffle table to emit an optimal matching sequence. 9108 ArrayRef<int> PermMask = SVOp->getMask(); 9109 9110 unsigned PFIndexes[4]; 9111 bool isFourElementShuffle = true; 9112 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number 9113 unsigned EltNo = 8; // Start out undef. 9114 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 9115 if (PermMask[i*4+j] < 0) 9116 continue; // Undef, ignore it. 9117 9118 unsigned ByteSource = PermMask[i*4+j]; 9119 if ((ByteSource & 3) != j) { 9120 isFourElementShuffle = false; 9121 break; 9122 } 9123 9124 if (EltNo == 8) { 9125 EltNo = ByteSource/4; 9126 } else if (EltNo != ByteSource/4) { 9127 isFourElementShuffle = false; 9128 break; 9129 } 9130 } 9131 PFIndexes[i] = EltNo; 9132 } 9133 9134 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 9135 // perfect shuffle vector to determine if it is cost effective to do this as 9136 // discrete instructions, or whether we should use a vperm. 9137 // For now, we skip this for little endian until such time as we have a 9138 // little-endian perfect shuffle table. 9139 if (isFourElementShuffle && !isLittleEndian) { 9140 // Compute the index in the perfect shuffle table. 9141 unsigned PFTableIndex = 9142 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 9143 9144 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 9145 unsigned Cost = (PFEntry >> 30); 9146 9147 // Determining when to avoid vperm is tricky. Many things affect the cost 9148 // of vperm, particularly how many times the perm mask needs to be computed. 
    // For example, if the perm mask can be hoisted out of a loop or is already
    // used (perhaps because there are multiple permutes with the same shuffle
    // mask?), the vperm has a cost of 1. OTOH, hoisting the permute mask out
    // of the loop requires an extra register.
    //
    // As a compromise, we only emit discrete instructions if the shuffle can
    // be generated in 3 or fewer operations. When we have loop information
    // available, if this block is within a loop, we should avoid using vperm
    // for 3-operation perms and use a constant pool load instead.
    if (Cost < 3)
      return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
  }

  // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
  // vector that will get spilled to the constant pool.
  if (V2.isUndef()) V2 = V1;

  // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
  // that it is in input element units, not in bytes. Convert now.

  // For little endian, the order of the input vectors is reversed, and
  // the permutation mask is complemented with respect to 31. This is
  // necessary to produce proper semantics with the big-endian-biased vperm
  // instruction.
  EVT EltVT = V1.getValueType().getVectorElementType();
  unsigned BytesPerElement = EltVT.getSizeInBits()/8;

  SmallVector<SDValue, 16> ResultMask;
  for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
    unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];

    for (unsigned j = 0; j != BytesPerElement; ++j)
      if (isLittleEndian)
        ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
                                             dl, MVT::i32));
      else
        ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
                                             MVT::i32));
  }

  SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
  if (isLittleEndian)
    return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
                       V2, V1, VPermMask);
  else
    return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
                       V1, V2, VPermMask);
}

/// getVectorCompareInfo - Given an intrinsic, return false if it is not a
/// vector comparison. If it is, return true and fill in CompareOpc/isDot with
/// information about the intrinsic.
static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
                                 bool &isDot, const PPCSubtarget &Subtarget) {
  unsigned IntrinsicID =
      cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
  CompareOpc = -1;
  isDot = false;
  switch (IntrinsicID) {
  default:
    return false;
  // Comparison predicates.
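  // Each CompareOpc below is the extended-opcode field of the matching
  // VC-form altivec instruction (e.g. 6 for vcmpequb, 966 for vcmpbfp); for
  // the *_p predicate intrinsics, isDot additionally requests the record
  // form, which updates CR6.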
9211 case Intrinsic::ppc_altivec_vcmpbfp_p: 9212 CompareOpc = 966; 9213 isDot = true; 9214 break; 9215 case Intrinsic::ppc_altivec_vcmpeqfp_p: 9216 CompareOpc = 198; 9217 isDot = true; 9218 break; 9219 case Intrinsic::ppc_altivec_vcmpequb_p: 9220 CompareOpc = 6; 9221 isDot = true; 9222 break; 9223 case Intrinsic::ppc_altivec_vcmpequh_p: 9224 CompareOpc = 70; 9225 isDot = true; 9226 break; 9227 case Intrinsic::ppc_altivec_vcmpequw_p: 9228 CompareOpc = 134; 9229 isDot = true; 9230 break; 9231 case Intrinsic::ppc_altivec_vcmpequd_p: 9232 if (Subtarget.hasP8Altivec()) { 9233 CompareOpc = 199; 9234 isDot = true; 9235 } else 9236 return false; 9237 break; 9238 case Intrinsic::ppc_altivec_vcmpneb_p: 9239 case Intrinsic::ppc_altivec_vcmpneh_p: 9240 case Intrinsic::ppc_altivec_vcmpnew_p: 9241 case Intrinsic::ppc_altivec_vcmpnezb_p: 9242 case Intrinsic::ppc_altivec_vcmpnezh_p: 9243 case Intrinsic::ppc_altivec_vcmpnezw_p: 9244 if (Subtarget.hasP9Altivec()) { 9245 switch (IntrinsicID) { 9246 default: 9247 llvm_unreachable("Unknown comparison intrinsic."); 9248 case Intrinsic::ppc_altivec_vcmpneb_p: 9249 CompareOpc = 7; 9250 break; 9251 case Intrinsic::ppc_altivec_vcmpneh_p: 9252 CompareOpc = 71; 9253 break; 9254 case Intrinsic::ppc_altivec_vcmpnew_p: 9255 CompareOpc = 135; 9256 break; 9257 case Intrinsic::ppc_altivec_vcmpnezb_p: 9258 CompareOpc = 263; 9259 break; 9260 case Intrinsic::ppc_altivec_vcmpnezh_p: 9261 CompareOpc = 327; 9262 break; 9263 case Intrinsic::ppc_altivec_vcmpnezw_p: 9264 CompareOpc = 391; 9265 break; 9266 } 9267 isDot = true; 9268 } else 9269 return false; 9270 break; 9271 case Intrinsic::ppc_altivec_vcmpgefp_p: 9272 CompareOpc = 454; 9273 isDot = true; 9274 break; 9275 case Intrinsic::ppc_altivec_vcmpgtfp_p: 9276 CompareOpc = 710; 9277 isDot = true; 9278 break; 9279 case Intrinsic::ppc_altivec_vcmpgtsb_p: 9280 CompareOpc = 774; 9281 isDot = true; 9282 break; 9283 case Intrinsic::ppc_altivec_vcmpgtsh_p: 9284 CompareOpc = 838; 9285 isDot = true; 9286 break; 9287 case Intrinsic::ppc_altivec_vcmpgtsw_p: 9288 CompareOpc = 902; 9289 isDot = true; 9290 break; 9291 case Intrinsic::ppc_altivec_vcmpgtsd_p: 9292 if (Subtarget.hasP8Altivec()) { 9293 CompareOpc = 967; 9294 isDot = true; 9295 } else 9296 return false; 9297 break; 9298 case Intrinsic::ppc_altivec_vcmpgtub_p: 9299 CompareOpc = 518; 9300 isDot = true; 9301 break; 9302 case Intrinsic::ppc_altivec_vcmpgtuh_p: 9303 CompareOpc = 582; 9304 isDot = true; 9305 break; 9306 case Intrinsic::ppc_altivec_vcmpgtuw_p: 9307 CompareOpc = 646; 9308 isDot = true; 9309 break; 9310 case Intrinsic::ppc_altivec_vcmpgtud_p: 9311 if (Subtarget.hasP8Altivec()) { 9312 CompareOpc = 711; 9313 isDot = true; 9314 } else 9315 return false; 9316 break; 9317 9318 // VSX predicate comparisons use the same infrastructure 9319 case Intrinsic::ppc_vsx_xvcmpeqdp_p: 9320 case Intrinsic::ppc_vsx_xvcmpgedp_p: 9321 case Intrinsic::ppc_vsx_xvcmpgtdp_p: 9322 case Intrinsic::ppc_vsx_xvcmpeqsp_p: 9323 case Intrinsic::ppc_vsx_xvcmpgesp_p: 9324 case Intrinsic::ppc_vsx_xvcmpgtsp_p: 9325 if (Subtarget.hasVSX()) { 9326 switch (IntrinsicID) { 9327 case Intrinsic::ppc_vsx_xvcmpeqdp_p: 9328 CompareOpc = 99; 9329 break; 9330 case Intrinsic::ppc_vsx_xvcmpgedp_p: 9331 CompareOpc = 115; 9332 break; 9333 case Intrinsic::ppc_vsx_xvcmpgtdp_p: 9334 CompareOpc = 107; 9335 break; 9336 case Intrinsic::ppc_vsx_xvcmpeqsp_p: 9337 CompareOpc = 67; 9338 break; 9339 case Intrinsic::ppc_vsx_xvcmpgesp_p: 9340 CompareOpc = 83; 9341 break; 9342 case Intrinsic::ppc_vsx_xvcmpgtsp_p: 9343 CompareOpc = 75; 
9344 break; 9345 } 9346 isDot = true; 9347 } else 9348 return false; 9349 break; 9350 9351 // Normal Comparisons. 9352 case Intrinsic::ppc_altivec_vcmpbfp: 9353 CompareOpc = 966; 9354 break; 9355 case Intrinsic::ppc_altivec_vcmpeqfp: 9356 CompareOpc = 198; 9357 break; 9358 case Intrinsic::ppc_altivec_vcmpequb: 9359 CompareOpc = 6; 9360 break; 9361 case Intrinsic::ppc_altivec_vcmpequh: 9362 CompareOpc = 70; 9363 break; 9364 case Intrinsic::ppc_altivec_vcmpequw: 9365 CompareOpc = 134; 9366 break; 9367 case Intrinsic::ppc_altivec_vcmpequd: 9368 if (Subtarget.hasP8Altivec()) 9369 CompareOpc = 199; 9370 else 9371 return false; 9372 break; 9373 case Intrinsic::ppc_altivec_vcmpneb: 9374 case Intrinsic::ppc_altivec_vcmpneh: 9375 case Intrinsic::ppc_altivec_vcmpnew: 9376 case Intrinsic::ppc_altivec_vcmpnezb: 9377 case Intrinsic::ppc_altivec_vcmpnezh: 9378 case Intrinsic::ppc_altivec_vcmpnezw: 9379 if (Subtarget.hasP9Altivec()) 9380 switch (IntrinsicID) { 9381 default: 9382 llvm_unreachable("Unknown comparison intrinsic."); 9383 case Intrinsic::ppc_altivec_vcmpneb: 9384 CompareOpc = 7; 9385 break; 9386 case Intrinsic::ppc_altivec_vcmpneh: 9387 CompareOpc = 71; 9388 break; 9389 case Intrinsic::ppc_altivec_vcmpnew: 9390 CompareOpc = 135; 9391 break; 9392 case Intrinsic::ppc_altivec_vcmpnezb: 9393 CompareOpc = 263; 9394 break; 9395 case Intrinsic::ppc_altivec_vcmpnezh: 9396 CompareOpc = 327; 9397 break; 9398 case Intrinsic::ppc_altivec_vcmpnezw: 9399 CompareOpc = 391; 9400 break; 9401 } 9402 else 9403 return false; 9404 break; 9405 case Intrinsic::ppc_altivec_vcmpgefp: 9406 CompareOpc = 454; 9407 break; 9408 case Intrinsic::ppc_altivec_vcmpgtfp: 9409 CompareOpc = 710; 9410 break; 9411 case Intrinsic::ppc_altivec_vcmpgtsb: 9412 CompareOpc = 774; 9413 break; 9414 case Intrinsic::ppc_altivec_vcmpgtsh: 9415 CompareOpc = 838; 9416 break; 9417 case Intrinsic::ppc_altivec_vcmpgtsw: 9418 CompareOpc = 902; 9419 break; 9420 case Intrinsic::ppc_altivec_vcmpgtsd: 9421 if (Subtarget.hasP8Altivec()) 9422 CompareOpc = 967; 9423 else 9424 return false; 9425 break; 9426 case Intrinsic::ppc_altivec_vcmpgtub: 9427 CompareOpc = 518; 9428 break; 9429 case Intrinsic::ppc_altivec_vcmpgtuh: 9430 CompareOpc = 582; 9431 break; 9432 case Intrinsic::ppc_altivec_vcmpgtuw: 9433 CompareOpc = 646; 9434 break; 9435 case Intrinsic::ppc_altivec_vcmpgtud: 9436 if (Subtarget.hasP8Altivec()) 9437 CompareOpc = 711; 9438 else 9439 return false; 9440 break; 9441 } 9442 return true; 9443 } 9444 9445 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 9446 /// lower, do it, otherwise return null. 9447 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 9448 SelectionDAG &DAG) const { 9449 unsigned IntrinsicID = 9450 cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 9451 9452 SDLoc dl(Op); 9453 9454 if (IntrinsicID == Intrinsic::thread_pointer) { 9455 // Reads the thread pointer register, used for __builtin_thread_pointer. 9456 if (Subtarget.isPPC64()) 9457 return DAG.getRegister(PPC::X13, MVT::i64); 9458 return DAG.getRegister(PPC::R2, MVT::i32); 9459 } 9460 9461 // If this is a lowered altivec predicate compare, CompareOpc is set to the 9462 // opcode number of the comparison. 9463 int CompareOpc; 9464 bool isDot; 9465 if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget)) 9466 return SDValue(); // Don't custom lower most intrinsics. 9467 9468 // If this is a non-dot comparison, make the VCMP node and we are done. 
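  // (For a dot comparison we also need the CR6 unpacking emitted below: after
  // MFOCRF, CR6's LT..SO bits land in GPR bits 7..4, which is what the
  // 8 - (3 - BitNo) shift amount extracts.)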
9469 if (!isDot) { 9470 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(), 9471 Op.getOperand(1), Op.getOperand(2), 9472 DAG.getConstant(CompareOpc, dl, MVT::i32)); 9473 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp); 9474 } 9475 9476 // Create the PPCISD altivec 'dot' comparison node. 9477 SDValue Ops[] = { 9478 Op.getOperand(2), // LHS 9479 Op.getOperand(3), // RHS 9480 DAG.getConstant(CompareOpc, dl, MVT::i32) 9481 }; 9482 EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue }; 9483 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 9484 9485 // Now that we have the comparison, emit a copy from the CR to a GPR. 9486 // This is flagged to the above dot comparison. 9487 SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32, 9488 DAG.getRegister(PPC::CR6, MVT::i32), 9489 CompNode.getValue(1)); 9490 9491 // Unpack the result based on how the target uses it. 9492 unsigned BitNo; // Bit # of CR6. 9493 bool InvertBit; // Invert result? 9494 switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) { 9495 default: // Can't happen, don't crash on invalid number though. 9496 case 0: // Return the value of the EQ bit of CR6. 9497 BitNo = 0; InvertBit = false; 9498 break; 9499 case 1: // Return the inverted value of the EQ bit of CR6. 9500 BitNo = 0; InvertBit = true; 9501 break; 9502 case 2: // Return the value of the LT bit of CR6. 9503 BitNo = 2; InvertBit = false; 9504 break; 9505 case 3: // Return the inverted value of the LT bit of CR6. 9506 BitNo = 2; InvertBit = true; 9507 break; 9508 } 9509 9510 // Shift the bit into the low position. 9511 Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags, 9512 DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32)); 9513 // Isolate the bit. 9514 Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags, 9515 DAG.getConstant(1, dl, MVT::i32)); 9516 9517 // If we are supposed to, toggle the bit. 9518 if (InvertBit) 9519 Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags, 9520 DAG.getConstant(1, dl, MVT::i32)); 9521 return Flags; 9522 } 9523 9524 SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op, 9525 SelectionDAG &DAG) const { 9526 // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to 9527 // the beginning of the argument list. 9528 int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1; 9529 SDLoc DL(Op); 9530 switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) { 9531 case Intrinsic::ppc_cfence: { 9532 assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument."); 9533 assert(Subtarget.isPPC64() && "Only 64-bit is supported for now."); 9534 return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other, 9535 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, 9536 Op.getOperand(ArgStart + 1)), 9537 Op.getOperand(0)), 9538 0); 9539 } 9540 default: 9541 break; 9542 } 9543 return SDValue(); 9544 } 9545 9546 SDValue PPCTargetLowering::LowerREM(SDValue Op, SelectionDAG &DAG) const { 9547 // Check for a DIV with the same operands as this REM. 9548 for (auto UI : Op.getOperand(1)->uses()) { 9549 if ((Op.getOpcode() == ISD::SREM && UI->getOpcode() == ISD::SDIV) || 9550 (Op.getOpcode() == ISD::UREM && UI->getOpcode() == ISD::UDIV)) 9551 if (UI->getOperand(0) == Op.getOperand(0) && 9552 UI->getOperand(1) == Op.getOperand(1)) 9553 return SDValue(); 9554 } 9555 return Op; 9556 } 9557 9558 // Lower scalar BSWAP64 to xxbrd. 
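// The splat of the input GPR into both doublewords (mtvsrdd), the v2i64
// byte-reverse (xxbrd), and the move back to a GPR (mfvsrd) are each a single
// instruction; which doubleword is extracted depends on endianness.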
9559 SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const { 9560 SDLoc dl(Op); 9561 // MTVSRDD 9562 Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0), 9563 Op.getOperand(0)); 9564 // XXBRD 9565 Op = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v2i64, Op); 9566 // MFVSRD 9567 int VectorIndex = 0; 9568 if (Subtarget.isLittleEndian()) 9569 VectorIndex = 1; 9570 Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op, 9571 DAG.getTargetConstant(VectorIndex, dl, MVT::i32)); 9572 return Op; 9573 } 9574 9575 // ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be 9576 // compared to a value that is atomically loaded (atomic loads zero-extend). 9577 SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, 9578 SelectionDAG &DAG) const { 9579 assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP && 9580 "Expecting an atomic compare-and-swap here."); 9581 SDLoc dl(Op); 9582 auto *AtomicNode = cast<AtomicSDNode>(Op.getNode()); 9583 EVT MemVT = AtomicNode->getMemoryVT(); 9584 if (MemVT.getSizeInBits() >= 32) 9585 return Op; 9586 9587 SDValue CmpOp = Op.getOperand(2); 9588 // If this is already correctly zero-extended, leave it alone. 9589 auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits()); 9590 if (DAG.MaskedValueIsZero(CmpOp, HighBits)) 9591 return Op; 9592 9593 // Clear the high bits of the compare operand. 9594 unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1; 9595 SDValue NewCmpOp = 9596 DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp, 9597 DAG.getConstant(MaskVal, dl, MVT::i32)); 9598 9599 // Replace the existing compare operand with the properly zero-extended one. 9600 SmallVector<SDValue, 4> Ops; 9601 for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++) 9602 Ops.push_back(AtomicNode->getOperand(i)); 9603 Ops[2] = NewCmpOp; 9604 MachineMemOperand *MMO = AtomicNode->getMemOperand(); 9605 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other); 9606 auto NodeTy = 9607 (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16; 9608 return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO); 9609 } 9610 9611 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, 9612 SelectionDAG &DAG) const { 9613 SDLoc dl(Op); 9614 // Create a stack slot that is 16-byte aligned. 9615 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 9616 int FrameIdx = MFI.CreateStackObject(16, 16, false); 9617 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 9618 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 9619 9620 // Store the input value into Value#0 of the stack slot. 9621 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 9622 MachinePointerInfo()); 9623 // Load it out. 9624 return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo()); 9625 } 9626 9627 SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, 9628 SelectionDAG &DAG) const { 9629 assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT && 9630 "Should only be called for ISD::INSERT_VECTOR_ELT"); 9631 9632 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 9633 // We have legal lowering for constant indices but not for variable ones. 9634 if (!C) 9635 return SDValue(); 9636 9637 EVT VT = Op.getValueType(); 9638 SDLoc dl(Op); 9639 SDValue V1 = Op.getOperand(0); 9640 SDValue V2 = Op.getOperand(1); 9641 // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types. 
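  // For example, inserting into element 3 of a v8i16 yields InsertAtByte
  // 3 * 2 == 6 on big endian and (16 - 2) - 6 == 8 on little endian, per the
  // math below.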
9642 if (VT == MVT::v8i16 || VT == MVT::v16i8) { 9643 SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2); 9644 unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8; 9645 unsigned InsertAtElement = C->getZExtValue(); 9646 unsigned InsertAtByte = InsertAtElement * BytesInEachElement; 9647 if (Subtarget.isLittleEndian()) { 9648 InsertAtByte = (16 - BytesInEachElement) - InsertAtByte; 9649 } 9650 return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz, 9651 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 9652 } 9653 return Op; 9654 } 9655 9656 SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, 9657 SelectionDAG &DAG) const { 9658 SDLoc dl(Op); 9659 SDNode *N = Op.getNode(); 9660 9661 assert(N->getOperand(0).getValueType() == MVT::v4i1 && 9662 "Unknown extract_vector_elt type"); 9663 9664 SDValue Value = N->getOperand(0); 9665 9666 // The first part of this is like the store lowering except that we don't 9667 // need to track the chain. 9668 9669 // The values are now known to be -1 (false) or 1 (true). To convert this 9670 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 9671 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 9672 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 9673 9674 // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to 9675 // understand how to form the extending load. 9676 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64); 9677 9678 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 9679 9680 // Now convert to an integer and store. 9681 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 9682 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32), 9683 Value); 9684 9685 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 9686 int FrameIdx = MFI.CreateStackObject(16, 16, false); 9687 MachinePointerInfo PtrInfo = 9688 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 9689 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 9690 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 9691 9692 SDValue StoreChain = DAG.getEntryNode(); 9693 SDValue Ops[] = {StoreChain, 9694 DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32), 9695 Value, FIdx}; 9696 SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other); 9697 9698 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, 9699 dl, VTs, Ops, MVT::v4i32, PtrInfo); 9700 9701 // Extract the value requested. 9702 unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 9703 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 9704 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 9705 9706 SDValue IntVal = 9707 DAG.getLoad(MVT::i32, dl, StoreChain, Idx, PtrInfo.getWithOffset(Offset)); 9708 9709 if (!Subtarget.useCRBits()) 9710 return IntVal; 9711 9712 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal); 9713 } 9714 9715 /// Lowering for QPX v4i1 loads 9716 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op, 9717 SelectionDAG &DAG) const { 9718 SDLoc dl(Op); 9719 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode()); 9720 SDValue LoadChain = LN->getChain(); 9721 SDValue BasePtr = LN->getBasePtr(); 9722 9723 if (Op.getValueType() == MVT::v4f64 || 9724 Op.getValueType() == MVT::v4f32) { 9725 EVT MemVT = LN->getMemoryVT(); 9726 unsigned Alignment = LN->getAlignment(); 9727 9728 // If this load is properly aligned, then it is legal. 
9729 if (Alignment >= MemVT.getStoreSize()) 9730 return Op; 9731 9732 EVT ScalarVT = Op.getValueType().getScalarType(), 9733 ScalarMemVT = MemVT.getScalarType(); 9734 unsigned Stride = ScalarMemVT.getStoreSize(); 9735 9736 SDValue Vals[4], LoadChains[4]; 9737 for (unsigned Idx = 0; Idx < 4; ++Idx) { 9738 SDValue Load; 9739 if (ScalarVT != ScalarMemVT) 9740 Load = DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain, 9741 BasePtr, 9742 LN->getPointerInfo().getWithOffset(Idx * Stride), 9743 ScalarMemVT, MinAlign(Alignment, Idx * Stride), 9744 LN->getMemOperand()->getFlags(), LN->getAAInfo()); 9745 else 9746 Load = DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr, 9747 LN->getPointerInfo().getWithOffset(Idx * Stride), 9748 MinAlign(Alignment, Idx * Stride), 9749 LN->getMemOperand()->getFlags(), LN->getAAInfo()); 9750 9751 if (Idx == 0 && LN->isIndexed()) { 9752 assert(LN->getAddressingMode() == ISD::PRE_INC && 9753 "Unknown addressing mode on vector load"); 9754 Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(), 9755 LN->getAddressingMode()); 9756 } 9757 9758 Vals[Idx] = Load; 9759 LoadChains[Idx] = Load.getValue(1); 9760 9761 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 9762 DAG.getConstant(Stride, dl, 9763 BasePtr.getValueType())); 9764 } 9765 9766 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 9767 SDValue Value = DAG.getBuildVector(Op.getValueType(), dl, Vals); 9768 9769 if (LN->isIndexed()) { 9770 SDValue RetOps[] = { Value, Vals[0].getValue(1), TF }; 9771 return DAG.getMergeValues(RetOps, dl); 9772 } 9773 9774 SDValue RetOps[] = { Value, TF }; 9775 return DAG.getMergeValues(RetOps, dl); 9776 } 9777 9778 assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower"); 9779 assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported"); 9780 9781 // To lower v4i1 from a byte array, we load the byte elements of the 9782 // vector and then reuse the BUILD_VECTOR logic. 9783 9784 SDValue VectElmts[4], VectElmtChains[4]; 9785 for (unsigned i = 0; i < 4; ++i) { 9786 SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType()); 9787 Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx); 9788 9789 VectElmts[i] = DAG.getExtLoad( 9790 ISD::EXTLOAD, dl, MVT::i32, LoadChain, Idx, 9791 LN->getPointerInfo().getWithOffset(i), MVT::i8, 9792 /* Alignment = */ 1, LN->getMemOperand()->getFlags(), LN->getAAInfo()); 9793 VectElmtChains[i] = VectElmts[i].getValue(1); 9794 } 9795 9796 LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains); 9797 SDValue Value = DAG.getBuildVector(MVT::v4i1, dl, VectElmts); 9798 9799 SDValue RVals[] = { Value, LoadChain }; 9800 return DAG.getMergeValues(RVals, dl); 9801 } 9802 9803 /// Lowering for QPX v4i1 stores 9804 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op, 9805 SelectionDAG &DAG) const { 9806 SDLoc dl(Op); 9807 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode()); 9808 SDValue StoreChain = SN->getChain(); 9809 SDValue BasePtr = SN->getBasePtr(); 9810 SDValue Value = SN->getValue(); 9811 9812 if (Value.getValueType() == MVT::v4f64 || 9813 Value.getValueType() == MVT::v4f32) { 9814 EVT MemVT = SN->getMemoryVT(); 9815 unsigned Alignment = SN->getAlignment(); 9816 9817 // If this store is properly aligned, then it is legal. 
9818 if (Alignment >= MemVT.getStoreSize()) 9819 return Op; 9820 9821 EVT ScalarVT = Value.getValueType().getScalarType(), 9822 ScalarMemVT = MemVT.getScalarType(); 9823 unsigned Stride = ScalarMemVT.getStoreSize(); 9824 9825 SDValue Stores[4]; 9826 for (unsigned Idx = 0; Idx < 4; ++Idx) { 9827 SDValue Ex = DAG.getNode( 9828 ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value, 9829 DAG.getConstant(Idx, dl, getVectorIdxTy(DAG.getDataLayout()))); 9830 SDValue Store; 9831 if (ScalarVT != ScalarMemVT) 9832 Store = 9833 DAG.getTruncStore(StoreChain, dl, Ex, BasePtr, 9834 SN->getPointerInfo().getWithOffset(Idx * Stride), 9835 ScalarMemVT, MinAlign(Alignment, Idx * Stride), 9836 SN->getMemOperand()->getFlags(), SN->getAAInfo()); 9837 else 9838 Store = DAG.getStore(StoreChain, dl, Ex, BasePtr, 9839 SN->getPointerInfo().getWithOffset(Idx * Stride), 9840 MinAlign(Alignment, Idx * Stride), 9841 SN->getMemOperand()->getFlags(), SN->getAAInfo()); 9842 9843 if (Idx == 0 && SN->isIndexed()) { 9844 assert(SN->getAddressingMode() == ISD::PRE_INC && 9845 "Unknown addressing mode on vector store"); 9846 Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(), 9847 SN->getAddressingMode()); 9848 } 9849 9850 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 9851 DAG.getConstant(Stride, dl, 9852 BasePtr.getValueType())); 9853 Stores[Idx] = Store; 9854 } 9855 9856 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 9857 9858 if (SN->isIndexed()) { 9859 SDValue RetOps[] = { TF, Stores[0].getValue(1) }; 9860 return DAG.getMergeValues(RetOps, dl); 9861 } 9862 9863 return TF; 9864 } 9865 9866 assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported"); 9867 assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower"); 9868 9869 // The values are now known to be -1 (false) or 1 (true). To convert this 9870 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 9871 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 9872 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 9873 9874 // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to 9875 // understand how to form the extending load. 9876 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64); 9877 9878 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 9879 9880 // Now convert to an integer and store. 9881 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 9882 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32), 9883 Value); 9884 9885 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 9886 int FrameIdx = MFI.CreateStackObject(16, 16, false); 9887 MachinePointerInfo PtrInfo = 9888 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 9889 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 9890 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 9891 9892 SDValue Ops[] = {StoreChain, 9893 DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32), 9894 Value, FIdx}; 9895 SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other); 9896 9897 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, 9898 dl, VTs, Ops, MVT::v4i32, PtrInfo); 9899 9900 // Move data into the byte array. 
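  // The stack slot now holds the v4i32 image of the mask; reload its four
  // words and truncating-store the low byte of each to form the final v4i1
  // byte array.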
9901 SDValue Loads[4], LoadChains[4]; 9902 for (unsigned i = 0; i < 4; ++i) { 9903 unsigned Offset = 4*i; 9904 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 9905 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 9906 9907 Loads[i] = DAG.getLoad(MVT::i32, dl, StoreChain, Idx, 9908 PtrInfo.getWithOffset(Offset)); 9909 LoadChains[i] = Loads[i].getValue(1); 9910 } 9911 9912 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 9913 9914 SDValue Stores[4]; 9915 for (unsigned i = 0; i < 4; ++i) { 9916 SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType()); 9917 Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx); 9918 9919 Stores[i] = DAG.getTruncStore( 9920 StoreChain, dl, Loads[i], Idx, SN->getPointerInfo().getWithOffset(i), 9921 MVT::i8, /* Alignment = */ 1, SN->getMemOperand()->getFlags(), 9922 SN->getAAInfo()); 9923 } 9924 9925 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 9926 9927 return StoreChain; 9928 } 9929 9930 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { 9931 SDLoc dl(Op); 9932 if (Op.getValueType() == MVT::v4i32) { 9933 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 9934 9935 SDValue Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG, dl); 9936 SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);//+16 as shift amt. 9937 9938 SDValue RHSSwap = // = vrlw RHS, 16 9939 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl); 9940 9941 // Shrinkify inputs to v8i16. 9942 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS); 9943 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS); 9944 RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap); 9945 9946 // Low parts multiplied together, generating 32-bit results (we ignore the 9947 // top parts). 9948 SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh, 9949 LHS, RHS, DAG, dl, MVT::v4i32); 9950 9951 SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm, 9952 LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32); 9953 // Shift the high parts up 16 bits. 9954 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, 9955 Neg16, DAG, dl); 9956 return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd); 9957 } else if (Op.getValueType() == MVT::v8i16) { 9958 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 9959 9960 SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl); 9961 9962 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm, 9963 LHS, RHS, Zero, DAG, dl); 9964 } else if (Op.getValueType() == MVT::v16i8) { 9965 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 9966 bool isLittleEndian = Subtarget.isLittleEndian(); 9967 9968 // Multiply the even 8-bit parts, producing 16-bit sums. 9969 SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub, 9970 LHS, RHS, DAG, dl, MVT::v8i16); 9971 EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts); 9972 9973 // Multiply the odd 8-bit parts, producing 16-bit sums. 9974 SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub, 9975 LHS, RHS, DAG, dl, MVT::v8i16); 9976 OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts); 9977 9978 // Merge the results together. Because vmuleub and vmuloub are 9979 // instructions with a big-endian bias, we must reverse the 9980 // element numbering and reverse the meaning of "odd" and "even" 9981 // when generating little endian code. 
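    // Concretely, the interleaving mask built below is <1,17,3,19,...> (the
    // low bytes of each 16-bit product) on big endian and <0,16,2,18,...> on
    // little endian.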
    int Ops[16];
    for (unsigned i = 0; i != 8; ++i) {
      if (isLittleEndian) {
        Ops[i*2  ] = 2*i;
        Ops[i*2+1] = 2*i+16;
      } else {
        Ops[i*2  ] = 2*i+1;
        Ops[i*2+1] = 2*i+1+16;
      }
    }
    if (isLittleEndian)
      return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
    else
      return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
  } else {
    llvm_unreachable("Unknown mul to lower!");
  }
}

SDValue PPCTargetLowering::LowerABS(SDValue Op, SelectionDAG &DAG) const {

  assert(Op.getOpcode() == ISD::ABS && "Should only be called for ISD::ABS");

  EVT VT = Op.getValueType();
  assert(VT.isVector() &&
         "Only set vector abs as custom, scalar abs shouldn't reach here!");
  assert((VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
          VT == MVT::v16i8) &&
         "Unexpected vector element type!");
  assert((VT != MVT::v2i64 || Subtarget.hasP8Altivec()) &&
         "Current subtarget doesn't support smax v2i64!");

  // Vector abs can be lowered as:
  // abs x
  // ==>
  // y = -x
  // smax(x, y)

  SDLoc dl(Op);
  SDValue X = Op.getOperand(0);
  SDValue Zero = DAG.getConstant(0, dl, VT);
  SDValue Y = DAG.getNode(ISD::SUB, dl, VT, Zero, X);

  // The SMAX patch (https://reviews.llvm.org/D47332) hasn't landed yet, so
  // use the intrinsics for now.
  // TODO: Use SMAX directly once the SMAX patch lands.
  Intrinsic::ID BifID = Intrinsic::ppc_altivec_vmaxsw;
  if (VT == MVT::v2i64)
    BifID = Intrinsic::ppc_altivec_vmaxsd;
  else if (VT == MVT::v8i16)
    BifID = Intrinsic::ppc_altivec_vmaxsh;
  else if (VT == MVT::v16i8)
    BifID = Intrinsic::ppc_altivec_vmaxsb;

  return BuildIntrinsicOp(BifID, X, Y, DAG, dl, VT);
}

// Custom lowering for fpext v2f32 to v2f64
SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {

  assert(Op.getOpcode() == ISD::FP_EXTEND &&
         "Should only be called for ISD::FP_EXTEND");

  // We only want to custom lower an extend from v2f32 to v2f64.
  if (Op.getValueType() != MVT::v2f64 ||
      Op.getOperand(0).getValueType() != MVT::v2f32)
    return SDValue();

  SDLoc dl(Op);
  SDValue Op0 = Op.getOperand(0);

  switch (Op0.getOpcode()) {
  default:
    return SDValue();
  case ISD::EXTRACT_SUBVECTOR: {
    assert(Op0.getNumOperands() == 2 &&
           isa<ConstantSDNode>(Op0->getOperand(1)) &&
           "Node should have 2 operands with second one being a constant!");

    if (Op0.getOperand(0).getValueType() != MVT::v4f32)
      return SDValue();

    // Custom lowering is only done for the high or low doubleword.
    int Idx = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
    if (Idx % 2 != 0)
      return SDValue();

    // Since the input is v4f32, at this point Idx is either 0 or 2.
    // Shift to get the doubleword position we want.
    int DWord = Idx >> 1;

    // High and low word positions are different on little endian.
    if (Subtarget.isLittleEndian())
      DWord ^= 0x1;

    return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64,
                       Op0.getOperand(0), DAG.getConstant(DWord, dl, MVT::i32));
  }
  case ISD::FADD:
  case ISD::FMUL:
  case ISD::FSUB: {
    SDValue NewLoad[2];
    for (unsigned i = 0, ie = Op0.getNumOperands(); i != ie; ++i) {
      // Ensure both inputs are loads.
      SDValue LdOp = Op0.getOperand(i);
      if (LdOp.getOpcode() != ISD::LOAD)
        return SDValue();
      // Generate a new load node.
      LoadSDNode *LD = cast<LoadSDNode>(LdOp);
      SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
      NewLoad[i] = DAG.getMemIntrinsicNode(
          PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other),
          LoadOps, LD->getMemoryVT(), LD->getMemOperand());
    }
    SDValue NewOp =
        DAG.getNode(Op0.getOpcode(), SDLoc(Op0), MVT::v4f32, NewLoad[0],
                    NewLoad[1], Op0.getNode()->getFlags());
    return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewOp,
                       DAG.getConstant(0, dl, MVT::i32));
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op0);
    SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
    SDValue NewLd = DAG.getMemIntrinsicNode(
        PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
        LD->getMemoryVT(), LD->getMemOperand());
    return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewLd,
                       DAG.getConstant(0, dl, MVT::i32));
  }
  }
  llvm_unreachable("ERROR: Should return for all cases within switch.");
}

/// LowerOperation - Provide custom lowering hooks for some operations.
///
SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Wasn't expecting to be able to lower this!");
  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
  case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
  case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
  case ISD::JumpTable: return LowerJumpTable(Op, DAG);
  case ISD::SETCC: return LowerSETCC(Op, DAG);
  case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);

  // Variable argument lowering.
  case ISD::VASTART: return LowerVASTART(Op, DAG);
  case ISD::VAARG: return LowerVAARG(Op, DAG);
  case ISD::VACOPY: return LowerVACOPY(Op, DAG);

  case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::GET_DYNAMIC_AREA_OFFSET:
    return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);

  // Exception handling lowering.
10140 case ISD::EH_DWARF_CFA: return LowerEH_DWARF_CFA(Op, DAG); 10141 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG); 10142 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG); 10143 10144 case ISD::LOAD: return LowerLOAD(Op, DAG); 10145 case ISD::STORE: return LowerSTORE(Op, DAG); 10146 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG); 10147 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 10148 case ISD::FP_TO_UINT: 10149 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, SDLoc(Op)); 10150 case ISD::UINT_TO_FP: 10151 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 10152 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 10153 10154 // Lower 64-bit shifts. 10155 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 10156 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 10157 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 10158 10159 // Vector-related lowering. 10160 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 10161 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 10162 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 10163 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 10164 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 10165 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 10166 case ISD::MUL: return LowerMUL(Op, DAG); 10167 case ISD::ABS: return LowerABS(Op, DAG); 10168 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG); 10169 10170 // For counter-based loop handling. 10171 case ISD::INTRINSIC_W_CHAIN: return SDValue(); 10172 10173 case ISD::BITCAST: return LowerBITCAST(Op, DAG); 10174 10175 // Frame & Return address. 10176 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 10177 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 10178 10179 case ISD::INTRINSIC_VOID: 10180 return LowerINTRINSIC_VOID(Op, DAG); 10181 case ISD::SREM: 10182 case ISD::UREM: 10183 return LowerREM(Op, DAG); 10184 case ISD::BSWAP: 10185 return LowerBSWAP(Op, DAG); 10186 case ISD::ATOMIC_CMP_SWAP: 10187 return LowerATOMIC_CMP_SWAP(Op, DAG); 10188 } 10189 } 10190 10191 void PPCTargetLowering::ReplaceNodeResults(SDNode *N, 10192 SmallVectorImpl<SDValue>&Results, 10193 SelectionDAG &DAG) const { 10194 SDLoc dl(N); 10195 switch (N->getOpcode()) { 10196 default: 10197 llvm_unreachable("Do not know how to custom type legalize this operation!"); 10198 case ISD::READCYCLECOUNTER: { 10199 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 10200 SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0)); 10201 10202 Results.push_back(RTB); 10203 Results.push_back(RTB.getValue(1)); 10204 Results.push_back(RTB.getValue(2)); 10205 break; 10206 } 10207 case ISD::INTRINSIC_W_CHAIN: { 10208 if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 10209 Intrinsic::loop_decrement) 10210 break; 10211 10212 assert(N->getValueType(0) == MVT::i1 && 10213 "Unexpected result type for CTR decrement intrinsic"); 10214 EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), 10215 N->getValueType(0)); 10216 SDVTList VTs = DAG.getVTList(SVT, MVT::Other); 10217 SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0), 10218 N->getOperand(1)); 10219 10220 Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt)); 10221 Results.push_back(NewInt.getValue(1)); 10222 break; 10223 } 10224 case ISD::VAARG: { 10225 if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64()) 10226 return; 10227 10228 EVT VT = 
N->getValueType(0);
10229
10230 if (VT == MVT::i64) {
10231 SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);
10232
10233 Results.push_back(NewNode);
10234 Results.push_back(NewNode.getValue(1));
10235 }
10236 return;
10237 }
10238 case ISD::FP_TO_SINT:
10239 case ISD::FP_TO_UINT:
10240 // LowerFP_TO_INT() can only handle f32 and f64.
10241 if (N->getOperand(0).getValueType() == MVT::ppcf128)
10242 return;
10243 Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
10244 return;
10245 case ISD::TRUNCATE: {
10246 EVT TrgVT = N->getValueType(0);
10247 EVT OpVT = N->getOperand(0).getValueType();
10248 if (TrgVT.isVector() &&
10249 isOperationCustom(N->getOpcode(), TrgVT) &&
10250 OpVT.getSizeInBits() <= 128 &&
10251 isPowerOf2_32(OpVT.getVectorElementType().getSizeInBits()))
10252 Results.push_back(LowerTRUNCATEVector(SDValue(N, 0), DAG));
10253 return;
10254 }
10255 case ISD::BITCAST:
10256 // Don't handle bitcast here.
10257 return;
10258 }
10259 }
10260
10261 //===----------------------------------------------------------------------===//
10262 // Other Lowering Code
10263 //===----------------------------------------------------------------------===//
10264
10265 static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
10266 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
10267 Function *Func = Intrinsic::getDeclaration(M, Id);
10268 return Builder.CreateCall(Func, {});
10269 }
10270
10271 // The mappings for emitLeading/TrailingFence are taken from
10272 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
10273 Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
10274 Instruction *Inst,
10275 AtomicOrdering Ord) const {
10276 if (Ord == AtomicOrdering::SequentiallyConsistent)
10277 return callIntrinsic(Builder, Intrinsic::ppc_sync);
10278 if (isReleaseOrStronger(Ord))
10279 return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
10280 return nullptr;
10281 }
10282
10283 Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
10284 Instruction *Inst,
10285 AtomicOrdering Ord) const {
10286 if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
10287 // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
10288 // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
10289 // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
10290 if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
10291 return Builder.CreateCall(
10292 Intrinsic::getDeclaration(
10293 Builder.GetInsertBlock()->getParent()->getParent(),
10294 Intrinsic::ppc_cfence, {Inst->getType()}),
10295 {Inst});
10296 // FIXME: Can use isync for rmw operations.
10297 return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
10298 }
10299 return nullptr;
10300 }
10301
10302 MachineBasicBlock *
10303 PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
10304 unsigned AtomicSize,
10305 unsigned BinOpcode,
10306 unsigned CmpOpcode,
10307 unsigned CmpPred) const {
10308 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
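// Illustrative note (not from the upstream comments): for IR such as
//   %old = atomicrmw add i32* %p, i32 %v monotonic
// instruction selection produces the ATOMIC_LOAD_ADD_I32 pseudo, and this
// inserter expands it into the l[wd]arx / st[wd]cx. retry loop sketched in
// the block comments below; any sync/lwsync fences the ordering requires
// are emitted separately by emitLeadingFence/emitTrailingFence above.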
10309 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 10310 10311 auto LoadMnemonic = PPC::LDARX; 10312 auto StoreMnemonic = PPC::STDCX; 10313 switch (AtomicSize) { 10314 default: 10315 llvm_unreachable("Unexpected size of atomic entity"); 10316 case 1: 10317 LoadMnemonic = PPC::LBARX; 10318 StoreMnemonic = PPC::STBCX; 10319 assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4"); 10320 break; 10321 case 2: 10322 LoadMnemonic = PPC::LHARX; 10323 StoreMnemonic = PPC::STHCX; 10324 assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4"); 10325 break; 10326 case 4: 10327 LoadMnemonic = PPC::LWARX; 10328 StoreMnemonic = PPC::STWCX; 10329 break; 10330 case 8: 10331 LoadMnemonic = PPC::LDARX; 10332 StoreMnemonic = PPC::STDCX; 10333 break; 10334 } 10335 10336 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 10337 MachineFunction *F = BB->getParent(); 10338 MachineFunction::iterator It = ++BB->getIterator(); 10339 10340 Register dest = MI.getOperand(0).getReg(); 10341 Register ptrA = MI.getOperand(1).getReg(); 10342 Register ptrB = MI.getOperand(2).getReg(); 10343 Register incr = MI.getOperand(3).getReg(); 10344 DebugLoc dl = MI.getDebugLoc(); 10345 10346 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 10347 MachineBasicBlock *loop2MBB = 10348 CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr; 10349 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 10350 F->insert(It, loopMBB); 10351 if (CmpOpcode) 10352 F->insert(It, loop2MBB); 10353 F->insert(It, exitMBB); 10354 exitMBB->splice(exitMBB->begin(), BB, 10355 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 10356 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 10357 10358 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10359 Register TmpReg = (!BinOpcode) ? incr : 10360 RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass 10361 : &PPC::GPRCRegClass); 10362 10363 // thisMBB: 10364 // ... 10365 // fallthrough --> loopMBB 10366 BB->addSuccessor(loopMBB); 10367 10368 // loopMBB: 10369 // l[wd]arx dest, ptr 10370 // add r0, dest, incr 10371 // st[wd]cx. r0, ptr 10372 // bne- loopMBB 10373 // fallthrough --> exitMBB 10374 10375 // For max/min... 10376 // loopMBB: 10377 // l[wd]arx dest, ptr 10378 // cmpl?[wd] incr, dest 10379 // bgt exitMBB 10380 // loop2MBB: 10381 // st[wd]cx. dest, ptr 10382 // bne- loopMBB 10383 // fallthrough --> exitMBB 10384 10385 BB = loopMBB; 10386 BuildMI(BB, dl, TII->get(LoadMnemonic), dest) 10387 .addReg(ptrA).addReg(ptrB); 10388 if (BinOpcode) 10389 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest); 10390 if (CmpOpcode) { 10391 // Signed comparisons of byte or halfword values must be sign-extended. 10392 if (CmpOpcode == PPC::CMPW && AtomicSize < 4) { 10393 Register ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 10394 BuildMI(BB, dl, TII->get(AtomicSize == 1 ? 
PPC::EXTSB : PPC::EXTSH), 10395 ExtReg).addReg(dest); 10396 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0) 10397 .addReg(incr).addReg(ExtReg); 10398 } else 10399 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0) 10400 .addReg(incr).addReg(dest); 10401 10402 BuildMI(BB, dl, TII->get(PPC::BCC)) 10403 .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB); 10404 BB->addSuccessor(loop2MBB); 10405 BB->addSuccessor(exitMBB); 10406 BB = loop2MBB; 10407 } 10408 BuildMI(BB, dl, TII->get(StoreMnemonic)) 10409 .addReg(TmpReg).addReg(ptrA).addReg(ptrB); 10410 BuildMI(BB, dl, TII->get(PPC::BCC)) 10411 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 10412 BB->addSuccessor(loopMBB); 10413 BB->addSuccessor(exitMBB); 10414 10415 // exitMBB: 10416 // ... 10417 BB = exitMBB; 10418 return BB; 10419 } 10420 10421 MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary( 10422 MachineInstr &MI, MachineBasicBlock *BB, 10423 bool is8bit, // operation 10424 unsigned BinOpcode, unsigned CmpOpcode, unsigned CmpPred) const { 10425 // If we support part-word atomic mnemonics, just use them 10426 if (Subtarget.hasPartwordAtomics()) 10427 return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, CmpOpcode, 10428 CmpPred); 10429 10430 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 10431 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 10432 // In 64 bit mode we have to use 64 bits for addresses, even though the 10433 // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address 10434 // registers without caring whether they're 32 or 64, but here we're 10435 // doing actual arithmetic on the addresses. 10436 bool is64bit = Subtarget.isPPC64(); 10437 bool isLittleEndian = Subtarget.isLittleEndian(); 10438 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 10439 10440 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 10441 MachineFunction *F = BB->getParent(); 10442 MachineFunction::iterator It = ++BB->getIterator(); 10443 10444 Register dest = MI.getOperand(0).getReg(); 10445 Register ptrA = MI.getOperand(1).getReg(); 10446 Register ptrB = MI.getOperand(2).getReg(); 10447 Register incr = MI.getOperand(3).getReg(); 10448 DebugLoc dl = MI.getDebugLoc(); 10449 10450 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 10451 MachineBasicBlock *loop2MBB = 10452 CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr; 10453 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 10454 F->insert(It, loopMBB); 10455 if (CmpOpcode) 10456 F->insert(It, loop2MBB); 10457 F->insert(It, exitMBB); 10458 exitMBB->splice(exitMBB->begin(), BB, 10459 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 10460 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 10461 10462 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10463 const TargetRegisterClass *RC = 10464 is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; 10465 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; 10466 10467 Register PtrReg = RegInfo.createVirtualRegister(RC); 10468 Register Shift1Reg = RegInfo.createVirtualRegister(GPRC); 10469 Register ShiftReg = 10470 isLittleEndian ? 
Shift1Reg : RegInfo.createVirtualRegister(GPRC);
10471 Register Incr2Reg = RegInfo.createVirtualRegister(GPRC);
10472 Register MaskReg = RegInfo.createVirtualRegister(GPRC);
10473 Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
10474 Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
10475 Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
10476 Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
10477 Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
10478 Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
10479 Register Ptr1Reg;
10480 Register TmpReg =
10481 (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC);
10482
10483 // thisMBB:
10484 // ...
10485 // fallthrough --> loopMBB
10486 BB->addSuccessor(loopMBB);
10487
10488 // The 4-byte load must be aligned, while a char or short may be
10489 // anywhere in the word. Hence all this nasty bookkeeping code.
10490 // add ptr1, ptrA, ptrB [copy if ptrA==0]
10491 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
10492 // xori shift, shift1, 24 [16]
10493 // rlwinm ptr, ptr1, 0, 0, 29
10494 // slw incr2, incr, shift
10495 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
10496 // slw mask, mask2, shift
10497 // loopMBB:
10498 // lwarx tmpDest, ptr
10499 // add tmp, tmpDest, incr2
10500 // andc tmp2, tmpDest, mask
10501 // and tmp3, tmp, mask
10502 // or tmp4, tmp3, tmp2
10503 // stwcx. tmp4, ptr
10504 // bne- loopMBB
10505 // fallthrough --> exitMBB
10506 // srw dest, tmpDest, shift
10507 if (ptrA != ZeroReg) {
10508 Ptr1Reg = RegInfo.createVirtualRegister(RC);
10509 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
10510 .addReg(ptrA)
10511 .addReg(ptrB);
10512 } else {
10513 Ptr1Reg = ptrB;
10514 }
10515 // We need to use a 32-bit subregister to avoid a register class mismatch in
10516 // 64-bit mode.
10517 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
10518 .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
10519 .addImm(3)
10520 .addImm(27)
10521 .addImm(is8bit ? 28 : 27);
10522 if (!isLittleEndian)
10523 BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
10524 .addReg(Shift1Reg)
10525 .addImm(is8bit ? 24 : 16);
10526 if (is64bit)
10527 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
10528 .addReg(Ptr1Reg)
10529 .addImm(0)
10530 .addImm(61);
10531 else
10532 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
10533 .addReg(Ptr1Reg)
10534 .addImm(0)
10535 .addImm(0)
10536 .addImm(29);
10537 BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg).addReg(incr).addReg(ShiftReg);
10538 if (is8bit)
10539 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
10540 else {
10541 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
10542 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
10543 .addReg(Mask3Reg)
10544 .addImm(65535);
10545 }
10546 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
10547 .addReg(Mask2Reg)
10548 .addReg(ShiftReg);
10549
10550 BB = loopMBB;
10551 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
10552 .addReg(ZeroReg)
10553 .addReg(PtrReg);
10554 if (BinOpcode)
10555 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
10556 .addReg(Incr2Reg)
10557 .addReg(TmpDestReg);
10558 BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
10559 .addReg(TmpDestReg)
10560 .addReg(MaskReg);
10561 BuildMI(BB, dl, TII->get(PPC::AND), Tmp3Reg).addReg(TmpReg).addReg(MaskReg);
10562 if (CmpOpcode) {
10563 // For unsigned comparisons, we can directly compare the shifted values.
10564 // For signed comparisons we shift and sign extend.
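// Worked example (illustrative, big-endian, 8-bit case): for a byte at
// (ptr1 & 3) == 1, the rlwinm above computes shift1 = 8 and the xori gives
// shift = 8 ^ 24 = 16, so the byte of interest occupies bits 16..23 of the
// aligned word and mask = 0xFF << 16 selects exactly that lane; the code
// below re-extracts that lane and sign-extends it before the compare.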
10565 Register SReg = RegInfo.createVirtualRegister(GPRC); 10566 BuildMI(BB, dl, TII->get(PPC::AND), SReg) 10567 .addReg(TmpDestReg) 10568 .addReg(MaskReg); 10569 unsigned ValueReg = SReg; 10570 unsigned CmpReg = Incr2Reg; 10571 if (CmpOpcode == PPC::CMPW) { 10572 ValueReg = RegInfo.createVirtualRegister(GPRC); 10573 BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg) 10574 .addReg(SReg) 10575 .addReg(ShiftReg); 10576 Register ValueSReg = RegInfo.createVirtualRegister(GPRC); 10577 BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg) 10578 .addReg(ValueReg); 10579 ValueReg = ValueSReg; 10580 CmpReg = incr; 10581 } 10582 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0) 10583 .addReg(CmpReg) 10584 .addReg(ValueReg); 10585 BuildMI(BB, dl, TII->get(PPC::BCC)) 10586 .addImm(CmpPred) 10587 .addReg(PPC::CR0) 10588 .addMBB(exitMBB); 10589 BB->addSuccessor(loop2MBB); 10590 BB->addSuccessor(exitMBB); 10591 BB = loop2MBB; 10592 } 10593 BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg).addReg(Tmp3Reg).addReg(Tmp2Reg); 10594 BuildMI(BB, dl, TII->get(PPC::STWCX)) 10595 .addReg(Tmp4Reg) 10596 .addReg(ZeroReg) 10597 .addReg(PtrReg); 10598 BuildMI(BB, dl, TII->get(PPC::BCC)) 10599 .addImm(PPC::PRED_NE) 10600 .addReg(PPC::CR0) 10601 .addMBB(loopMBB); 10602 BB->addSuccessor(loopMBB); 10603 BB->addSuccessor(exitMBB); 10604 10605 // exitMBB: 10606 // ... 10607 BB = exitMBB; 10608 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest) 10609 .addReg(TmpDestReg) 10610 .addReg(ShiftReg); 10611 return BB; 10612 } 10613 10614 llvm::MachineBasicBlock * 10615 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI, 10616 MachineBasicBlock *MBB) const { 10617 DebugLoc DL = MI.getDebugLoc(); 10618 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 10619 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 10620 10621 MachineFunction *MF = MBB->getParent(); 10622 MachineRegisterInfo &MRI = MF->getRegInfo(); 10623 10624 const BasicBlock *BB = MBB->getBasicBlock(); 10625 MachineFunction::iterator I = ++MBB->getIterator(); 10626 10627 Register DstReg = MI.getOperand(0).getReg(); 10628 const TargetRegisterClass *RC = MRI.getRegClass(DstReg); 10629 assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!"); 10630 Register mainDstReg = MRI.createVirtualRegister(RC); 10631 Register restoreDstReg = MRI.createVirtualRegister(RC); 10632 10633 MVT PVT = getPointerTy(MF->getDataLayout()); 10634 assert((PVT == MVT::i64 || PVT == MVT::i32) && 10635 "Invalid Pointer Size!"); 10636 // For v = setjmp(buf), we generate 10637 // 10638 // thisMBB: 10639 // SjLjSetup mainMBB 10640 // bl mainMBB 10641 // v_restore = 1 10642 // b sinkMBB 10643 // 10644 // mainMBB: 10645 // buf[LabelOffset] = LR 10646 // v_main = 0 10647 // 10648 // sinkMBB: 10649 // v = phi(main, restore) 10650 // 10651 10652 MachineBasicBlock *thisMBB = MBB; 10653 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 10654 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 10655 MF->insert(I, mainMBB); 10656 MF->insert(I, sinkMBB); 10657 10658 MachineInstrBuilder MIB; 10659 10660 // Transfer the remainder of BB and its successor edges to sinkMBB. 10661 sinkMBB->splice(sinkMBB->begin(), MBB, 10662 std::next(MachineBasicBlock::iterator(MI)), MBB->end()); 10663 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 10664 10665 // Note that the structure of the jmp_buf used here is not compatible 10666 // with that used by libc, and is not designed to be. 
Specifically, it
10667 // stores only those 'reserved' registers that LLVM does not otherwise
10668 // understand how to spill. Also, by convention, by the time this
10669 // intrinsic is called, Clang has already stored the frame address in the
10670 // first slot of the buffer and the stack address in the third. Following the
10671 // X86 target code, we'll store the jump address in the second slot. We also
10672 // need to save the TOC pointer (R2) to handle jumps between shared
10673 // libraries, and that will be stored in the fourth slot. The thread
10674 // identifier (R13) is not affected.
10675
10676 // thisMBB:
10677 const int64_t LabelOffset = 1 * PVT.getStoreSize();
10678 const int64_t TOCOffset = 3 * PVT.getStoreSize();
10679 const int64_t BPOffset = 4 * PVT.getStoreSize();
10680
10681 // Prepare the IP (resume address) in a register.
10682 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
10683 Register LabelReg = MRI.createVirtualRegister(PtrRC);
10684 Register BufReg = MI.getOperand(1).getReg();
10685
10686 if (Subtarget.is64BitELFABI()) {
10687 setUsesTOCBasePtr(*MBB->getParent());
10688 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
10689 .addReg(PPC::X2)
10690 .addImm(TOCOffset)
10691 .addReg(BufReg)
10692 .cloneMemRefs(MI);
10693 }
10694
10695 // Naked functions never have a base pointer, and so we use r1. For all
10696 // other functions, this decision must be delayed until during PEI.
10697 unsigned BaseReg;
10698 if (MF->getFunction().hasFnAttribute(Attribute::Naked))
10699 BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
10700 else
10701 BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;
10702
10703 MIB = BuildMI(*thisMBB, MI, DL,
10704 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
10705 .addReg(BaseReg)
10706 .addImm(BPOffset)
10707 .addReg(BufReg)
10708 .cloneMemRefs(MI);
10709
10710 // Setup
10711 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
10712 MIB.addRegMask(TRI->getNoPreservedMask());
10713
10714 BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);
10715
10716 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
10717 .addMBB(mainMBB);
10718 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);
10719
10720 thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
10721 thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());
10722
10723 // mainMBB:
10724 // mainDstReg = 0
10725 MIB =
10726 BuildMI(mainMBB, DL,
10727 TII->get(Subtarget.isPPC64() ?
PPC::MFLR8 : PPC::MFLR), LabelReg); 10728 10729 // Store IP 10730 if (Subtarget.isPPC64()) { 10731 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD)) 10732 .addReg(LabelReg) 10733 .addImm(LabelOffset) 10734 .addReg(BufReg); 10735 } else { 10736 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW)) 10737 .addReg(LabelReg) 10738 .addImm(LabelOffset) 10739 .addReg(BufReg); 10740 } 10741 MIB.cloneMemRefs(MI); 10742 10743 BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0); 10744 mainMBB->addSuccessor(sinkMBB); 10745 10746 // sinkMBB: 10747 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 10748 TII->get(PPC::PHI), DstReg) 10749 .addReg(mainDstReg).addMBB(mainMBB) 10750 .addReg(restoreDstReg).addMBB(thisMBB); 10751 10752 MI.eraseFromParent(); 10753 return sinkMBB; 10754 } 10755 10756 MachineBasicBlock * 10757 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI, 10758 MachineBasicBlock *MBB) const { 10759 DebugLoc DL = MI.getDebugLoc(); 10760 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 10761 10762 MachineFunction *MF = MBB->getParent(); 10763 MachineRegisterInfo &MRI = MF->getRegInfo(); 10764 10765 MVT PVT = getPointerTy(MF->getDataLayout()); 10766 assert((PVT == MVT::i64 || PVT == MVT::i32) && 10767 "Invalid Pointer Size!"); 10768 10769 const TargetRegisterClass *RC = 10770 (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; 10771 Register Tmp = MRI.createVirtualRegister(RC); 10772 // Since FP is only updated here but NOT referenced, it's treated as GPR. 10773 unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31; 10774 unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1; 10775 unsigned BP = 10776 (PVT == MVT::i64) 10777 ? PPC::X30 10778 : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29 10779 : PPC::R30); 10780 10781 MachineInstrBuilder MIB; 10782 10783 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 10784 const int64_t SPOffset = 2 * PVT.getStoreSize(); 10785 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 10786 const int64_t BPOffset = 4 * PVT.getStoreSize(); 10787 10788 Register BufReg = MI.getOperand(0).getReg(); 10789 10790 // Reload FP (the jumped-to function may not have had a 10791 // frame pointer, and if so, then its r31 will be restored 10792 // as necessary). 
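// Illustrative summary of the buffer layout assumed below (mirroring the
// one written by emitEHSjLjSetJmp): slot 0 holds FP, slot 1 the resume IP,
// slot 2 SP, slot 3 the TOC pointer (R2), and slot 4 the base pointer,
// with each slot PVT.getStoreSize() bytes wide.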
10793 if (PVT == MVT::i64) { 10794 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP) 10795 .addImm(0) 10796 .addReg(BufReg); 10797 } else { 10798 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP) 10799 .addImm(0) 10800 .addReg(BufReg); 10801 } 10802 MIB.cloneMemRefs(MI); 10803 10804 // Reload IP 10805 if (PVT == MVT::i64) { 10806 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp) 10807 .addImm(LabelOffset) 10808 .addReg(BufReg); 10809 } else { 10810 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp) 10811 .addImm(LabelOffset) 10812 .addReg(BufReg); 10813 } 10814 MIB.cloneMemRefs(MI); 10815 10816 // Reload SP 10817 if (PVT == MVT::i64) { 10818 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP) 10819 .addImm(SPOffset) 10820 .addReg(BufReg); 10821 } else { 10822 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP) 10823 .addImm(SPOffset) 10824 .addReg(BufReg); 10825 } 10826 MIB.cloneMemRefs(MI); 10827 10828 // Reload BP 10829 if (PVT == MVT::i64) { 10830 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP) 10831 .addImm(BPOffset) 10832 .addReg(BufReg); 10833 } else { 10834 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP) 10835 .addImm(BPOffset) 10836 .addReg(BufReg); 10837 } 10838 MIB.cloneMemRefs(MI); 10839 10840 // Reload TOC 10841 if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) { 10842 setUsesTOCBasePtr(*MBB->getParent()); 10843 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2) 10844 .addImm(TOCOffset) 10845 .addReg(BufReg) 10846 .cloneMemRefs(MI); 10847 } 10848 10849 // Jump 10850 BuildMI(*MBB, MI, DL, 10851 TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp); 10852 BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR)); 10853 10854 MI.eraseFromParent(); 10855 return MBB; 10856 } 10857 10858 MachineBasicBlock * 10859 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, 10860 MachineBasicBlock *BB) const { 10861 if (MI.getOpcode() == TargetOpcode::STACKMAP || 10862 MI.getOpcode() == TargetOpcode::PATCHPOINT) { 10863 if (Subtarget.is64BitELFABI() && 10864 MI.getOpcode() == TargetOpcode::PATCHPOINT) { 10865 // Call lowering should have added an r2 operand to indicate a dependence 10866 // on the TOC base pointer value. It can't however, because there is no 10867 // way to mark the dependence as implicit there, and so the stackmap code 10868 // will confuse it with a regular operand. Instead, add the dependence 10869 // here. 10870 MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true)); 10871 } 10872 10873 return emitPatchPoint(MI, BB); 10874 } 10875 10876 if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 || 10877 MI.getOpcode() == PPC::EH_SjLj_SetJmp64) { 10878 return emitEHSjLjSetJmp(MI, BB); 10879 } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 || 10880 MI.getOpcode() == PPC::EH_SjLj_LongJmp64) { 10881 return emitEHSjLjLongJmp(MI, BB); 10882 } 10883 10884 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 10885 10886 // To "insert" these instructions we actually have to insert their 10887 // control-flow patterns. 
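// Illustrative note: a select-style pseudo such as
//   %dst = SELECT_CC_I4 %cr, %true, %false, pred
// has no single-instruction expansion without ISEL, so it is rewritten
// below as a compare-and-branch diamond (thisMBB -> copy0MBB -> sinkMBB)
// with a PHI in sinkMBB merging the two values.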
10888 const BasicBlock *LLVM_BB = BB->getBasicBlock();
10889 MachineFunction::iterator It = ++BB->getIterator();
10890
10891 MachineFunction *F = BB->getParent();
10892
10893 if (Subtarget.hasISEL() &&
10894 (MI.getOpcode() == PPC::SELECT_CC_I4 || MI.getOpcode() == PPC::SELECT_CC_I8 ||
10895 MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8)) {
10896 SmallVector<MachineOperand, 2> Cond;
10897 if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
10898 MI.getOpcode() == PPC::SELECT_CC_I8)
10899 Cond.push_back(MI.getOperand(4));
10900 else
10901 Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
10902 Cond.push_back(MI.getOperand(1));
10903
10904 DebugLoc dl = MI.getDebugLoc();
10905 TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
10906 MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
10907 } else if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
10908 MI.getOpcode() == PPC::SELECT_CC_I8 ||
10909 MI.getOpcode() == PPC::SELECT_CC_F4 ||
10910 MI.getOpcode() == PPC::SELECT_CC_F8 ||
10911 MI.getOpcode() == PPC::SELECT_CC_F16 ||
10912 MI.getOpcode() == PPC::SELECT_CC_QFRC ||
10913 MI.getOpcode() == PPC::SELECT_CC_QSRC ||
10914 MI.getOpcode() == PPC::SELECT_CC_QBRC ||
10915 MI.getOpcode() == PPC::SELECT_CC_VRRC ||
10916 MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
10917 MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
10918 MI.getOpcode() == PPC::SELECT_CC_VSRC ||
10919 MI.getOpcode() == PPC::SELECT_CC_SPE4 ||
10920 MI.getOpcode() == PPC::SELECT_CC_SPE ||
10921 MI.getOpcode() == PPC::SELECT_I4 ||
10922 MI.getOpcode() == PPC::SELECT_I8 ||
10923 MI.getOpcode() == PPC::SELECT_F4 ||
10924 MI.getOpcode() == PPC::SELECT_F8 ||
10925 MI.getOpcode() == PPC::SELECT_F16 ||
10926 MI.getOpcode() == PPC::SELECT_QFRC ||
10927 MI.getOpcode() == PPC::SELECT_QSRC ||
10928 MI.getOpcode() == PPC::SELECT_QBRC ||
10929 MI.getOpcode() == PPC::SELECT_SPE ||
10930 MI.getOpcode() == PPC::SELECT_SPE4 ||
10931 MI.getOpcode() == PPC::SELECT_VRRC ||
10932 MI.getOpcode() == PPC::SELECT_VSFRC ||
10933 MI.getOpcode() == PPC::SELECT_VSSRC ||
10934 MI.getOpcode() == PPC::SELECT_VSRC) {
10935 // The incoming instruction knows the destination vreg to set, the
10936 // condition code register to branch on, the true/false values to
10937 // select between, and a branch opcode to use.
10938
10939 // thisMBB:
10940 // ...
10941 // TrueVal = ...
10942 // cmpTY ccX, r1, r2
10943 // bCC copy1MBB
10944 // fallthrough --> copy0MBB
10945 MachineBasicBlock *thisMBB = BB;
10946 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
10947 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
10948 DebugLoc dl = MI.getDebugLoc();
10949 F->insert(It, copy0MBB);
10950 F->insert(It, sinkMBB);
10951
10952 // Transfer the remainder of BB and its successor edges to sinkMBB.
10953 sinkMBB->splice(sinkMBB->begin(), BB,
10954 std::next(MachineBasicBlock::iterator(MI)), BB->end());
10955 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
10956
10957 // Next, add the true and fallthrough blocks as its successors.
10958 BB->addSuccessor(copy0MBB); 10959 BB->addSuccessor(sinkMBB); 10960 10961 if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 || 10962 MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 || 10963 MI.getOpcode() == PPC::SELECT_F16 || 10964 MI.getOpcode() == PPC::SELECT_SPE4 || 10965 MI.getOpcode() == PPC::SELECT_SPE || 10966 MI.getOpcode() == PPC::SELECT_QFRC || 10967 MI.getOpcode() == PPC::SELECT_QSRC || 10968 MI.getOpcode() == PPC::SELECT_QBRC || 10969 MI.getOpcode() == PPC::SELECT_VRRC || 10970 MI.getOpcode() == PPC::SELECT_VSFRC || 10971 MI.getOpcode() == PPC::SELECT_VSSRC || 10972 MI.getOpcode() == PPC::SELECT_VSRC) { 10973 BuildMI(BB, dl, TII->get(PPC::BC)) 10974 .addReg(MI.getOperand(1).getReg()) 10975 .addMBB(sinkMBB); 10976 } else { 10977 unsigned SelectPred = MI.getOperand(4).getImm(); 10978 BuildMI(BB, dl, TII->get(PPC::BCC)) 10979 .addImm(SelectPred) 10980 .addReg(MI.getOperand(1).getReg()) 10981 .addMBB(sinkMBB); 10982 } 10983 10984 // copy0MBB: 10985 // %FalseValue = ... 10986 // # fallthrough to sinkMBB 10987 BB = copy0MBB; 10988 10989 // Update machine-CFG edges 10990 BB->addSuccessor(sinkMBB); 10991 10992 // sinkMBB: 10993 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 10994 // ... 10995 BB = sinkMBB; 10996 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg()) 10997 .addReg(MI.getOperand(3).getReg()) 10998 .addMBB(copy0MBB) 10999 .addReg(MI.getOperand(2).getReg()) 11000 .addMBB(thisMBB); 11001 } else if (MI.getOpcode() == PPC::ReadTB) { 11002 // To read the 64-bit time-base register on a 32-bit target, we read the 11003 // two halves. Should the counter have wrapped while it was being read, we 11004 // need to try again. 11005 // ... 11006 // readLoop: 11007 // mfspr Rx,TBU # load from TBU 11008 // mfspr Ry,TB # load from TB 11009 // mfspr Rz,TBU # load from TBU 11010 // cmpw crX,Rx,Rz # check if 'old'='new' 11011 // bne readLoop # branch if they're not equal 11012 // ... 11013 11014 MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB); 11015 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 11016 DebugLoc dl = MI.getDebugLoc(); 11017 F->insert(It, readMBB); 11018 F->insert(It, sinkMBB); 11019 11020 // Transfer the remainder of BB and its successor edges to sinkMBB. 
11021 sinkMBB->splice(sinkMBB->begin(), BB, 11022 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 11023 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 11024 11025 BB->addSuccessor(readMBB); 11026 BB = readMBB; 11027 11028 MachineRegisterInfo &RegInfo = F->getRegInfo(); 11029 Register ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 11030 Register LoReg = MI.getOperand(0).getReg(); 11031 Register HiReg = MI.getOperand(1).getReg(); 11032 11033 BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269); 11034 BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268); 11035 BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269); 11036 11037 Register CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 11038 11039 BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg) 11040 .addReg(HiReg) 11041 .addReg(ReadAgainReg); 11042 BuildMI(BB, dl, TII->get(PPC::BCC)) 11043 .addImm(PPC::PRED_NE) 11044 .addReg(CmpReg) 11045 .addMBB(readMBB); 11046 11047 BB->addSuccessor(readMBB); 11048 BB->addSuccessor(sinkMBB); 11049 } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8) 11050 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4); 11051 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16) 11052 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4); 11053 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32) 11054 BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4); 11055 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64) 11056 BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8); 11057 11058 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8) 11059 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND); 11060 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16) 11061 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND); 11062 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32) 11063 BB = EmitAtomicBinary(MI, BB, 4, PPC::AND); 11064 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64) 11065 BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8); 11066 11067 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8) 11068 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR); 11069 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16) 11070 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR); 11071 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32) 11072 BB = EmitAtomicBinary(MI, BB, 4, PPC::OR); 11073 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64) 11074 BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8); 11075 11076 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8) 11077 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR); 11078 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16) 11079 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR); 11080 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32) 11081 BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR); 11082 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64) 11083 BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8); 11084 11085 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8) 11086 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND); 11087 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16) 11088 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND); 11089 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32) 11090 BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND); 11091 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64) 11092 BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8); 11093 11094 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8) 11095 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF); 11096 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16) 11097 
BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF); 11098 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32) 11099 BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF); 11100 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64) 11101 BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8); 11102 11103 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8) 11104 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE); 11105 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16) 11106 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE); 11107 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32) 11108 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE); 11109 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64) 11110 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE); 11111 11112 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8) 11113 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE); 11114 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16) 11115 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE); 11116 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32) 11117 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE); 11118 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64) 11119 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE); 11120 11121 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8) 11122 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE); 11123 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16) 11124 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE); 11125 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32) 11126 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE); 11127 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64) 11128 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE); 11129 11130 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8) 11131 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE); 11132 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16) 11133 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE); 11134 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32) 11135 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE); 11136 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64) 11137 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE); 11138 11139 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8) 11140 BB = EmitPartwordAtomicBinary(MI, BB, true, 0); 11141 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16) 11142 BB = EmitPartwordAtomicBinary(MI, BB, false, 0); 11143 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32) 11144 BB = EmitAtomicBinary(MI, BB, 4, 0); 11145 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64) 11146 BB = EmitAtomicBinary(MI, BB, 8, 0); 11147 else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 || 11148 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 || 11149 (Subtarget.hasPartwordAtomics() && 11150 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) || 11151 (Subtarget.hasPartwordAtomics() && 11152 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) { 11153 bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64; 11154 11155 auto LoadMnemonic = PPC::LDARX; 11156 auto StoreMnemonic = PPC::STDCX; 11157 switch (MI.getOpcode()) { 11158 default: 11159 llvm_unreachable("Compare and swap of unknown size"); 11160 case PPC::ATOMIC_CMP_SWAP_I8: 11161 LoadMnemonic = PPC::LBARX; 11162 StoreMnemonic = PPC::STBCX; 11163 
assert(Subtarget.hasPartwordAtomics() && "No support for partword atomics.");
11164 break;
11165 case PPC::ATOMIC_CMP_SWAP_I16:
11166 LoadMnemonic = PPC::LHARX;
11167 StoreMnemonic = PPC::STHCX;
11168 assert(Subtarget.hasPartwordAtomics() && "No support for partword atomics.");
11169 break;
11170 case PPC::ATOMIC_CMP_SWAP_I32:
11171 LoadMnemonic = PPC::LWARX;
11172 StoreMnemonic = PPC::STWCX;
11173 break;
11174 case PPC::ATOMIC_CMP_SWAP_I64:
11175 LoadMnemonic = PPC::LDARX;
11176 StoreMnemonic = PPC::STDCX;
11177 break;
11178 }
11179 Register dest = MI.getOperand(0).getReg();
11180 Register ptrA = MI.getOperand(1).getReg();
11181 Register ptrB = MI.getOperand(2).getReg();
11182 Register oldval = MI.getOperand(3).getReg();
11183 Register newval = MI.getOperand(4).getReg();
11184 DebugLoc dl = MI.getDebugLoc();
11185
11186 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
11187 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
11188 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
11189 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11190 F->insert(It, loop1MBB);
11191 F->insert(It, loop2MBB);
11192 F->insert(It, midMBB);
11193 F->insert(It, exitMBB);
11194 exitMBB->splice(exitMBB->begin(), BB,
11195 std::next(MachineBasicBlock::iterator(MI)), BB->end());
11196 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11197
11198 // thisMBB:
11199 // ...
11200 // fallthrough --> loopMBB
11201 BB->addSuccessor(loop1MBB);
11202
11203 // loop1MBB:
11204 // l[bhwd]arx dest, ptr
11205 // cmp[wd] dest, oldval
11206 // bne- midMBB
11207 // loop2MBB:
11208 // st[bhwd]cx. newval, ptr
11209 // bne- loop1MBB
11210 // b exitBB
11211 // midMBB:
11212 // st[bhwd]cx. dest, ptr
11213 // exitBB:
11214 BB = loop1MBB;
11215 BuildMI(BB, dl, TII->get(LoadMnemonic), dest).addReg(ptrA).addReg(ptrB);
11216 BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
11217 .addReg(oldval)
11218 .addReg(dest);
11219 BuildMI(BB, dl, TII->get(PPC::BCC))
11220 .addImm(PPC::PRED_NE)
11221 .addReg(PPC::CR0)
11222 .addMBB(midMBB);
11223 BB->addSuccessor(loop2MBB);
11224 BB->addSuccessor(midMBB);
11225
11226 BB = loop2MBB;
11227 BuildMI(BB, dl, TII->get(StoreMnemonic))
11228 .addReg(newval)
11229 .addReg(ptrA)
11230 .addReg(ptrB);
11231 BuildMI(BB, dl, TII->get(PPC::BCC))
11232 .addImm(PPC::PRED_NE)
11233 .addReg(PPC::CR0)
11234 .addMBB(loop1MBB);
11235 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
11236 BB->addSuccessor(loop1MBB);
11237 BB->addSuccessor(exitMBB);
11238
11239 BB = midMBB;
11240 BuildMI(BB, dl, TII->get(StoreMnemonic))
11241 .addReg(dest)
11242 .addReg(ptrA)
11243 .addReg(ptrB);
11244 BB->addSuccessor(exitMBB);
11245
11246 // exitMBB:
11247 // ...
11248 BB = exitMBB;
11249 } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
11250 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
11251 // We must use 64-bit registers for addresses when targeting 64-bit,
11252 // since we're actually doing arithmetic on them. Other registers
11253 // can be 32-bit.
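// Illustrative note covering the midMBB blocks in both compare-and-swap
// expansions (the full-word one above and the partword one below): on a
// failed compare, the just-loaded value is stored back with a conditional
// store so that the reservation taken by the load-and-reserve is released;
// whether that store succeeds does not affect the result.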
11254 bool is64bit = Subtarget.isPPC64(); 11255 bool isLittleEndian = Subtarget.isLittleEndian(); 11256 bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8; 11257 11258 Register dest = MI.getOperand(0).getReg(); 11259 Register ptrA = MI.getOperand(1).getReg(); 11260 Register ptrB = MI.getOperand(2).getReg(); 11261 Register oldval = MI.getOperand(3).getReg(); 11262 Register newval = MI.getOperand(4).getReg(); 11263 DebugLoc dl = MI.getDebugLoc(); 11264 11265 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 11266 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 11267 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 11268 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 11269 F->insert(It, loop1MBB); 11270 F->insert(It, loop2MBB); 11271 F->insert(It, midMBB); 11272 F->insert(It, exitMBB); 11273 exitMBB->splice(exitMBB->begin(), BB, 11274 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 11275 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 11276 11277 MachineRegisterInfo &RegInfo = F->getRegInfo(); 11278 const TargetRegisterClass *RC = 11279 is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; 11280 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; 11281 11282 Register PtrReg = RegInfo.createVirtualRegister(RC); 11283 Register Shift1Reg = RegInfo.createVirtualRegister(GPRC); 11284 Register ShiftReg = 11285 isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC); 11286 Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC); 11287 Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC); 11288 Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC); 11289 Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC); 11290 Register MaskReg = RegInfo.createVirtualRegister(GPRC); 11291 Register Mask2Reg = RegInfo.createVirtualRegister(GPRC); 11292 Register Mask3Reg = RegInfo.createVirtualRegister(GPRC); 11293 Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC); 11294 Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC); 11295 Register TmpDestReg = RegInfo.createVirtualRegister(GPRC); 11296 Register Ptr1Reg; 11297 Register TmpReg = RegInfo.createVirtualRegister(GPRC); 11298 Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 11299 // thisMBB: 11300 // ... 11301 // fallthrough --> loopMBB 11302 BB->addSuccessor(loop1MBB); 11303 11304 // The 4-byte load must be aligned, while a char or short may be 11305 // anywhere in the word. Hence all this nasty bookkeeping code. 11306 // add ptr1, ptrA, ptrB [copy if ptrA==0] 11307 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 11308 // xori shift, shift1, 24 [16] 11309 // rlwinm ptr, ptr1, 0, 0, 29 11310 // slw newval2, newval, shift 11311 // slw oldval2, oldval,shift 11312 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 11313 // slw mask, mask2, shift 11314 // and newval3, newval2, mask 11315 // and oldval3, oldval2, mask 11316 // loop1MBB: 11317 // lwarx tmpDest, ptr 11318 // and tmp, tmpDest, mask 11319 // cmpw tmp, oldval3 11320 // bne- midMBB 11321 // loop2MBB: 11322 // andc tmp2, tmpDest, mask 11323 // or tmp4, tmp2, newval3 11324 // stwcx. tmp4, ptr 11325 // bne- loop1MBB 11326 // b exitBB 11327 // midMBB: 11328 // stwcx. tmpDest, ptr 11329 // exitBB: 11330 // srw dest, tmpDest, shift 11331 if (ptrA != ZeroReg) { 11332 Ptr1Reg = RegInfo.createVirtualRegister(RC); 11333 BuildMI(BB, dl, TII->get(is64bit ? 
PPC::ADD8 : PPC::ADD4), Ptr1Reg)
11334 .addReg(ptrA)
11335 .addReg(ptrB);
11336 } else {
11337 Ptr1Reg = ptrB;
11338 }
11339
11340 // We need to use a 32-bit subregister to avoid a register class mismatch in
11341 // 64-bit mode.
11342 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
11343 .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
11344 .addImm(3)
11345 .addImm(27)
11346 .addImm(is8bit ? 28 : 27);
11347 if (!isLittleEndian)
11348 BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
11349 .addReg(Shift1Reg)
11350 .addImm(is8bit ? 24 : 16);
11351 if (is64bit)
11352 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
11353 .addReg(Ptr1Reg)
11354 .addImm(0)
11355 .addImm(61);
11356 else
11357 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
11358 .addReg(Ptr1Reg)
11359 .addImm(0)
11360 .addImm(0)
11361 .addImm(29);
11362 BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
11363 .addReg(newval)
11364 .addReg(ShiftReg);
11365 BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
11366 .addReg(oldval)
11367 .addReg(ShiftReg);
11368 if (is8bit)
11369 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
11370 else {
11371 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
11372 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
11373 .addReg(Mask3Reg)
11374 .addImm(65535);
11375 }
11376 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
11377 .addReg(Mask2Reg)
11378 .addReg(ShiftReg);
11379 BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
11380 .addReg(NewVal2Reg)
11381 .addReg(MaskReg);
11382 BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
11383 .addReg(OldVal2Reg)
11384 .addReg(MaskReg);
11385
11386 BB = loop1MBB;
11387 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
11388 .addReg(ZeroReg)
11389 .addReg(PtrReg);
11390 BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
11391 .addReg(TmpDestReg)
11392 .addReg(MaskReg);
11393 BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
11394 .addReg(TmpReg)
11395 .addReg(OldVal3Reg);
11396 BuildMI(BB, dl, TII->get(PPC::BCC))
11397 .addImm(PPC::PRED_NE)
11398 .addReg(PPC::CR0)
11399 .addMBB(midMBB);
11400 BB->addSuccessor(loop2MBB);
11401 BB->addSuccessor(midMBB);
11402
11403 BB = loop2MBB;
11404 BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
11405 .addReg(TmpDestReg)
11406 .addReg(MaskReg);
11407 BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
11408 .addReg(Tmp2Reg)
11409 .addReg(NewVal3Reg);
11410 BuildMI(BB, dl, TII->get(PPC::STWCX))
11411 .addReg(Tmp4Reg)
11412 .addReg(ZeroReg)
11413 .addReg(PtrReg);
11414 BuildMI(BB, dl, TII->get(PPC::BCC))
11415 .addImm(PPC::PRED_NE)
11416 .addReg(PPC::CR0)
11417 .addMBB(loop1MBB);
11418 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
11419 BB->addSuccessor(loop1MBB);
11420 BB->addSuccessor(exitMBB);
11421
11422 BB = midMBB;
11423 BuildMI(BB, dl, TII->get(PPC::STWCX))
11424 .addReg(TmpDestReg)
11425 .addReg(ZeroReg)
11426 .addReg(PtrReg);
11427 BB->addSuccessor(exitMBB);
11428
11429 // exitMBB:
11430 // ...
11431 BB = exitMBB;
11432 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
11433 .addReg(TmpReg)
11434 .addReg(ShiftReg);
11435 } else if (MI.getOpcode() == PPC::FADDrtz) {
11436 // This pseudo performs an FADD with rounding mode temporarily forced
11437 // to round-to-zero. We emit this via custom inserter since the FPSCR
11438 // is not modeled at the SelectionDAG level.
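// Sketch of the sequence emitted below (illustrative):
//   mffs   f0             ; save the FPSCR
//   mtfsb1 31             ; set RN[1]
//   mtfsb0 30             ; clear RN[0] -> rounding mode 0b01, round to zero
//   fadd   dst, src1, src2
//   mtfsf  1, f0          ; restore the FPSCR field holding RN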
11439 Register Dest = MI.getOperand(0).getReg();
11440 Register Src1 = MI.getOperand(1).getReg();
11441 Register Src2 = MI.getOperand(2).getReg();
11442 DebugLoc dl = MI.getDebugLoc();
11443
11444 MachineRegisterInfo &RegInfo = F->getRegInfo();
11445 Register MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
11446
11447 // Save FPSCR value.
11448 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);
11449
11450 // Set rounding mode to round-to-zero.
11451 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31);
11452 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30);
11453
11454 // Perform addition.
11455 BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2);
11456
11457 // Restore FPSCR value.
11458 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
11459 } else if (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT ||
11460 MI.getOpcode() == PPC::ANDIo_1_GT_BIT ||
11461 MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
11462 MI.getOpcode() == PPC::ANDIo_1_GT_BIT8) {
11463 unsigned Opcode = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
11464 MI.getOpcode() == PPC::ANDIo_1_GT_BIT8)
11465 ? PPC::ANDIo8
11466 : PPC::ANDIo;
11467 bool isEQ = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT ||
11468 MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8);
11469
11470 MachineRegisterInfo &RegInfo = F->getRegInfo();
11471 Register Dest = RegInfo.createVirtualRegister(
11472 Opcode == PPC::ANDIo ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);
11473
11474 DebugLoc dl = MI.getDebugLoc();
11475 BuildMI(*BB, MI, dl, TII->get(Opcode), Dest)
11476 .addReg(MI.getOperand(1).getReg())
11477 .addImm(1);
11478 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY),
11479 MI.getOperand(0).getReg())
11480 .addReg(isEQ ? PPC::CR0EQ : PPC::CR0GT);
11481 } else if (MI.getOpcode() == PPC::TCHECK_RET) {
11482 DebugLoc Dl = MI.getDebugLoc();
11483 MachineRegisterInfo &RegInfo = F->getRegInfo();
11484 Register CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
11485 BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
11486 BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
11487 MI.getOperand(0).getReg())
11488 .addReg(CRReg);
11489 } else if (MI.getOpcode() == PPC::TBEGIN_RET) {
11490 DebugLoc Dl = MI.getDebugLoc();
11491 unsigned Imm = MI.getOperand(1).getImm();
11492 BuildMI(*BB, MI, Dl, TII->get(PPC::TBEGIN)).addImm(Imm);
11493 BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
11494 MI.getOperand(0).getReg())
11495 .addReg(PPC::CR0EQ);
11496 } else if (MI.getOpcode() == PPC::SETRNDi) {
11497 DebugLoc dl = MI.getDebugLoc();
11498 Register OldFPSCRReg = MI.getOperand(0).getReg();
11499
11500 // Save FPSCR value.
11501 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
11502
11503 // The floating point rounding mode is in the bits 62:63 of FPSCR, and has
11504 // the following settings:
11505 // 00 Round to nearest
11506 // 01 Round to 0
11507 // 10 Round to +inf
11508 // 11 Round to -inf
11509
11510 // When the operand is an immediate, we use its two least significant bits
11511 // to set bits 62:63 of the FPSCR.
11512 unsigned Mode = MI.getOperand(1).getImm();
11513 BuildMI(*BB, MI, dl, TII->get((Mode & 1) ? PPC::MTFSB1 : PPC::MTFSB0))
11514 .addImm(31);
11515
11516 BuildMI(*BB, MI, dl, TII->get((Mode & 2) ?
PPC::MTFSB1 : PPC::MTFSB0))
11517 .addImm(30);
11518 } else if (MI.getOpcode() == PPC::SETRND) {
11519 DebugLoc dl = MI.getDebugLoc();
11520
11521 // Copy register from F8RCRegClass::SrcReg to G8RCRegClass::DestReg
11522 // or copy register from G8RCRegClass::SrcReg to F8RCRegClass::DestReg.
11523 // If the target doesn't have DirectMove, we should use the stack to do the
11524 // conversion, because the target doesn't have the instructions like mtvsrd
11525 // or mfvsrd to do this conversion directly.
11526 auto copyRegFromG8RCOrF8RC = [&] (unsigned DestReg, unsigned SrcReg) {
11527 if (Subtarget.hasDirectMove()) {
11528 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), DestReg)
11529 .addReg(SrcReg);
11530 } else {
11531 // Use the stack to do the register copy.
11532 unsigned StoreOp = PPC::STD, LoadOp = PPC::LFD;
11533 MachineRegisterInfo &RegInfo = F->getRegInfo();
11534 const TargetRegisterClass *RC = RegInfo.getRegClass(SrcReg);
11535 if (RC == &PPC::F8RCRegClass) {
11536 // Copy register from F8RCRegClass to G8RCRegClass.
11537 assert((RegInfo.getRegClass(DestReg) == &PPC::G8RCRegClass) &&
11538 "Unsupported RegClass.");
11539
11540 StoreOp = PPC::STFD;
11541 LoadOp = PPC::LD;
11542 } else {
11543 // Copy register from G8RCRegClass to F8RCRegClass.
11544 assert((RegInfo.getRegClass(SrcReg) == &PPC::G8RCRegClass) &&
11545 (RegInfo.getRegClass(DestReg) == &PPC::F8RCRegClass) &&
11546 "Unsupported RegClass.");
11547 }
11548
11549 MachineFrameInfo &MFI = F->getFrameInfo();
11550 int FrameIdx = MFI.CreateStackObject(8, 8, false);
11551
11552 MachineMemOperand *MMOStore = F->getMachineMemOperand(
11553 MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
11554 MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx),
11555 MFI.getObjectAlignment(FrameIdx));
11556
11557 // Store the SrcReg into the stack.
11558 BuildMI(*BB, MI, dl, TII->get(StoreOp))
11559 .addReg(SrcReg)
11560 .addImm(0)
11561 .addFrameIndex(FrameIdx)
11562 .addMemOperand(MMOStore);
11563
11564 MachineMemOperand *MMOLoad = F->getMachineMemOperand(
11565 MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
11566 MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
11567 MFI.getObjectAlignment(FrameIdx));
11568
11569 // Load from the stack where SrcReg is stored, and save to DestReg,
11570 // so we have done the RegClass conversion from RegClass::SrcReg to
11571 // RegClass::DestReg.
11572 BuildMI(*BB, MI, dl, TII->get(LoadOp), DestReg)
11573 .addImm(0)
11574 .addFrameIndex(FrameIdx)
11575 .addMemOperand(MMOLoad);
11576 }
11577 };
11578
11579 Register OldFPSCRReg = MI.getOperand(0).getReg();
11580
11581 // Save FPSCR value.
11582 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
11583
11584 // When the operand is a gprc register, use the two least significant bits
11585 // of the register and the mtfsf instruction to set bits 62:63 of the FPSCR.
11586 //
11587 // copy OldFPSCRTmpReg, OldFPSCRReg
11588 // (INSERT_SUBREG ExtSrcReg, (IMPLICIT_DEF ImDefReg), SrcOp, 1)
11589 // rldimi NewFPSCRTmpReg, ExtSrcReg, OldFPSCRReg, 0, 62
11590 // copy NewFPSCRReg, NewFPSCRTmpReg
11591 // mtfsf 255, NewFPSCRReg
11592 MachineOperand SrcOp = MI.getOperand(1);
11593 MachineRegisterInfo &RegInfo = F->getRegInfo();
11594 Register OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
11595
11596 copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg);
11597
11598 Register ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
11599 Register ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
11600
11601 // The first operand of INSERT_SUBREG should be a register which has
11602 // subregisters; since we only care about its RegClass, we use an
11603 // IMPLICIT_DEF register.
11604 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), ImDefReg);
11605 BuildMI(*BB, MI, dl, TII->get(PPC::INSERT_SUBREG), ExtSrcReg)
11606 .addReg(ImDefReg)
11607 .add(SrcOp)
11608 .addImm(1);
11609
11610 Register NewFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
11611 BuildMI(*BB, MI, dl, TII->get(PPC::RLDIMI), NewFPSCRTmpReg)
11612 .addReg(OldFPSCRTmpReg)
11613 .addReg(ExtSrcReg)
11614 .addImm(0)
11615 .addImm(62);
11616
11617 Register NewFPSCRReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
11618 copyRegFromG8RCOrF8RC(NewFPSCRReg, NewFPSCRTmpReg);
11619
11620 // The mask value 255 means that bits 32:63 of NewFPSCRReg are placed into
11621 // bits 32:63 of the FPSCR.
11622 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF))
11623 .addImm(255)
11624 .addReg(NewFPSCRReg)
11625 .addImm(0)
11626 .addImm(0);
11627 } else {
11628 llvm_unreachable("Unexpected instr type to insert");
11629 }
11630
11631 MI.eraseFromParent(); // The pseudo instruction is gone now.
11632 return BB;
11633 }
11634
11635 //===----------------------------------------------------------------------===//
11636 // Target Optimization Hooks
11637 //===----------------------------------------------------------------------===//
11638
11639 static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {
11640 // For the estimates, convergence is quadratic, so we essentially double the
11641 // number of digits correct after every iteration. For both FRE and FRSQRTE,
11642 // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(),
11643 // this is 2^-14. IEEE float has 23 digits and double has 52 digits.
11644 int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
11645 if (VT.getScalarType() == MVT::f64)
11646 RefinementSteps++;
11647 return RefinementSteps;
11648 }
11649
11650 SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
11651 int Enabled, int &RefinementSteps,
11652 bool &UseOneConstNR,
11653 bool Reciprocal) const {
11654 EVT VT = Operand.getValueType();
11655 if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
11656 (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
11657 (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
11658 (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
11659 (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
11660 (VT == MVT::v4f64 && Subtarget.hasQPX())) {
11661 if (RefinementSteps == ReciprocalEstimate::Unspecified)
11662 RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
11663
11664 // The Newton-Raphson computation with a single constant does not provide
11665 // enough accuracy on some CPUs.
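// Illustrative arithmetic behind getEstimateRefinementSteps above: each
// Newton-Raphson step roughly doubles the number of correct bits, so a 2^-5
// estimate progresses 5 -> 10 -> 20 -> 40 (three steps for f32, four for
// f64), while a 2^-14 estimate progresses 14 -> 28 -> 56 (one step for f32,
// two for f64).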
11666 UseOneConstNR = !Subtarget.needsTwoConstNR();
11667 return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
11668 }
11669 return SDValue();
11670 }
11671
11672 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
11673 int Enabled,
11674 int &RefinementSteps) const {
11675 EVT VT = Operand.getValueType();
11676 if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
11677 (VT == MVT::f64 && Subtarget.hasFRE()) ||
11678 (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
11679 (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
11680 (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
11681 (VT == MVT::v4f64 && Subtarget.hasQPX())) {
11682 if (RefinementSteps == ReciprocalEstimate::Unspecified)
11683 RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
11684 return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
11685 }
11686 return SDValue();
11687 }
11688
11689 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
11690 // Note: This functionality is used only when unsafe-fp-math is enabled, and
11691 // on cores with reciprocal estimates (which are used when unsafe-fp-math is
11692 // enabled for division), this functionality is redundant with the default
11693 // combiner logic (once the division -> reciprocal/multiply transformation
11694 // has taken place). As a result, this matters more for older cores than for
11695 // newer ones.
11696
11697 // Combine multiple FDIVs with the same divisor into multiple FMULs by the
11698 // reciprocal if there are two or more FDIVs (for embedded cores with only
11699 // one FP pipeline) or three or more FDIVs (for generic OOO cores).
11700 switch (Subtarget.getDarwinDirective()) {
11701 default:
11702 return 3;
11703 case PPC::DIR_440:
11704 case PPC::DIR_A2:
11705 case PPC::DIR_E500:
11706 case PPC::DIR_E500mc:
11707 case PPC::DIR_E5500:
11708 return 2;
11709 }
11710 }
11711
11712 // isConsecutiveLSLoc needs to work even if all adds have not yet been
11713 // collapsed, and so we need to look through chains of them.
11714 static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
11715 int64_t& Offset, SelectionDAG &DAG) {
11716 if (DAG.isBaseWithConstantOffset(Loc)) {
11717 Base = Loc.getOperand(0);
11718 Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
11719
11720 // The base might itself be a base plus an offset, and if so, accumulate
11721 // that as well.
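// Illustrative example: for Loc = (add (add X, 16), 8) the code above sets
// Base = (add X, 16) and advances Offset by 8; the recursive call below
// then narrows Base to X and accumulates the remaining 16, for a total
// Offset contribution of 24.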
11722 getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG); 11723 } 11724 } 11725 11726 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base, 11727 unsigned Bytes, int Dist, 11728 SelectionDAG &DAG) { 11729 if (VT.getSizeInBits() / 8 != Bytes) 11730 return false; 11731 11732 SDValue BaseLoc = Base->getBasePtr(); 11733 if (Loc.getOpcode() == ISD::FrameIndex) { 11734 if (BaseLoc.getOpcode() != ISD::FrameIndex) 11735 return false; 11736 const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 11737 int FI = cast<FrameIndexSDNode>(Loc)->getIndex(); 11738 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex(); 11739 int FS = MFI.getObjectSize(FI); 11740 int BFS = MFI.getObjectSize(BFI); 11741 if (FS != BFS || FS != (int)Bytes) return false; 11742 return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes); 11743 } 11744 11745 SDValue Base1 = Loc, Base2 = BaseLoc; 11746 int64_t Offset1 = 0, Offset2 = 0; 11747 getBaseWithConstantOffset(Loc, Base1, Offset1, DAG); 11748 getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG); 11749 if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes)) 11750 return true; 11751 11752 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 11753 const GlobalValue *GV1 = nullptr; 11754 const GlobalValue *GV2 = nullptr; 11755 Offset1 = 0; 11756 Offset2 = 0; 11757 bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1); 11758 bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2); 11759 if (isGA1 && isGA2 && GV1 == GV2) 11760 return Offset1 == (Offset2 + Dist*Bytes); 11761 return false; 11762 } 11763 11764 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does 11765 // not enforce equality of the chain operands. 11766 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base, 11767 unsigned Bytes, int Dist, 11768 SelectionDAG &DAG) { 11769 if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) { 11770 EVT VT = LS->getMemoryVT(); 11771 SDValue Loc = LS->getBasePtr(); 11772 return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG); 11773 } 11774 11775 if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) { 11776 EVT VT; 11777 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 11778 default: return false; 11779 case Intrinsic::ppc_qpx_qvlfd: 11780 case Intrinsic::ppc_qpx_qvlfda: 11781 VT = MVT::v4f64; 11782 break; 11783 case Intrinsic::ppc_qpx_qvlfs: 11784 case Intrinsic::ppc_qpx_qvlfsa: 11785 VT = MVT::v4f32; 11786 break; 11787 case Intrinsic::ppc_qpx_qvlfcd: 11788 case Intrinsic::ppc_qpx_qvlfcda: 11789 VT = MVT::v2f64; 11790 break; 11791 case Intrinsic::ppc_qpx_qvlfcs: 11792 case Intrinsic::ppc_qpx_qvlfcsa: 11793 VT = MVT::v2f32; 11794 break; 11795 case Intrinsic::ppc_qpx_qvlfiwa: 11796 case Intrinsic::ppc_qpx_qvlfiwz: 11797 case Intrinsic::ppc_altivec_lvx: 11798 case Intrinsic::ppc_altivec_lvxl: 11799 case Intrinsic::ppc_vsx_lxvw4x: 11800 case Intrinsic::ppc_vsx_lxvw4x_be: 11801 VT = MVT::v4i32; 11802 break; 11803 case Intrinsic::ppc_vsx_lxvd2x: 11804 case Intrinsic::ppc_vsx_lxvd2x_be: 11805 VT = MVT::v2f64; 11806 break; 11807 case Intrinsic::ppc_altivec_lvebx: 11808 VT = MVT::i8; 11809 break; 11810 case Intrinsic::ppc_altivec_lvehx: 11811 VT = MVT::i16; 11812 break; 11813 case Intrinsic::ppc_altivec_lvewx: 11814 VT = MVT::i32; 11815 break; 11816 } 11817 11818 return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG); 11819 } 11820 11821 if (N->getOpcode() == ISD::INTRINSIC_VOID) { 11822 EVT VT; 11823 switch 
(cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
11824 default: return false;
11825 case Intrinsic::ppc_qpx_qvstfd:
11826 case Intrinsic::ppc_qpx_qvstfda:
11827 VT = MVT::v4f64;
11828 break;
11829 case Intrinsic::ppc_qpx_qvstfs:
11830 case Intrinsic::ppc_qpx_qvstfsa:
11831 VT = MVT::v4f32;
11832 break;
11833 case Intrinsic::ppc_qpx_qvstfcd:
11834 case Intrinsic::ppc_qpx_qvstfcda:
11835 VT = MVT::v2f64;
11836 break;
11837 case Intrinsic::ppc_qpx_qvstfcs:
11838 case Intrinsic::ppc_qpx_qvstfcsa:
11839 VT = MVT::v2f32;
11840 break;
11841 case Intrinsic::ppc_qpx_qvstfiw:
11842 case Intrinsic::ppc_qpx_qvstfiwa:
11843 case Intrinsic::ppc_altivec_stvx:
11844 case Intrinsic::ppc_altivec_stvxl:
11845 case Intrinsic::ppc_vsx_stxvw4x:
11846 VT = MVT::v4i32;
11847 break;
11848 case Intrinsic::ppc_vsx_stxvd2x:
11849 VT = MVT::v2f64;
11850 break;
11851 case Intrinsic::ppc_vsx_stxvw4x_be:
11852 VT = MVT::v4i32;
11853 break;
11854 case Intrinsic::ppc_vsx_stxvd2x_be:
11855 VT = MVT::v2f64;
11856 break;
11857 case Intrinsic::ppc_altivec_stvebx:
11858 VT = MVT::i8;
11859 break;
11860 case Intrinsic::ppc_altivec_stvehx:
11861 VT = MVT::i16;
11862 break;
11863 case Intrinsic::ppc_altivec_stvewx:
11864 VT = MVT::i32;
11865 break;
11866 }
11867 
11868 return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
11869 }
11870 
11871 return false;
11872 }
11873 
11874 // Return true if there is a nearby consecutive load to the one provided
11875 // (regardless of alignment). We search up and down the chain, looking through
11876 // token factors and other loads (but nothing else). As a result, a true result
11877 // indicates that it is safe to create a new consecutive load adjacent to the
11878 // load provided.
11879 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
11880 SDValue Chain = LD->getChain();
11881 EVT VT = LD->getMemoryVT();
11882 
11883 SmallSet<SDNode *, 16> LoadRoots;
11884 SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
11885 SmallSet<SDNode *, 16> Visited;
11886 
11887 // First, search up the chain, branching to follow all token-factor operands.
11888 // If we find a consecutive load, then we're done; otherwise, record all
11889 // nodes just above the top-level loads and token factors.
11890 while (!Queue.empty()) {
11891 SDNode *ChainNext = Queue.pop_back_val();
11892 if (!Visited.insert(ChainNext).second)
11893 continue;
11894 
11895 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
11896 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
11897 return true;
11898 
11899 if (!Visited.count(ChainLD->getChain().getNode()))
11900 Queue.push_back(ChainLD->getChain().getNode());
11901 } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
11902 for (const SDUse &O : ChainNext->ops())
11903 if (!Visited.count(O.getNode()))
11904 Queue.push_back(O.getNode());
11905 } else
11906 LoadRoots.insert(ChainNext);
11907 }
11908 
11909 // Second, search down the chain, starting from the top-level nodes recorded
11910 // in the first phase. These top-level nodes are the nodes just above all
11911 // loads and token factors. Starting with their uses, recursively look through
11912 // all loads (just the chain uses) and token factors to find a consecutive
11913 // load.
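// (This second, downward phase is what catches sibling loads: a
// consecutive load need not be an ancestor of LD in the chain graph; it
// may instead hang off one of the same recorded roots, e.g. alongside LD
// under a common token factor.)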
11914 Visited.clear(); 11915 Queue.clear(); 11916 11917 for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(), 11918 IE = LoadRoots.end(); I != IE; ++I) { 11919 Queue.push_back(*I); 11920 11921 while (!Queue.empty()) { 11922 SDNode *LoadRoot = Queue.pop_back_val(); 11923 if (!Visited.insert(LoadRoot).second) 11924 continue; 11925 11926 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot)) 11927 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG)) 11928 return true; 11929 11930 for (SDNode::use_iterator UI = LoadRoot->use_begin(), 11931 UE = LoadRoot->use_end(); UI != UE; ++UI) 11932 if (((isa<MemSDNode>(*UI) && 11933 cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) || 11934 UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI)) 11935 Queue.push_back(*UI); 11936 } 11937 } 11938 11939 return false; 11940 } 11941 11942 /// This function is called when we have proved that a SETCC node can be replaced 11943 /// by subtraction (and other supporting instructions) so that the result of 11944 /// comparison is kept in a GPR instead of CR. This function is purely for 11945 /// codegen purposes and has some flags to guide the codegen process. 11946 static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement, 11947 bool Swap, SDLoc &DL, SelectionDAG &DAG) { 11948 assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected."); 11949 11950 // Zero extend the operands to the largest legal integer. Originally, they 11951 // must be of a strictly smaller size. 11952 auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0), 11953 DAG.getConstant(Size, DL, MVT::i32)); 11954 auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1), 11955 DAG.getConstant(Size, DL, MVT::i32)); 11956 11957 // Swap if needed. Depends on the condition code. 11958 if (Swap) 11959 std::swap(Op0, Op1); 11960 11961 // Subtract extended integers. 11962 auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1); 11963 11964 // Move the sign bit to the least significant position and zero out the rest. 11965 // Now the least significant bit carries the result of original comparison. 11966 auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode, 11967 DAG.getConstant(Size - 1, DL, MVT::i32)); 11968 auto Final = Shifted; 11969 11970 // Complement the result if needed. Based on the condition code. 11971 if (Complement) 11972 Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted, 11973 DAG.getConstant(1, DL, MVT::i64)); 11974 11975 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final); 11976 } 11977 11978 SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N, 11979 DAGCombinerInfo &DCI) const { 11980 assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected."); 11981 11982 SelectionDAG &DAG = DCI.DAG; 11983 SDLoc DL(N); 11984 11985 // Size of integers being compared has a critical role in the following 11986 // analysis, so we prefer to do this when all types are legal. 
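// (Editorial sketch of the rewrite this function performs, via
// generateEquivalentSub above, for i32 operands on a 64-bit target:
// "setult a, b" becomes the low bit of
//   ((zext_i64 a) - (zext_i64 b)) >> 63,
// since with zero-extended operands bit 63 of the difference is exactly
// the unsigned borrow. SETUGE additionally XORs that bit with 1, and
// SETUGT/SETULE first swap the operands.)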
11987 if (!DCI.isAfterLegalizeDAG())
11988 return SDValue();
11989 
11990 // If all users of the SETCC extend its value to a legal integer type,
11991 // then we replace the SETCC with a subtraction.
11992 for (SDNode::use_iterator UI = N->use_begin(),
11993 UE = N->use_end(); UI != UE; ++UI) {
11994 if (UI->getOpcode() != ISD::ZERO_EXTEND)
11995 return SDValue();
11996 }
11997 
11998 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
11999 auto OpSize = N->getOperand(0).getValueSizeInBits();
12000 
12001 unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();
12002 
12003 if (OpSize < Size) {
12004 switch (CC) {
12005 default: break;
12006 case ISD::SETULT:
12007 return generateEquivalentSub(N, Size, false, false, DL, DAG);
12008 case ISD::SETULE:
12009 return generateEquivalentSub(N, Size, true, true, DL, DAG);
12010 case ISD::SETUGT:
12011 return generateEquivalentSub(N, Size, false, true, DL, DAG);
12012 case ISD::SETUGE:
12013 return generateEquivalentSub(N, Size, true, false, DL, DAG);
12014 }
12015 }
12016 
12017 return SDValue();
12018 }
12019 
12020 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
12021 DAGCombinerInfo &DCI) const {
12022 SelectionDAG &DAG = DCI.DAG;
12023 SDLoc dl(N);
12024 
12025 assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
12026 // If we're tracking CR bits, we need to be careful that we don't have:
12027 // trunc(binary-ops(zext(x), zext(y)))
12028 // or
12029 // trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
12030 // such that we're unnecessarily moving things into GPRs when it would be
12031 // better to keep them in CR bits.
12032 
12033 // Note that trunc here can be an actual i1 trunc, or can be the effective
12034 // truncation that comes from a setcc or select_cc.
12035 if (N->getOpcode() == ISD::TRUNCATE &&
12036 N->getValueType(0) != MVT::i1)
12037 return SDValue();
12038 
12039 if (N->getOperand(0).getValueType() != MVT::i32 &&
12040 N->getOperand(0).getValueType() != MVT::i64)
12041 return SDValue();
12042 
12043 if (N->getOpcode() == ISD::SETCC ||
12044 N->getOpcode() == ISD::SELECT_CC) {
12045 // If we're looking at a comparison, then we need to make sure that the
12046 // high bits (all except for the first) don't affect the result.
12047 ISD::CondCode CC =
12048 cast<CondCodeSDNode>(N->getOperand(
12049 N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
12050 unsigned OpBits = N->getOperand(0).getValueSizeInBits();
12051 
12052 if (ISD::isSignedIntSetCC(CC)) {
12053 if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
12054 DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
12055 return SDValue();
12056 } else if (ISD::isUnsignedIntSetCC(CC)) {
12057 if (!DAG.MaskedValueIsZero(N->getOperand(0),
12058 APInt::getHighBitsSet(OpBits, OpBits-1)) ||
12059 !DAG.MaskedValueIsZero(N->getOperand(1),
12060 APInt::getHighBitsSet(OpBits, OpBits-1)))
12061 return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
12062 : SDValue());
12063 } else {
12064 // This is neither a signed nor an unsigned comparison; just make sure
12065 // that the high bits are equal.
12066 KnownBits Op1Known = DAG.computeKnownBits(N->getOperand(0));
12067 KnownBits Op2Known = DAG.computeKnownBits(N->getOperand(1));
12068 
12069 // We don't really care about what is known about the first bit (if
12070 // anything), so clear it in all masks prior to comparing them.
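// (Bit 0 is what the effective i1 truncation keeps; the test below only
// requires the remaining bits of both operands to have identical known
// values.)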
12071 Op1Known.Zero.clearBit(0); Op1Known.One.clearBit(0); 12072 Op2Known.Zero.clearBit(0); Op2Known.One.clearBit(0); 12073 12074 if (Op1Known.Zero != Op2Known.Zero || Op1Known.One != Op2Known.One) 12075 return SDValue(); 12076 } 12077 } 12078 12079 // We now know that the higher-order bits are irrelevant, we just need to 12080 // make sure that all of the intermediate operations are bit operations, and 12081 // all inputs are extensions. 12082 if (N->getOperand(0).getOpcode() != ISD::AND && 12083 N->getOperand(0).getOpcode() != ISD::OR && 12084 N->getOperand(0).getOpcode() != ISD::XOR && 12085 N->getOperand(0).getOpcode() != ISD::SELECT && 12086 N->getOperand(0).getOpcode() != ISD::SELECT_CC && 12087 N->getOperand(0).getOpcode() != ISD::TRUNCATE && 12088 N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND && 12089 N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND && 12090 N->getOperand(0).getOpcode() != ISD::ANY_EXTEND) 12091 return SDValue(); 12092 12093 if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) && 12094 N->getOperand(1).getOpcode() != ISD::AND && 12095 N->getOperand(1).getOpcode() != ISD::OR && 12096 N->getOperand(1).getOpcode() != ISD::XOR && 12097 N->getOperand(1).getOpcode() != ISD::SELECT && 12098 N->getOperand(1).getOpcode() != ISD::SELECT_CC && 12099 N->getOperand(1).getOpcode() != ISD::TRUNCATE && 12100 N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND && 12101 N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND && 12102 N->getOperand(1).getOpcode() != ISD::ANY_EXTEND) 12103 return SDValue(); 12104 12105 SmallVector<SDValue, 4> Inputs; 12106 SmallVector<SDValue, 8> BinOps, PromOps; 12107 SmallPtrSet<SDNode *, 16> Visited; 12108 12109 for (unsigned i = 0; i < 2; ++i) { 12110 if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 12111 N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 12112 N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 12113 N->getOperand(i).getOperand(0).getValueType() == MVT::i1) || 12114 isa<ConstantSDNode>(N->getOperand(i))) 12115 Inputs.push_back(N->getOperand(i)); 12116 else 12117 BinOps.push_back(N->getOperand(i)); 12118 12119 if (N->getOpcode() == ISD::TRUNCATE) 12120 break; 12121 } 12122 12123 // Visit all inputs, collect all binary operations (and, or, xor and 12124 // select) that are all fed by extensions. 12125 while (!BinOps.empty()) { 12126 SDValue BinOp = BinOps.back(); 12127 BinOps.pop_back(); 12128 12129 if (!Visited.insert(BinOp.getNode()).second) 12130 continue; 12131 12132 PromOps.push_back(BinOp); 12133 12134 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 12135 // The condition of the select is not promoted. 
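// (For SELECT that is operand 0; for SELECT_CC, operands 0 and 1 are the
// compared values and operand 4 is the condition code, so only the
// selected values, operands 2 and 3, are walked here.)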
12136 if (BinOp.getOpcode() == ISD::SELECT && i == 0)
12137 continue;
12138 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
12139 continue;
12140 
12141 if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
12142 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
12143 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
12144 BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
12145 isa<ConstantSDNode>(BinOp.getOperand(i))) {
12146 Inputs.push_back(BinOp.getOperand(i));
12147 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
12148 BinOp.getOperand(i).getOpcode() == ISD::OR ||
12149 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
12150 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
12151 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
12152 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
12153 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
12154 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
12155 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
12156 BinOps.push_back(BinOp.getOperand(i));
12157 } else {
12158 // We have an input that is not an extension or another binary
12159 // operation; we'll abort this transformation.
12160 return SDValue();
12161 }
12162 }
12163 }
12164 
12165 // Make sure that this is a self-contained cluster of operations (which
12166 // is not quite the same thing as saying that everything has only one
12167 // use).
12168 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12169 if (isa<ConstantSDNode>(Inputs[i]))
12170 continue;
12171 
12172 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
12173 UE = Inputs[i].getNode()->use_end();
12174 UI != UE; ++UI) {
12175 SDNode *User = *UI;
12176 if (User != N && !Visited.count(User))
12177 return SDValue();
12178 
12179 // Make sure that we're not going to promote the non-output-value
12180 // operand(s) of SELECT or SELECT_CC.
12181 // FIXME: Although we could sometimes handle this, and it does occur in
12182 // practice that one of the condition inputs to the select is also one of
12183 // the outputs, we currently can't deal with this.
12184 if (User->getOpcode() == ISD::SELECT) {
12185 if (User->getOperand(0) == Inputs[i])
12186 return SDValue();
12187 } else if (User->getOpcode() == ISD::SELECT_CC) {
12188 if (User->getOperand(0) == Inputs[i] ||
12189 User->getOperand(1) == Inputs[i])
12190 return SDValue();
12191 }
12192 }
12193 }
12194 
12195 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
12196 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
12197 UE = PromOps[i].getNode()->use_end();
12198 UI != UE; ++UI) {
12199 SDNode *User = *UI;
12200 if (User != N && !Visited.count(User))
12201 return SDValue();
12202 
12203 // Make sure that we're not going to promote the non-output-value
12204 // operand(s) of SELECT or SELECT_CC.
12205 // FIXME: Although we could sometimes handle this, and it does occur in
12206 // practice that one of the condition inputs to the select is also one of
12207 // the outputs, we currently can't deal with this.
12208 if (User->getOpcode() == ISD::SELECT) {
12209 if (User->getOperand(0) == PromOps[i])
12210 return SDValue();
12211 } else if (User->getOpcode() == ISD::SELECT_CC) {
12212 if (User->getOperand(0) == PromOps[i] ||
12213 User->getOperand(1) == PromOps[i])
12214 return SDValue();
12215 }
12216 }
12217 }
12218 
12219 // Replace all inputs with the extension operand.
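// For example, an input (zext i1 %x to i32) is RAUW'd with %x itself; the
// i1 versions of the clustered operations recreated below then consume %x
// directly.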
12220 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 12221 // Constants may have users outside the cluster of to-be-promoted nodes, 12222 // and so we need to replace those as we do the promotions. 12223 if (isa<ConstantSDNode>(Inputs[i])) 12224 continue; 12225 else 12226 DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0)); 12227 } 12228 12229 std::list<HandleSDNode> PromOpHandles; 12230 for (auto &PromOp : PromOps) 12231 PromOpHandles.emplace_back(PromOp); 12232 12233 // Replace all operations (these are all the same, but have a different 12234 // (i1) return type). DAG.getNode will validate that the types of 12235 // a binary operator match, so go through the list in reverse so that 12236 // we've likely promoted both operands first. Any intermediate truncations or 12237 // extensions disappear. 12238 while (!PromOpHandles.empty()) { 12239 SDValue PromOp = PromOpHandles.back().getValue(); 12240 PromOpHandles.pop_back(); 12241 12242 if (PromOp.getOpcode() == ISD::TRUNCATE || 12243 PromOp.getOpcode() == ISD::SIGN_EXTEND || 12244 PromOp.getOpcode() == ISD::ZERO_EXTEND || 12245 PromOp.getOpcode() == ISD::ANY_EXTEND) { 12246 if (!isa<ConstantSDNode>(PromOp.getOperand(0)) && 12247 PromOp.getOperand(0).getValueType() != MVT::i1) { 12248 // The operand is not yet ready (see comment below). 12249 PromOpHandles.emplace_front(PromOp); 12250 continue; 12251 } 12252 12253 SDValue RepValue = PromOp.getOperand(0); 12254 if (isa<ConstantSDNode>(RepValue)) 12255 RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue); 12256 12257 DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue); 12258 continue; 12259 } 12260 12261 unsigned C; 12262 switch (PromOp.getOpcode()) { 12263 default: C = 0; break; 12264 case ISD::SELECT: C = 1; break; 12265 case ISD::SELECT_CC: C = 2; break; 12266 } 12267 12268 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 12269 PromOp.getOperand(C).getValueType() != MVT::i1) || 12270 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 12271 PromOp.getOperand(C+1).getValueType() != MVT::i1)) { 12272 // The to-be-promoted operands of this node have not yet been 12273 // promoted (this should be rare because we're going through the 12274 // list backward, but if one of the operands has several users in 12275 // this cluster of to-be-promoted nodes, it is possible). 12276 PromOpHandles.emplace_front(PromOp); 12277 continue; 12278 } 12279 12280 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 12281 PromOp.getNode()->op_end()); 12282 12283 // If there are any constant inputs, make sure they're replaced now. 12284 for (unsigned i = 0; i < 2; ++i) 12285 if (isa<ConstantSDNode>(Ops[C+i])) 12286 Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]); 12287 12288 DAG.ReplaceAllUsesOfValueWith(PromOp, 12289 DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops)); 12290 } 12291 12292 // Now we're left with the initial truncation itself. 12293 if (N->getOpcode() == ISD::TRUNCATE) 12294 return N->getOperand(0); 12295 12296 // Otherwise, this is a comparison. The operands to be compared have just 12297 // changed type (to i1), but everything else is the same. 12298 return SDValue(N, 0); 12299 } 12300 12301 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N, 12302 DAGCombinerInfo &DCI) const { 12303 SelectionDAG &DAG = DCI.DAG; 12304 SDLoc dl(N); 12305 12306 // If we're tracking CR bits, we need to be careful that we don't have: 12307 // zext(binary-ops(trunc(x), trunc(y))) 12308 // or 12309 // zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...) 
12310 // such that we're unnecessarily moving things into CR bits that can more
12311 // efficiently stay in GPRs. Note that if we're not certain that the high
12312 // bits are set as required by the final extension, we still may need to do
12313 // some masking to get the proper behavior.
12314 
12315 // This same functionality is important on PPC64 when dealing with
12316 // 32-to-64-bit extensions; these occur often when 32-bit values are used as
12317 // the return values of functions. Because it is so similar, it is handled
12318 // here as well.
12319 
12320 if (N->getValueType(0) != MVT::i32 &&
12321 N->getValueType(0) != MVT::i64)
12322 return SDValue();
12323 
12324 if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
12325 (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
12326 return SDValue();
12327 
12328 if (N->getOperand(0).getOpcode() != ISD::AND &&
12329 N->getOperand(0).getOpcode() != ISD::OR &&
12330 N->getOperand(0).getOpcode() != ISD::XOR &&
12331 N->getOperand(0).getOpcode() != ISD::SELECT &&
12332 N->getOperand(0).getOpcode() != ISD::SELECT_CC)
12333 return SDValue();
12334 
12335 SmallVector<SDValue, 4> Inputs;
12336 SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
12337 SmallPtrSet<SDNode *, 16> Visited;
12338 
12339 // Visit all inputs, collect all binary operations (and, or, xor and
12340 // select) that are all fed by truncations.
12341 while (!BinOps.empty()) {
12342 SDValue BinOp = BinOps.back();
12343 BinOps.pop_back();
12344 
12345 if (!Visited.insert(BinOp.getNode()).second)
12346 continue;
12347 
12348 PromOps.push_back(BinOp);
12349 
12350 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
12351 // The condition of the select is not promoted.
12352 if (BinOp.getOpcode() == ISD::SELECT && i == 0)
12353 continue;
12354 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
12355 continue;
12356 
12357 if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
12358 isa<ConstantSDNode>(BinOp.getOperand(i))) {
12359 Inputs.push_back(BinOp.getOperand(i));
12360 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
12361 BinOp.getOperand(i).getOpcode() == ISD::OR ||
12362 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
12363 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
12364 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
12365 BinOps.push_back(BinOp.getOperand(i));
12366 } else {
12367 // We have an input that is not a truncation or another binary
12368 // operation; we'll abort this transformation.
12369 return SDValue();
12370 }
12371 }
12372 }
12373 
12374 // The operands of a select that must be truncated when the select is
12375 // promoted because the operand is actually part of the to-be-promoted set.
12376 DenseMap<SDNode *, EVT> SelectTruncOp[2];
12377 
12378 // Make sure that this is a self-contained cluster of operations (which
12379 // is not quite the same thing as saying that everything has only one
12380 // use).
12381 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12382 if (isa<ConstantSDNode>(Inputs[i]))
12383 continue;
12384 
12385 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
12386 UE = Inputs[i].getNode()->use_end();
12387 UI != UE; ++UI) {
12388 SDNode *User = *UI;
12389 if (User != N && !Visited.count(User))
12390 return SDValue();
12391 
12392 // If we're going to promote the non-output-value operand(s) of SELECT
12393 // or SELECT_CC, record them for truncation.
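// (The recorded EVT is used near the end of this function to re-TRUNCATE
// a comparison operand after it has been promoted along with the rest of
// the cluster.)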
12394 if (User->getOpcode() == ISD::SELECT) {
12395 if (User->getOperand(0) == Inputs[i])
12396 SelectTruncOp[0].insert(std::make_pair(User,
12397 User->getOperand(0).getValueType()));
12398 } else if (User->getOpcode() == ISD::SELECT_CC) {
12399 if (User->getOperand(0) == Inputs[i])
12400 SelectTruncOp[0].insert(std::make_pair(User,
12401 User->getOperand(0).getValueType()));
12402 if (User->getOperand(1) == Inputs[i])
12403 SelectTruncOp[1].insert(std::make_pair(User,
12404 User->getOperand(1).getValueType()));
12405 }
12406 }
12407 }
12408 
12409 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
12410 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
12411 UE = PromOps[i].getNode()->use_end();
12412 UI != UE; ++UI) {
12413 SDNode *User = *UI;
12414 if (User != N && !Visited.count(User))
12415 return SDValue();
12416 
12417 // If we're going to promote the non-output-value operand(s) of SELECT
12418 // or SELECT_CC, record them for truncation.
12419 if (User->getOpcode() == ISD::SELECT) {
12420 if (User->getOperand(0) == PromOps[i])
12421 SelectTruncOp[0].insert(std::make_pair(User,
12422 User->getOperand(0).getValueType()));
12423 } else if (User->getOpcode() == ISD::SELECT_CC) {
12424 if (User->getOperand(0) == PromOps[i])
12425 SelectTruncOp[0].insert(std::make_pair(User,
12426 User->getOperand(0).getValueType()));
12427 if (User->getOperand(1) == PromOps[i])
12428 SelectTruncOp[1].insert(std::make_pair(User,
12429 User->getOperand(1).getValueType()));
12430 }
12431 }
12432 }
12433 
12434 unsigned PromBits = N->getOperand(0).getValueSizeInBits();
12435 bool ReallyNeedsExt = false;
12436 if (N->getOpcode() != ISD::ANY_EXTEND) {
12437 // If the inputs are not all already sign/zero-extended, then
12438 // we'll still need to do that at the end.
12439 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12440 if (isa<ConstantSDNode>(Inputs[i]))
12441 continue;
12442 
12443 unsigned OpBits =
12444 Inputs[i].getOperand(0).getValueSizeInBits();
12445 assert(PromBits < OpBits && "Truncation not to a smaller bit count?");
12446 
12447 if ((N->getOpcode() == ISD::ZERO_EXTEND &&
12448 !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
12449 APInt::getHighBitsSet(OpBits,
12450 OpBits-PromBits))) ||
12451 (N->getOpcode() == ISD::SIGN_EXTEND &&
12452 DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
12453 (OpBits-(PromBits-1)))) {
12454 ReallyNeedsExt = true;
12455 break;
12456 }
12457 }
12458 }
12459 
12460 // Replace all inputs, either with the truncation operand, or a
12461 // truncation or extension to the final output type.
12462 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12463 // Constant inputs need to be replaced with the to-be-promoted nodes that
12464 // use them because they might have users outside of the cluster of
12465 // promoted nodes.
12466 if (isa<ConstantSDNode>(Inputs[i])) 12467 continue; 12468 12469 SDValue InSrc = Inputs[i].getOperand(0); 12470 if (Inputs[i].getValueType() == N->getValueType(0)) 12471 DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc); 12472 else if (N->getOpcode() == ISD::SIGN_EXTEND) 12473 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 12474 DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0))); 12475 else if (N->getOpcode() == ISD::ZERO_EXTEND) 12476 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 12477 DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0))); 12478 else 12479 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 12480 DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0))); 12481 } 12482 12483 std::list<HandleSDNode> PromOpHandles; 12484 for (auto &PromOp : PromOps) 12485 PromOpHandles.emplace_back(PromOp); 12486 12487 // Replace all operations (these are all the same, but have a different 12488 // (promoted) return type). DAG.getNode will validate that the types of 12489 // a binary operator match, so go through the list in reverse so that 12490 // we've likely promoted both operands first. 12491 while (!PromOpHandles.empty()) { 12492 SDValue PromOp = PromOpHandles.back().getValue(); 12493 PromOpHandles.pop_back(); 12494 12495 unsigned C; 12496 switch (PromOp.getOpcode()) { 12497 default: C = 0; break; 12498 case ISD::SELECT: C = 1; break; 12499 case ISD::SELECT_CC: C = 2; break; 12500 } 12501 12502 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 12503 PromOp.getOperand(C).getValueType() != N->getValueType(0)) || 12504 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 12505 PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) { 12506 // The to-be-promoted operands of this node have not yet been 12507 // promoted (this should be rare because we're going through the 12508 // list backward, but if one of the operands has several users in 12509 // this cluster of to-be-promoted nodes, it is possible). 12510 PromOpHandles.emplace_front(PromOp); 12511 continue; 12512 } 12513 12514 // For SELECT and SELECT_CC nodes, we do a similar check for any 12515 // to-be-promoted comparison inputs. 12516 if (PromOp.getOpcode() == ISD::SELECT || 12517 PromOp.getOpcode() == ISD::SELECT_CC) { 12518 if ((SelectTruncOp[0].count(PromOp.getNode()) && 12519 PromOp.getOperand(0).getValueType() != N->getValueType(0)) || 12520 (SelectTruncOp[1].count(PromOp.getNode()) && 12521 PromOp.getOperand(1).getValueType() != N->getValueType(0))) { 12522 PromOpHandles.emplace_front(PromOp); 12523 continue; 12524 } 12525 } 12526 12527 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 12528 PromOp.getNode()->op_end()); 12529 12530 // If this node has constant inputs, then they'll need to be promoted here. 12531 for (unsigned i = 0; i < 2; ++i) { 12532 if (!isa<ConstantSDNode>(Ops[C+i])) 12533 continue; 12534 if (Ops[C+i].getValueType() == N->getValueType(0)) 12535 continue; 12536 12537 if (N->getOpcode() == ISD::SIGN_EXTEND) 12538 Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 12539 else if (N->getOpcode() == ISD::ZERO_EXTEND) 12540 Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 12541 else 12542 Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 12543 } 12544 12545 // If we've promoted the comparison inputs of a SELECT or SELECT_CC, 12546 // truncate them again to the original value type. 
12547 if (PromOp.getOpcode() == ISD::SELECT ||
12548 PromOp.getOpcode() == ISD::SELECT_CC) {
12549 auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
12550 if (SI0 != SelectTruncOp[0].end())
12551 Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
12552 auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
12553 if (SI1 != SelectTruncOp[1].end())
12554 Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
12555 }
12556 
12557 DAG.ReplaceAllUsesOfValueWith(PromOp,
12558 DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
12559 }
12560 
12561 // Now we're left with the initial extension itself.
12562 if (!ReallyNeedsExt)
12563 return N->getOperand(0);
12564 
12565 // To zero extend, just mask off everything except for the first bit (in the
12566 // i1 case).
12567 if (N->getOpcode() == ISD::ZERO_EXTEND)
12568 return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
12569 DAG.getConstant(APInt::getLowBitsSet(
12570 N->getValueSizeInBits(0), PromBits),
12571 dl, N->getValueType(0)));
12572 
12573 assert(N->getOpcode() == ISD::SIGN_EXTEND &&
12574 "Invalid extension type");
12575 EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
12576 SDValue ShiftCst =
12577 DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
12578 return DAG.getNode(
12579 ISD::SRA, dl, N->getValueType(0),
12580 DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
12581 ShiftCst);
12582 }
12583 
12584 SDValue PPCTargetLowering::combineSetCC(SDNode *N,
12585 DAGCombinerInfo &DCI) const {
12586 assert(N->getOpcode() == ISD::SETCC &&
12587 "Should be called with a SETCC node");
12588 
12589 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
12590 if (CC == ISD::SETNE || CC == ISD::SETEQ) {
12591 SDValue LHS = N->getOperand(0);
12592 SDValue RHS = N->getOperand(1);
12593 
12594 // If there is a '0 - y' pattern, canonicalize the pattern to the RHS.
12595 if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
12596 LHS.hasOneUse())
12597 std::swap(LHS, RHS);
12598 
12599 // x == 0-y --> x+y == 0
12600 // x != 0-y --> x+y != 0
12601 if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
12602 RHS.hasOneUse()) {
12603 SDLoc DL(N);
12604 SelectionDAG &DAG = DCI.DAG;
12605 EVT VT = N->getValueType(0);
12606 EVT OpVT = LHS.getValueType();
12607 SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
12608 return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
12609 }
12610 }
12611 
12612 return DAGCombineTruncBoolExt(N, DCI);
12613 }
12614 
12615 // Is this an extending load from an f32 to an f64?
12616 static bool isFPExtLoad(SDValue Op) {
12617 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()))
12618 return LD->getExtensionType() == ISD::EXTLOAD &&
12619 Op.getValueType() == MVT::f64;
12620 return false;
12621 }
12622 
12623 /// Reduces the number of fp-to-int conversions when building a vector.
12624 ///
12625 /// If this vector is built out of floating to integer conversions,
12626 /// transform it to a vector built out of floating point values followed by a
12627 /// single floating to integer conversion of the vector.
12628 /// Namely (build_vector (fptosi $A), (fptosi $B), ...)
12629 /// becomes (fptosi (build_vector ($A, $B, ...)))
12630 SDValue PPCTargetLowering::
12631 combineElementTruncationToVectorTruncation(SDNode *N,
12632 DAGCombinerInfo &DCI) const {
12633 assert(N->getOpcode() == ISD::BUILD_VECTOR &&
12634 "Should be called with a BUILD_VECTOR node");
12635 
12636 SelectionDAG &DAG = DCI.DAG;
12637 SDLoc dl(N);
12638 
12639 SDValue FirstInput = N->getOperand(0);
12640 assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
12641 "The input operand must be an fp-to-int conversion.");
12642 
12643 // This combine happens after legalization, so the fp_to_[su]i nodes are
12644 // already converted to PPCISD nodes.
12645 unsigned FirstConversion = FirstInput.getOperand(0).getOpcode();
12646 if (FirstConversion == PPCISD::FCTIDZ ||
12647 FirstConversion == PPCISD::FCTIDUZ ||
12648 FirstConversion == PPCISD::FCTIWZ ||
12649 FirstConversion == PPCISD::FCTIWUZ) {
12650 bool IsSplat = true;
12651 bool Is32Bit = FirstConversion == PPCISD::FCTIWZ ||
12652 FirstConversion == PPCISD::FCTIWUZ;
12653 EVT SrcVT = FirstInput.getOperand(0).getValueType();
12654 SmallVector<SDValue, 4> Ops;
12655 EVT TargetVT = N->getValueType(0);
12656 for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
12657 SDValue NextOp = N->getOperand(i);
12658 if (NextOp.getOpcode() != PPCISD::MFVSR)
12659 return SDValue();
12660 unsigned NextConversion = NextOp.getOperand(0).getOpcode();
12661 if (NextConversion != FirstConversion)
12662 return SDValue();
12663 // If we are converting to 32-bit integers, we need to add an FP_ROUND.
12664 // This is not valid if the input was originally double precision. It is
12665 // also not profitable to do unless this is an extending load, in which
12666 // case doing this combine will allow us to combine consecutive loads.
12667 if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0)))
12668 return SDValue();
12669 if (N->getOperand(i) != FirstInput)
12670 IsSplat = false;
12671 }
12672 
12673 // If this is a splat, we leave it as-is since there will be only a single
12674 // fp-to-int conversion followed by a splat of the integer. This is better
12675 // for 32-bit and smaller ints and neutral for 64-bit ints.
12676 if (IsSplat)
12677 return SDValue();
12678 
12679 // Now that we know we have the right type of node, get its operands.
12680 for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
12681 SDValue In = N->getOperand(i).getOperand(0);
12682 if (Is32Bit) {
12683 // For 32-bit values, we need to add an FP_ROUND node (if we made it
12684 // here, we know that all inputs are extending loads so this is safe).
12685 if (In.isUndef())
12686 Ops.push_back(DAG.getUNDEF(SrcVT));
12687 else {
12688 SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl,
12689 MVT::f32, In.getOperand(0),
12690 DAG.getIntPtrConstant(1, dl));
12691 Ops.push_back(Trunc);
12692 }
12693 } else
12694 Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
12695 }
12696 
12697 unsigned Opcode;
12698 if (FirstConversion == PPCISD::FCTIDZ ||
12699 FirstConversion == PPCISD::FCTIWZ)
12700 Opcode = ISD::FP_TO_SINT;
12701 else
12702 Opcode = ISD::FP_TO_UINT;
12703 
12704 EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;
12705 SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
12706 return DAG.getNode(Opcode, dl, TargetVT, BV);
12707 }
12708 return SDValue();
12709 }
12710 
12711 /// Reduce the number of loads when building a vector.
12712 ///
12713 /// Building a vector out of multiple loads can be converted to a load
12714 /// of the vector type if the loads are consecutive.
If the loads are 12715 /// consecutive but in descending order, a shuffle is added at the end 12716 /// to reorder the vector. 12717 static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) { 12718 assert(N->getOpcode() == ISD::BUILD_VECTOR && 12719 "Should be called with a BUILD_VECTOR node"); 12720 12721 SDLoc dl(N); 12722 12723 // Return early for non byte-sized type, as they can't be consecutive. 12724 if (!N->getValueType(0).getVectorElementType().isByteSized()) 12725 return SDValue(); 12726 12727 bool InputsAreConsecutiveLoads = true; 12728 bool InputsAreReverseConsecutive = true; 12729 unsigned ElemSize = N->getValueType(0).getScalarType().getStoreSize(); 12730 SDValue FirstInput = N->getOperand(0); 12731 bool IsRoundOfExtLoad = false; 12732 12733 if (FirstInput.getOpcode() == ISD::FP_ROUND && 12734 FirstInput.getOperand(0).getOpcode() == ISD::LOAD) { 12735 LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0)); 12736 IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD; 12737 } 12738 // Not a build vector of (possibly fp_rounded) loads. 12739 if ((!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) || 12740 N->getNumOperands() == 1) 12741 return SDValue(); 12742 12743 for (int i = 1, e = N->getNumOperands(); i < e; ++i) { 12744 // If any inputs are fp_round(extload), they all must be. 12745 if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND) 12746 return SDValue(); 12747 12748 SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) : 12749 N->getOperand(i); 12750 if (NextInput.getOpcode() != ISD::LOAD) 12751 return SDValue(); 12752 12753 SDValue PreviousInput = 12754 IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1); 12755 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput); 12756 LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput); 12757 12758 // If any inputs are fp_round(extload), they all must be. 12759 if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD) 12760 return SDValue(); 12761 12762 if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG)) 12763 InputsAreConsecutiveLoads = false; 12764 if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG)) 12765 InputsAreReverseConsecutive = false; 12766 12767 // Exit early if the loads are neither consecutive nor reverse consecutive. 12768 if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive) 12769 return SDValue(); 12770 } 12771 12772 assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) && 12773 "The loads cannot be both consecutive and reverse consecutive."); 12774 12775 SDValue FirstLoadOp = 12776 IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput; 12777 SDValue LastLoadOp = 12778 IsRoundOfExtLoad ? 
N->getOperand(N->getNumOperands()-1).getOperand(0) :
12779 N->getOperand(N->getNumOperands()-1);
12780 
12781 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
12782 LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
12783 if (InputsAreConsecutiveLoads) {
12784 assert(LD1 && "Input needs to be a LoadSDNode.");
12785 return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
12786 LD1->getBasePtr(), LD1->getPointerInfo(),
12787 LD1->getAlignment());
12788 }
12789 if (InputsAreReverseConsecutive) {
12790 assert(LDL && "Input needs to be a LoadSDNode.");
12791 SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
12792 LDL->getBasePtr(), LDL->getPointerInfo(),
12793 LDL->getAlignment());
12794 SmallVector<int, 16> Ops;
12795 for (int i = N->getNumOperands() - 1; i >= 0; i--)
12796 Ops.push_back(i);
12797 
12798 return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
12799 DAG.getUNDEF(N->getValueType(0)), Ops);
12800 }
12801 return SDValue();
12802 }
12803 
12804 // This function adds the required vector_shuffle needed to get
12805 // the elements of the vector extract in the correct position
12806 // as specified by the CorrectElems encoding.
12807 static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
12808 SDValue Input, uint64_t Elems,
12809 uint64_t CorrectElems) {
12810 SDLoc dl(N);
12811 
12812 unsigned NumElems = Input.getValueType().getVectorNumElements();
12813 SmallVector<int, 16> ShuffleMask(NumElems, -1);
12814 
12815 // Knowing the element indices being extracted from the original
12816 // vector and the order in which they're being inserted, just put
12817 // them at element indices required for the instruction.
12818 for (unsigned i = 0; i < N->getNumOperands(); i++) {
12819 if (DAG.getDataLayout().isLittleEndian())
12820 ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
12821 else
12822 ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
12823 CorrectElems = CorrectElems >> 8;
12824 Elems = Elems >> 8;
12825 }
12826 
12827 SDValue Shuffle =
12828 DAG.getVectorShuffle(Input.getValueType(), dl, Input,
12829 DAG.getUNDEF(Input.getValueType()), ShuffleMask);
12830 
12831 EVT Ty = N->getValueType(0);
12832 SDValue BV = DAG.getNode(PPCISD::SExtVElems, dl, Ty, Shuffle);
12833 return BV;
12834 }
12835 
12836 // Look for build vector patterns where input operands come from sign
12837 // extended vector_extract elements of specific indices. If the correct indices
12838 // aren't used, add a vector shuffle to fix up the indices and create a new
12839 // PPCISD::SExtVElems node which selects the vector sign extend instructions
12840 // during instruction selection.
12841 static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
12842 // This array encodes the indices that the vector sign extend instructions
12843 // extract from when extending from one type to another for both BE and LE.
12844 // The right nibble of each byte corresponds to the LE indices,
12845 // and the left nibble of each byte corresponds to the BE indices.
12846 // For example: 0x3074B8FC byte->word
12847 // For LE: the allowed indices are: 0x0,0x4,0x8,0xC
12848 // For BE: the allowed indices are: 0x3,0x7,0xB,0xF
12849 // For example: 0x000070F8 byte->double word
12850 // For LE: the allowed indices are: 0x0,0x8
12851 // For BE: the allowed indices are: 0x7,0xF
12852 uint64_t TargetElems[] = {
12853 0x3074B8FC, // b->w
12854 0x000070F8, // b->d
12855 0x10325476, // h->w
12856 0x00003074, // h->d
12857 0x00001032, // w->d
12858 };
12859 
12860 uint64_t Elems = 0;
12861 int Index;
12862 SDValue Input;
12863 
12864 auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
12865 if (!Op)
12866 return false;
12867 if (Op.getOpcode() != ISD::SIGN_EXTEND &&
12868 Op.getOpcode() != ISD::SIGN_EXTEND_INREG)
12869 return false;
12870 
12871 // A SIGN_EXTEND_INREG might be fed by an ANY_EXTEND to produce a value
12872 // of the right width.
12873 SDValue Extract = Op.getOperand(0);
12874 if (Extract.getOpcode() == ISD::ANY_EXTEND)
12875 Extract = Extract.getOperand(0);
12876 if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
12877 return false;
12878 
12879 ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
12880 if (!ExtOp)
12881 return false;
12882 
12883 Index = ExtOp->getZExtValue();
12884 if (Input && Input != Extract.getOperand(0))
12885 return false;
12886 
12887 if (!Input)
12888 Input = Extract.getOperand(0);
12889 
12890 Elems = Elems << 8;
12891 Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
12892 Elems |= Index;
12893 
12894 return true;
12895 };
12896 
12897 // If the build vector operands aren't sign-extended vector extracts
12898 // of the same input vector, then return.
12899 for (unsigned i = 0; i < N->getNumOperands(); i++) {
12900 if (!isSExtOfVecExtract(N->getOperand(i))) {
12901 return SDValue();
12902 }
12903 }
12904 
12905 // If the vector extract indices are not correct, add the appropriate
12906 // vector_shuffle.
12907 int TgtElemArrayIdx;
12908 int InputSize = Input.getValueType().getScalarSizeInBits();
12909 int OutputSize = N->getValueType(0).getScalarSizeInBits();
12910 if (InputSize + OutputSize == 40)
12911 TgtElemArrayIdx = 0;
12912 else if (InputSize + OutputSize == 72)
12913 TgtElemArrayIdx = 1;
12914 else if (InputSize + OutputSize == 48)
12915 TgtElemArrayIdx = 2;
12916 else if (InputSize + OutputSize == 80)
12917 TgtElemArrayIdx = 3;
12918 else if (InputSize + OutputSize == 96)
12919 TgtElemArrayIdx = 4;
12920 else
12921 return SDValue();
12922 
12923 uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
12924 CorrectElems = DAG.getDataLayout().isLittleEndian()
12925 ? CorrectElems & 0x0F0F0F0F0F0F0F0F
12926 : CorrectElems & 0xF0F0F0F0F0F0F0F0;
12927 if (Elems != CorrectElems) {
12928 return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
12929 }
12930 
12931 // Regular lowering will catch cases where a shuffle is not needed.
12932 return SDValue();
12933 }
12934 
12935 SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
12936 DAGCombinerInfo &DCI) const {
12937 assert(N->getOpcode() == ISD::BUILD_VECTOR &&
12938 "Should be called with a BUILD_VECTOR node");
12939 
12940 SelectionDAG &DAG = DCI.DAG;
12941 SDLoc dl(N);
12942 
12943 if (!Subtarget.hasVSX())
12944 return SDValue();
12945 
12946 // The target independent DAG combiner will leave a build_vector of
12947 // float-to-int conversions intact. We can generate MUCH better code for
12948 // a float-to-int conversion of a vector of floats.
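// For example, on a VSX target,
//   (v2i64 build_vector (fp_to_sint f64:$A), (fp_to_sint f64:$B))
// can be rewritten as (fp_to_sint (v2f64 build_vector $A, $B)), which
// typically selects to a single xvcvdpsxds rather than two scalar
// conversions plus the moves needed to assemble the integer vector.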
12949 SDValue FirstInput = N->getOperand(0); 12950 if (FirstInput.getOpcode() == PPCISD::MFVSR) { 12951 SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI); 12952 if (Reduced) 12953 return Reduced; 12954 } 12955 12956 // If we're building a vector out of consecutive loads, just load that 12957 // vector type. 12958 SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG); 12959 if (Reduced) 12960 return Reduced; 12961 12962 // If we're building a vector out of extended elements from another vector 12963 // we have P9 vector integer extend instructions. The code assumes legal 12964 // input types (i.e. it can't handle things like v4i16) so do not run before 12965 // legalization. 12966 if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) { 12967 Reduced = combineBVOfVecSExt(N, DAG); 12968 if (Reduced) 12969 return Reduced; 12970 } 12971 12972 12973 if (N->getValueType(0) != MVT::v2f64) 12974 return SDValue(); 12975 12976 // Looking for: 12977 // (build_vector ([su]int_to_fp (extractelt 0)), [su]int_to_fp (extractelt 1)) 12978 if (FirstInput.getOpcode() != ISD::SINT_TO_FP && 12979 FirstInput.getOpcode() != ISD::UINT_TO_FP) 12980 return SDValue(); 12981 if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP && 12982 N->getOperand(1).getOpcode() != ISD::UINT_TO_FP) 12983 return SDValue(); 12984 if (FirstInput.getOpcode() != N->getOperand(1).getOpcode()) 12985 return SDValue(); 12986 12987 SDValue Ext1 = FirstInput.getOperand(0); 12988 SDValue Ext2 = N->getOperand(1).getOperand(0); 12989 if(Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT || 12990 Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT) 12991 return SDValue(); 12992 12993 ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1)); 12994 ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1)); 12995 if (!Ext1Op || !Ext2Op) 12996 return SDValue(); 12997 if (Ext1.getOperand(0).getValueType() != MVT::v4i32 || 12998 Ext1.getOperand(0) != Ext2.getOperand(0)) 12999 return SDValue(); 13000 13001 int FirstElem = Ext1Op->getZExtValue(); 13002 int SecondElem = Ext2Op->getZExtValue(); 13003 int SubvecIdx; 13004 if (FirstElem == 0 && SecondElem == 1) 13005 SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0; 13006 else if (FirstElem == 2 && SecondElem == 3) 13007 SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1; 13008 else 13009 return SDValue(); 13010 13011 SDValue SrcVec = Ext1.getOperand(0); 13012 auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ? 13013 PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP; 13014 return DAG.getNode(NodeType, dl, MVT::v2f64, 13015 SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl)); 13016 } 13017 13018 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N, 13019 DAGCombinerInfo &DCI) const { 13020 assert((N->getOpcode() == ISD::SINT_TO_FP || 13021 N->getOpcode() == ISD::UINT_TO_FP) && 13022 "Need an int -> FP conversion node here"); 13023 13024 if (useSoftFloat() || !Subtarget.has64BitSupport()) 13025 return SDValue(); 13026 13027 SelectionDAG &DAG = DCI.DAG; 13028 SDLoc dl(N); 13029 SDValue Op(N, 0); 13030 13031 // Don't handle ppc_fp128 here or conversions that are out-of-range capable 13032 // from the hardware. 
13033 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
13034 return SDValue();
13035 if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
13036 Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
13037 return SDValue();
13038 
13039 SDValue FirstOperand(Op.getOperand(0));
13040 bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
13041 (FirstOperand.getValueType() == MVT::i8 ||
13042 FirstOperand.getValueType() == MVT::i16);
13043 if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
13044 bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
13045 bool DstDouble = Op.getValueType() == MVT::f64;
13046 unsigned ConvOp = Signed ?
13047 (DstDouble ? PPCISD::FCFID : PPCISD::FCFIDS) :
13048 (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
13049 SDValue WidthConst =
13050 DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
13051 dl, false);
13052 LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
13053 SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
13054 SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
13055 DAG.getVTList(MVT::f64, MVT::Other),
13056 Ops, MVT::i8, LDN->getMemOperand());
13057 
13058 // For signed conversion, we need to sign-extend the value in the VSR.
13059 if (Signed) {
13060 SDValue ExtOps[] = { Ld, WidthConst };
13061 SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
13062 return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
13063 } else
13064 return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
13065 }
13066 
13067 
13068 // For i32 intermediate values, unfortunately, the conversion functions
13069 // leave the upper 32 bits of the value undefined. Within the set of
13070 // scalar instructions, we have no method for zero- or sign-extending the
13071 // value. Thus, we cannot handle i32 intermediate values here.
13072 if (Op.getOperand(0).getValueType() == MVT::i32)
13073 return SDValue();
13074 
13075 assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
13076 "UINT_TO_FP is supported only with FPCVT");
13077 
13078 // If we have FCFIDS, then use it when converting to single-precision.
13079 // Otherwise, convert to double-precision and then round.
13080 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
13081 ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
13082 : PPCISD::FCFIDS)
13083 : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
13084 : PPCISD::FCFID);
13085 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
13086 ? MVT::f32
13087 : MVT::f64;
13088 
13089 // If we're converting from a float to an int and back to a float again,
13090 // then we don't need the store/load pair at all.
13091 if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
13092 Subtarget.hasFPCVT()) ||
13093 (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
13094 SDValue Src = Op.getOperand(0).getOperand(0);
13095 if (Src.getValueType() == MVT::f32) {
13096 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
13097 DCI.AddToWorklist(Src.getNode());
13098 } else if (Src.getValueType() != MVT::f64) {
13099 // Make sure that we don't pick up a ppc_fp128 source value.
13100 return SDValue();
13101 }
13102 
13103 unsigned FCTOp =
13104 Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ?
PPCISD::FCTIDZ : 13105 PPCISD::FCTIDUZ; 13106 13107 SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src); 13108 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp); 13109 13110 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) { 13111 FP = DAG.getNode(ISD::FP_ROUND, dl, 13112 MVT::f32, FP, DAG.getIntPtrConstant(0, dl)); 13113 DCI.AddToWorklist(FP.getNode()); 13114 } 13115 13116 return FP; 13117 } 13118 13119 return SDValue(); 13120 } 13121 13122 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for 13123 // builtins) into loads with swaps. 13124 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N, 13125 DAGCombinerInfo &DCI) const { 13126 SelectionDAG &DAG = DCI.DAG; 13127 SDLoc dl(N); 13128 SDValue Chain; 13129 SDValue Base; 13130 MachineMemOperand *MMO; 13131 13132 switch (N->getOpcode()) { 13133 default: 13134 llvm_unreachable("Unexpected opcode for little endian VSX load"); 13135 case ISD::LOAD: { 13136 LoadSDNode *LD = cast<LoadSDNode>(N); 13137 Chain = LD->getChain(); 13138 Base = LD->getBasePtr(); 13139 MMO = LD->getMemOperand(); 13140 // If the MMO suggests this isn't a load of a full vector, leave 13141 // things alone. For a built-in, we have to make the change for 13142 // correctness, so if there is a size problem that will be a bug. 13143 if (MMO->getSize() < 16) 13144 return SDValue(); 13145 break; 13146 } 13147 case ISD::INTRINSIC_W_CHAIN: { 13148 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N); 13149 Chain = Intrin->getChain(); 13150 // Similarly to the store case below, Intrin->getBasePtr() doesn't get 13151 // us what we want. Get operand 2 instead. 13152 Base = Intrin->getOperand(2); 13153 MMO = Intrin->getMemOperand(); 13154 break; 13155 } 13156 } 13157 13158 MVT VecTy = N->getValueType(0).getSimpleVT(); 13159 13160 // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is 13161 // aligned and the type is a vector with elements up to 4 bytes 13162 if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment()%16) 13163 && VecTy.getScalarSizeInBits() <= 32 ) { 13164 return SDValue(); 13165 } 13166 13167 SDValue LoadOps[] = { Chain, Base }; 13168 SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl, 13169 DAG.getVTList(MVT::v2f64, MVT::Other), 13170 LoadOps, MVT::v2f64, MMO); 13171 13172 DCI.AddToWorklist(Load.getNode()); 13173 Chain = Load.getValue(1); 13174 SDValue Swap = DAG.getNode( 13175 PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load); 13176 DCI.AddToWorklist(Swap.getNode()); 13177 13178 // Add a bitcast if the resulting load type doesn't match v2f64. 13179 if (VecTy != MVT::v2f64) { 13180 SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap); 13181 DCI.AddToWorklist(N.getNode()); 13182 // Package {bitcast value, swap's chain} to match Load's shape. 13183 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other), 13184 N, Swap.getValue(1)); 13185 } 13186 13187 return Swap; 13188 } 13189 13190 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for 13191 // builtins) into stores with swaps. 
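// For example, (store v4i32:$src, $ptr) becomes
// (stxvd2x (xxswapd $src), $ptr): xxswapd exchanges the two doublewords
// so that stxvd2x's big-endian element order ends up writing the elements
// to memory in the expected little-endian layout.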
// expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
// builtins) into stores with swaps.
SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Chain;
  SDValue Base;
  unsigned SrcOpnd;
  MachineMemOperand *MMO;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode for little endian VSX store");
  case ISD::STORE: {
    StoreSDNode *ST = cast<StoreSDNode>(N);
    Chain = ST->getChain();
    Base = ST->getBasePtr();
    MMO = ST->getMemOperand();
    SrcOpnd = 1;
    // If the MMO suggests this isn't a store of a full vector, leave
    // things alone. For a built-in, we have to make the change for
    // correctness, so if there is a size problem that will be a bug.
    if (MMO->getSize() < 16)
      return SDValue();
    break;
  }
  case ISD::INTRINSIC_VOID: {
    MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
    Chain = Intrin->getChain();
    // Intrin->getBasePtr() oddly does not get what we want.
    Base = Intrin->getOperand(3);
    MMO = Intrin->getMemOperand();
    SrcOpnd = 2;
    break;
  }
  }

  SDValue Src = N->getOperand(SrcOpnd);
  MVT VecTy = Src.getValueType().getSimpleVT();

  // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the store is
  // aligned and the type is a vector with elements up to 4 bytes.
  if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment() % 16) &&
      VecTy.getScalarSizeInBits() <= 32) {
    return SDValue();
  }

  // All stores are done as v2f64 and a possible bitcast.
  if (VecTy != MVT::v2f64) {
    Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
    DCI.AddToWorklist(Src.getNode());
  }

  SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
                             DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
  DCI.AddToWorklist(Swap.getNode());
  Chain = Swap.getValue(1);
  SDValue StoreOps[] = { Chain, Swap, Base };
  SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
                                          DAG.getVTList(MVT::Other),
                                          StoreOps, VecTy, MMO);
  DCI.AddToWorklist(Store.getNode());
  return Store;
}

// Handle DAG combine for STORE (FP_TO_INT F).
SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
                                               DAGCombinerInfo &DCI) const {

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  unsigned Opcode = N->getOperand(1).getOpcode();

  assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT)
         && "Not a FP_TO_INT Instruction!");

  SDValue Val = N->getOperand(1).getOperand(0);
  EVT Op1VT = N->getOperand(1).getValueType();
  EVT ResVT = Val.getValueType();

  // Floating point types smaller than 32 bits are not legal on Power.
  if (ResVT.getScalarSizeInBits() < 32)
    return SDValue();

  // Only perform the combine for conversions to i64/i32, or to i16/i8 on
  // Power9.
  bool ValidTypeForStoreFltAsInt =
        (Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
         (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));

  if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Altivec() ||
      cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
    return SDValue();

  // Extend f32 values to f64.
  if (ResVT.getScalarSizeInBits() == 32) {
    Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
    DCI.AddToWorklist(Val.getNode());
  }

  // Set signed or unsigned conversion opcode.
  unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT) ?
                          PPCISD::FP_TO_SINT_IN_VSR :
                          PPCISD::FP_TO_UINT_IN_VSR;

  Val = DAG.getNode(ConvOpcode,
                    dl, ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val);
  DCI.AddToWorklist(Val.getNode());

  // Set number of bytes being converted.
  unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8;
  SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2),
                    DAG.getIntPtrConstant(ByteSize, dl, false),
                    DAG.getValueType(Op1VT) };

  Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl,
                                DAG.getVTList(MVT::Other), Ops,
                                cast<StoreSDNode>(N)->getMemoryVT(),
                                cast<StoreSDNode>(N)->getMemOperand());

  DCI.AddToWorklist(Val.getNode());
  return Val;
}
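// A hedged sketch of what combineVReverseMemOP recognizes: a load or store
// whose value is permuted by a full element reversal, e.g. on v4i32:
//   (v4i32 (vector_shuffle<3,2,1,0> (load $ptr), undef))
// On a little-endian P9 target this pairs the memory access with the
// reversal, selecting a single byte-reversed access (PPCISD::LOAD_VEC_BE /
// PPCISD::STORE_VEC_BE) instead of a load/store plus a separate permute.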
SDValue PPCTargetLowering::combineVReverseMemOP(ShuffleVectorSDNode *SVN,
                                                LSBaseSDNode *LSBase,
                                                DAGCombinerInfo &DCI) const {
  assert((ISD::isNormalLoad(LSBase) || ISD::isNormalStore(LSBase)) &&
         "Not a reverse memop pattern!");

  auto IsElementReverse = [](const ShuffleVectorSDNode *SVN) -> bool {
    auto Mask = SVN->getMask();
    int i = 0;
    auto I = Mask.rbegin();
    auto E = Mask.rend();

    for (; I != E; ++I) {
      if (*I != i)
        return false;
      i++;
    }
    return true;
  };

  SelectionDAG &DAG = DCI.DAG;
  EVT VT = SVN->getValueType(0);

  if (!isTypeLegal(VT) || !Subtarget.isLittleEndian() || !Subtarget.hasVSX())
    return SDValue();

  // Before P9, we have the PPCVSXSwapRemoval pass to hack the element order.
  // See the comment in PPCVSXSwapRemoval.cpp. This combine conflicts with
  // that optimization, so we don't perform it there.
  if (!Subtarget.hasP9Vector())
    return SDValue();

  if (!IsElementReverse(SVN))
    return SDValue();

  if (LSBase->getOpcode() == ISD::LOAD) {
    SDLoc dl(SVN);
    SDValue LoadOps[] = {LSBase->getChain(), LSBase->getBasePtr()};
    return DAG.getMemIntrinsicNode(
        PPCISD::LOAD_VEC_BE, dl, DAG.getVTList(VT, MVT::Other), LoadOps,
        LSBase->getMemoryVT(), LSBase->getMemOperand());
  }

  if (LSBase->getOpcode() == ISD::STORE) {
    SDLoc dl(LSBase);
    SDValue StoreOps[] = {LSBase->getChain(), SVN->getOperand(0),
                          LSBase->getBasePtr()};
    return DAG.getMemIntrinsicNode(
        PPCISD::STORE_VEC_BE, dl, DAG.getVTList(MVT::Other), StoreOps,
        LSBase->getMemoryVT(), LSBase->getMemOperand());
  }

  llvm_unreachable("Expected a load or store node here");
}
SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default: break;
  case ISD::ADD:
    return combineADD(N, DCI);
  case ISD::SHL:
    return combineSHL(N, DCI);
  case ISD::SRA:
    return combineSRA(N, DCI);
  case ISD::SRL:
    return combineSRL(N, DCI);
  case ISD::MUL:
    return combineMUL(N, DCI);
  case PPCISD::SHL:
    if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
      return N->getOperand(0);
    break;
  case PPCISD::SRL:
    if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
      return N->getOperand(0);
    break;
  case PPCISD::SRA:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->isNullValue() ||   //  0 >>s V -> 0.
          C->isAllOnesValue())  // -1 >>s V -> -1.
        return N->getOperand(0);
    }
    break;
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
    return DAGCombineExtBoolTrunc(N, DCI);
  case ISD::TRUNCATE:
    return combineTRUNCATE(N, DCI);
  case ISD::SETCC:
    if (SDValue CSCC = combineSetCC(N, DCI))
      return CSCC;
    LLVM_FALLTHROUGH;
  case ISD::SELECT_CC:
    return DAGCombineTruncBoolExt(N, DCI);
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    return combineFPToIntToFP(N, DCI);
  case ISD::VECTOR_SHUFFLE:
    if (ISD::isNormalLoad(N->getOperand(0).getNode())) {
      LSBaseSDNode* LSBase = cast<LSBaseSDNode>(N->getOperand(0));
      return combineVReverseMemOP(cast<ShuffleVectorSDNode>(N), LSBase, DCI);
    }
    break;
  case ISD::STORE: {

    EVT Op1VT = N->getOperand(1).getValueType();
    unsigned Opcode = N->getOperand(1).getOpcode();

    if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) {
      SDValue Val = combineStoreFPToInt(N, DCI);
      if (Val)
        return Val;
    }

    if (Opcode == ISD::VECTOR_SHUFFLE && ISD::isNormalStore(N)) {
      ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N->getOperand(1));
      SDValue Val = combineVReverseMemOP(SVN, cast<LSBaseSDNode>(N), DCI);
      if (Val)
        return Val;
    }

    // Turn STORE (BSWAP) -> sthbrx/stwbrx.
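    // For example (illustrative), on a 32-bit target:
    //   (store (bswap i32:$x), $ptr)
    // becomes a single stwbrx of $x to $ptr, with no separate byte swap;
    // sthbrx covers the i16 case, and stdbrx the i64 case where available.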
    if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&
        N->getOperand(1).getNode()->hasOneUse() &&
        (Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
         (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {

      // STBRX can only handle simple types and it makes no sense to store
      // less than two bytes in byte-reversed order.
      EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
      if (mVT.isExtended() || mVT.getSizeInBits() < 16)
        break;

      SDValue BSwapOp = N->getOperand(1).getOperand(0);
      // Do an any-extend to 32-bits if this is a half-word input.
      if (BSwapOp.getValueType() == MVT::i16)
        BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);

      // If the type of the BSWAP operand is wider than the stored memory
      // width, it needs to be shifted to the right side before STBRX.
      if (Op1VT.bitsGT(mVT)) {
        int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
        BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
                              DAG.getConstant(Shift, dl, MVT::i32));
        // Need to truncate if this is a bswap of i64 stored as i32/i16.
        if (Op1VT == MVT::i64)
          BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
      }

      SDValue Ops[] = {
        N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
      };
      return
        DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
                                Ops, cast<StoreSDNode>(N)->getMemoryVT(),
                                cast<StoreSDNode>(N)->getMemOperand());
    }

    // STORE Constant:i32<0> -> STORE<trunc to i32> Constant:i64<0>
    // so as to increase the chance of CSE'ing the constant construction.
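    // Illustrative sketch: if a function stores the same value as both an
    // i32 and an i64 constant, rewriting the i32 store as a truncating
    // store of the i64 constant lets both stores share one materialized
    // register instead of building the constant twice.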
    if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
        isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
      // Need to sign-extend to 64 bits to handle negative values.
      EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
      uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
                                    MemVT.getSizeInBits());
      SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);

      // DAG.getTruncStore() can't be used here because it doesn't accept
      // the general (base + offset) addressing mode.
      // So we use UpdateNodeOperands and setTruncatingStore instead.
      DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
                             N->getOperand(3));
      cast<StoreSDNode>(N)->setTruncatingStore(true);
      return SDValue(N, 0);
    }

    // For little endian, VSX stores require generating xxswapd/stxvd2x.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
    if (Op1VT.isSimple()) {
      MVT StoreVT = Op1VT.getSimpleVT();
      if (Subtarget.needsSwapsForVSXMemOps() &&
          (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
           StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
        return expandVSXStoreForLE(N, DCI);
    }
    break;
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(N);
    EVT VT = LD->getValueType(0);

    // For little endian, VSX loads require generating lxvd2x/xxswapd.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
    if (VT.isSimple()) {
      MVT LoadVT = VT.getSimpleVT();
      if (Subtarget.needsSwapsForVSXMemOps() &&
          (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
           LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
        return expandVSXLoadForLE(N, DCI);
    }

    // We sometimes end up with a 64-bit integer load, from which we extract
    // two single-precision floating-point numbers. This happens with
    // std::complex<float>, and other similar structures, because of the way
    // we canonicalize structure copies. However, if we lack direct moves,
    // then the final bitcasts from the extracted integer values to the
    // floating-point numbers turn into store/load pairs. Even with direct
    // moves, just loading the two floating-point numbers is likely better.
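    // A hedged sketch of the rewrite performed by the lambda below: the one
    // i64 load feeding the srl/truncate/bitcast chains is replaced by two
    // f32 loads from [ptr] and [ptr+4], matched to the two bitcast results
    // according to endianness, so no GPR-to-FPR transfer is needed.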
    auto ReplaceTwoFloatLoad = [&]() {
      if (VT != MVT::i64)
        return false;

      if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
          LD->isVolatile())
        return false;

      // We're looking for a sequence like this:
      // t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64
      // t16: i64 = srl t13, Constant:i32<32>
      // t17: i32 = truncate t16
      // t18: f32 = bitcast t17
      // t19: i32 = truncate t13
      // t20: f32 = bitcast t19

      if (!LD->hasNUsesOfValue(2, 0))
        return false;

      auto UI = LD->use_begin();
      while (UI.getUse().getResNo() != 0) ++UI;
      SDNode *Trunc = *UI++;
      while (UI.getUse().getResNo() != 0) ++UI;
      SDNode *RightShift = *UI;
      if (Trunc->getOpcode() != ISD::TRUNCATE)
        std::swap(Trunc, RightShift);

      if (Trunc->getOpcode() != ISD::TRUNCATE ||
          Trunc->getValueType(0) != MVT::i32 ||
          !Trunc->hasOneUse())
        return false;
      if (RightShift->getOpcode() != ISD::SRL ||
          !isa<ConstantSDNode>(RightShift->getOperand(1)) ||
          RightShift->getConstantOperandVal(1) != 32 ||
          !RightShift->hasOneUse())
        return false;

      SDNode *Trunc2 = *RightShift->use_begin();
      if (Trunc2->getOpcode() != ISD::TRUNCATE ||
          Trunc2->getValueType(0) != MVT::i32 ||
          !Trunc2->hasOneUse())
        return false;

      SDNode *Bitcast = *Trunc->use_begin();
      SDNode *Bitcast2 = *Trunc2->use_begin();

      if (Bitcast->getOpcode() != ISD::BITCAST ||
          Bitcast->getValueType(0) != MVT::f32)
        return false;
      if (Bitcast2->getOpcode() != ISD::BITCAST ||
          Bitcast2->getValueType(0) != MVT::f32)
        return false;

      if (Subtarget.isLittleEndian())
        std::swap(Bitcast, Bitcast2);

      // Bitcast has the second float (in memory-layout order) and Bitcast2
      // has the first one.

      SDValue BasePtr = LD->getBasePtr();
      if (LD->isIndexed()) {
        assert(LD->getAddressingMode() == ISD::PRE_INC &&
               "Non-pre-inc AM on PPC?");
        BasePtr =
          DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
                      LD->getOffset());
      }

      auto MMOFlags =
          LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
      SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
                                      LD->getPointerInfo(), LD->getAlignment(),
                                      MMOFlags, LD->getAAInfo());
      SDValue AddPtr =
        DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
                    BasePtr, DAG.getIntPtrConstant(4, dl));
      SDValue FloatLoad2 = DAG.getLoad(
          MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
          LD->getPointerInfo().getWithOffset(4),
          MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo());

      if (LD->isIndexed()) {
        // Note that DAGCombine should re-form any pre-increment load(s) from
        // what is produced here if that makes sense.
        DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr);
      }

      DCI.CombineTo(Bitcast2, FloatLoad);
      DCI.CombineTo(Bitcast, FloatLoad2);

      DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 2 : 1),
                                    SDValue(FloatLoad2.getNode(), 1));
      return true;
    };

    if (ReplaceTwoFloatLoad())
      return SDValue(N, 0);

    EVT MemVT = LD->getMemoryVT();
    Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
    unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
    Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext());
    unsigned ScalarABIAlignment = DAG.getDataLayout().getABITypeAlignment(STy);
    if (LD->isUnindexed() && VT.isVector() &&
        ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
          // P8 and later hardware should just use LOAD.
          !Subtarget.hasP8Vector() &&
          (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
           VT == MVT::v4f32)) ||
         (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) &&
          LD->getAlignment() >= ScalarABIAlignment)) &&
        LD->getAlignment() < ABIAlignment) {
      // This is a type-legal unaligned Altivec or QPX load.
      SDValue Chain = LD->getChain();
      SDValue Ptr = LD->getBasePtr();
      bool isLittleEndian = Subtarget.isLittleEndian();

      // This implements the loading of unaligned vectors as described in
      // the venerable Apple Velocity Engine overview. Specifically:
      // https://developer.apple.com/hardwaredrivers/ve/alignment.html
      // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
      //
      // The general idea is to expand a sequence of one or more unaligned
      // loads into an alignment-based permutation-control instruction (lvsl
      // or lvsr), a series of regular vector loads (which always truncate
      // their input address to an aligned address), and a series of
      // permutations. The results of these permutations are the requested
      // loaded values. The trick is that the last "extra" load is not taken
      // from the address you might suspect (sizeof(vector) bytes after the
      // last requested load), but rather sizeof(vector) - 1 bytes after the
      // last requested vector. The point of this is to avoid a page fault if
      // the base address happened to be aligned. This works because if the
      // base address is aligned, then adding less than a full vector length
      // will cause the last vector in the sequence to be (re)loaded.
      // Otherwise, the next vector will be fetched as you might suspect was
      // necessary.
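      // An illustrative big-endian expansion for a single unaligned v4i32
      // load (lvsr and swapped vperm operands are used instead on LE):
      //   lvsl vMask, 0, rPtr       ; permute control from the low address
      //   lvx  v0, 0, rPtr          ; aligned load at/below the address
      //   lvx  v1, rOff, rPtr       ; the "extra" load (offset 15 or 16)
      //   vperm vRes, v0, v1, vMask ; select the 16 requested bytes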
      // We might be able to reuse the permutation generation from
      // a different base address offset from this one by an aligned amount.
      // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
      // optimization later.
      Intrinsic::ID Intr, IntrLD, IntrPerm;
      MVT PermCntlTy, PermTy, LDTy;
      if (Subtarget.hasAltivec()) {
        Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr :
                                Intrinsic::ppc_altivec_lvsl;
        IntrLD = Intrinsic::ppc_altivec_lvx;
        IntrPerm = Intrinsic::ppc_altivec_vperm;
        PermCntlTy = MVT::v16i8;
        PermTy = MVT::v4i32;
        LDTy = MVT::v4i32;
      } else {
        Intr = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld :
                                     Intrinsic::ppc_qpx_qvlpcls;
        IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd :
                                       Intrinsic::ppc_qpx_qvlfs;
        IntrPerm = Intrinsic::ppc_qpx_qvfperm;
        PermCntlTy = MVT::v4f64;
        PermTy = MVT::v4f64;
        LDTy = MemVT.getSimpleVT();
      }

      SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);

      // Create the new MMO for the new base load. It is like the original
      // MMO, but represents an area in memory almost twice the vector size
      // centered on the original address. If the address is unaligned, we
      // might start reading up to (sizeof(vector)-1) bytes below the address
      // of the original unaligned load.
      MachineFunction &MF = DAG.getMachineFunction();
      MachineMemOperand *BaseMMO =
        MF.getMachineMemOperand(LD->getMemOperand(),
                                -(long)MemVT.getStoreSize()+1,
                                2*MemVT.getStoreSize()-1);

      // Create the new base load.
      SDValue LDXIntID =
          DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout()));
      SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
      SDValue BaseLoad =
        DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
                                DAG.getVTList(PermTy, MVT::Other),
                                BaseLoadOps, LDTy, BaseMMO);

      // Note that the value of IncOffset (which is provided to the next
      // load's pointer info offset value, and thus used to calculate the
      // alignment), and the value of IncValue (which is actually used to
      // increment the pointer value) are different! This is because we
      // require the next load to appear to be aligned, even though it
      // is actually offset from the base pointer by a lesser amount.
      int IncOffset = VT.getSizeInBits() / 8;
      int IncValue = IncOffset;

      // Walk (both up and down) the chain looking for another load at the
      // real (aligned) offset (the alignment of the other load does not
      // matter in this case). If found, then do not use the offset reduction
      // trick, as that will prevent the loads from being later combined (as
      // they would otherwise be duplicates).
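      // For example (illustrative): with two adjacent unaligned v4i32
      // loads, the second load's base vector coincides with the first
      // load's "extra" vector only if both use the full 16-byte offset;
      // keeping IncValue at 16 in that case lets the duplicate loads be
      // merged.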
      if (!findConsecutiveLoad(LD, DAG))
        --IncValue;

      SDValue Increment =
          DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout()));
      Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);

      MachineMemOperand *ExtraMMO =
        MF.getMachineMemOperand(LD->getMemOperand(),
                                1, 2*MemVT.getStoreSize()-1);
      SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
      SDValue ExtraLoad =
        DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
                                DAG.getVTList(PermTy, MVT::Other),
                                ExtraLoadOps, LDTy, ExtraMMO);

      SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                               BaseLoad.getValue(1), ExtraLoad.getValue(1));

      // Because vperm has a big-endian bias, we must reverse the order
      // of the input vectors and complement the permute control vector
      // when generating little endian code. We have already handled the
      // latter by using lvsr instead of lvsl, so just reverse BaseLoad
      // and ExtraLoad here.
      SDValue Perm;
      if (isLittleEndian)
        Perm = BuildIntrinsicOp(IntrPerm,
                                ExtraLoad, BaseLoad, PermCntl, DAG, dl);
      else
        Perm = BuildIntrinsicOp(IntrPerm,
                                BaseLoad, ExtraLoad, PermCntl, DAG, dl);

      if (VT != PermTy)
        Perm = Subtarget.hasAltivec() ?
                 DAG.getNode(ISD::BITCAST, dl, VT, Perm) :
                 DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX
                             DAG.getTargetConstant(1, dl, MVT::i64));
                             // second argument is 1 because this rounding
                             // is always exact.

      // The output of the permutation is our loaded result, the TokenFactor
      // is our new chain.
      DCI.CombineTo(N, Perm, TF);
      return SDValue(N, 0);
    }
  }
  break;
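  // The address-reuse combines below rely on the fact that lvsl/lvsr (and
  // the QPX permute-control intrinsics) depend only on the low-order bits
  // of the address, so two permute-control computations whose addresses
  // differ by a multiple of the vector alignment produce identical results.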
  case ISD::INTRINSIC_WO_CHAIN: {
    bool isLittleEndian = Subtarget.isLittleEndian();
    unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
                                         : Intrinsic::ppc_altivec_lvsl);
    if ((IID == Intr ||
         IID == Intrinsic::ppc_qpx_qvlpcld ||
         IID == Intrinsic::ppc_qpx_qvlpcls) &&
        N->getOperand(1)->getOpcode() == ISD::ADD) {
      SDValue Add = N->getOperand(1);

      int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ?
                 5 /* 32 byte alignment */ : 4 /* 16 byte alignment */;

      if (DAG.MaskedValueIsZero(Add->getOperand(1),
                                APInt::getAllOnesValue(Bits /* alignment */)
                                    .zext(Add.getScalarValueSizeInBits()))) {
        SDNode *BasePtr = Add->getOperand(0).getNode();
        for (SDNode::use_iterator UI = BasePtr->use_begin(),
                                  UE = BasePtr->use_end();
             UI != UE; ++UI) {
          if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
              cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) {
            // We've found another LVSL/LVSR, and this address is an aligned
            // multiple of that one. The results will be the same, so use the
            // one we've just found instead.
            return SDValue(*UI, 0);
          }
        }
      }

      if (isa<ConstantSDNode>(Add->getOperand(1))) {
        SDNode *BasePtr = Add->getOperand(0).getNode();
        for (SDNode::use_iterator UI = BasePtr->use_begin(),
             UE = BasePtr->use_end(); UI != UE; ++UI) {
          if (UI->getOpcode() == ISD::ADD &&
              isa<ConstantSDNode>(UI->getOperand(1)) &&
              (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
               cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
                  (1ULL << Bits) == 0) {
            SDNode *OtherAdd = *UI;
            for (SDNode::use_iterator VI = OtherAdd->use_begin(),
                 VE = OtherAdd->use_end(); VI != VE; ++VI) {
              if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
                  cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() ==
                      IID) {
                return SDValue(*VI, 0);
              }
            }
          }
        }
      }
    }

    // Combine vmaxsw/h/b(a, a's negation) to abs(a), exposing the
    // vabsduw/h/b opportunity downstream.
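    // Illustrative example of this combine on v4i32:
    //   vmaxsw (sub (splat 0), %a), %a   ->   (abs %a)
    // and likewise for the (a, 0-a) and (x-y, y-x) operand arrangements.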
    if (!DCI.isAfterLegalizeDAG() && Subtarget.hasP9Altivec() &&
        (IID == Intrinsic::ppc_altivec_vmaxsw ||
         IID == Intrinsic::ppc_altivec_vmaxsh ||
         IID == Intrinsic::ppc_altivec_vmaxsb)) {
      SDValue V1 = N->getOperand(1);
      SDValue V2 = N->getOperand(2);
      if ((V1.getSimpleValueType() == MVT::v4i32 ||
           V1.getSimpleValueType() == MVT::v8i16 ||
           V1.getSimpleValueType() == MVT::v16i8) &&
          V1.getSimpleValueType() == V2.getSimpleValueType()) {
        // (0-a, a)
        if (V1.getOpcode() == ISD::SUB &&
            ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
            V1.getOperand(1) == V2) {
          return DAG.getNode(ISD::ABS, dl, V2.getValueType(), V2);
        }
        // (a, 0-a)
        if (V2.getOpcode() == ISD::SUB &&
            ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
            V2.getOperand(1) == V1) {
          return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
        }
        // (x-y, y-x)
        if (V1.getOpcode() == ISD::SUB && V2.getOpcode() == ISD::SUB &&
            V1.getOperand(0) == V2.getOperand(1) &&
            V1.getOperand(1) == V2.getOperand(0)) {
          return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
        }
      }
    }
  }

  break;
  case ISD::INTRINSIC_W_CHAIN:
    // For little endian, VSX loads require generating lxvd2x/xxswapd.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
    if (Subtarget.needsSwapsForVSXMemOps()) {
      switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
      default:
        break;
      case Intrinsic::ppc_vsx_lxvw4x:
      case Intrinsic::ppc_vsx_lxvd2x:
        return expandVSXLoadForLE(N, DCI);
      }
    }
    break;
  case ISD::INTRINSIC_VOID:
    // For little endian, VSX stores require generating xxswapd/stxvd2x.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
    if (Subtarget.needsSwapsForVSXMemOps()) {
      switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
      default:
        break;
      case Intrinsic::ppc_vsx_stxvw4x:
      case Intrinsic::ppc_vsx_stxvd2x:
        return expandVSXStoreForLE(N, DCI);
      }
    }
    break;
  case ISD::BSWAP:
    // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
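    // For example (illustrative):
    //   (i32 (bswap (load $ptr)))
    // becomes a single lwbrx from $ptr; the original load's chain users are
    // rewired to the byte-swapping load's chain, and lhbrx/ldbrx cover the
    // i16 and i64 cases.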
    if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
        N->getOperand(0).hasOneUse() &&
        (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
         (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
          N->getValueType(0) == MVT::i64))) {
      SDValue Load = N->getOperand(0);
      LoadSDNode *LD = cast<LoadSDNode>(Load);
      // Create the byte-swapping load.
      SDValue Ops[] = {
        LD->getChain(),                       // Chain
        LD->getBasePtr(),                     // Ptr
        DAG.getValueType(N->getValueType(0))  // VT
      };
      SDValue BSLoad =
        DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
                                DAG.getVTList(N->getValueType(0) == MVT::i64 ?
                                              MVT::i64 : MVT::i32, MVT::Other),
                                Ops, LD->getMemoryVT(), LD->getMemOperand());

      // If this is an i16 load, insert the truncate.
      SDValue ResVal = BSLoad;
      if (N->getValueType(0) == MVT::i16)
        ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);

      // First, combine the bswap away. This makes the value produced by the
      // load dead.
      DCI.CombineTo(N, ResVal);

      // Next, combine the load away; we give it a bogus result value but a
      // real chain result. The result value is dead because the bswap is
      // dead.
      DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));

      // Return N so it doesn't get rechecked!
      return SDValue(N, 0);
    }
    break;
  case PPCISD::VCMP:
    // If a VCMPo node already exists with exactly the same operands as this
    // node, use its result instead of this node (VCMPo computes both a CR6
    // and a normal output).
    if (!N->getOperand(0).hasOneUse() &&
        !N->getOperand(1).hasOneUse() &&
        !N->getOperand(2).hasOneUse()) {

      // Scan all of the users of the LHS, looking for VCMPo's that match.
      SDNode *VCMPoNode = nullptr;

      SDNode *LHSN = N->getOperand(0).getNode();
      for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
           UI != E; ++UI)
        if (UI->getOpcode() == PPCISD::VCMPo &&
            UI->getOperand(1) == N->getOperand(1) &&
            UI->getOperand(2) == N->getOperand(2) &&
            UI->getOperand(0) == N->getOperand(0)) {
          VCMPoNode = *UI;
          break;
        }

      // If there is no VCMPo node, or if the flag value has a single use,
      // don't transform this.
      if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
        break;

      // Look at the (necessarily single) use of the flag value. If it has a
      // chain, this transformation is more complex. Note that multiple
      // things could use the value result, which we should ignore.
      SDNode *FlagUser = nullptr;
      for (SDNode::use_iterator UI = VCMPoNode->use_begin();
           FlagUser == nullptr; ++UI) {
        assert(UI != VCMPoNode->use_end() && "Didn't find user!");
        SDNode *User = *UI;
        for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
          if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {
            FlagUser = User;
            break;
          }
        }
      }

      // If the user is a MFOCRF instruction, we know this is safe.
      // Otherwise we give up for right now.
      if (FlagUser->getOpcode() == PPCISD::MFOCRF)
        return SDValue(VCMPoNode, 0);
    }
    break;
  case ISD::BRCOND: {
    SDValue Cond = N->getOperand(1);
    SDValue Target = N->getOperand(2);

    if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
          Intrinsic::loop_decrement) {

      // We now need to make the intrinsic dead (it cannot be instruction
      // selected).
      DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0));
      assert(Cond.getNode()->hasOneUse() &&
             "Counter decrement has more than one use");

      return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other,
                         N->getOperand(0), Target);
    }
  }
  break;
  case ISD::BR_CC: {
    // If this is a branch on an altivec predicate comparison, lower this so
    // that we don't have to do a MFOCRF: instead, branch directly on CR6.
    // This lowering is done pre-legalize, because the legalizer lowers the
    // predicate compare down to code that is difficult to reassemble.
    ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
    SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);

    // Sometimes the promoted value of the intrinsic is ANDed by some
    // non-zero value. If so, pass-through the AND to get to the intrinsic.
    if (LHS.getOpcode() == ISD::AND &&
        LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue()
          == Intrinsic::loop_decrement &&
        isa<ConstantSDNode>(LHS.getOperand(1)) &&
        !isNullConstant(LHS.getOperand(1)))
      LHS = LHS.getOperand(0);

    if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
          Intrinsic::loop_decrement &&
        isa<ConstantSDNode>(RHS)) {
      assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
             "Counter decrement comparison is not EQ or NE");

      unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
      bool isBDNZ = (CC == ISD::SETEQ && Val) ||
                    (CC == ISD::SETNE && !Val);

      // We now need to make the intrinsic dead (it cannot be instruction
      // selected).
      DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
      assert(LHS.getNode()->hasOneUse() &&
             "Counter decrement has more than one use");

      return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
                         N->getOperand(0), N->getOperand(4));
    }

    int CompareOpc;
    bool isDot;

    if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
        isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
        getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
      assert(isDot && "Can't compare against a vector result!");

      // If this is a comparison against something other than 0/1, then we
      // know that the condition is never/always true.
      unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
      if (Val != 0 && Val != 1) {
        if (CC == ISD::SETEQ)  // Cond never true, remove branch.
          return N->getOperand(0);
        // Always !=, turn it into an unconditional branch.
        return DAG.getNode(ISD::BR, dl, MVT::Other,
                           N->getOperand(0), N->getOperand(4));
      }

      bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);

      // Create the PPCISD altivec 'dot' comparison node.
      SDValue Ops[] = {
        LHS.getOperand(2),  // LHS of compare
        LHS.getOperand(3),  // RHS of compare
        DAG.getConstant(CompareOpc, dl, MVT::i32)
      };
      EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
      SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);

      // Unpack the result based on how the target uses it.
      PPC::Predicate CompOpc;
      switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
      default:  // Can't happen, don't crash on invalid number though.
      case 0:   // Branch on the value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
        break;
      case 1:   // Branch on the inverted value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
        break;
      case 2:   // Branch on the value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
        break;
      case 3:   // Branch on the inverted value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
        break;
      }

      return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
                         DAG.getConstant(CompOpc, dl, MVT::i32),
                         DAG.getRegister(PPC::CR6, MVT::i32),
                         N->getOperand(4), CompNode.getValue(1));
    }
    break;
  }
  case ISD::BUILD_VECTOR:
    return DAGCombineBuildVector(N, DCI);
  case ISD::ABS:
    return combineABS(N, DCI);
  case ISD::VSELECT:
    return combineVSelect(N, DCI);
  }

  return SDValue();
}
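// A hedged sketch of the sequence BuildSDIVPow2 emits for a signed divide
// by a power of two, e.g. (i32 (sdiv $x, 4)):
//   srawi $t, $x, 2   ; arithmetic shift, setting CA for negative inputs
//   addze $d, $t      ; add the carry back to round toward zero
// (this is PPCISD::SRA_ADDZE); a negative divisor additionally negates the
// result.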
SDValue
PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                 SelectionDAG &DAG,
                                 SmallVectorImpl<SDNode *> &Created) const {
  // fold (sdiv X, pow2)
  EVT VT = N->getValueType(0);
  if (VT == MVT::i64 && !Subtarget.isPPC64())
    return SDValue();
  if ((VT != MVT::i32 && VT != MVT::i64) ||
      !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
    return SDValue();

  SDLoc DL(N);
  SDValue N0 = N->getOperand(0);

  bool IsNegPow2 = (-Divisor).isPowerOf2();
  unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
  SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);

  SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
  Created.push_back(Op.getNode());

  if (IsNegPow2) {
    Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
    Created.push_back(Op.getNode());
  }

  return Op;
}

//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//

void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      KnownBits &Known,
                                                      const APInt &DemandedElts,
                                                      const SelectionDAG &DAG,
                                                      unsigned Depth) const {
  Known.resetAll();
  switch (Op.getOpcode()) {
  default: break;
  case PPCISD::LBRX: {
    // lhbrx is known to have the top bits cleared out.
    if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
      Known.Zero = 0xFFFF0000;
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
    default: break;
    case Intrinsic::ppc_altivec_vcmpbfp_p:
    case Intrinsic::ppc_altivec_vcmpeqfp_p:
    case Intrinsic::ppc_altivec_vcmpequb_p:
    case Intrinsic::ppc_altivec_vcmpequh_p:
    case Intrinsic::ppc_altivec_vcmpequw_p:
    case Intrinsic::ppc_altivec_vcmpequd_p:
    case Intrinsic::ppc_altivec_vcmpgefp_p:
    case Intrinsic::ppc_altivec_vcmpgtfp_p:
    case Intrinsic::ppc_altivec_vcmpgtsb_p:
    case Intrinsic::ppc_altivec_vcmpgtsh_p:
    case Intrinsic::ppc_altivec_vcmpgtsw_p:
    case Intrinsic::ppc_altivec_vcmpgtsd_p:
    case Intrinsic::ppc_altivec_vcmpgtub_p:
    case Intrinsic::ppc_altivec_vcmpgtuh_p:
    case Intrinsic::ppc_altivec_vcmpgtuw_p:
    case Intrinsic::ppc_altivec_vcmpgtud_p:
      Known.Zero = ~1U;  // All bits but the low one are known to be zero.
      break;
    }
  }
  }
}

Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
  switch (Subtarget.getDarwinDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9: {
    if (!ML)
      break;

    if (!DisableInnermostLoopAlign32) {
      // If the nested loop is an innermost loop, prefer a 32-byte alignment
      // so that we can decrease cache misses and branch-prediction misses.
      // The actual alignment of the loop will depend on the hotness check
      // and other logic in alignBlocks.
      if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
        return Align(32);
    }

    const PPCInstrInfo *TII = Subtarget.getInstrInfo();

    // For small loops (between 5 and 8 instructions), align to a 32-byte
    // boundary so that the entire loop fits in one instruction-cache line.
    uint64_t LoopSize = 0;
    for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
      for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
        LoopSize += TII->getInstSizeInBytes(*J);
        if (LoopSize > 32)
          break;
      }

    if (LoopSize > 16 && LoopSize <= 32)
      return Align(32);

    break;
  }
  }

  return TargetLowering::getPrefLoopAlignment(ML);
}
/// getConstraintType - Given a constraint, return the type of
/// constraint it is for this target.
PPCTargetLowering::ConstraintType
PPCTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'b':
    case 'r':
    case 'f':
    case 'd':
    case 'v':
    case 'y':
      return C_RegisterClass;
    case 'Z':
      // FIXME: While Z does indicate a memory constraint, it specifically
      // indicates an r+r address (used in conjunction with the 'y' modifier
      // in the replacement string). Currently, we're forcing the base
      // register to be r0 in the asm printer (which is interpreted as zero)
      // and forming the complete address in the second register. This is
      // suboptimal.
      return C_Memory;
    }
  } else if (Constraint == "wc") { // individual CR bits.
    return C_RegisterClass;
  } else if (Constraint == "wa" || Constraint == "wd" ||
             Constraint == "wf" || Constraint == "ws" ||
             Constraint == "wi" || Constraint == "ww") {
    return C_RegisterClass; // VSX registers.
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
PPCTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();

  // Look at the constraint type.
  if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
    return CW_Register; // an individual CR bit.
  else if ((StringRef(constraint) == "wa" ||
            StringRef(constraint) == "wd" ||
            StringRef(constraint) == "wf") &&
           type->isVectorTy())
    return CW_Register;
  else if (StringRef(constraint) == "wi" && type->isIntegerTy(64))
    return CW_Register; // 'wi' holds 64-bit integer data.
  else if (StringRef(constraint) == "ws" && type->isDoubleTy())
    return CW_Register;
  else if (StringRef(constraint) == "ww" && type->isFloatTy())
    return CW_Register;

  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'b':
    if (type->isIntegerTy())
      weight = CW_Register;
    break;
  case 'f':
    if (type->isFloatTy())
      weight = CW_Register;
    break;
  case 'd':
    if (type->isDoubleTy())
      weight = CW_Register;
    break;
  case 'v':
    if (type->isVectorTy())
      weight = CW_Register;
    break;
  case 'y':
    weight = CW_Register;
    break;
  case 'Z':
    weight = CW_Memory;
    break;
  }
  return weight;
}
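// Illustrative inline-asm uses of these constraints (assumed user code,
// GCC-style syntax):
//   asm("fadd %0, %1, %2" : "=f"(d) : "f"(a), "f"(b));  // FPRs
//   asm("add %0, %1, %2"  : "=r"(x) : "r"(y), "b"(z));  // GPRs; 'b' != r0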
std::pair<unsigned, const TargetRegisterClass *>
PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC RS6000 Constraint Letters
    switch (Constraint[0]) {
    case 'b':   // R1-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
      return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
    case 'r':   // R0-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RCRegClass);
      return std::make_pair(0U, &PPC::GPRCRegClass);
    // 'd' and 'f' constraints are both defined to be "the floating point
    // registers", where one is for 32-bit and the other for 64-bit. We don't
    // really care overly much here so just give them all the same reg classes.
    case 'd':
    case 'f':
      if (Subtarget.hasSPE()) {
        if (VT == MVT::f32 || VT == MVT::i32)
          return std::make_pair(0U, &PPC::GPRCRegClass);
        if (VT == MVT::f64 || VT == MVT::i64)
          return std::make_pair(0U, &PPC::SPERCRegClass);
      } else {
        if (VT == MVT::f32 || VT == MVT::i32)
          return std::make_pair(0U, &PPC::F4RCRegClass);
        if (VT == MVT::f64 || VT == MVT::i64)
          return std::make_pair(0U, &PPC::F8RCRegClass);
        if (VT == MVT::v4f64 && Subtarget.hasQPX())
          return std::make_pair(0U, &PPC::QFRCRegClass);
        if (VT == MVT::v4f32 && Subtarget.hasQPX())
          return std::make_pair(0U, &PPC::QSRCRegClass);
      }
      break;
    case 'v':
      if (VT == MVT::v4f64 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QFRCRegClass);
      if (VT == MVT::v4f32 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QSRCRegClass);
      if (Subtarget.hasAltivec())
        return std::make_pair(0U, &PPC::VRRCRegClass);
      break;
    case 'y':   // crrc
      return std::make_pair(0U, &PPC::CRRCRegClass);
    }
  } else if (Constraint == "wc" && Subtarget.useCRBits()) {
    // An individual CR bit.
    return std::make_pair(0U, &PPC::CRBITRCRegClass);
  } else if ((Constraint == "wa" || Constraint == "wd" ||
              Constraint == "wf" || Constraint == "wi") &&
             Subtarget.hasVSX()) {
    return std::make_pair(0U, &PPC::VSRCRegClass);
  } else if ((Constraint == "ws" || Constraint == "ww") && Subtarget.hasVSX()) {
    if (VT == MVT::f32 && Subtarget.hasP8Vector())
      return std::make_pair(0U, &PPC::VSSRCRegClass);
    else
      return std::make_pair(0U, &PPC::VSFRCRegClass);
  }

  std::pair<unsigned, const TargetRegisterClass *> R =
      TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);

  // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
  // (which we call X[0-9]+). If a 64-bit value has been requested, and a
  // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent
  // register.
  // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
  // the AsmName field from *RegisterInfo.td, then this would not be necessary.
  if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
      PPC::GPRCRegClass.contains(R.first))
    return std::make_pair(TRI->getMatchingSuperReg(R.first,
                          PPC::sub_32, &PPC::G8RCRegClass),
                          &PPC::G8RCRegClass);

  // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
  if (!R.second && StringRef("{cc}").equals_lower(Constraint)) {
    R.first = PPC::CR0;
    R.second = &PPC::CRRCRegClass;
  }

  return R;
}
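// Illustrative values accepted by the immediate constraints handled below
// (assumed examples): "I" takes 0x7FFF, "J" takes 0xABCD0000, "K" takes
// 0xFFFF, "L" takes 0x7FFF0000, "M" takes 40, "N" takes 64, "O" takes only
// 0, and "P" takes 32768 (whose negation is a signed 16-bit constant).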
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;

  // Only support length 1 constraints.
  if (Constraint.length() > 1) return;

  char Letter = Constraint[0];
  switch (Letter) {
  default: break;
  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P': {
    ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
    if (!CST) return; // Must be an immediate to match.
    SDLoc dl(Op);
    int64_t Value = CST->getSExtValue();
    EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
                         // numbers are printed as such.
    switch (Letter) {
    default: llvm_unreachable("Unknown constraint letter!");
    case 'I': // "I" is a signed 16-bit constant.
      if (isInt<16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'J': // "J" is a constant with only the high-order 16 bits nonzero.
      if (isShiftedUInt<16, 16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'L': // "L" is a signed 16-bit constant shifted left 16 bits.
      if (isShiftedInt<16, 16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'K': // "K" is a constant with only the low-order 16 bits nonzero.
      if (isUInt<16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'M': // "M" is a constant that is greater than 31.
      if (Value > 31)
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'N': // "N" is a positive constant that is an exact power of two.
      if (Value > 0 && isPowerOf2_64(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'O': // "O" is the constant zero.
      if (Value == 0)
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'P': // "P" is a constant whose negation is a signed 16-bit constant.
      if (isInt<16>(-Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    }
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  // Handle standard constraint letters.
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
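// Illustrative judgments made by isLegalAddressingMode below:
//   base + 32000     -> legal (displacement fits the immediate field)
//   base + 70000     -> not legal (displacement too large)
//   base1 + base2    -> legal (r+r indexed form)
//   base + idx + 8   -> not legal (there is no r+r+i form)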
// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS,
                                              Instruction *I) const {
  // PPC does not allow r+i addressing modes for vectors!
  if (Ty->isVectorTy() && AM.BaseOffs != 0)
    return false;

  // PPC allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // PPC only supports r+r:
  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    // No other scales are supported.
    return false;
  }

  return true;
}

SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // Make sure the function does not optimize away the store of the RA to
  // the stack.
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setLRStoreRequired();
  bool isPPC64 = Subtarget.isPPC64();
  auto PtrVT = getPointerTy(MF.getDataLayout());

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset =
        DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(),
                        dl, isPPC64 ? MVT::i64 : MVT::i32);
    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Just load the return address off the stack.
  SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
  return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
                     MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;

  // Naked functions never have a frame pointer, and so we use r1. For all
  // other functions, this decision must be delayed until during PEI.
  unsigned FrameReg;
  if (MF.getFunction().hasFnAttribute(Attribute::Naked))
    FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
  else
    FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;

  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
                                         PtrVT);
  while (Depth--)
    FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
                            FrameAddr, MachinePointerInfo());
  return FrameAddr;
}
// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
Register PPCTargetLowering::getRegisterByName(const char* RegName, EVT VT,
                                              const MachineFunction &MF) const {
  bool isPPC64 = Subtarget.isPPC64();
  bool IsDarwinABI = Subtarget.isDarwinABI();

  if ((isPPC64 && VT != MVT::i64 && VT != MVT::i32) ||
      (!isPPC64 && VT != MVT::i32))
    report_fatal_error("Invalid register global variable type");

  bool is64Bit = isPPC64 && VT == MVT::i64;
  Register Reg = StringSwitch<Register>(RegName)
                   .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
                   .Case("r2", (IsDarwinABI || isPPC64) ? Register() : PPC::R2)
                   .Case("r13", (!isPPC64 && IsDarwinABI) ? Register() :
                                (is64Bit ? PPC::X13 : PPC::R13))
                   .Default(Register());

  if (Reg)
    return Reg;
  report_fatal_error("Invalid register name global variable");
}

bool PPCTargetLowering::isAccessedAsGotIndirect(SDValue GA) const {
  // The 32-bit SVR4 ABI accesses everything as got-indirect.
  if (Subtarget.is32BitELFABI())
    return true;

  // AIX accesses everything indirectly through the TOC, which is similar to
  // the GOT.
  if (Subtarget.isAIXABI())
    return true;

  CodeModel::Model CModel = getTargetMachine().getCodeModel();
  // If it is the small or large code model, module locals are accessed
  // indirectly by loading their address from .toc/.got.
  if (CModel == CodeModel::Small || CModel == CodeModel::Large)
    return true;

  // JumpTable and BlockAddress are accessed as got-indirect.
  if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA))
    return true;

  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA))
    return Subtarget.isGVIndirectSymbol(G->getGlobal());

  return false;
}

bool
PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The PowerPC target isn't yet aware of offsets.
  return false;
}
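// Note on the lvx/qvlf entries below: those instructions implicitly align
// the address downward, so the conservative range that may be touched is
// [addr - (size-1), addr + (size-1)]; this is why Info.offset is negative
// and Info.size is 2*store-size-1 for the alignment-truncating forms.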
bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           MachineFunction &MF,
                                           unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::ppc_qpx_qvlfd:
  case Intrinsic::ppc_qpx_qvlfs:
  case Intrinsic::ppc_qpx_qvlfcd:
  case Intrinsic::ppc_qpx_qvlfcs:
  case Intrinsic::ppc_qpx_qvlfiwa:
  case Intrinsic::ppc_qpx_qvlfiwz:
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::ppc_altivec_lvebx:
  case Intrinsic::ppc_altivec_lvehx:
  case Intrinsic::ppc_altivec_lvewx:
  case Intrinsic::ppc_vsx_lxvd2x:
  case Intrinsic::ppc_vsx_lxvw4x: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_lvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_lvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_lvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_lxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfd:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfs:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcd:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcs:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = -VT.getStoreSize()+1;
    Info.size = 2*VT.getStoreSize()-1;
    Info.align = Align::None();
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::ppc_qpx_qvlfda:
  case Intrinsic::ppc_qpx_qvlfsa:
  case Intrinsic::ppc_qpx_qvlfcda:
  case Intrinsic::ppc_qpx_qvlfcsa:
  case Intrinsic::ppc_qpx_qvlfiwaa:
  case Intrinsic::ppc_qpx_qvlfiwza: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_qpx_qvlfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = Align::None();
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfd:
  case Intrinsic::ppc_qpx_qvstfs:
  case Intrinsic::ppc_qpx_qvstfcd:
  case Intrinsic::ppc_qpx_qvstfcs:
  case Intrinsic::ppc_qpx_qvstfiw:
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
  case Intrinsic::ppc_altivec_stvebx:
  case Intrinsic::ppc_altivec_stvehx:
  case Intrinsic::ppc_altivec_stvewx:
  case Intrinsic::ppc_vsx_stxvd2x:
  case Intrinsic::ppc_vsx_stxvw4x: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_stvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_stvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_stvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfd:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfs:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcd:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcs:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = -VT.getStoreSize()+1;
    Info.size = 2*VT.getStoreSize()-1;
    Info.align = Align::None();
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfda:
  case Intrinsic::ppc_qpx_qvstfsa:
  case Intrinsic::ppc_qpx_qvstfcda:
  case Intrinsic::ppc_qpx_qvstfcsa:
  case Intrinsic::ppc_qpx_qvstfiwa: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_qpx_qvstfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = Align::None();
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  default:
    break;
  }

  return false;
}
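// Worked example for the conservative memory ranges above: lvx-style loads
// ignore the low bits of the address, so for a 16-byte VT the bytes actually
// touched lie somewhere in [ptr - 15, ptr + 15]. That is exactly
// offset = -16 + 1 = -15 and size = 2 * 16 - 1 = 31 as reported to the
// optimizer, while the QPX "*a" variants assume an aligned pointer and
// report a plain [ptr, ptr + size) range.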
/// getOptimalMemOpType - Returns the target specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove lowering.
/// If DstAlign is zero, the destination alignment can satisfy any
/// constraint. Similarly, if SrcAlign is zero there is no need to check it
/// against an alignment requirement, probably because the source does not
/// need to be loaded. If 'IsMemset' is true, this is expanding a memset. If
/// 'ZeroMemset' is true, it is a memset of zero. 'MemcpyStrSrc' indicates
/// whether the memcpy source is constant so it does not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT PPCTargetLowering::getOptimalMemOpType(
    uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset,
    bool ZeroMemset, bool MemcpyStrSrc,
    const AttributeList &FuncAttributes) const {
  if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
    // When expanding a memset, require at least two QPX instructions to cover
    // the cost of loading the value to be stored from the constant pool.
    if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) &&
        (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) &&
        !FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) {
      return MVT::v4f64;
    }

    // We should use Altivec/VSX loads and stores when available. For unaligned
    // addresses, unaligned VSX loads are only fast starting with the P8.
    if (Subtarget.hasAltivec() && Size >= 16 &&
        (((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) ||
         ((IsMemset && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
      return MVT::v4i32;
  }

  if (Subtarget.isPPC64()) {
    return MVT::i64;
  }

  return MVT::i32;
}

/// Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  return !(BitSize == 0 || BitSize > 64);
}

bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Generally speaking, zexts are not free, but they are free when they can
  // be folded with other operations.
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  // FIXME: Add other cases...
  //  - 32-bit shifts with a zext to i64
  //  - zext after ctlz, bswap, etc.
  //  - zext after and by a constant mask

  return TargetLowering::isZExtFree(Val, VT2);
}
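// Concrete case for the load fold above: on 64-bit targets, lwz already
// zero-extends the loaded 32-bit value to the full 64-bit register, so
// (zext (load i32)) costs no extra instruction compared to the plain load.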
bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
  assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
         "invalid fpext types");
  // Extending to float128 is not free.
  if (DestVT == MVT::f128)
    return false;
  return true;
}

bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                       unsigned,
                                                       unsigned,
                                                       MachineMemOperand::Flags,
                                                       bool *Fast) const {
  if (DisablePPCUnaligned)
    return false;

  // PowerPC supports unaligned memory access for simple non-vector types.
  // Although accessing unaligned addresses is not as efficient as accessing
  // aligned addresses, it is generally more efficient than manual expansion,
  // and it generally traps into software emulation only when crossing page
  // boundaries.

  if (!VT.isSimple())
    return false;

  if (VT.getSimpleVT().isVector()) {
    if (Subtarget.hasVSX()) {
      if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
          VT != MVT::v4f32 && VT != MVT::v4i32)
        return false;
    } else {
      return false;
    }
  }

  if (VT == MVT::ppcf128)
    return false;

  if (Fast)
    *Fast = true;

  return true;
}

bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return (EnableQuadPrecision && Subtarget.hasP9Vector());
  default:
    break;
  }

  return false;
}
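// Example of what the FMA hook above enables: for f32/f64 the DAG combiner
// may contract (fadd (fmul a, b), c) into a single fused multiply-add, which
// is both faster and rounds only once. The f128 case presumably maps to the
// ISA 3.0 quad-precision FMA, hence the Power9 + quad-precision gating.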
const MCPhysReg *
PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
  // LR is a callee-save register, but we must treat it as clobbered by any
  // call site. Hence we include LR in the scratch registers, which are in
  // turn added as implicit-defs for stackmaps and patchpoints. The same
  // reasoning applies to CTR, which is used by any indirect call.
  static const MCPhysReg ScratchRegs[] = {
    PPC::X12, PPC::LR8, PPC::CTR8, 0
  };

  return ScratchRegs;
}

unsigned PPCTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
}

unsigned PPCTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
}

bool
PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
    EVT VT, unsigned DefinedValues) const {
  if (VT == MVT::v2i64)
    return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves

  if (Subtarget.hasVSX() || Subtarget.hasQPX())
    return true;

  return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
}

Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
  if (DisableILPPref || Subtarget.enableMachineScheduler())
    return TargetLowering::getSchedulingPreference(N);

  return Sched::ILP;
}

// Create a fast isel object.
FastISel *
PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
                                  const TargetLibraryInfo *LibInfo) const {
  return PPC::createFastISel(FuncInfo, LibInfo);
}

void PPCTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  if (Subtarget.isDarwinABI()) return;
  if (!Subtarget.isPPC64()) return;

  // Update IsSplitCSR in PPCFunctionInfo
  PPCFunctionInfo *PFI = Entry->getParent()->getInfo<PPCFunctionInfo>();
  PFI->setIsSplitCSR(true);
}

void PPCTargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return;

  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  for (const MCPhysReg *I = IStart; *I; ++I) {
    const TargetRegisterClass *RC = nullptr;
    if (PPC::G8RCRegClass.contains(*I))
      RC = &PPC::G8RCRegClass;
    else if (PPC::F8RCRegClass.contains(*I))
      RC = &PPC::F8RCRegClass;
    else if (PPC::CRRCRegClass.contains(*I))
      RC = &PPC::CRRCRegClass;
    else if (PPC::VRRCRegClass.contains(*I))
      RC = &PPC::VRRCRegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");

    Register NewVR = MRI->createVirtualRegister(RC);
    // Create copy from CSR to a virtual register.
    // FIXME: this currently does not emit CFI pseudo-instructions, it works
    // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
    // nounwind. If we want to generalize this later, we may need to emit
    // CFI pseudo-instructions.
    assert(Entry->getParent()->getFunction().hasFnAttribute(
               Attribute::NoUnwind) &&
           "Function should be nounwind in insertCopiesSplitCSR!");
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
        .addReg(*I);

    // Insert the copy-back instructions right before the terminator.
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)
          .addReg(NewVR);
  }
}

// Override to enable LOAD_STACK_GUARD lowering on Linux.
bool PPCTargetLowering::useLoadStackGuardNode() const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::useLoadStackGuardNode();
  return true;
}

// Override to disable global variable loading on Linux.
void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::insertSSPDeclarations(M);
}
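// Net effect of the two Linux overrides above: the stack-protector value is
// read through the LOAD_STACK_GUARD pseudo rather than via a
// __stack_chk_guard global, so no SSP guard declaration needs to be inserted
// into the module on Linux.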
bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                     bool ForCodeSize) const {
  if (!VT.isSimple() || !Subtarget.hasVSX())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  default:
    // For FP types that are currently not supported by the PPC backend
    // (e.g. f16, f80), return false.
    return false;
  case MVT::f32:
  case MVT::f64:
  case MVT::ppcf128:
    return Imm.isPosZero();
  }
}

// For a vector shift operation op, fold
// (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
                                  SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();
  unsigned OpSizeInBits = VT.getScalarSizeInBits();
  unsigned Opcode = N->getOpcode();
  unsigned TargetOpcode;

  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected shift operation");
  case ISD::SHL:
    TargetOpcode = PPCISD::SHL;
    break;
  case ISD::SRL:
    TargetOpcode = PPCISD::SRL;
    break;
  case ISD::SRA:
    TargetOpcode = PPCISD::SRA;
    break;
  }

  if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
      N1->getOpcode() == ISD::AND)
    if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
      if (Mask->getZExtValue() == OpSizeInBits - 1)
        return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));

  return SDValue();
}
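// Worked instance of stripModuloOnShift: for (shl v4i32 x, (and y, 31)) the
// splat mask 31 equals OpSizeInBits - 1, so the AND is stripped and the node
// becomes (PPCISD::SHL x, y). This is sound because the underlying Altivec
// shift (vslw here) already uses only the low log2(32) = 5 bits of each
// shift-amount element, i.e. it shifts modulo the element width anyway.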
SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  SDValue N0 = N->getOperand(0);
  ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!Subtarget.isISA3_0() ||
      N0.getOpcode() != ISD::SIGN_EXTEND ||
      N0.getOperand(0).getValueType() != MVT::i32 ||
      CN1 == nullptr || N->getValueType(0) != MVT::i64)
    return SDValue();

  // We can't save an operation here if the value is already extended, and
  // the existing shift is easier to combine.
  SDValue ExtsSrc = N0.getOperand(0);
  if (ExtsSrc.getOpcode() == ISD::TRUNCATE &&
      ExtsSrc.getOperand(0).getOpcode() == ISD::AssertSext)
    return SDValue();

  SDLoc DL(N0);
  SDValue ShiftBy = SDValue(CN1, 0);
  // We want the shift amount to be i32 on the extswsli, but the shift amount
  // could be i64.
  if (ShiftBy.getValueType() == MVT::i64)
    ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32);

  return DCI.DAG.getNode(PPCISD::EXTSWSLI, DL, MVT::i64, N0->getOperand(0),
                         ShiftBy);
}

SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}

SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}
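// Sketch of the combineSHL fold above on ISA 3.0 (Power9):
//   (shl (sign_extend i32 %x to i64), 3)
// becomes (PPCISD::EXTSWSLI %x, 3), which selects to a single extswsli
// (extend sign word and shift left immediate) instead of extsw + sldi.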
// Transform (add X, (zext(setne Z, C))) -> (addze X, (addic (addi Z, -C), -1))
// Transform (add X, (zext(sete  Z, C))) -> (addze X, (subfic (addi Z, -C), 0))
// When C is zero, the expression (addi Z, -C) can be simplified to Z.
// Requirement: -C is in [-32768, 32767], and X and Z are MVT::i64 types.
static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG,
                                 const PPCSubtarget &Subtarget) {
  if (!Subtarget.isPPC64())
    return SDValue();

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  auto isZextOfCompareWithConstant = [](SDValue Op) {
    if (Op.getOpcode() != ISD::ZERO_EXTEND || !Op.hasOneUse() ||
        Op.getValueType() != MVT::i64)
      return false;

    SDValue Cmp = Op.getOperand(0);
    if (Cmp.getOpcode() != ISD::SETCC || !Cmp.hasOneUse() ||
        Cmp.getOperand(0).getValueType() != MVT::i64)
      return false;

    if (auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1))) {
      int64_t NegConstant = 0 - Constant->getSExtValue();
      // Due to the limitations of the addi instruction,
      // -C is required to be [-32768, 32767].
      return isInt<16>(NegConstant);
    }

    return false;
  };

  bool LHSHasPattern = isZextOfCompareWithConstant(LHS);
  bool RHSHasPattern = isZextOfCompareWithConstant(RHS);

  // If there is a pattern, canonicalize a zext operand to the RHS.
  if (LHSHasPattern && !RHSHasPattern)
    std::swap(LHS, RHS);
  else if (!LHSHasPattern && !RHSHasPattern)
    return SDValue();

  SDLoc DL(N);
  SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Glue);
  SDValue Cmp = RHS.getOperand(0);
  SDValue Z = Cmp.getOperand(0);
  auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1));

  assert(Constant && "Constant Should not be a null pointer.");
  int64_t NegConstant = 0 - Constant->getSExtValue();

  switch (cast<CondCodeSDNode>(Cmp.getOperand(2))->get()) {
  default: break;
  case ISD::SETNE: {
    //                                 when C == 0
    //                             --> addze X, (addic Z, -1).carry
    //                            /
    // add X, (zext(setne Z, C))--
    //                            \    when -32768 <= -C <= 32767 && C != 0
    //                             --> addze X, (addic (addi Z, -C), -1).carry
    SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
                              DAG.getConstant(NegConstant, DL, MVT::i64));
    SDValue AddOrZ = NegConstant != 0 ? Add : Z;
    SDValue Addc = DAG.getNode(ISD::ADDC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
                               AddOrZ, DAG.getConstant(-1ULL, DL, MVT::i64));
    return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
                       SDValue(Addc.getNode(), 1));
  }
  case ISD::SETEQ: {
    //                                 when C == 0
    //                             --> addze X, (subfic Z, 0).carry
    //                            /
    // add X, (zext(sete  Z, C))--
    //                            \    when -32768 <= -C <= 32767 && C != 0
    //                             --> addze X, (subfic (addi Z, -C), 0).carry
    SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
                              DAG.getConstant(NegConstant, DL, MVT::i64));
    SDValue AddOrZ = NegConstant != 0 ? Add : Z;
    SDValue Subc = DAG.getNode(ISD::SUBC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
                               DAG.getConstant(0, DL, MVT::i64), AddOrZ);
    return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
                       SDValue(Subc.getNode(), 1));
  }
  }

  return SDValue();
}

SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = combineADDToADDZE(N, DCI.DAG, Subtarget))
    return Value;

  return SDValue();
}
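// Worked example for the SETNE path above, with C == 0:
//   add X, (zext (setne Z, 0))
//   --> addic t, Z, -1      ; CA = 1 iff the unsigned add Z + ~0 carries,
//                           ; i.e. iff Z != 0
//   --> addze X', X         ; X' = X + CA = X + (Z != 0)
// which avoids materializing the comparison result with cmp/isel.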
// Detect TRUNCATE operations on bitcasts of float128 values.
// What we are looking for here is the situation where we extract a subset
// of bits from a 128-bit float.
// This can be of two forms:
// 1) BITCAST of f128 feeding TRUNCATE
// 2) BITCAST of f128 feeding SRL (a shift) feeding TRUNCATE
// This is required because we do not have a legal i128 type, and so we want
// to prevent having to store the f128 and then reload part of it.
SDValue PPCTargetLowering::combineTRUNCATE(SDNode *N,
                                           DAGCombinerInfo &DCI) const {
  // If we are using CRBits then try that first.
  if (Subtarget.useCRBits()) {
    // Check if CRBits did anything and return that if it did.
    if (SDValue CRTruncValue = DAGCombineTruncBoolExt(N, DCI))
      return CRTruncValue;
  }

  SDLoc dl(N);
  SDValue Op0 = N->getOperand(0);

  // Looking for a truncate of i128 to i64.
  if (Op0.getValueType() != MVT::i128 || N->getValueType(0) != MVT::i64)
    return SDValue();

  int EltToExtract = DCI.DAG.getDataLayout().isBigEndian() ? 1 : 0;

  // SRL feeding TRUNCATE.
  if (Op0.getOpcode() == ISD::SRL) {
    ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
    // The right shift has to be by 64 bits.
    if (!ConstNode || ConstNode->getZExtValue() != 64)
      return SDValue();

    // Switch the element number to extract.
    EltToExtract = EltToExtract ? 0 : 1;
    // Update Op0 past the SRL.
    Op0 = Op0.getOperand(0);
  }

  // BITCAST feeding a TRUNCATE possibly via SRL.
  if (Op0.getOpcode() == ISD::BITCAST &&
      Op0.getValueType() == MVT::i128 &&
      Op0.getOperand(0).getValueType() == MVT::f128) {
    SDValue Bitcast = DCI.DAG.getBitcast(MVT::v2i64, Op0.getOperand(0));
    return DCI.DAG.getNode(
        ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Bitcast,
        DCI.DAG.getTargetConstant(EltToExtract, dl, MVT::i32));
  }
  return SDValue();
}

SDValue PPCTargetLowering::combineMUL(SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  ConstantSDNode *ConstOpOrElement = isConstOrConstSplat(N->getOperand(1));
  if (!ConstOpOrElement)
    return SDValue();

  // An imul is usually smaller than the alternative sequence for legal types.
  if (DAG.getMachineFunction().getFunction().hasMinSize() &&
      isOperationLegal(ISD::MUL, N->getValueType(0)))
    return SDValue();

  auto IsProfitable = [this](bool IsNeg, bool IsAddOne, EVT VT) -> bool {
    switch (this->Subtarget.getDarwinDirective()) {
    default:
      // TODO: enhance the condition for subtargets before pwr8
      return false;
    case PPC::DIR_PWR8:
      //  type        mul     add    shl
      // scalar        4       1      1
      // vector        7       2      2
      return true;
    case PPC::DIR_PWR9:
      //  type        mul     add    shl
      // scalar        5       2      2
      // vector        7       2      2
      //
      // The cycle ratios of the relevant operations are shown in the table
      // above. Since mul costs 5 (scalar) / 7 (vector) cycles while
      // add/sub/shl each cost 2, the two-instruction patterns (add/sub + shl,
      // total 4) are always profitable; the three-instruction pattern
      // (mul x, -(2^N + 1)) => -(add (shl x, N), x) needs sub + add + shl,
      // total 6, so it is only profitable for vector types.
      return IsAddOne && IsNeg ? VT.isVector() : true;
    }
  };

  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  const APInt &MulAmt = ConstOpOrElement->getAPIntValue();
  bool IsNeg = MulAmt.isNegative();
  APInt MulAmtAbs = MulAmt.abs();

  if ((MulAmtAbs - 1).isPowerOf2()) {
    // (mul x, 2^N + 1) => (add (shl x, N), x)
    // (mul x, -(2^N + 1)) => -(add (shl x, N), x)

    if (!IsProfitable(IsNeg, true, VT))
      return SDValue();

    SDValue Op0 = N->getOperand(0);
    SDValue Op1 =
        DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                    DAG.getConstant((MulAmtAbs - 1).logBase2(), DL, VT));
    SDValue Res = DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);

    if (!IsNeg)
      return Res;

    return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
  } else if ((MulAmtAbs + 1).isPowerOf2()) {
    // (mul x, 2^N - 1) => (sub (shl x, N), x)
    // (mul x, -(2^N - 1)) => (sub x, (shl x, N))

    if (!IsProfitable(IsNeg, false, VT))
      return SDValue();

    SDValue Op0 = N->getOperand(0);
    SDValue Op1 =
        DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                    DAG.getConstant((MulAmtAbs + 1).logBase2(), DL, VT));

    if (!IsNeg)
      return DAG.getNode(ISD::SUB, DL, VT, Op1, Op0);
    else
      return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);

  } else {
    return SDValue();
  }
}
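// Worked examples for combineMUL above:
//   (mul x, 9)  : |9| - 1 = 8 is a power of two, so 9 = 2^3 + 1 and the
//                 result is (add (shl x, 3), x).
//   (mul x, -7) : |-7| + 1 = 8 is a power of two, so -7 = -(2^3 - 1) and the
//                 result is (sub x, (shl x, 3)) = x - 8x = -7x.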
bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  // Only duplicate to increase tail-calls for the 64-bit SysV ABIs.
  if (!Subtarget.is64BitELFABI())
    return false;

  // If not a tail call then no need to proceed.
  if (!CI->isTailCall())
    return false;

  // If tail calls are disabled for the caller then we are done.
  const Function *Caller = CI->getParent()->getParent();
  auto Attr = Caller->getFnAttribute("disable-tail-calls");
  if (Attr.getValueAsString() == "true")
    return false;

  // If sibling calls have been disabled and tail-calls aren't guaranteed,
  // there is no reason to duplicate.
  auto &TM = getTargetMachine();
  if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
    return false;

  // Can't tail call a function called indirectly, or if it has variadic args.
  const Function *Callee = CI->getCalledFunction();
  if (!Callee || Callee->isVarArg())
    return false;

  // Make sure the callee and caller calling conventions are eligible for TCO.
  if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
                                           CI->getCallingConv()))
    return false;

  // If the function is local then we have a good chance at tail-calling it.
  return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
}

bool PPCTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
  if (!Subtarget.hasVSX())
    return false;
  if (Subtarget.hasP9Vector() && VT == MVT::f128)
    return true;
  return VT == MVT::f32 || VT == MVT::f64 ||
         VT == MVT::v4f32 || VT == MVT::v2f64;
}

bool PPCTargetLowering::
isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
  const Value *Mask = AndI.getOperand(1);
  // If the mask is suitable for andi. or andis. we should sink the and.
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
    // Can't handle constants wider than 64 bits.
    if (CI->getBitWidth() > 64)
      return false;
    int64_t ConstVal = CI->getZExtValue();
    return isUInt<16>(ConstVal) ||
           (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
  }

  // For non-constant masks, we can always use the record-form and.
  return true;
}
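// Mask examples for the record-form check above:
//   0x0000FFFF -> fits andi.  (low 16 bits)             -> sink the and
//   0xFFFF0000 -> fits andis. (high 16 bits, low clear) -> sink the and
//   0x0001FFFF -> needs both halves                     -> not beneficial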
// Transform (abs (sub (zext a), (zext b))) to (vabsd a, b, 0)
// Transform (abs (sub (zext a), (zext_invec b))) to (vabsd a, b, 0)
// Transform (abs (sub (zext_invec a), (zext_invec b))) to (vabsd a, b, 0)
// Transform (abs (sub (zext_invec a), (zext b))) to (vabsd a, b, 0)
// Transform (abs (sub a, b)) to (vabsd a, b, 1) if a and b are of type v4i32
SDValue PPCTargetLowering::combineABS(SDNode *N, DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::ABS) && "Need ABS node here");
  assert(Subtarget.hasP9Altivec() &&
         "Only combine this when P9 altivec supported!");
  EVT VT = N->getValueType(0);
  if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  if (N->getOperand(0).getOpcode() == ISD::SUB) {
    // Even for signed integers, if the inputs are known to be non-negative
    // (as signed integers) because they are zero-extended, the unsigned
    // VABSD computes the correct absolute difference.
    unsigned SubOpcd0 = N->getOperand(0)->getOperand(0).getOpcode();
    unsigned SubOpcd1 = N->getOperand(0)->getOperand(1).getOpcode();
    if ((SubOpcd0 == ISD::ZERO_EXTEND ||
         SubOpcd0 == ISD::ZERO_EXTEND_VECTOR_INREG) &&
        (SubOpcd1 == ISD::ZERO_EXTEND ||
         SubOpcd1 == ISD::ZERO_EXTEND_VECTOR_INREG)) {
      return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
                         N->getOperand(0)->getOperand(0),
                         N->getOperand(0)->getOperand(1),
                         DAG.getTargetConstant(0, dl, MVT::i32));
    }

    // For type v4i32, it can be optimized with xvnegsp + vabsduw.
    if (N->getOperand(0).getValueType() == MVT::v4i32 &&
        N->getOperand(0).hasOneUse()) {
      return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
                         N->getOperand(0)->getOperand(0),
                         N->getOperand(0)->getOperand(1),
                         DAG.getTargetConstant(1, dl, MVT::i32));
    }
  }

  return SDValue();
}

// For type v4i32/v8i16/v16i8, transform
// from (vselect (setcc a, b, setugt), (sub a, b), (sub b, a)) to (vabsd a, b)
// from (vselect (setcc a, b, setuge), (sub a, b), (sub b, a)) to (vabsd a, b)
// from (vselect (setcc a, b, setult), (sub b, a), (sub a, b)) to (vabsd a, b)
// from (vselect (setcc a, b, setule), (sub b, a), (sub a, b)) to (vabsd a, b)
SDValue PPCTargetLowering::combineVSelect(SDNode *N,
                                          DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::VSELECT) && "Need VSELECT node here");
  assert(Subtarget.hasP9Altivec() &&
         "Only combine this when P9 altivec supported!");

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Cond = N->getOperand(0);
  SDValue TrueOpnd = N->getOperand(1);
  SDValue FalseOpnd = N->getOperand(2);
  EVT VT = N->getOperand(1).getValueType();

  if (Cond.getOpcode() != ISD::SETCC || TrueOpnd.getOpcode() != ISD::SUB ||
      FalseOpnd.getOpcode() != ISD::SUB)
    return SDValue();

  // ABSD is only available for type v4i32/v8i16/v16i8.
  if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
    return SDValue();

  // At least to save one more dependent computation.
  if (!(Cond.hasOneUse() || TrueOpnd.hasOneUse() || FalseOpnd.hasOneUse()))
    return SDValue();

  ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

  // We can only handle unsigned comparisons here.
  switch (CC) {
  default:
    return SDValue();
  case ISD::SETUGT:
  case ISD::SETUGE:
    break;
  case ISD::SETULT:
  case ISD::SETULE:
    std::swap(TrueOpnd, FalseOpnd);
    break;
  }

  SDValue CmpOpnd1 = Cond.getOperand(0);
  SDValue CmpOpnd2 = Cond.getOperand(1);

  // SETCC CmpOpnd1 CmpOpnd2 cond
  // TrueOpnd = CmpOpnd1 - CmpOpnd2
  // FalseOpnd = CmpOpnd2 - CmpOpnd1
  if (TrueOpnd.getOperand(0) == CmpOpnd1 &&
      TrueOpnd.getOperand(1) == CmpOpnd2 &&
      FalseOpnd.getOperand(0) == CmpOpnd2 &&
      FalseOpnd.getOperand(1) == CmpOpnd1) {
    return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(1).getValueType(),
                       CmpOpnd1, CmpOpnd2,
                       DAG.getTargetConstant(0, dl, MVT::i32));
  }

  return SDValue();
}
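// Example of the vselect fold above: with unsigned v16i8 operands,
//   (vselect (setcc a, b, setugt), (sub a, b), (sub b, a))
// computes max(a, b) - min(a, b), i.e. the unsigned absolute difference, and
// is replaced by (PPCISD::VABSD a, b, 0), which should select to a single
// Power9 vabsdub instruction (vabsduh/vabsduw for v8i16/v4i32).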