//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCCCState.h"
#include "PPCCallingConv.h"
#include "PPCFrameLowering.h"
#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCRegisterInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "ppc-lowering"

static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
    cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
    cl::desc("disable setting the node scheduling preference to ILP on PPC"),
    cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
    cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableSCO("disable-ppc-sco",
    cl::desc("disable sibling call optimization on ppc"), cl::Hidden);

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumSiblingCalls, "Number of sibling calls");

static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int);

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
                                     const PPCSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? 8 : 4);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  if (!useSoftFloat()) {
    addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
    addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
  }

  // Match BITREVERSE to customized fast code sequence in the td file.
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);

  // Sub-word ATOMIC_CMP_SWAP needs to ensure that the input is zero-extended.
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
  }

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
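  // The pre-increment forms above correspond to PPC's update-form memory
  // instructions (e.g. lwzu/stwu), which write the incremented address back
  // into the base register as part of the access.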

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType (ISD::SINT_TO_FP, MVT::i1,
                         isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // This is used in the ppcf128->int sequence. Note it has different semantics
  // from FP_ROUND: that rounds to nearest, this rounds to zero.
  setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions unless we are on P9.
  // On P9 we may use a hardware instruction to compute the remainder.
  // The instructions are not legalized directly because in the cases where the
  // result of both the remainder and the division is required it is more
  // efficient to compute the remainder from the result of the division rather
  // than use the remainder instruction.
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::SREM, MVT::i32, Custom);
    setOperationAction(ISD::UREM, MVT::i32, Custom);
    setOperationAction(ISD::SREM, MVT::i64, Custom);
    setOperationAction(ISD::UREM, MVT::i64, Custom);
  } else {
    setOperationAction(ISD::SREM, MVT::i32, Expand);
    setOperationAction(ISD::UREM, MVT::i32, Expand);
    setOperationAction(ISD::SREM, MVT::i64, Expand);
    setOperationAction(ISD::UREM, MVT::i64, Expand);
  }
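  // Illustration of the point above: when both results are live, the
  // remainder can be recovered from the quotient as a - (a / b) * b (one
  // multiply plus one subtract after the divide), which is cheaper than
  // issuing a second long-latency divide/modulo instruction.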

  if (Subtarget.hasP9Vector()) {
    setOperationAction(ISD::ABS, MVT::v4i32, Legal);
    setOperationAction(ISD::ABS, MVT::v8i16, Legal);
    setOperationAction(ISD::ABS, MVT::v16i8, Legal);
  }

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FPOW , MVT::f64, Expand);
  setOperationAction(ISD::FMA  , MVT::f64, Legal);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);
  setOperationAction(ISD::FPOW , MVT::f32, Expand);
  setOperationAction(ISD::FMA  , MVT::f32, Legal);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);
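  // In other words: FSQRT stays legal only when the target has a real
  // fsqrt/fsqrts, or when unsafe-math permits expanding it via the
  // reciprocal-estimate instructions (frsqrte/fre and their single-precision
  // forms) refined with Newton-Raphson iterations.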

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP, but we can use the vector BSWAP instruction
  // xxbrd to speed up scalar BSWAP64.
  // CTPOP and CTTZ were introduced in P8 and P9, respectively.
  setOperationAction(ISD::BSWAP, MVT::i32  , Expand);
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::BSWAP, MVT::i64  , Custom);
    setOperationAction(ISD::CTTZ , MVT::i32  , Legal);
    setOperationAction(ISD::CTTZ , MVT::i64  , Legal);
  } else {
    setOperationAction(ISD::BSWAP, MVT::i64  , Expand);
    setOperationAction(ISD::CTTZ , MVT::i32  , Expand);
    setOperationAction(ISD::CTTZ , MVT::i64  , Expand);
  }

  if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
    setOperationAction(ISD::CTPOP, MVT::i32  , Legal);
    setOperationAction(ISD::CTPOP, MVT::i64  , Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32  , Expand);
    setOperationAction(ISD::CTPOP, MVT::i64  , Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32  , Expand);
  setOperationAction(ISD::ROTR, MVT::i64  , Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have SELECT.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires a SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP.
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  if (Subtarget.hasDirectMove() && isPPC64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i64, Legal);
    setOperationAction(ISD::BITCAST, MVT::f64, Legal);
  } else {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
  }
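  // With P8 direct moves on a 64-bit target, the bitcasts above become
  // GPR<->VSR register moves (mtvsrd/mfvsrd and friends) instead of a
  // store/reload round trip through the stack.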

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling but a light-weight setjmp/longjmp replacement to
  // support continuation, user-level threading, etc. As a result, no other
  // SjLj exception interfaces are implemented; please don't build your own
  // exception handling based on them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);
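  // Materialization here typically means a Hi/Lo (e.g. lis/addi) pair, or a
  // TOC-relative load on 64-bit ELF, depending on the ABI and code model.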

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART           , MVT::Other, Custom);

  if (Subtarget.isSVR4ABI()) {
    if (isPPC64) {
      // VAARG always uses double-word chunks, so promote anything smaller.
      setOperationAction(ISD::VAARG, MVT::i1, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i1, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i8, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i8, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i16, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i16, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i32, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i32, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::Other, Expand);
    } else {
      // VAARG is custom lowered with the 32-bit SVR4 ABI.
      setOperationAction(ISD::VAARG, MVT::Other, Custom);
      setOperationAction(ISD::VAARG, MVT::i64, Custom);
    }
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  if (Subtarget.isSVR4ABI() && !isPPC64)
    // VACOPY is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VACOPY            , MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY            , MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE      , MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64  , Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  // Comparisons that require checking two conditions.
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);
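  // fcmpu sets separate LT/GT/EQ/UN bits in a CR field, so each predicate
  // above needs two of those bits (e.g. SETULT = UN|LT, SETOGE = GT|EQ);
  // expanding lets the legalizer rewrite them in terms of single-bit checks.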

  if (Subtarget.has64BitSupport()) {
    // These implementations also have instructions for converting between i64
    // and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }
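  // The *_PARTS nodes above represent a double-register-width shift (i128 on
  // 64-bit targets, i64 on 32-bit targets) expressed as operations on the two
  // register-sized halves.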

  if (Subtarget.hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::vector_valuetypes()) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // Vector instructions introduced in P8
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      } else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // Vector instructions introduced in P9
      if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
        setOperationAction(ISD::CTTZ, VT, Legal);
      else
        setOperationAction(ISD::CTTZ, VT, Expand);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND   , VT, Promote);
      AddPromotedToType (ISD::AND   , VT, MVT::v4i32);
      setOperationAction(ISD::OR    , VT, Promote);
      AddPromotedToType (ISD::OR    , VT, MVT::v4i32);
      setOperationAction(ISD::XOR   , VT, Promote);
      AddPromotedToType (ISD::XOR   , VT, MVT::v4i32);
      setOperationAction(ISD::LOAD  , VT, Promote);
      AddPromotedToType (ISD::LOAD  , VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType (ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType (ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL   , VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL, VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::VSELECT, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      for (MVT InnerVT : MVT::vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM; others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND   , MVT::v4i32, Legal);
    setOperationAction(ISD::OR    , MVT::v4i32, Legal);
    setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE , MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO,   MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);
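    // Altivec only provides ordered compares (vcmpeqfp/vcmpgefp/vcmpgtfp);
    // the ordered/unordered predicates above are therefore expanded, e.g. by
    // comparing a value against itself to detect NaN lanes.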

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
      }
      if (Subtarget.hasDirectMove() && isPPC64) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
      }
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);

      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
      setOperationAction(ISD::VSELECT, MVT::v8i16, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO,   MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        // 128 bit shifts can be accomplished via 3 instructions for SHL and
        // SRL, but not for SRA because of the instructions available:
        // VS{RL} and VS{RL}O. However due to direct move costs, it's not worth
        // doing.
        setOperationAction(ISD::SHL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRA, MVT::v1i128, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      } else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64);
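      // Promoting v2i64 memory operations to v2f64 lets integer vectors reuse
      // the VSX lxvd2x/stxvd2x load/store patterns.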

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Vector operation legalization checks the result type of
      // SIGN_EXTEND_INREG, overall legalization checks the inner type.
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);

      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v2f64, Legal);

      if (Subtarget.hasDirectMove())
        setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }

    if (Subtarget.hasP9Vector()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

      // 128 bit shifts can be accomplished via 3 instructions for SHL and
      // SRL, but not for SRA because of the instructions available:
      // VS{RL} and VS{RL}O.
      setOperationAction(ISD::SHL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRA, MVT::v1i128, Expand);
    }

    if (Subtarget.hasP9Altivec()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    }
  }

  if (Subtarget.hasQPX()) {
    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FREM, MVT::v4f64, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);

    setOperationAction(ISD::LOAD  , MVT::v4f64, Custom);
    setOperationAction(ISD::STORE , MVT::v4f64, Custom);

    setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);
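    // (QPX keeps v4f32 in the same 256-bit registers as v4f64, with lanes
    // held in double precision, so these f32<->f64 vector conversions need
    // custom handling.)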

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f64, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f64, Expand);
    setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f64, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f64, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f64, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);

    setOperationAction(ISD::FP_TO_SINT , MVT::v4f64, Legal);
    setOperationAction(ISD::FP_TO_UINT , MVT::v4f64, Expand);

    setOperationAction(ISD::FP_ROUND , MVT::v4f32, Legal);
    setOperationAction(ISD::FP_ROUND_INREG , MVT::v4f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);

    setOperationAction(ISD::FNEG , MVT::v4f64, Legal);
    setOperationAction(ISD::FABS , MVT::v4f64, Legal);
    setOperationAction(ISD::FSIN , MVT::v4f64, Expand);
    setOperationAction(ISD::FCOS , MVT::v4f64, Expand);
    setOperationAction(ISD::FPOW , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG2 , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG10 , MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP , MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP2 , MVT::v4f64, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);

    addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FREM, MVT::v4f32, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);

    setOperationAction(ISD::LOAD  , MVT::v4f32, Custom);
    setOperationAction(ISD::STORE , MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f32, Expand);
    setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f32, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f32, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    setOperationAction(ISD::FP_TO_SINT , MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT , MVT::v4f32, Expand);

    setOperationAction(ISD::FNEG , MVT::v4f32, Legal);
    setOperationAction(ISD::FABS , MVT::v4f32, Legal);
    setOperationAction(ISD::FSIN , MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS , MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2 , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10 , MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP , MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2 , MVT::v4f32, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);

    setOperationAction(ISD::AND , MVT::v4i1, Legal);
    setOperationAction(ISD::OR , MVT::v4i1, Legal);
    setOperationAction(ISD::XOR , MVT::v4i1, Legal);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);

    setOperationAction(ISD::LOAD  , MVT::v4i1, Custom);
    setOperationAction(ISD::STORE , MVT::v4i1, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4i1, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4i1, Expand);
    setOperationAction(ISD::CONCAT_VECTORS , MVT::v4i1, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4i1, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4i1, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);

    addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);

    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);

    // These need to set FE_INEXACT, and so cannot be vectorized here.
    setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);

    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    } else {
      setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);

      setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    }
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);
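  // (The time base is a 64-bit register; a 32-bit target cannot read it
  // atomically and must use the mftbu/mftb/mftbu retry sequence, hence
  // Custom.)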

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasAltivec()) {
    // Altivec instructions set fields to all zeros or all ones.
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  }

  if (!isPPC64) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::BUILD_VECTOR);
  if (Subtarget.hasFPCVT())
    setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  if (Subtarget.useCRBits())
    setTargetDAGCombine(ISD::BRCOND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);

  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine(ISD::TRUNCATE);
    setTargetDAGCombine(ISD::SETCC);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  // Use reciprocal estimates.
  if (TM.Options.UnsafeFPMath) {
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::FSQRT);
  }

  // Darwin long double math library functions have $LDBL128 appended.
  if (Subtarget.isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  // With 32 condition bits, we don't need to sink (and duplicate) compares
  // aggressively in CodeGenPrep.
  if (Subtarget.useCRBits()) {
    setHasMultipleConditionRegisters();
    setJumpIsExpensive();
  }

  setMinFunctionAlignment(2);
  if (Subtarget.isDarwin())
    setPrefFunctionAlignment(4);

  switch (Subtarget.getDarwinDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_A2:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9:
    setPrefFunctionAlignment(4);
    setPrefLoopAlignment(4);
    break;
  }

  if (Subtarget.enableMachineScheduler())
    setSchedulingPreference(Sched::Source);
  else
    setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties(STI.getRegisterInfo());

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getDarwinDirective() == PPC::DIR_E500mc ||
      Subtarget.getDarwinDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;
  } else if (Subtarget.getDarwinDirective() == PPC::DIR_A2) {
    // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of the function call, even when warm, can be
    // over one hundred cycles.
    MaxStoresPerMemset = 128;
    MaxStoresPerMemcpy = 128;
    MaxStoresPerMemmove = 128;
    MaxLoadsPerMemcmp = 128;
  } else {
    MaxLoadsPerMemcmp = 8;
    MaxLoadsPerMemcmpOptSize = 4;
  }
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
                             unsigned MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
      MaxAlign = 32;
    else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      unsigned EltAlign = 0;
      getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}
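// For example, with QPX (MaxMaxAlign == 32) a byval aggregate containing a
// 256-bit vector yields 32-byte alignment; with only Altivec, a 128-bit
// vector caps the result at 16 bytes.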

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {
  // Darwin passes everything on a 4-byte boundary.
  if (Subtarget.isDarwin())
    return 4;

  // 16-byte and wider vectors are passed on a 16-byte boundary. Everything
  // else is passed on an 8-byte boundary on PPC64 and a 4-byte boundary on
  // PPC32.
  unsigned Align = Subtarget.isPPC64() ? 8 : 4;
  if (Subtarget.hasAltivec() || Subtarget.hasQPX())
    getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ? 32 : 16);
  return Align;
}

bool PPCTargetLowering::useSoftFloat() const {
  return Subtarget.useSoftFloat();
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((PPCISD::NodeType)Opcode) {
  case PPCISD::FIRST_NUMBER: break;
  case PPCISD::FSEL: return "PPCISD::FSEL";
  case PPCISD::FCFID: return "PPCISD::FCFID";
  case PPCISD::FCFIDU: return "PPCISD::FCFIDU";
  case PPCISD::FCFIDS: return "PPCISD::FCFIDS";
  case PPCISD::FCFIDUS: return "PPCISD::FCFIDUS";
  case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
  case PPCISD::FCTIDUZ: return "PPCISD::FCTIDUZ";
  case PPCISD::FCTIWUZ: return "PPCISD::FCTIWUZ";
  case PPCISD::FRE: return "PPCISD::FRE";
  case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE";
  case PPCISD::STFIWX: return "PPCISD::STFIWX";
  case PPCISD::VMADDFP: return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM: return "PPCISD::VPERM";
  case PPCISD::XXSPLT: return "PPCISD::XXSPLT";
  case PPCISD::VECINSERT: return "PPCISD::VECINSERT";
  case PPCISD::XXREVERSE: return "PPCISD::XXREVERSE";
  case PPCISD::XXPERMDI: return "PPCISD::XXPERMDI";
  case PPCISD::VECSHL: return "PPCISD::VECSHL";
  case PPCISD::CMPB: return "PPCISD::CMPB";
  case PPCISD::Hi: return "PPCISD::Hi";
  case PPCISD::Lo: return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY";
  case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
  case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
  case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC";
  case PPCISD::DYNAREAOFFSET: return "PPCISD::DYNAREAOFFSET";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL: return "PPCISD::SRL";
  case PPCISD::SRA: return "PPCISD::SRA";
  case PPCISD::SHL: return "PPCISD::SHL";
  case PPCISD::SRA_ADDZE: return "PPCISD::SRA_ADDZE";
  case PPCISD::CALL: return "PPCISD::CALL";
  case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP";
  case PPCISD::MTCTR: return "PPCISD::MTCTR";
  case PPCISD::BCTRL: return "PPCISD::BCTRL";
  case PPCISD::BCTRL_LOAD_TOC: return "PPCISD::BCTRL_LOAD_TOC";
  case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG";
  case PPCISD::READ_TIME_BASE: return "PPCISD::READ_TIME_BASE";
  case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP";
  case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
  case PPCISD::MFOCRF: return "PPCISD::MFOCRF";
  case PPCISD::MFVSR: return "PPCISD::MFVSR";
  case PPCISD::MTVSRA: return "PPCISD::MTVSRA";
  case PPCISD::MTVSRZ: return "PPCISD::MTVSRZ";
  case PPCISD::SINT_VEC_TO_FP: return "PPCISD::SINT_VEC_TO_FP";
  case PPCISD::UINT_VEC_TO_FP: return "PPCISD::UINT_VEC_TO_FP";
  case PPCISD::ANDIo_1_EQ_BIT: return "PPCISD::ANDIo_1_EQ_BIT";
  case PPCISD::ANDIo_1_GT_BIT: return "PPCISD::ANDIo_1_GT_BIT";
  case PPCISD::VCMP: return "PPCISD::VCMP";
  case PPCISD::VCMPo: return "PPCISD::VCMPo";
  case PPCISD::LBRX: return "PPCISD::LBRX";
  case PPCISD::STBRX: return "PPCISD::STBRX";
  case PPCISD::LFIWAX: return "PPCISD::LFIWAX";
  case PPCISD::LFIWZX: return "PPCISD::LFIWZX";
  case PPCISD::LXSIZX: return "PPCISD::LXSIZX";
  case PPCISD::STXSIX: return "PPCISD::STXSIX";
  case PPCISD::VEXTS: return "PPCISD::VEXTS";
  case PPCISD::SExtVElems: return "PPCISD::SExtVElems";
  case PPCISD::LXVD2X: return "PPCISD::LXVD2X";
  case PPCISD::STXVD2X: return "PPCISD::STXVD2X";
  case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH";
  case PPCISD::BDNZ: return "PPCISD::BDNZ";
  case PPCISD::BDZ: return "PPCISD::BDZ";
  case PPCISD::MFFS: return "PPCISD::MFFS";
  case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ";
  case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN";
  case PPCISD::CR6SET: return "PPCISD::CR6SET";
  case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET";
  case PPCISD::PPC32_GOT: return "PPCISD::PPC32_GOT";
  case PPCISD::PPC32_PICGOT: return "PPCISD::PPC32_PICGOT";
  case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
  case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L";
  case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS";
  case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA";
  case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L";
  case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR";
  case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
  case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA";
  case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L";
  case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR";
  case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
  case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
  case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L";
  case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT";
  case PPCISD::SC: return "PPCISD::SC";
  case PPCISD::CLRBHRB: return "PPCISD::CLRBHRB";
  case PPCISD::MFBHRBE: return "PPCISD::MFBHRBE";
  case PPCISD::RFEBB: return "PPCISD::RFEBB";
  case PPCISD::XXSWAPD: return "PPCISD::XXSWAPD";
  case PPCISD::SWAP_NO_CHAIN: return "PPCISD::SWAP_NO_CHAIN";
  case PPCISD::QVFPERM: return "PPCISD::QVFPERM";
  case PPCISD::QVGPCI: return "PPCISD::QVGPCI";
  case PPCISD::QVALIGNI: return "PPCISD::QVALIGNI";
  case PPCISD::QVESPLATI: return "PPCISD::QVESPLATI";
  case PPCISD::QBFLT: return "PPCISD::QBFLT";
  case PPCISD::QVLFSb: return "PPCISD::QVLFSb";
  }
  return nullptr;
}

EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
                                          EVT VT) const {
  if (!VT.isVector())
    return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;

  if (Subtarget.hasQPX())
    return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());

  return VT.changeVectorElementTypeToInteger();
}
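// For example: with CR bits a scalar compare yields MVT::i1, a QPX v4f64
// compare yields a v4i1 mask, and a VSX v2f64 compare yields v2i64.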

bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
  return true;
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 1;
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i),   i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j))
        return false;
  }
  return true;
}
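// For example, the big-endian two-input form above (ShuffleKind 0) expects
// the mask <1, 3, 5, ..., 31>: vpkuhum keeps the low-order byte of each
// halfword from both inputs.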

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+3))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 2;
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1))
        return false;
  }
  return true;
}

/// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
/// current subtarget.
///
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  const PPCSubtarget& Subtarget =
      static_cast<const PPCSubtarget&>(DAG.getSubtarget());
  if (!Subtarget.hasP8Vector())
    return false;

  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+4) ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+5) ||
          !isConstantOrUndef(N->getMaskElt(i+2),  i*2+6) ||
          !isConstantOrUndef(N->getMaskElt(i+3),  i*2+7))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2),  i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3),  i*2+3))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 4;
    for (unsigned i = 0; i != 8; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2),  i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3),  i*2+j+3) ||
          !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}
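// For example, a big-endian VMRGLB of two different inputs (UnitSize == 1,
// LHSStart == 8, RHSStart == 24) interleaves the low halves byte by byte:
// mask <8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31>.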

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  }
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  }
}
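// Note the endian swap above: the little-endian vmrgh checks use the start
// offsets (8/8 and 8/24) that big-endian vmrgl uses, and vice versa, matching
// the swapped operands in PPCInstrAltivec.td.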
///
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  }
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  }
}

/**
 * \brief Common function used to match vmrgew and vmrgow shuffles
 *
 * The indexOffset determines whether to look for even or odd words in
 * the shuffle mask. This is based on the endianness of the target
 * machine.
 * - Little Endian:
 *   - Use offset of 0 to check for odd elements
 *   - Use offset of 4 to check for even elements
 * - Big Endian:
 *   - Use offset of 0 to check for even elements
 *   - Use offset of 4 to check for odd elements
 * A detailed description of the vector element ordering for little endian and
 * big endian can be found at
 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
 * Targeting your applications - what little endian and big endian IBM XL C/C++
 * compiler differences mean to you
 *
 * The mask to the shuffle vector instruction specifies the indices of the
 * elements from the two input vectors to place in the result. The elements
 * are numbered in array-access order, starting with the first vector. These
 * vectors are always of type v16i8, thus each vector will contain 16
 * byte-sized elements. More information on the shufflevector instruction can
 * be found in the LLVM Language Reference:
 * http://llvm.org/docs/LangRef.html#shufflevector-instruction
 *
 * The RHSStartValue indicates whether the same input vectors are used (unary)
 * or two different input vectors are used, based on the following:
 * - If the instruction uses the same vector for both inputs, the range of the
 *   indices will be 0 to 15. In this case, the RHSStart value passed should
 *   be 0.
 * - If the instruction has two different vectors then the range of the
 *   indices will be 0 to 31. In this case, the RHSStart value passed should
 *   be 16 (indices 0-15 specify elements in the first vector while indices 16
 *   to 31 specify elements in the second vector).
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] IndexOffset Specifies whether to look for even or odd elements
 * \param[in] RHSStartValue Specifies the starting index for the righthand input
 * vector to the shuffle_vector instruction
 * \return true iff this shuffle vector represents an even or odd word merge
 */
static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
                     unsigned RHSStartValue) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;

  for (unsigned i = 0; i < 2; ++i)
    for (unsigned j = 0; j < 4; ++j)
      if (!isConstantOrUndef(N->getMaskElt(i*4+j),
                             i*RHSStartValue+j+IndexOffset) ||
          !isConstantOrUndef(N->getMaskElt(i*4+j+8),
                             i*RHSStartValue+j+IndexOffset+8))
        return false;
  return true;
}

/**
 * \brief Determine if the specified shuffle mask is suitable for the vmrgew or
 * vmrgow instructions.
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
 * \param[in] ShuffleKind Identify the type of merge:
 *   - 0 = big-endian merge with two different inputs;
 *   - 1 = either-endian merge with two identical inputs;
 *   - 2 = little-endian merge with two different inputs (inputs are swapped
 *     for little-endian merges).
 * \param[in] DAG The current SelectionDAG
 * \return true iff this shuffle mask is suitable for a vmrgew or vmrgow merge
 * of the requested kind
 */
bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                              unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    unsigned indexOffset = CheckEven ?
4 : 0; 1538 if (ShuffleKind == 1) // Unary 1539 return isVMerge(N, indexOffset, 0); 1540 else if (ShuffleKind == 2) // swapped 1541 return isVMerge(N, indexOffset, 16); 1542 else 1543 return false; 1544 } 1545 else { 1546 unsigned indexOffset = CheckEven ? 0 : 4; 1547 if (ShuffleKind == 1) // Unary 1548 return isVMerge(N, indexOffset, 0); 1549 else if (ShuffleKind == 0) // Normal 1550 return isVMerge(N, indexOffset, 16); 1551 else 1552 return false; 1553 } 1554 return false; 1555 } 1556 1557 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift 1558 /// amount, otherwise return -1. 1559 /// The ShuffleKind distinguishes between big-endian operations with two 1560 /// different inputs (0), either-endian operations with two identical inputs 1561 /// (1), and little-endian operations with two different inputs (2). For the 1562 /// latter, the input operands are swapped (see PPCInstrAltivec.td). 1563 int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind, 1564 SelectionDAG &DAG) { 1565 if (N->getValueType(0) != MVT::v16i8) 1566 return -1; 1567 1568 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 1569 1570 // Find the first non-undef value in the shuffle mask. 1571 unsigned i; 1572 for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i) 1573 /*search*/; 1574 1575 if (i == 16) return -1; // all undef. 1576 1577 // Otherwise, check to see if the rest of the elements are consecutively 1578 // numbered from this value. 1579 unsigned ShiftAmt = SVOp->getMaskElt(i); 1580 if (ShiftAmt < i) return -1; 1581 1582 ShiftAmt -= i; 1583 bool isLE = DAG.getDataLayout().isLittleEndian(); 1584 1585 if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) { 1586 // Check the rest of the elements to see if they are consecutive. 1587 for (++i; i != 16; ++i) 1588 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i)) 1589 return -1; 1590 } else if (ShuffleKind == 1) { 1591 // Check the rest of the elements to see if they are consecutive. 1592 for (++i; i != 16; ++i) 1593 if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15)) 1594 return -1; 1595 } else 1596 return -1; 1597 1598 if (isLE) 1599 ShiftAmt = 16 - ShiftAmt; 1600 1601 return ShiftAmt; 1602 } 1603 1604 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand 1605 /// specifies a splat of a single element that is suitable for input to 1606 /// VSPLTB/VSPLTH/VSPLTW. 1607 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) { 1608 assert(N->getValueType(0) == MVT::v16i8 && 1609 (EltSize == 1 || EltSize == 2 || EltSize == 4)); 1610 1611 // The consecutive indices need to specify an element, not part of two 1612 // different elements. So abandon ship early if this isn't the case. 1613 if (N->getMaskElt(0) % EltSize != 0) 1614 return false; 1615 1616 // This is a splat operation if each element of the permute is the same, and 1617 // if the value doesn't reference the second vector. 1618 unsigned ElementBase = N->getMaskElt(0); 1619 1620 // FIXME: Handle UNDEF elements too! 1621 if (ElementBase >= 16) 1622 return false; 1623 1624 // Check that the indices are consecutive, in the case of a multi-byte element 1625 // splatted with a v16i8 mask. 
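  // For illustration: with EltSize == 4, a splat of word element 1 is the byte
  // mask <4,5,6,7, 4,5,6,7, 4,5,6,7, 4,5,6,7> (later copies of the element may
  // be undef, per the checks below).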
  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
      return false;

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
        return false;
  }
  return true;
}

/// Check that the mask is shuffling N byte elements. Within each N byte
/// element of the mask, the indices could be either in increasing or
/// decreasing order as long as they are consecutive.
/// \param[in] N the shuffle vector SD Node to analyze
/// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/
/// Word/DoubleWord/QuadWord).
/// \param[in] StepLen the step between consecutive indices within an element:
/// 1 if the indices are increasing, -1 if they are decreasing.
/// \return true iff the mask is shuffling N byte elements.
static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
                                   int StepLen) {
  assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
         "Unexpected element width.");
  assert((StepLen == 1 || StepLen == -1) && "Unexpected step length.");

  unsigned NumOfElem = 16 / Width;
  unsigned MaskVal[16]; //  Width is never greater than 16
  for (unsigned i = 0; i < NumOfElem; ++i) {
    MaskVal[0] = N->getMaskElt(i * Width);
    if ((StepLen == 1) && (MaskVal[0] % Width)) {
      return false;
    } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
      return false;
    }

    for (unsigned int j = 1; j < Width; ++j) {
      MaskVal[j] = N->getMaskElt(i * Width + j);
      if (MaskVal[j] != MaskVal[j-1] + StepLen) {
        return false;
      }
    }
  }

  return true;
}

bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                          unsigned &InsertAtByte, bool &Swap, bool IsLE) {
  if (!isNByteElemShuffleMask(N, 4, 1))
    return false;

  // Now we look at mask elements 0,4,8,12
  unsigned M0 = N->getMaskElt(0) / 4;
  unsigned M1 = N->getMaskElt(4) / 4;
  unsigned M2 = N->getMaskElt(8) / 4;
  unsigned M3 = N->getMaskElt(12) / 4;
  unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
  unsigned BigEndianShifts[] = { 3, 0, 1, 2 };

  // Below, let H and L be arbitrary elements of the shuffle mask
  // where H is in the range [4,7] and L is in the range [0,3].
  // H, 1, 2, 3 or L, 5, 6, 7
  if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
      (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
    InsertAtByte = IsLE ? 12 : 0;
    Swap = M0 < 4;
    return true;
  }
  // 0, H, 2, 3 or 4, L, 6, 7
  if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
      (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
    InsertAtByte = IsLE ? 8 : 4;
    Swap = M1 < 4;
    return true;
  }
  // 0, 1, H, 3 or 4, 5, L, 7
  if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
      (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
    InsertAtByte = IsLE ?
4 : 8; 1711 Swap = M2 < 4; 1712 return true; 1713 } 1714 // 0, 1, 2, H or 4, 5, 6, L 1715 if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) || 1716 (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) { 1717 ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3]; 1718 InsertAtByte = IsLE ? 0 : 12; 1719 Swap = M3 < 4; 1720 return true; 1721 } 1722 1723 // If both vector operands for the shuffle are the same vector, the mask will 1724 // contain only elements from the first one and the second one will be undef. 1725 if (N->getOperand(1).isUndef()) { 1726 ShiftElts = 0; 1727 Swap = true; 1728 unsigned XXINSERTWSrcElem = IsLE ? 2 : 1; 1729 if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) { 1730 InsertAtByte = IsLE ? 12 : 0; 1731 return true; 1732 } 1733 if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) { 1734 InsertAtByte = IsLE ? 8 : 4; 1735 return true; 1736 } 1737 if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) { 1738 InsertAtByte = IsLE ? 4 : 8; 1739 return true; 1740 } 1741 if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) { 1742 InsertAtByte = IsLE ? 0 : 12; 1743 return true; 1744 } 1745 } 1746 1747 return false; 1748 } 1749 1750 bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, 1751 bool &Swap, bool IsLE) { 1752 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8"); 1753 // Ensure each byte index of the word is consecutive. 1754 if (!isNByteElemShuffleMask(N, 4, 1)) 1755 return false; 1756 1757 // Now we look at mask elements 0,4,8,12, which are the beginning of words. 1758 unsigned M0 = N->getMaskElt(0) / 4; 1759 unsigned M1 = N->getMaskElt(4) / 4; 1760 unsigned M2 = N->getMaskElt(8) / 4; 1761 unsigned M3 = N->getMaskElt(12) / 4; 1762 1763 // If both vector operands for the shuffle are the same vector, the mask will 1764 // contain only elements from the first one and the second one will be undef. 1765 if (N->getOperand(1).isUndef()) { 1766 assert(M0 < 4 && "Indexing into an undef vector?"); 1767 if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4) 1768 return false; 1769 1770 ShiftElts = IsLE ? (4 - M0) % 4 : M0; 1771 Swap = false; 1772 return true; 1773 } 1774 1775 // Ensure each word index of the ShuffleVector Mask is consecutive. 1776 if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8) 1777 return false; 1778 1779 if (IsLE) { 1780 if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) { 1781 // Input vectors don't need to be swapped if the leading element 1782 // of the result is one of the 3 left elements of the second vector 1783 // (or if there is no shift to be done at all). 1784 Swap = false; 1785 ShiftElts = (8 - M0) % 8; 1786 } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) { 1787 // Input vectors need to be swapped if the leading element 1788 // of the result is one of the 3 left elements of the first vector 1789 // (or if we're shifting by 4 - thereby simply swapping the vectors). 1790 Swap = true; 1791 ShiftElts = (4 - M0) % 4; 1792 } 1793 1794 return true; 1795 } else { // BE 1796 if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) { 1797 // Input vectors don't need to be swapped if the leading element 1798 // of the result is one of the 4 elements of the first vector. 1799 Swap = false; 1800 ShiftElts = M0; 1801 } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) { 1802 // Input vectors need to be swapped if the leading element 1803 // of the result is one of the 4 elements of the right vector. 
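  // (For illustration: a leading word index M0 of 5 means the result starts at
  // word 1 of the second input, so the inputs are swapped and the shift is one
  // word.)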
1804 Swap = true; 1805 ShiftElts = M0 - 4; 1806 } 1807 1808 return true; 1809 } 1810 } 1811 1812 bool static isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) { 1813 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8"); 1814 1815 if (!isNByteElemShuffleMask(N, Width, -1)) 1816 return false; 1817 1818 for (int i = 0; i < 16; i += Width) 1819 if (N->getMaskElt(i) != i + Width - 1) 1820 return false; 1821 1822 return true; 1823 } 1824 1825 bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) { 1826 return isXXBRShuffleMaskHelper(N, 2); 1827 } 1828 1829 bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) { 1830 return isXXBRShuffleMaskHelper(N, 4); 1831 } 1832 1833 bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) { 1834 return isXXBRShuffleMaskHelper(N, 8); 1835 } 1836 1837 bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) { 1838 return isXXBRShuffleMaskHelper(N, 16); 1839 } 1840 1841 /// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap 1842 /// if the inputs to the instruction should be swapped and set \p DM to the 1843 /// value for the immediate. 1844 /// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI 1845 /// AND element 0 of the result comes from the first input (LE) or second input 1846 /// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered. 1847 /// \return true iff the given mask of shuffle node \p N is a XXPERMDI shuffle 1848 /// mask. 1849 bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM, 1850 bool &Swap, bool IsLE) { 1851 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8"); 1852 1853 // Ensure each byte index of the double word is consecutive. 1854 if (!isNByteElemShuffleMask(N, 8, 1)) 1855 return false; 1856 1857 unsigned M0 = N->getMaskElt(0) / 8; 1858 unsigned M1 = N->getMaskElt(8) / 8; 1859 assert(((M0 | M1) < 4) && "A mask element out of bounds?"); 1860 1861 // If both vector operands for the shuffle are the same vector, the mask will 1862 // contain only elements from the first one and the second one will be undef. 1863 if (N->getOperand(1).isUndef()) { 1864 if ((M0 | M1) < 2) { 1865 DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1); 1866 Swap = false; 1867 return true; 1868 } else 1869 return false; 1870 } 1871 1872 if (IsLE) { 1873 if (M0 > 1 && M1 < 2) { 1874 Swap = false; 1875 } else if (M0 < 2 && M1 > 1) { 1876 M0 = (M0 + 2) % 4; 1877 M1 = (M1 + 2) % 4; 1878 Swap = true; 1879 } else 1880 return false; 1881 1882 // Note: if control flow comes here that means Swap is already set above 1883 DM = (((~M1) & 1) << 1) + ((~M0) & 1); 1884 return true; 1885 } else { // BE 1886 if (M0 < 2 && M1 > 1) { 1887 Swap = false; 1888 } else if (M0 > 1 && M1 < 2) { 1889 M0 = (M0 + 2) % 4; 1890 M1 = (M1 + 2) % 4; 1891 Swap = true; 1892 } else 1893 return false; 1894 1895 // Note: if control flow comes here that means Swap is already set above 1896 DM = (M0 << 1) + (M1 & 1); 1897 return true; 1898 } 1899 } 1900 1901 1902 /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the 1903 /// specified isSplatShuffleMask VECTOR_SHUFFLE mask. 
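/// For illustration: for the word-splat mask <4,5,6,7, 4,5,6,7, ...> with
/// EltSize == 4, this returns 1 on a big-endian target and 2 on a
/// little-endian target.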
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize,
                                SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  assert(isSplatShuffleMask(SVOp, EltSize));
  if (DAG.getDataLayout().isLittleEndian())
    return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
  else
    return SVOp->getMaskElt(0) / EltSize;
}

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted.  The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDValue OpVal(nullptr, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
    SDValue UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across each chunk.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).isUndef()) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();

      if (!UniquedVals[i&(Multiple-1)].getNode())
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDValue();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk.  See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1.  If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (!UniquedVals[i].getNode()) continue;  // Must have been undefs.

      LeadingZero &= isNullConstant(UniquedVals[i]);
      LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(0, SDLoc(N), MVT::i32);  // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
      if (Val < 16)                                  // 0,0,0,4 -> vspltisw(4)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }
    if (LeadingOnes) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
      if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }

    return SDValue();
  }

  // Check to see if this buildvec has a single non-undef value in its
  // elements.
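  // For illustration: a v4i32 build_vector of four constant 7s takes this
  // single-value path (EltSize == ByteSize == 4) and yields the target
  // constant 7, i.e. a "vspltisw 7".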
1977 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 1978 if (N->getOperand(i).isUndef()) continue; 1979 if (!OpVal.getNode()) 1980 OpVal = N->getOperand(i); 1981 else if (OpVal != N->getOperand(i)) 1982 return SDValue(); 1983 } 1984 1985 if (!OpVal.getNode()) return SDValue(); // All UNDEF: use implicit def. 1986 1987 unsigned ValSizeInBytes = EltSize; 1988 uint64_t Value = 0; 1989 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) { 1990 Value = CN->getZExtValue(); 1991 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) { 1992 assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!"); 1993 Value = FloatToBits(CN->getValueAPF().convertToFloat()); 1994 } 1995 1996 // If the splat value is larger than the element value, then we can never do 1997 // this splat. The only case that we could fit the replicated bits into our 1998 // immediate field for would be zero, and we prefer to use vxor for it. 1999 if (ValSizeInBytes < ByteSize) return SDValue(); 2000 2001 // If the element value is larger than the splat value, check if it consists 2002 // of a repeated bit pattern of size ByteSize. 2003 if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8)) 2004 return SDValue(); 2005 2006 // Properly sign extend the value. 2007 int MaskVal = SignExtend32(Value, ByteSize * 8); 2008 2009 // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros. 2010 if (MaskVal == 0) return SDValue(); 2011 2012 // Finally, if this value fits in a 5 bit sext field, return it 2013 if (SignExtend32<5>(MaskVal) == MaskVal) 2014 return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32); 2015 return SDValue(); 2016 } 2017 2018 /// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift 2019 /// amount, otherwise return -1. 2020 int PPC::isQVALIGNIShuffleMask(SDNode *N) { 2021 EVT VT = N->getValueType(0); 2022 if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1) 2023 return -1; 2024 2025 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 2026 2027 // Find the first non-undef value in the shuffle mask. 2028 unsigned i; 2029 for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i) 2030 /*search*/; 2031 2032 if (i == 4) return -1; // all undef. 2033 2034 // Otherwise, check to see if the rest of the elements are consecutively 2035 // numbered from this value. 2036 unsigned ShiftAmt = SVOp->getMaskElt(i); 2037 if (ShiftAmt < i) return -1; 2038 ShiftAmt -= i; 2039 2040 // Check the rest of the elements to see if they are consecutive. 2041 for (++i; i != 4; ++i) 2042 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i)) 2043 return -1; 2044 2045 return ShiftAmt; 2046 } 2047 2048 //===----------------------------------------------------------------------===// 2049 // Addressing Mode Selection 2050 //===----------------------------------------------------------------------===// 2051 2052 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit 2053 /// or 64-bit immediate, and if the value can be accurately represented as a 2054 /// sign extension from a 16-bit value. If so, this returns true and the 2055 /// immediate. 
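/// For illustration: a constant of 32767 is accepted (Imm = 32767), while
/// 32768 is rejected because it truncates to -32768 as an int16_t.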
bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
  if (!isa<ConstantSDNode>(N))
    return false;

  Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}
bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
  return isIntS16Immediate(Op.getNode(), Imm);
}

/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation.  Returns false if it
/// can be more efficiently represented with [r+imm].
bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
                                            SDValue &Index,
                                            SelectionDAG &DAG) const {
  int16_t imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false;    // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i can be folded.

    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are
    // provably disjoint.
    KnownBits LHSKnown, RHSKnown;
    DAG.computeKnownBits(N.getOperand(0), LHSKnown);

    if (LHSKnown.Zero.getBoolValue()) {
      DAG.computeKnownBits(N.getOperand(1), RHSKnown);
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // carry.
      if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}

// If we happen to be doing an i64 load or store into a stack slot that has
// less than a 4-byte alignment, then the frame-index elimination may need to
// use an indexed load or store instruction (because the offset may not be a
// multiple of 4). The extra register needed to hold the offset comes from the
// register scavenger, and it is possible that the scavenger will need to use
// an emergency spill slot. As a result, we need to make sure that a spill slot
// is allocated when doing an i64 load/store into a less-than-4-byte-aligned
// stack slot.
static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
  // FIXME: This does not handle the LWA case.
  if (VT != MVT::i64)
    return;

  // NOTE: We'll exclude negative FIs here, which come from argument
  // lowering, because there are no known test cases triggering this problem
  // using packed structures (or similar). We can remove this exclusion if
  // we find such a test case. The reason why this is so test-case driven is
  // because this entire 'fixup' is only to prevent crashes (from the
  // register scavenger) on not-really-valid inputs. For example, if we have:
  //   %a = alloca i1
  //   %b = bitcast i1* %a to i64*
  //   store i64 %val, i64* %b
  // then the store should really be marked as 'align 1', but is not. If it
  // were marked as 'align 1' then the indexed form would have been
  // instruction-selected initially, and the problem this 'fixup' is preventing
  // won't happen regardless.
2137 if (FrameIdx < 0) 2138 return; 2139 2140 MachineFunction &MF = DAG.getMachineFunction(); 2141 MachineFrameInfo &MFI = MF.getFrameInfo(); 2142 2143 unsigned Align = MFI.getObjectAlignment(FrameIdx); 2144 if (Align >= 4) 2145 return; 2146 2147 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2148 FuncInfo->setHasNonRISpills(); 2149 } 2150 2151 /// Returns true if the address N can be represented by a base register plus 2152 /// a signed 16-bit displacement [r+imm], and if it is not better 2153 /// represented as reg+reg. If \p Alignment is non-zero, only accept 2154 /// displacements that are multiples of that value. 2155 bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp, 2156 SDValue &Base, 2157 SelectionDAG &DAG, 2158 unsigned Alignment) const { 2159 // FIXME dl should come from parent load or store, not from address 2160 SDLoc dl(N); 2161 // If this can be more profitably realized as r+r, fail. 2162 if (SelectAddressRegReg(N, Disp, Base, DAG)) 2163 return false; 2164 2165 if (N.getOpcode() == ISD::ADD) { 2166 int16_t imm = 0; 2167 if (isIntS16Immediate(N.getOperand(1), imm) && 2168 (!Alignment || (imm % Alignment) == 0)) { 2169 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 2170 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 2171 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 2172 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 2173 } else { 2174 Base = N.getOperand(0); 2175 } 2176 return true; // [r+i] 2177 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) { 2178 // Match LOAD (ADD (X, Lo(G))). 2179 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue() 2180 && "Cannot handle constant offsets yet!"); 2181 Disp = N.getOperand(1).getOperand(0); // The global address. 2182 assert(Disp.getOpcode() == ISD::TargetGlobalAddress || 2183 Disp.getOpcode() == ISD::TargetGlobalTLSAddress || 2184 Disp.getOpcode() == ISD::TargetConstantPool || 2185 Disp.getOpcode() == ISD::TargetJumpTable); 2186 Base = N.getOperand(0); 2187 return true; // [&g+r] 2188 } 2189 } else if (N.getOpcode() == ISD::OR) { 2190 int16_t imm = 0; 2191 if (isIntS16Immediate(N.getOperand(1), imm) && 2192 (!Alignment || (imm % Alignment) == 0)) { 2193 // If this is an or of disjoint bitfields, we can codegen this as an add 2194 // (for better address arithmetic) if the LHS and RHS of the OR are 2195 // provably disjoint. 2196 KnownBits LHSKnown; 2197 DAG.computeKnownBits(N.getOperand(0), LHSKnown); 2198 2199 if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) { 2200 // If all of the bits are known zero on the LHS or RHS, the add won't 2201 // carry. 2202 if (FrameIndexSDNode *FI = 2203 dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 2204 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 2205 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 2206 } else { 2207 Base = N.getOperand(0); 2208 } 2209 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 2210 return true; 2211 } 2212 } 2213 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { 2214 // Loading from a constant address. 2215 2216 // If this address fits entirely in a 16-bit sext immediate field, codegen 2217 // this as "d, 0" 2218 int16_t Imm; 2219 if (isIntS16Immediate(CN, Imm) && (!Alignment || (Imm % Alignment) == 0)) { 2220 Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0)); 2221 Base = DAG.getRegister(Subtarget.isPPC64() ? 
                             PPC::ZERO8 : PPC::ZERO,
                             CN->getValueType(0));
      return true;
    }

    // Handle 32-bit sext immediates with LIS + addr mode.
    if ((CN->getValueType(0) == MVT::i32 ||
         (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
        (!Alignment || (CN->getZExtValue() % Alignment) == 0)) {
      int Addr = (int)CN->getZExtValue();

      // Otherwise, break this down into an LIS + disp.
      Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);

      Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
                                   MVT::i32);
      unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
      Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
    fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
  } else
    Base = N;
  return true;      // [r+0]
}

/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
                                                SDValue &Index,
                                                SelectionDAG &DAG) const {
  // Check to see if we can easily represent this as an [r+r] address.  This
  // will fail if it thinks that the address is more profitably represented as
  // reg+imm, e.g. where imm = 0.
  if (SelectAddressRegReg(N, Base, Index, DAG))
    return true;

  // If the address is the result of an add, we will utilize the fact that the
  // address calculation includes an implicit add.  However, we can reduce
  // register pressure if we do not materialize a constant just for use as the
  // index register.  We only get rid of the add if it is not an add of a
  // value and a 16-bit signed constant where both operands have a single use.
  int16_t imm = 0;
  if (N.getOpcode() == ISD::ADD &&
      (!isIntS16Immediate(N.getOperand(1), imm) ||
       !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

  // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                         N.getValueType());
  Index = N;
  return true;
}

/// getPreIndexedAddressParts - Returns true (by value), and sets the base
/// pointer, the offset pointer, and the addressing mode (by reference), if
/// the node's address can be legally represented as a pre-indexed load /
/// store address.
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  if (DisablePPCPreinc) return false;

  bool isLoad = true;
  SDValue Ptr;
  EVT VT;
  unsigned Alignment;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    Alignment = LD->getAlignment();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
    Alignment = ST->getAlignment();
    isLoad = false;
  } else
    return false;

  // PowerPC doesn't have preinc load/store instructions for vectors (except
  // for QPX, which does have preinc r+r forms).
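  // For background: a pre-increment form such as "lwzu r5, 4(r6)" loads the
  // word at r6+4 and then writes the updated address back into r6.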
  if (VT.isVector()) {
    if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) {
      return false;
    } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) {
      AM = ISD::PRE_INC;
      return true;
    }
  }

  if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
    // Common code will reject creating a pre-inc form if the base pointer
    // is a frame index, or if N is a store and the base pointer is either
    // the same as or a predecessor of the value being stored.  Check for
    // those situations here, and try with swapped Base/Offset instead.
    bool Swap = false;

    if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
      Swap = true;
    else if (!isLoad) {
      SDValue Val = cast<StoreSDNode>(N)->getValue();
      if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
        Swap = true;
    }

    if (Swap)
      std::swap(Base, Offset);

    AM = ISD::PRE_INC;
    return true;
  }

  // LDU/STU can only handle immediates that are a multiple of 4.
  if (VT != MVT::i64) {
    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 0))
      return false;
  } else {
    // LDU/STU need an address with at least 4-byte alignment.
    if (Alignment < 4)
      return false;

    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 4))
      return false;
  }

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
    // sext i32 to i64 when addr mode is r+i.
    if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
        LD->getExtensionType() == ISD::SEXTLOAD &&
        isa<ConstantSDNode>(Offset))
      return false;
  }

  AM = ISD::PRE_INC;
  return true;
}

//===----------------------------------------------------------------------===//
//  LowerOperation implementation
//===----------------------------------------------------------------------===//

/// Set HiOpFlags and LoOpFlags to the target MO flags to use for a label
/// reference, given whether the code is being generated as PIC.
static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
                               unsigned &HiOpFlags, unsigned &LoOpFlags,
                               const GlobalValue *GV = nullptr) {
  HiOpFlags = PPCII::MO_HA;
  LoOpFlags = PPCII::MO_LO;

  // Don't use the pic base if not in PIC relocation model.
  if (IsPIC) {
    HiOpFlags |= PPCII::MO_PIC_FLAG;
    LoOpFlags |= PPCII::MO_PIC_FLAG;
  }

  // If this is a reference to a global value that requires a non-lazy-ptr,
  // make sure that instruction lowering adds it.
  if (GV && Subtarget.hasLazyResolverStub(GV)) {
    HiOpFlags |= PPCII::MO_NLP_FLAG;
    LoOpFlags |= PPCII::MO_NLP_FLAG;

    if (GV->hasHiddenVisibility()) {
      HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
      LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
    }
  }
}

static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
                             SelectionDAG &DAG) {
  SDLoc DL(HiPart);
  EVT PtrVT = HiPart.getValueType();
  SDValue Zero = DAG.getConstant(0, DL, PtrVT);

  SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
  SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);

  // With PIC, the first instruction is actually "GR+hi(&G)".
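  // (A sketch of the eventual non-PIC code, with illustrative registers and
  // symbol:
  //   lis r3, sym@ha
  //   addi r3, r3, sym@l
  // With PIC, the PIC base register is added into the high part first.)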
2409 if (isPIC) 2410 Hi = DAG.getNode(ISD::ADD, DL, PtrVT, 2411 DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi); 2412 2413 // Generate non-pic code that has direct accesses to the constant pool. 2414 // The address of the global is just (hi(&g)+lo(&g)). 2415 return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo); 2416 } 2417 2418 static void setUsesTOCBasePtr(MachineFunction &MF) { 2419 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2420 FuncInfo->setUsesTOCBasePtr(); 2421 } 2422 2423 static void setUsesTOCBasePtr(SelectionDAG &DAG) { 2424 setUsesTOCBasePtr(DAG.getMachineFunction()); 2425 } 2426 2427 static SDValue getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, bool Is64Bit, 2428 SDValue GA) { 2429 EVT VT = Is64Bit ? MVT::i64 : MVT::i32; 2430 SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT) : 2431 DAG.getNode(PPCISD::GlobalBaseReg, dl, VT); 2432 2433 SDValue Ops[] = { GA, Reg }; 2434 return DAG.getMemIntrinsicNode( 2435 PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT, 2436 MachinePointerInfo::getGOT(DAG.getMachineFunction()), 0, 2437 MachineMemOperand::MOLoad); 2438 } 2439 2440 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op, 2441 SelectionDAG &DAG) const { 2442 EVT PtrVT = Op.getValueType(); 2443 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 2444 const Constant *C = CP->getConstVal(); 2445 2446 // 64-bit SVR4 ABI code is always position-independent. 2447 // The actual address of the GlobalValue is stored in the TOC. 2448 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2449 setUsesTOCBasePtr(DAG); 2450 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0); 2451 return getTOCEntry(DAG, SDLoc(CP), true, GA); 2452 } 2453 2454 unsigned MOHiFlag, MOLoFlag; 2455 bool IsPIC = isPositionIndependent(); 2456 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2457 2458 if (IsPIC && Subtarget.isSVR4ABI()) { 2459 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 2460 PPCII::MO_PIC_FLAG); 2461 return getTOCEntry(DAG, SDLoc(CP), false, GA); 2462 } 2463 2464 SDValue CPIHi = 2465 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag); 2466 SDValue CPILo = 2467 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag); 2468 return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG); 2469 } 2470 2471 // For 64-bit PowerPC, prefer the more compact relative encodings. 2472 // This trades 32 bits per jump table entry for one or two instructions 2473 // on the jump site. 
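// For illustration, an EK_LabelDifference32 entry is emitted as a 32-bit
// difference roughly of the form ".long .LBB0_3-.LJTI0_0" (labels
// hypothetical), rather than as a pointer-sized absolute address.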
2474 unsigned PPCTargetLowering::getJumpTableEncoding() const { 2475 if (isJumpTableRelative()) 2476 return MachineJumpTableInfo::EK_LabelDifference32; 2477 2478 return TargetLowering::getJumpTableEncoding(); 2479 } 2480 2481 bool PPCTargetLowering::isJumpTableRelative() const { 2482 if (Subtarget.isPPC64()) 2483 return true; 2484 return TargetLowering::isJumpTableRelative(); 2485 } 2486 2487 SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table, 2488 SelectionDAG &DAG) const { 2489 if (!Subtarget.isPPC64()) 2490 return TargetLowering::getPICJumpTableRelocBase(Table, DAG); 2491 2492 switch (getTargetMachine().getCodeModel()) { 2493 case CodeModel::Small: 2494 case CodeModel::Medium: 2495 return TargetLowering::getPICJumpTableRelocBase(Table, DAG); 2496 default: 2497 return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(), 2498 getPointerTy(DAG.getDataLayout())); 2499 } 2500 } 2501 2502 const MCExpr * 2503 PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF, 2504 unsigned JTI, 2505 MCContext &Ctx) const { 2506 if (!Subtarget.isPPC64()) 2507 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); 2508 2509 switch (getTargetMachine().getCodeModel()) { 2510 case CodeModel::Small: 2511 case CodeModel::Medium: 2512 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); 2513 default: 2514 return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx); 2515 } 2516 } 2517 2518 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 2519 EVT PtrVT = Op.getValueType(); 2520 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 2521 2522 // 64-bit SVR4 ABI code is always position-independent. 2523 // The actual address of the GlobalValue is stored in the TOC. 2524 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2525 setUsesTOCBasePtr(DAG); 2526 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 2527 return getTOCEntry(DAG, SDLoc(JT), true, GA); 2528 } 2529 2530 unsigned MOHiFlag, MOLoFlag; 2531 bool IsPIC = isPositionIndependent(); 2532 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2533 2534 if (IsPIC && Subtarget.isSVR4ABI()) { 2535 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, 2536 PPCII::MO_PIC_FLAG); 2537 return getTOCEntry(DAG, SDLoc(GA), false, GA); 2538 } 2539 2540 SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag); 2541 SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag); 2542 return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG); 2543 } 2544 2545 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op, 2546 SelectionDAG &DAG) const { 2547 EVT PtrVT = Op.getValueType(); 2548 BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op); 2549 const BlockAddress *BA = BASDN->getBlockAddress(); 2550 2551 // 64-bit SVR4 ABI code is always position-independent. 2552 // The actual BlockAddress is stored in the TOC. 
2553 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2554 setUsesTOCBasePtr(DAG); 2555 SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()); 2556 return getTOCEntry(DAG, SDLoc(BASDN), true, GA); 2557 } 2558 2559 unsigned MOHiFlag, MOLoFlag; 2560 bool IsPIC = isPositionIndependent(); 2561 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2562 SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag); 2563 SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag); 2564 return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG); 2565 } 2566 2567 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, 2568 SelectionDAG &DAG) const { 2569 // FIXME: TLS addresses currently use medium model code sequences, 2570 // which is the most useful form. Eventually support for small and 2571 // large models could be added if users need it, at the cost of 2572 // additional complexity. 2573 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2574 if (DAG.getTarget().Options.EmulatedTLS) 2575 return LowerToTLSEmulatedModel(GA, DAG); 2576 2577 SDLoc dl(GA); 2578 const GlobalValue *GV = GA->getGlobal(); 2579 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2580 bool is64bit = Subtarget.isPPC64(); 2581 const Module *M = DAG.getMachineFunction().getFunction().getParent(); 2582 PICLevel::Level picLevel = M->getPICLevel(); 2583 2584 TLSModel::Model Model = getTargetMachine().getTLSModel(GV); 2585 2586 if (Model == TLSModel::LocalExec) { 2587 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2588 PPCII::MO_TPREL_HA); 2589 SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2590 PPCII::MO_TPREL_LO); 2591 SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64) 2592 : DAG.getRegister(PPC::R2, MVT::i32); 2593 2594 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg); 2595 return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi); 2596 } 2597 2598 if (Model == TLSModel::InitialExec) { 2599 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2600 SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2601 PPCII::MO_TLS); 2602 SDValue GOTPtr; 2603 if (is64bit) { 2604 setUsesTOCBasePtr(DAG); 2605 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2606 GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, 2607 PtrVT, GOTReg, TGA); 2608 } else 2609 GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT); 2610 SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, 2611 PtrVT, TGA, GOTPtr); 2612 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS); 2613 } 2614 2615 if (Model == TLSModel::GeneralDynamic) { 2616 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2617 SDValue GOTPtr; 2618 if (is64bit) { 2619 setUsesTOCBasePtr(DAG); 2620 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2621 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT, 2622 GOTReg, TGA); 2623 } else { 2624 if (picLevel == PICLevel::SmallPIC) 2625 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2626 else 2627 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2628 } 2629 return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT, 2630 GOTPtr, TGA, TGA); 2631 } 2632 2633 if (Model == TLSModel::LocalDynamic) { 2634 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2635 SDValue GOTPtr; 2636 if (is64bit) { 2637 setUsesTOCBasePtr(DAG); 2638 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2639 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT, 2640 GOTReg, TGA); 2641 } else { 2642 if (picLevel == 
PICLevel::SmallPIC) 2643 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2644 else 2645 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2646 } 2647 SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl, 2648 PtrVT, GOTPtr, TGA, TGA); 2649 SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl, 2650 PtrVT, TLSAddr, TGA); 2651 return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA); 2652 } 2653 2654 llvm_unreachable("Unknown TLS model!"); 2655 } 2656 2657 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op, 2658 SelectionDAG &DAG) const { 2659 EVT PtrVT = Op.getValueType(); 2660 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); 2661 SDLoc DL(GSDN); 2662 const GlobalValue *GV = GSDN->getGlobal(); 2663 2664 // 64-bit SVR4 ABI code is always position-independent. 2665 // The actual address of the GlobalValue is stored in the TOC. 2666 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2667 setUsesTOCBasePtr(DAG); 2668 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset()); 2669 return getTOCEntry(DAG, DL, true, GA); 2670 } 2671 2672 unsigned MOHiFlag, MOLoFlag; 2673 bool IsPIC = isPositionIndependent(); 2674 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV); 2675 2676 if (IsPIC && Subtarget.isSVR4ABI()) { 2677 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 2678 GSDN->getOffset(), 2679 PPCII::MO_PIC_FLAG); 2680 return getTOCEntry(DAG, DL, false, GA); 2681 } 2682 2683 SDValue GAHi = 2684 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag); 2685 SDValue GALo = 2686 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag); 2687 2688 SDValue Ptr = LowerLabelRef(GAHi, GALo, IsPIC, DAG); 2689 2690 // If the global reference is actually to a non-lazy-pointer, we have to do an 2691 // extra load to get the address of the global. 2692 if (MOHiFlag & PPCII::MO_NLP_FLAG) 2693 Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo()); 2694 return Ptr; 2695 } 2696 2697 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 2698 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 2699 SDLoc dl(Op); 2700 2701 if (Op.getValueType() == MVT::v2i64) { 2702 // When the operands themselves are v2i64 values, we need to do something 2703 // special because VSX has no underlying comparison operations for these. 2704 if (Op.getOperand(0).getValueType() == MVT::v2i64) { 2705 // Equality can be handled by casting to the legal type for Altivec 2706 // comparisons, everything else needs to be expanded. 2707 if (CC == ISD::SETEQ || CC == ISD::SETNE) { 2708 return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, 2709 DAG.getSetCC(dl, MVT::v4i32, 2710 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)), 2711 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)), 2712 CC)); 2713 } 2714 2715 return SDValue(); 2716 } 2717 2718 // We handle most of these in the usual way. 2719 return Op; 2720 } 2721 2722 // If we're comparing for equality to zero, expose the fact that this is 2723 // implemented as a ctlz/srl pair on ppc, so that the dag combiner can 2724 // fold the new nodes. 2725 if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG)) 2726 return V; 2727 2728 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2729 // Leave comparisons against 0 and -1 alone for now, since they're usually 2730 // optimized. FIXME: revisit this when we can custom lower all setcc 2731 // optimizations. 
2732 if (C->isAllOnesValue() || C->isNullValue()) 2733 return SDValue(); 2734 } 2735 2736 // If we have an integer seteq/setne, turn it into a compare against zero 2737 // by xor'ing the rhs with the lhs, which is faster than setting a 2738 // condition register, reading it back out, and masking the correct bit. The 2739 // normal approach here uses sub to do this instead of xor. Using xor exposes 2740 // the result to other bit-twiddling opportunities. 2741 EVT LHSVT = Op.getOperand(0).getValueType(); 2742 if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) { 2743 EVT VT = Op.getValueType(); 2744 SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0), 2745 Op.getOperand(1)); 2746 return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC); 2747 } 2748 return SDValue(); 2749 } 2750 2751 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { 2752 SDNode *Node = Op.getNode(); 2753 EVT VT = Node->getValueType(0); 2754 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2755 SDValue InChain = Node->getOperand(0); 2756 SDValue VAListPtr = Node->getOperand(1); 2757 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 2758 SDLoc dl(Node); 2759 2760 assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only"); 2761 2762 // gpr_index 2763 SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 2764 VAListPtr, MachinePointerInfo(SV), MVT::i8); 2765 InChain = GprIndex.getValue(1); 2766 2767 if (VT == MVT::i64) { 2768 // Check if GprIndex is even 2769 SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex, 2770 DAG.getConstant(1, dl, MVT::i32)); 2771 SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd, 2772 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE); 2773 SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex, 2774 DAG.getConstant(1, dl, MVT::i32)); 2775 // Align GprIndex to be even if it isn't 2776 GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne, 2777 GprIndex); 2778 } 2779 2780 // fpr index is 1 byte after gpr 2781 SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2782 DAG.getConstant(1, dl, MVT::i32)); 2783 2784 // fpr 2785 SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 2786 FprPtr, MachinePointerInfo(SV), MVT::i8); 2787 InChain = FprIndex.getValue(1); 2788 2789 SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2790 DAG.getConstant(8, dl, MVT::i32)); 2791 2792 SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2793 DAG.getConstant(4, dl, MVT::i32)); 2794 2795 // areas 2796 SDValue OverflowArea = 2797 DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo()); 2798 InChain = OverflowArea.getValue(1); 2799 2800 SDValue RegSaveArea = 2801 DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo()); 2802 InChain = RegSaveArea.getValue(1); 2803 2804 // select overflow_area if index > 8 2805 SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex, 2806 DAG.getConstant(8, dl, MVT::i32), ISD::SETLT); 2807 2808 // adjustment constant gpr_index * 4/8 2809 SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32, 2810 VT.isInteger() ? GprIndex : FprIndex, 2811 DAG.getConstant(VT.isInteger() ? 
4 : 8, dl, 2812 MVT::i32)); 2813 2814 // OurReg = RegSaveArea + RegConstant 2815 SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea, 2816 RegConstant); 2817 2818 // Floating types are 32 bytes into RegSaveArea 2819 if (VT.isFloatingPoint()) 2820 OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg, 2821 DAG.getConstant(32, dl, MVT::i32)); 2822 2823 // increase {f,g}pr_index by 1 (or 2 if VT is i64) 2824 SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32, 2825 VT.isInteger() ? GprIndex : FprIndex, 2826 DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl, 2827 MVT::i32)); 2828 2829 InChain = DAG.getTruncStore(InChain, dl, IndexPlus1, 2830 VT.isInteger() ? VAListPtr : FprPtr, 2831 MachinePointerInfo(SV), MVT::i8); 2832 2833 // determine if we should load from reg_save_area or overflow_area 2834 SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea); 2835 2836 // increase overflow_area by 4/8 if gpr/fpr > 8 2837 SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea, 2838 DAG.getConstant(VT.isInteger() ? 4 : 8, 2839 dl, MVT::i32)); 2840 2841 OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea, 2842 OverflowAreaPlusN); 2843 2844 InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr, 2845 MachinePointerInfo(), MVT::i32); 2846 2847 return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo()); 2848 } 2849 2850 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const { 2851 assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only"); 2852 2853 // We have to copy the entire va_list struct: 2854 // 2*sizeof(char) + 2 Byte alignment + 2*sizeof(char*) = 12 Byte 2855 return DAG.getMemcpy(Op.getOperand(0), Op, 2856 Op.getOperand(1), Op.getOperand(2), 2857 DAG.getConstant(12, SDLoc(Op), MVT::i32), 8, false, true, 2858 false, MachinePointerInfo(), MachinePointerInfo()); 2859 } 2860 2861 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op, 2862 SelectionDAG &DAG) const { 2863 return Op.getOperand(0); 2864 } 2865 2866 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, 2867 SelectionDAG &DAG) const { 2868 SDValue Chain = Op.getOperand(0); 2869 SDValue Trmp = Op.getOperand(1); // trampoline 2870 SDValue FPtr = Op.getOperand(2); // nested function 2871 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 2872 SDLoc dl(Op); 2873 2874 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2875 bool isPPC64 = (PtrVT == MVT::i64); 2876 Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext()); 2877 2878 TargetLowering::ArgListTy Args; 2879 TargetLowering::ArgListEntry Entry; 2880 2881 Entry.Ty = IntPtrTy; 2882 Entry.Node = Trmp; Args.push_back(Entry); 2883 2884 // TrampSize == (isPPC64 ? 48 : 40); 2885 Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl, 2886 isPPC64 ? 
  Args.push_back(Entry);

  Entry.Node = FPtr; Args.push_back(Entry);
  Entry.Node = Nest; Args.push_back(Entry);

  // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
      CallingConv::C, Type::getVoidTy(*DAG.getContext()),
      DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args));

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  return CallResult.second;
}

SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  SDLoc dl(Op);

  if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) {
    // vastart just stores the address of the VarArgsFrameIndex slot into the
    // memory location argument.
    SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
    const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
    return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
                        MachinePointerInfo(SV));
  }

  // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
  // We suppose the given va_list is already allocated.
  //
  // typedef struct {
  //  char gpr;     /* index into the array of 8 GPRs
  //                 * stored in the register save area
  //                 * gpr=0 corresponds to r3,
  //                 * gpr=1 to r4, etc.
  //                 */
  //  char fpr;     /* index into the array of 8 FPRs
  //                 * stored in the register save area
  //                 * fpr=0 corresponds to f1,
  //                 * fpr=1 to f2, etc.
  //                 */
  //  char *overflow_arg_area;
  //                /* location on stack that holds
  //                 * the next overflow argument
  //                 */
  //  char *reg_save_area;
  //                /* where r3:r10 and f1:f8 (if saved)
  //                 * are stored
  //                 */
  // } va_list[1];

  SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
  SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);
  SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
                                            PtrVT);
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                 PtrVT);

  uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
  SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT);

  uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
  SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT);

  uint64_t FPROffset = 1;
  SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT);

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  // Store first byte : number of int regs
  SDValue firstStore =
      DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1),
                        MachinePointerInfo(SV), MVT::i8);
  uint64_t nextOffset = FPROffset;
  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
                                ConstFPROffset);

  // Store second byte : number of float regs
  SDValue secondStore =
      DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
                        MachinePointerInfo(SV, nextOffset), MVT::i8);
  nextOffset += StackOffset;
  nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);

  // Store second word : arguments given on stack
  SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
                                    MachinePointerInfo(SV, nextOffset));
  nextOffset += FrameOffset;
  nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);

  // Store third word : arguments given in registers
  return DAG.getStore(thirdStore, dl, FR, nextPtr,
                      MachinePointerInfo(SV, nextOffset));
}

#include "PPCGenCallingConv.inc"

// Function whose sole purpose is to kill compiler warnings
// stemming from unused functions included from PPCGenCallingConv.inc.
CCAssignFn *PPCTargetLowering::useFastISelCCs(unsigned Flag) const {
  return Flag ? CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS;
}

bool llvm::CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                      CCValAssign::LocInfo &LocInfo,
                                      ISD::ArgFlagsTy &ArgFlags,
                                      CCState &State) {
  return true;
}

bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
                                             MVT &LocVT,
                                             CCValAssign::LocInfo &LocInfo,
                                             ISD::ArgFlagsTy &ArgFlags,
                                             CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  const unsigned NumArgRegs = array_lengthof(ArgRegs);

  unsigned RegNum = State.getFirstUnallocated(ArgRegs);

  // Skip one register if the first unallocated register has an even register
  // number and there are still argument registers available which have not
  // been allocated yet. RegNum is actually an index into ArgRegs, which means
  // we need to skip a register if RegNum is odd.
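  // For example (assuming the usual 32-bit SVR4 assignment): for
  // f(int a, long long b), 'a' is allocated to R3; the first unallocated
  // index is then 1 (R4), which is odd, so R4 is skipped and 'b' is passed
  // in the aligned pair R5:R6.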
  if (RegNum != NumArgRegs && RegNum % 2 == 1) {
    State.AllocateReg(ArgRegs[RegNum]);
  }

  // Always return false here, as this function only makes sure that the first
  // unallocated register has an odd register number and does not actually
  // allocate a register for the current argument.
  return false;
}

bool
llvm::CC_PPC32_SVR4_Custom_SkipLastArgRegsPPCF128(unsigned &ValNo, MVT &ValVT,
                                                  MVT &LocVT,
                                                  CCValAssign::LocInfo &LocInfo,
                                                  ISD::ArgFlagsTy &ArgFlags,
                                                  CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  const unsigned NumArgRegs = array_lengthof(ArgRegs);

  unsigned RegNum = State.getFirstUnallocated(ArgRegs);
  int RegsLeft = NumArgRegs - RegNum;

  // Skip if there are not enough registers left for a long double type (4 GPRs
  // in soft-float mode) and put the long double argument on the stack.
  if (RegNum != NumArgRegs && RegsLeft < 4) {
    for (int i = 0; i < RegsLeft; i++) {
      State.AllocateReg(ArgRegs[RegNum + i]);
    }
  }

  return false;
}

bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
                                               MVT &LocVT,
                                               CCValAssign::LocInfo &LocInfo,
                                               ISD::ArgFlagsTy &ArgFlags,
                                               CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
    PPC::F8
  };

  const unsigned NumArgRegs = array_lengthof(ArgRegs);

  unsigned RegNum = State.getFirstUnallocated(ArgRegs);

  // If there is only one floating-point register left we need to put both f64
  // values of a split ppc_fp128 value on the stack.
  if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) {
    State.AllocateReg(ArgRegs[RegNum]);
  }

  // Always return false here, as this function only makes sure that the two
  // f64 values a ppc_fp128 value is split into are both passed in registers or
  // both passed on the stack and does not actually allocate a register for the
  // current argument.
  return false;
}

/// FPR - The set of FP registers that should be allocated for arguments,
/// on Darwin.
static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5,
                                PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10,
                                PPC::F11, PPC::F12, PPC::F13};

/// QFPR - The set of QPX registers that should be allocated for arguments.
static const MCPhysReg QFPR[] = {
    PPC::QF1, PPC::QF2, PPC::QF3, PPC::QF4, PPC::QF5, PPC::QF6, PPC::QF7,
    PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13};

/// CalculateStackSlotSize - Calculates the size reserved for this argument on
/// the stack.
static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
                                       unsigned PtrByteSize) {
  unsigned ArgSize = ArgVT.getStoreSize();
  if (Flags.isByVal())
    ArgSize = Flags.getByValSize();

  // Round up to multiples of the pointer size, except for array members,
  // which are always packed.
  if (!Flags.isInConsecutiveRegs())
    ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;

  return ArgSize;
}

/// CalculateStackSlotAlignment - Calculates the alignment of this argument
/// on the stack.
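/// For example, a plain i64 argument uses the pointer alignment (8 bytes on
/// PPC64), a v4f32 Altivec argument is padded to a 16-byte boundary, and a
/// QPX v4f64 argument to a 32-byte boundary, as the body below computes.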
static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
                                            ISD::ArgFlagsTy Flags,
                                            unsigned PtrByteSize) {
  unsigned Align = PtrByteSize;

  // Altivec parameters are padded to a 16 byte boundary.
  if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
      ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
      ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
      ArgVT == MVT::v1i128)
    Align = 16;
  // QPX vector types stored in double-precision are padded to a 32 byte
  // boundary.
  else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1)
    Align = 32;

  // ByVal parameters are aligned as requested.
  if (Flags.isByVal()) {
    unsigned BVAlign = Flags.getByValAlign();
    if (BVAlign > PtrByteSize) {
      if (BVAlign % PtrByteSize != 0)
        llvm_unreachable(
            "ByVal alignment is not a multiple of the pointer size");

      Align = BVAlign;
    }
  }

  // Array members are always packed to their original alignment.
  if (Flags.isInConsecutiveRegs()) {
    // If the array member was split into multiple registers, the first
    // needs to be aligned to the size of the full type. (Except for
    // ppcf128, which is only aligned as its f64 components.)
    if (Flags.isSplit() && OrigVT != MVT::ppcf128)
      Align = OrigVT.getStoreSize();
    else
      Align = ArgVT.getStoreSize();
  }

  return Align;
}

/// CalculateStackSlotUsed - Return whether this argument will use its
/// stack slot (instead of being passed in registers). ArgOffset,
/// AvailableFPRs, and AvailableVRs must hold the current argument
/// position, and will be updated to account for this argument.
static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT,
                                   ISD::ArgFlagsTy Flags,
                                   unsigned PtrByteSize,
                                   unsigned LinkageSize,
                                   unsigned ParamAreaSize,
                                   unsigned &ArgOffset,
                                   unsigned &AvailableFPRs,
                                   unsigned &AvailableVRs, bool HasQPX) {
  bool UseMemory = false;

  // Respect alignment of argument on the stack.
  unsigned Align =
      CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
  ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
  // If there's no space left in the argument save area, we must
  // use memory (this check also catches zero-sized arguments).
  if (ArgOffset >= LinkageSize + ParamAreaSize)
    UseMemory = true;

  // Allocate argument on the stack.
  ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
  if (Flags.isInConsecutiveRegsLast())
    ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
  // If we overran the argument save area, we must use memory
  // (this check catches arguments passed partially in memory).
  if (ArgOffset > LinkageSize + ParamAreaSize)
    UseMemory = true;

  // However, if the argument is actually passed in an FPR or a VR,
  // we don't use memory after all.
  if (!Flags.isByVal()) {
    if (ArgVT == MVT::f32 || ArgVT == MVT::f64 ||
        // QPX registers overlap with the scalar FP registers.
        (HasQPX && (ArgVT == MVT::v4f32 ||
                    ArgVT == MVT::v4f64 ||
                    ArgVT == MVT::v4i1)))
      if (AvailableFPRs > 0) {
        --AvailableFPRs;
        return false;
      }
    if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
        ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
        ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
        ArgVT == MVT::v1i128)
      if (AvailableVRs > 0) {
        --AvailableVRs;
        return false;
      }
  }

  return UseMemory;
}

/// EnsureStackAlignment - Round stack frame size up from NumBytes to
/// ensure minimum alignment required for target.
static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
                                     unsigned NumBytes) {
  unsigned TargetAlign = Lowering->getStackAlignment();
  unsigned AlignMask = TargetAlign - 1;
  NumBytes = (NumBytes + AlignMask) & ~AlignMask;
  return NumBytes;
}

SDValue PPCTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget.isSVR4ABI()) {
    if (Subtarget.isPPC64())
      return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins,
                                         dl, DAG, InVals);
    else
      return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins,
                                         dl, DAG, InVals);
  } else {
    return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins,
                                       dl, DAG, InVals);
  }
}

SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  // 32-bit SVR4 ABI Stack Frame Layout:
  //              +-----------------------------------+
  //        +-->  |            Back chain             |
  //        |     +-----------------------------------+
  //        |     | Floating-point register save area |
  //        |     +-----------------------------------+
  //        |     |    General register save area     |
  //        |     +-----------------------------------+
  //        |     |           CR save word            |
  //        |     +-----------------------------------+
  //        |     |         VRSAVE save word          |
  //        |     +-----------------------------------+
  //        |     |         Alignment padding         |
  //        |     +-----------------------------------+
  //        |     |     Vector register save area     |
  //        |     +-----------------------------------+
  //        |     |       Local variable space        |
  //        |     +-----------------------------------+
  //        |     |        Parameter list area        |
  //        |     +-----------------------------------+
  //        |     |           LR save word            |
  //        |     +-----------------------------------+
  // SP-->  +---  |            Back chain             |
  //              +-----------------------------------+
  //
  // Specifications:
  //   System V Application Binary Interface PowerPC Processor Supplement
  //   AltiVec Technology Programming Interface Manual

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  // Potential tail calls could cause overwriting of argument stack slots.
  bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
                       (CallConv == CallingConv::Fast));
  unsigned PtrByteSize = 4;

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                    *DAG.getContext());

  // Reserve space for the linkage area on the stack.
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  CCInfo.AllocateStack(LinkageSize, PtrByteSize);
  if (useSoftFloat())
    CCInfo.PreAnalyzeFormalArguments(Ins);

  CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
  CCInfo.clearWasPPCF128();

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];

    // Arguments stored in registers.
    if (VA.isRegLoc()) {
      const TargetRegisterClass *RC;
      EVT ValVT = VA.getValVT();

      switch (ValVT.getSimpleVT().SimpleTy) {
        default:
          llvm_unreachable("ValVT not supported by formal arguments Lowering");
        case MVT::i1:
        case MVT::i32:
          RC = &PPC::GPRCRegClass;
          break;
        case MVT::f32:
          if (Subtarget.hasP8Vector())
            RC = &PPC::VSSRCRegClass;
          else
            RC = &PPC::F4RCRegClass;
          break;
        case MVT::f64:
          if (Subtarget.hasVSX())
            RC = &PPC::VSFRCRegClass;
          else
            RC = &PPC::F8RCRegClass;
          break;
        case MVT::v16i8:
        case MVT::v8i16:
        case MVT::v4i32:
          RC = &PPC::VRRCRegClass;
          break;
        case MVT::v4f32:
          RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass;
          break;
        case MVT::v2f64:
        case MVT::v2i64:
          RC = &PPC::VRRCRegClass;
          break;
        case MVT::v4f64:
          RC = &PPC::QFRCRegClass;
          break;
        case MVT::v4i1:
          RC = &PPC::QBRCRegClass;
          break;
      }

      // Transform the arguments stored in physical registers into virtual
      // ones.
      unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
      SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
                                            ValVT == MVT::i1 ? MVT::i32 : ValVT);

      if (ValVT == MVT::i1)
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);

      InVals.push_back(ArgValue);
    } else {
      // Argument stored in memory.
      assert(VA.isMemLoc());

      unsigned ArgSize = VA.getLocVT().getStoreSize();
      int FI = MFI.CreateFixedObject(ArgSize, VA.getLocMemOffset(),
                                     isImmutable);

      // Create load nodes to retrieve arguments from the stack.
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      InVals.push_back(
          DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo()));
    }
  }

  // Assign locations to all of the incoming aggregate by value arguments.
  // Aggregates passed by value are stored in the local variable space of the
  // caller's stack frame, right above the parameter list area.
  SmallVector<CCValAssign, 16> ByValArgLocs;
  CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                      ByValArgLocs, *DAG.getContext());

  // Reserve stack space for the allocations in CCInfo.
  CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);

  CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);

  // Area that is at least reserved in the caller of this function.
  unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
  MinReservedArea = std::max(MinReservedArea, LinkageSize);

  // Set the size that is at least reserved in caller of this function. Tail
  // call optimized function's reserved stack space needs to be aligned so that
  // taking the difference between two stack areas will result in an aligned
  // stack.
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
  FuncInfo->setMinReservedArea(MinReservedArea);

  SmallVector<SDValue, 8> MemOps;

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    static const MCPhysReg GPArgRegs[] = {
      PPC::R3, PPC::R4, PPC::R5, PPC::R6,
      PPC::R7, PPC::R8, PPC::R9, PPC::R10,
    };
    const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);

    static const MCPhysReg FPArgRegs[] = {
      PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
      PPC::F8
    };
    unsigned NumFPArgRegs = array_lengthof(FPArgRegs);

    if (useSoftFloat())
      NumFPArgRegs = 0;

    FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
    FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));

    // Make room for NumGPArgRegs and NumFPArgRegs.
    int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
                NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;

    FuncInfo->setVarArgsStackOffset(
        MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
                              CCInfo.getNextStackOffset(), true));

    FuncInfo->setVarArgsFrameIndex(MFI.CreateStackObject(Depth, 8, false));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // The fixed integer arguments of a variadic function are stored to the
    // VarArgsFrameIndex on the stack so that they may be loaded by
    // dereferencing the result of va_next.
    for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
      // Get an existing live-in vreg, or add a new one.
      unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
      if (!VReg)
        VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address by four for the next argument to store.
      SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }

    // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit
    // 6 is set.
    // The double arguments are stored to the VarArgsFrameIndex
    // on the stack.
    for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
      // Get an existing live-in vreg, or add a new one.
      unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
      if (!VReg)
        VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address by eight for the next argument to store.
      SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
                                       PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}

// PPC64 passes i8, i16, and i32 values in i64 registers. Promote
// value to MVT::i64 and then truncate to the correct register size.
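// For example, an i32 argument marked 'signext' arrives with its full 64 bits
// already sign-extended; the AssertSext node below merely records that fact
// for the optimizer before the value is truncated back to i32.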
SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
                                             EVT ObjectVT, SelectionDAG &DAG,
                                             SDValue ArgVal,
                                             const SDLoc &dl) const {
  if (Flags.isSExt())
    ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
                         DAG.getValueType(ObjectVT));
  else if (Flags.isZExt())
    ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
                         DAG.getValueType(ObjectVT));

  return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
}

SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  // TODO: add description of PPC stack frame format, or at least some docs.
  //
  bool isELFv2ABI = Subtarget.isELFv2ABI();
  bool isLittleEndian = Subtarget.isLittleEndian();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  assert(!(CallConv == CallingConv::Fast && isVarArg) &&
         "fastcc not supported on varargs functions");

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  // Potential tail calls could cause overwriting of argument stack slots.
  bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
                       (CallConv == CallingConv::Fast));
  unsigned PtrByteSize = 8;
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();

  static const MCPhysReg GPR[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned Num_GPR_Regs = array_lengthof(GPR);
  const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
  const unsigned Num_VR_Regs = array_lengthof(VR);
  const unsigned Num_QFPR_Regs = Num_FPR_Regs;

  // Do a first pass over the arguments to determine whether the ABI
  // guarantees that our caller has allocated the parameter save area
  // on its stack frame. In the ELFv1 ABI, this is always the case;
  // in the ELFv2 ABI, it is true if this is a vararg function or if
  // any parameter is located in a stack slot.

  bool HasParameterArea = !isELFv2ABI || isVarArg;
  unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
  unsigned NumBytes = LinkageSize;
  unsigned AvailableFPRs = Num_FPR_Regs;
  unsigned AvailableVRs = Num_VR_Regs;
  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    if (Ins[i].Flags.isNest())
      continue;

    if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
                               PtrByteSize, LinkageSize, ParamAreaSize,
                               NumBytes, AvailableFPRs, AvailableVRs,
                               Subtarget.hasQPX()))
      HasParameterArea = true;
  }

  // Add DAG nodes to load the arguments or copy them out of registers. On
  // entry to a function on PPC, the arguments start after the linkage area,
  // although the first ones are often in registers.
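  // (For reference: the linkage area is 48 bytes under ELFv1 and 32 bytes
  // under ELFv2, so the first parameter doubleword starts at that offset from
  // the incoming stack pointer.)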

  unsigned ArgOffset = LinkageSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
  unsigned &QFPR_idx = FPR_idx;
  SmallVector<SDValue, 8> MemOps;
  Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
  unsigned CurArgIdx = 0;
  for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
    SDValue ArgVal;
    bool needsLoad = false;
    EVT ObjectVT = Ins[ArgNo].VT;
    EVT OrigVT = Ins[ArgNo].ArgVT;
    unsigned ObjSize = ObjectVT.getStoreSize();
    unsigned ArgSize = ObjSize;
    ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
    if (Ins[ArgNo].isOrigArg()) {
      std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[ArgNo].getOrigArgIndex();
    }
    // We re-align the argument offset for each argument, except when using
    // the fast calling convention, where we only do so when we will actually
    // use a stack slot.
    unsigned CurArgOffset, Align;
    auto ComputeArgOffset = [&]() {
      /* Respect alignment of argument on the stack. */
      Align = CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
      ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
      CurArgOffset = ArgOffset;
    };

    if (CallConv != CallingConv::Fast) {
      ComputeArgOffset();

      /* Compute GPR index associated with argument offset. */
      GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
      GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
    }

    // FIXME the codegen can be much improved in some cases.
    // We do not have to keep everything in memory.
    if (Flags.isByVal()) {
      assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");

      if (CallConv == CallingConv::Fast)
        ComputeArgOffset();

      // ObjSize is the true size; ArgSize is ObjSize rounded up to a multiple
      // of the register (pointer) size.
      ObjSize = Flags.getByValSize();
      ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      // Empty aggregate parameters do not take up registers. Examples:
      //   struct { } a;
      //   union  { } b;
      //   int c[0];
      // etc. However, we have to provide a place-holder in InVals, so
      // pretend we have an 8-byte item at the current address for that
      // purpose.
      if (!ObjSize) {
        int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
        SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
        InVals.push_back(FIN);
        continue;
      }

      // Create a stack object covering all stack doublewords occupied
      // by the argument. If the argument is (fully or partially) on
      // the stack, or if the argument is fully in registers but the
      // caller has allocated the parameter save area anyway, we can refer
      // directly to the caller's stack frame. Otherwise, create a
      // local copy in our own frame.
      int FI;
      if (HasParameterArea ||
          ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
        FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true);
      else
        FI = MFI.CreateStackObject(ArgSize, Align, false);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);

      // Handle aggregates smaller than 8 bytes.
      if (ObjSize < PtrByteSize) {
        // The value of the object is its address, which differs from the
        // address of the enclosing doubleword on big-endian systems.
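        // For example, a 3-byte aggregate is right-justified within its
        // doubleword on big-endian systems, so its address is the slot
        // address plus (PtrByteSize - ObjSize) == 5 bytes.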
        SDValue Arg = FIN;
        if (!isLittleEndian) {
          SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
          Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
        }
        InVals.push_back(Arg);

        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
          FuncInfo->addLiveInAttr(VReg, Flags);
          SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
          SDValue Store;

          if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
            EVT ObjType = (ObjSize == 1 ? MVT::i8 :
                           (ObjSize == 2 ? MVT::i16 : MVT::i32));
            Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
                                      MachinePointerInfo(&*FuncArg), ObjType);
          } else {
            // For sizes that don't fit a truncating store (3, 5, 6, 7),
            // store the whole register as-is to the parameter save area
            // slot.
            Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                 MachinePointerInfo(&*FuncArg));
          }

          MemOps.push_back(Store);
        }
        // Whether we copied from a register or not, advance the offset
        // into the parameter save area by a full doubleword.
        ArgOffset += PtrByteSize;
        continue;
      }

      // The value of the object is its address, which is the address of
      // its first stack doubleword.
      InVals.push_back(FIN);

      // Store whatever pieces of the object are in registers to memory.
      for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
        if (GPR_idx == Num_GPR_Regs)
          break;

        unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
        FuncInfo->addLiveInAttr(VReg, Flags);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
        SDValue Addr = FIN;
        if (j) {
          SDValue Off = DAG.getConstant(j, dl, PtrVT);
          Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
        }
        SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
                                     MachinePointerInfo(&*FuncArg, j));
        MemOps.push_back(Store);
        ++GPR_idx;
      }
      ArgOffset += ArgSize;
      continue;
    }

    switch (ObjectVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unhandled argument type!");
    case MVT::i1:
    case MVT::i32:
    case MVT::i64:
      if (Flags.isNest()) {
        // The 'nest' parameter, if any, is passed in R11.
        unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);

        break;
      }

      // These can be scalar arguments or elements of an integer array type
      // passed directly. Clang may use those instead of "byval" aggregate
      // types to avoid forcing arguments to memory unnecessarily.
      if (GPR_idx != Num_GPR_Regs) {
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
        FuncInfo->addLiveInAttr(VReg, Flags);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
          // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
          // value to MVT::i64 and then truncate to the correct register size.
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
      } else {
        if (CallConv == CallingConv::Fast)
          ComputeArgOffset();

        needsLoad = true;
        ArgSize = PtrByteSize;
      }
      if (CallConv != CallingConv::Fast || needsLoad)
        ArgOffset += 8;
      break;

    case MVT::f32:
    case MVT::f64:
      // These can be scalar arguments or elements of a float array type
      // passed directly. The latter are used to implement ELFv2 homogeneous
      // float aggregates.
      if (FPR_idx != Num_FPR_Regs) {
        unsigned VReg;

        if (ObjectVT == MVT::f32)
          VReg = MF.addLiveIn(FPR[FPR_idx],
                              Subtarget.hasP8Vector()
                                  ? &PPC::VSSRCRegClass
                                  : &PPC::F4RCRegClass);
        else
          VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
                                                ? &PPC::VSFRCRegClass
                                                : &PPC::F8RCRegClass);

        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        ++FPR_idx;
      } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
        // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
        // once we support fp <-> gpr moves.

        // This can only ever happen in the presence of f32 array types,
        // since otherwise we never run out of FPRs before running out
        // of GPRs.
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
        FuncInfo->addLiveInAttr(VReg, Flags);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::f32) {
          if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
            ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
                                 DAG.getConstant(32, dl, MVT::i32));
          ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
        }

        ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
      } else {
        if (CallConv == CallingConv::Fast)
          ComputeArgOffset();

        needsLoad = true;
      }

      // When passing an array of floats, the array occupies consecutive
      // space in the argument area; only round up to the next doubleword
      // at the end of the array. Otherwise, each float takes 8 bytes.
      if (CallConv != CallingConv::Fast || needsLoad) {
        ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
        ArgOffset += ArgSize;
        if (Flags.isInConsecutiveRegsLast())
          ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      }
      break;
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
    case MVT::v2f64:
    case MVT::v2i64:
    case MVT::v1i128:
      if (!Subtarget.hasQPX()) {
        // These can be scalar arguments or elements of a vector array type
        // passed directly. The latter are used to implement ELFv2 homogeneous
        // vector aggregates.
        if (VR_idx != Num_VR_Regs) {
          unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
          ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
          ++VR_idx;
        } else {
          if (CallConv == CallingConv::Fast)
            ComputeArgOffset();

          needsLoad = true;
        }
        if (CallConv != CallingConv::Fast || needsLoad)
          ArgOffset += 16;
        break;
      } // not QPX

      assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 &&
             "Invalid QPX parameter type");
      /* fall through */

    case MVT::v4f64:
    case MVT::v4i1:
      // QPX vectors are treated like their scalar floating-point subregisters
      // (except that they're larger).
      unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32;
      if (QFPR_idx != Num_QFPR_Regs) {
        const TargetRegisterClass *RC;
        switch (ObjectVT.getSimpleVT().SimpleTy) {
        case MVT::v4f64: RC = &PPC::QFRCRegClass; break;
        case MVT::v4f32: RC = &PPC::QSRCRegClass; break;
        default:         RC = &PPC::QBRCRegClass; break;
        }

        unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        ++QFPR_idx;
      } else {
        if (CallConv == CallingConv::Fast)
          ComputeArgOffset();
        needsLoad = true;
      }
      if (CallConv != CallingConv::Fast || needsLoad)
        ArgOffset += Sz;
      break;
    }

    // We need to load the argument to a virtual register if we determined
    // above that we ran out of physical registers of the appropriate type.
    if (needsLoad) {
      if (ObjSize < ArgSize && !isLittleEndian)
        CurArgOffset += ArgSize - ObjSize;
      int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
    }

    InVals.push_back(ArgVal);
  }

  // Area that is at least reserved in the caller of this function.
  unsigned MinReservedArea;
  if (HasParameterArea)
    MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
  else
    MinReservedArea = LinkageSize;

  // Set the size that is at least reserved in caller of this function. Tail
  // call optimized functions' reserved stack space needs to be aligned so that
  // taking the difference between two stack areas will result in an aligned
  // stack.
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
  FuncInfo->setMinReservedArea(MinReservedArea);

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    int Depth = ArgOffset;

    FuncInfo->setVarArgsFrameIndex(
        MFI.CreateFixedObject(PtrByteSize, Depth, true));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // If this function is vararg, store any remaining integer argument regs
    // to their spots on the stack so that they may be loaded by dereferencing
    // the result of va_next.
    for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
         GPR_idx < Num_GPR_Regs; ++GPR_idx) {
      unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address by eight for the next argument to store.
      SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}

SDValue PPCTargetLowering::LowerFormalArguments_Darwin(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  // TODO: add description of PPC stack frame format, or at least some docs.
  //
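  // (Very roughly: unlike SVR4, the Darwin ABI reserves parameter save space
  // for every argument, and floating-point arguments also consume the
  // corresponding GPRs, as the code below assumes.)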
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;
  // Potential tail calls could cause overwriting of argument stack slots.
  bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
                       (CallConv == CallingConv::Fast));
  unsigned PtrByteSize = isPPC64 ? 8 : 4;
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  unsigned ArgOffset = LinkageSize;
  // Area that is at least reserved in caller of this function.
  unsigned MinReservedArea = ArgOffset;

  static const MCPhysReg GPR_32[] = {           // 32-bit registers.
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  static const MCPhysReg GPR_64[] = {           // 64-bit registers.
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
  const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
  const unsigned Num_VR_Regs = array_lengthof(VR);

  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;

  const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;

  // In 32-bit non-varargs functions, the stack space for vectors is after the
  // stack space for non-vectors. We do not use this space unless we have
  // too many vectors to fit in registers, something that only occurs in
  // constructed examples:), but we have to walk the arglist to figure
  // that out...for the pathological case, compute VecArgOffset as the
  // start of the vector parameter area. Computing VecArgOffset is the
  // entire point of the following loop.
  unsigned VecArgOffset = ArgOffset;
  if (!isVarArg && !isPPC64) {
    for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
         ++ArgNo) {
      EVT ObjectVT = Ins[ArgNo].VT;
      ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;

      if (Flags.isByVal()) {
        // ObjSize is the true size; ArgSize is ObjSize rounded up to a
        // multiple of the register size.
        unsigned ObjSize = Flags.getByValSize();
        unsigned ArgSize =
            ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
        VecArgOffset += ArgSize;
        continue;
      }

      switch(ObjectVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unhandled argument type!");
      case MVT::i1:
      case MVT::i32:
      case MVT::f32:
        VecArgOffset += 4;
        break;
      case MVT::i64:  // PPC64
      case MVT::f64:
        // FIXME: We are guaranteed to be !isPPC64 at this point.
        // Does MVT::i64 apply?
        VecArgOffset += 8;
        break;
      case MVT::v4f32:
      case MVT::v4i32:
      case MVT::v8i16:
      case MVT::v16i8:
        // Nothing to do, we're only looking at non-vector args here.
        break;
      }
    }
  }
  // We've found where the vector parameter area in memory is. Skip the
  // first 12 parameters; these don't use that memory.
  VecArgOffset = ((VecArgOffset+15)/16)*16;
  VecArgOffset += 12*16;

  // Add DAG nodes to load the arguments or copy them out of registers. On
  // entry to a function on PPC, the arguments start after the linkage area,
  // although the first ones are often in registers.

  SmallVector<SDValue, 8> MemOps;
  unsigned nAltivecParamsAtEnd = 0;
  Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
  unsigned CurArgIdx = 0;
  for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
    SDValue ArgVal;
    bool needsLoad = false;
    EVT ObjectVT = Ins[ArgNo].VT;
    unsigned ObjSize = ObjectVT.getSizeInBits()/8;
    unsigned ArgSize = ObjSize;
    ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
    if (Ins[ArgNo].isOrigArg()) {
      std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[ArgNo].getOrigArgIndex();
    }
    unsigned CurArgOffset = ArgOffset;

    // Varargs or 64-bit Altivec parameters are padded to a 16 byte boundary.
    if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
        ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
      if (isVarArg || isPPC64) {
        MinReservedArea = ((MinReservedArea+15)/16)*16;
        MinReservedArea += CalculateStackSlotSize(ObjectVT,
                                                  Flags,
                                                  PtrByteSize);
      } else  nAltivecParamsAtEnd++;
    } else
      // Calculate min reserved area.
      MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
                                                Flags,
                                                PtrByteSize);

    // FIXME the codegen can be much improved in some cases.
    // We do not have to keep everything in memory.
    if (Flags.isByVal()) {
      assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");

      // ObjSize is the true size; ArgSize is ObjSize rounded up to a multiple
      // of the register size.
      ObjSize = Flags.getByValSize();
      ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      // Objects of size 1 and 2 are right-justified, everything else is
      // left-justified. This means the memory address is adjusted forwards.
      if (ObjSize==1 || ObjSize==2) {
        CurArgOffset = CurArgOffset + (4 - ObjSize);
      }
      // The value of the object is its address.
      int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, false, true);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      InVals.push_back(FIN);
      if (ObjSize==1 || ObjSize==2) {
        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg;
          if (isPPC64)
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
          else
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
          SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
          EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16;
          SDValue Store =
              DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
                                MachinePointerInfo(&*FuncArg), ObjType);
          MemOps.push_back(Store);
          ++GPR_idx;
        }

        ArgOffset += PtrByteSize;

        continue;
      }
      for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
        // Store whatever pieces of the object are in registers
        // to memory. ArgOffset will be the address of the beginning
        // of the object.
        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg;
          if (isPPC64)
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
          else
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
          int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
          SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
          SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
          SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                       MachinePointerInfo(&*FuncArg, j));
          MemOps.push_back(Store);
          ++GPR_idx;
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
          break;
        }
      }
      continue;
    }

    switch (ObjectVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unhandled argument type!");
    case MVT::i1:
    case MVT::i32:
      if (!isPPC64) {
        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
          ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);

          if (ObjectVT == MVT::i1)
            ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal);

          ++GPR_idx;
        } else {
          needsLoad = true;
          ArgSize = PtrByteSize;
        }
        // All int arguments reserve stack space in the Darwin ABI.
        ArgOffset += PtrByteSize;
        break;
      }
      LLVM_FALLTHROUGH;
    case MVT::i64:  // PPC64
      if (GPR_idx != Num_GPR_Regs) {
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
          // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
          // value to MVT::i64 and then truncate to the correct register size.
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);

        ++GPR_idx;
      } else {
        needsLoad = true;
        ArgSize = PtrByteSize;
      }
      // All int arguments reserve stack space in the Darwin ABI.
      ArgOffset += 8;
      break;

    case MVT::f32:
    case MVT::f64:
      // Every 4 bytes of argument space consumes one of the GPRs available for
      // argument passing.
      if (GPR_idx != Num_GPR_Regs) {
        ++GPR_idx;
        if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
          ++GPR_idx;
      }
      if (FPR_idx != Num_FPR_Regs) {
        unsigned VReg;

        if (ObjectVT == MVT::f32)
          VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
        else
          VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);

        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        ++FPR_idx;
      } else {
        needsLoad = true;
      }

      // All FP arguments reserve stack space in the Darwin ABI.
      ArgOffset += isPPC64 ? 8 : ObjSize;
      break;
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
      // Note that vector arguments in registers don't reserve stack space,
      // except in varargs functions.
      if (VR_idx != Num_VR_Regs) {
        unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        if (isVarArg) {
          while ((ArgOffset % 16) != 0) {
            ArgOffset += PtrByteSize;
            if (GPR_idx != Num_GPR_Regs)
              GPR_idx++;
          }
          ArgOffset += 16;
          GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
        }
        ++VR_idx;
      } else {
        if (!isVarArg && !isPPC64) {
          // Vectors go after all the non-vectors.
          CurArgOffset = VecArgOffset;
          VecArgOffset += 16;
        } else {
          // Vectors are aligned.
          ArgOffset = ((ArgOffset+15)/16)*16;
          CurArgOffset = ArgOffset;
          ArgOffset += 16;
        }
        needsLoad = true;
      }
      break;
    }

    // We need to load the argument to a virtual register if we determined
    // above that we ran out of physical registers of the appropriate type.
    if (needsLoad) {
      int FI = MFI.CreateFixedObject(ObjSize,
                                     CurArgOffset + (ArgSize - ObjSize),
                                     isImmutable);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
    }

    InVals.push_back(ArgVal);
  }

  // Allow for Altivec parameters at the end, if needed.
  if (nAltivecParamsAtEnd) {
    MinReservedArea = ((MinReservedArea+15)/16)*16;
    MinReservedArea += 16*nAltivecParamsAtEnd;
  }

  // Area that is at least reserved in the caller of this function.
  MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);

  // Set the size that is at least reserved in caller of this function. Tail
  // call optimized functions' reserved stack space needs to be aligned so that
  // taking the difference between two stack areas will result in an aligned
  // stack.
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
  FuncInfo->setMinReservedArea(MinReservedArea);

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    int Depth = ArgOffset;

    FuncInfo->setVarArgsFrameIndex(
        MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
                              Depth, true));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // If this function is vararg, store any remaining integer argument regs
    // to their spots on the stack so that they may be loaded by dereferencing
    // the result of va_next.
    for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
      unsigned VReg;

      if (isPPC64)
        VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
      else
        VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address by the pointer size for the next argument to
      // store.
      SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}

/// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
/// adjusted to accommodate the arguments for the tail call.
static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
                                   unsigned ParamSize) {

  if (!isTailCall) return 0;

  PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
  unsigned CallerMinReservedArea = FI->getMinReservedArea();
  int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
  // Remember only if the new adjustment is bigger.
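  // For example, if the caller reserved 64 bytes but this tail call needs a
  // 96-byte parameter area, SPDiff is -32; the most negative delta seen so
  // far is kept so the prologue can allocate enough space for any tail call.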
  if (SPDiff < FI->getTailCallSPDelta())
    FI->setTailCallSPDelta(SPDiff);

  return SPDiff;
}

static bool isFunctionGlobalAddress(SDValue Callee);

static bool
callsShareTOCBase(const Function *Caller, SDValue Callee,
                  const TargetMachine &TM) {
  // If !G, Callee can be an external symbol.
  GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
  if (!G)
    return false;

  // The medium and large code models are expected to provide a sufficiently
  // large TOC to provide all data addressing needs of a module with a
  // single TOC. Since each module will be addressed with a single TOC, we
  // only need to check that caller and callee don't cross dso boundaries.
  if (CodeModel::Medium == TM.getCodeModel() ||
      CodeModel::Large == TM.getCodeModel())
    return TM.shouldAssumeDSOLocal(*Caller->getParent(), G->getGlobal());

  // Otherwise we need to ensure callee and caller are in the same section,
  // since the linker may allocate multiple TOCs, and we don't know which
  // sections will belong to the same TOC base.

  const GlobalValue *GV = G->getGlobal();
  if (!GV->isStrongDefinitionForLinker())
    return false;

  // Any explicitly-specified sections and section prefixes must also match.
  // Also, if we're using -ffunction-sections, then each function is always in
  // a different section (the same is true for COMDAT functions).
  if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
      GV->getSection() != Caller->getSection())
    return false;
  if (const auto *F = dyn_cast<Function>(GV)) {
    if (F->getSectionPrefix() != Caller->getSectionPrefix())
      return false;
  }

  // If the callee might be interposed, then we can't assume the ultimate call
  // target will be in the same section. Even in cases where we can assume that
  // interposition won't happen, in any case where the linker might insert a
  // stub to allow for interposition, we must generate code as though
  // interposition might occur. To understand why this matters, consider a
  // situation where: a -> b -> c where the arrows indicate calls. b and c are
  // in the same section, but a is in a different module (i.e. has a different
  // TOC base pointer). If the linker allows for interposition between b and c,
  // then it will generate a stub for the call edge between b and c which will
  // save the TOC pointer into the designated stack slot allocated by b. If we
  // return true here, and therefore allow a tail call between b and c, that
  // stack slot won't exist and the b -> c stub will end up saving b's TOC base
  // pointer into the stack slot allocated by a (where the a -> b stub saved
  // a's TOC base pointer). Similarly, even if we're not considering a tail
  // call, but rather whether a nop is needed after the call instruction in b
  // because the linker will insert a stub, the linker might complain about a
  // missing nop if we omit it (although many don't complain in this case).
  if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
    return false;

  return true;
}

static bool
needStackSlotPassParameters(const PPCSubtarget &Subtarget,
                            const SmallVectorImpl<ISD::OutputArg> &Outs) {
  assert(Subtarget.isSVR4ABI() && Subtarget.isPPC64());

  const unsigned PtrByteSize = 8;
  const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();

  static const MCPhysReg GPR[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned NumGPRs = array_lengthof(GPR);
  const unsigned NumFPRs = 13;
  const unsigned NumVRs = array_lengthof(VR);
  const unsigned ParamAreaSize = NumGPRs * PtrByteSize;

  unsigned NumBytes = LinkageSize;
  unsigned AvailableFPRs = NumFPRs;
  unsigned AvailableVRs = NumVRs;

  for (const ISD::OutputArg& Param : Outs) {
    if (Param.Flags.isNest()) continue;

    if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags,
                               PtrByteSize, LinkageSize, ParamAreaSize,
                               NumBytes, AvailableFPRs, AvailableVRs,
                               Subtarget.hasQPX()))
      return true;
  }
  return false;
}

static bool
hasSameArgumentList(const Function *CallerFn, ImmutableCallSite CS) {
  if (CS.arg_size() != CallerFn->arg_size())
    return false;

  ImmutableCallSite::arg_iterator CalleeArgIter = CS.arg_begin();
  ImmutableCallSite::arg_iterator CalleeArgEnd = CS.arg_end();
  Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();

  for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
    const Value* CalleeArg = *CalleeArgIter;
    const Value* CallerArg = &(*CallerArgIter);
    if (CalleeArg == CallerArg)
      continue;

    // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
    //        tail call @callee([4 x i64] undef, [4 x i64] %b)
    //      }
    // The 1st argument of the callee is undef and has the same type as the
    // caller's corresponding argument.
    if (CalleeArg->getType() == CallerArg->getType() &&
        isa<UndefValue>(CalleeArg))
      continue;

    return false;
  }

  return true;
}

// Returns true if TCO is possible between the caller's and callee's
// calling conventions.
static bool
areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
                                    CallingConv::ID CalleeCC) {
  // Tail calls are possible with fastcc and ccc.
  auto isTailCallableCC = [] (CallingConv::ID CC){
      return CC == CallingConv::C || CC == CallingConv::Fast;
  };
  if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
    return false;

  // We can safely tail call both fastcc and ccc callees from a c calling
  // convention caller. If the caller is fastcc, we may have less stack space
  // than a non-fastcc caller with the same signature so disable tail-calls in
  // that case.
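  // In other words: C -> C, C -> Fast, and Fast -> Fast are eligible, but
  // Fast -> C is not.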
4416 return CallerCC == CallingConv::C || CallerCC == CalleeCC;
4417 }
4418
4419 bool
4420 PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
4421 SDValue Callee,
4422 CallingConv::ID CalleeCC,
4423 ImmutableCallSite CS,
4424 bool isVarArg,
4425 const SmallVectorImpl<ISD::OutputArg> &Outs,
4426 const SmallVectorImpl<ISD::InputArg> &Ins,
4427 SelectionDAG& DAG) const {
4428 bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
4429
4430 if (DisableSCO && !TailCallOpt) return false;
4431
4432 // Variadic argument functions are not supported.
4433 if (isVarArg) return false;
4434
4435 auto &Caller = DAG.getMachineFunction().getFunction();
4436 // Check that the calling conventions are compatible for TCO.
4437 if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
4438 return false;
4439
4440 // A caller with any byval parameters is not supported.
4441 if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
4442 return false;
4443
4444 // A callee with any byval parameters is not supported either.
4445 // Note: This is a quick workaround, because in some cases, e.g. when the
4446 // caller's stack size > the callee's stack size, we are still able to apply
4447 // sibling call optimization. For example, gcc is able to do SCO for caller1
4448 // in the following example, but not for caller2.
4449 // struct test {
4450 // long int a;
4451 // char ary[56];
4452 // } gTest;
4453 // __attribute__((noinline)) int callee(struct test v, struct test *b) {
4454 // b->a = v.a;
4455 // return 0;
4456 // }
4457 // void caller1(struct test a, struct test c, struct test *b) {
4458 // callee(gTest, b); }
4459 // void caller2(struct test *b) { callee(gTest, b); }
4460 if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
4461 return false;
4462
4463 // If callee and caller use different calling conventions, we cannot pass
4464 // parameters on stack since offsets for the parameter area may be different.
4465 if (Caller.getCallingConv() != CalleeCC &&
4466 needStackSlotPassParameters(Subtarget, Outs))
4467 return false;
4468
4469 // No TCO/SCO on indirect call because the caller has to restore its TOC.
4470 if (!isFunctionGlobalAddress(Callee) &&
4471 !isa<ExternalSymbolSDNode>(Callee))
4472 return false;
4473
4474 // If the caller and callee potentially have different TOC bases then we
4475 // cannot tail call since we need to restore the TOC pointer after the call.
4476 // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
4477 if (!callsShareTOCBase(&Caller, Callee, getTargetMachine()))
4478 return false;
4479
4480 // TCO allows altering callee ABI, so we don't have to check further.
4481 if (CalleeCC == CallingConv::Fast && TailCallOpt)
4482 return true;
4483
4484 if (DisableSCO) return false;
4485
4486 // If the callee uses the same argument list as the caller, we can apply SCO
4487 // in this case. If not, we need to check whether the callee needs stack
4488 // slots for passing arguments.
4489 if (!hasSameArgumentList(&Caller, CS) &&
4490 needStackSlotPassParameters(Subtarget, Outs)) {
4491 return false;
4492 }
4493
4494 return true;
4495 }
4496
4497 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
4498 /// for tail call optimization. Targets which want to do tail call
4499 /// optimization should implement this function.
4500 bool
4501 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
4502 CallingConv::ID CalleeCC,
4503 bool isVarArg,
4504 const SmallVectorImpl<ISD::InputArg> &Ins,
4505 SelectionDAG& DAG) const {
4506 if (!getTargetMachine().Options.GuaranteedTailCallOpt)
4507 return false;
4508
4509 // Variable argument functions are not supported.
4510 if (isVarArg)
4511 return false;
4512
4513 MachineFunction &MF = DAG.getMachineFunction();
4514 CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
4515 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
4516 // Functions containing byval parameters are not supported.
4517 for (unsigned i = 0; i != Ins.size(); i++) {
4518 ISD::ArgFlagsTy Flags = Ins[i].Flags;
4519 if (Flags.isByVal()) return false;
4520 }
4521
4522 // Non-PIC/GOT tail calls are supported.
4523 if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
4524 return true;
4525
4526 // At the moment we can only do local tail calls (in same module, hidden
4527 // or protected) if we are generating PIC.
4528 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
4529 return G->getGlobal()->hasHiddenVisibility()
4530 || G->getGlobal()->hasProtectedVisibility();
4531 }
4532
4533 return false;
4534 }
4535
4536 /// isBLACompatibleAddress - Return the immediate to use if the specified
4537 /// 32-bit value is representable in the immediate field of a BxA instruction.
4538 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
4539 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
4540 if (!C) return nullptr;
4541
4542 int Addr = C->getZExtValue();
4543 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero.
4544 SignExtend32<26>(Addr) != Addr)
4545 return nullptr; // Top 6 bits have to be sext of immediate.
4546
4547 return DAG
4548 .getConstant(
4549 (int)C->getZExtValue() >> 2, SDLoc(Op),
4550 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
4551 .getNode();
4552 }
4553
4554 namespace {
4555
4556 struct TailCallArgumentInfo {
4557 SDValue Arg;
4558 SDValue FrameIdxOp;
4559 int FrameIdx = 0;
4560
4561 TailCallArgumentInfo() = default;
4562 };
4563
4564 } // end anonymous namespace
4565
4566 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
4567 static void StoreTailCallArgumentsToStackSlot(
4568 SelectionDAG &DAG, SDValue Chain,
4569 const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
4570 SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
4571 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
4572 SDValue Arg = TailCallArgs[i].Arg;
4573 SDValue FIN = TailCallArgs[i].FrameIdxOp;
4574 int FI = TailCallArgs[i].FrameIdx;
4575 // Store relative to framepointer.
4576 MemOpChains.push_back(DAG.getStore(
4577 Chain, dl, Arg, FIN,
4578 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
4579 }
4580 }
4581
4582 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
4583 /// the appropriate stack slot for the tail call optimized function call.
4584 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
4585 SDValue OldRetAddr, SDValue OldFP,
4586 int SPDiff, const SDLoc &dl) {
4587 if (SPDiff) {
4588 // Calculate the new stack slot for the return address.
4589 MachineFunction &MF = DAG.getMachineFunction();
4590 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
4591 const PPCFrameLowering *FL = Subtarget.getFrameLowering();
4592 bool isPPC64 = Subtarget.isPPC64();
4593 int SlotSize = isPPC64 ? 8 : 4;
4594 int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
4595 int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize,
4596 NewRetAddrLoc, true);
4597 EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4598 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
4599 Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
4600 MachinePointerInfo::getFixedStack(MF, NewRetAddr));
4601
4602 // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack
4603 // slot as the FP is never overwritten.
4604 if (Subtarget.isDarwinABI()) {
4605 int NewFPLoc = SPDiff + FL->getFramePointerSaveOffset();
4606 int NewFPIdx = MF.getFrameInfo().CreateFixedObject(SlotSize, NewFPLoc,
4607 true);
4608 SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT);
4609 Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx,
4610 MachinePointerInfo::getFixedStack(
4611 DAG.getMachineFunction(), NewFPIdx));
4612 }
4613 }
4614 return Chain;
4615 }
4616
4617 /// CalculateTailCallArgDest - Remember the argument for later processing and
4618 /// calculate its position.
4619 static void
4620 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
4621 SDValue Arg, int SPDiff, unsigned ArgOffset,
4622 SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
4623 int Offset = ArgOffset + SPDiff;
4624 uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
4625 int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
4626 EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4627 SDValue FIN = DAG.getFrameIndex(FI, VT);
4628 TailCallArgumentInfo Info;
4629 Info.Arg = Arg;
4630 Info.FrameIdxOp = FIN;
4631 Info.FrameIdx = FI;
4632 TailCallArguments.push_back(Info);
4633 }
4634
4635 /// EmitTailCallLoadFPAndRetAddr - Emit loads of the return address and frame
4636 /// pointer stack slots. Returns the chain as result and the loaded values in
4637 /// LROpOut/FPOpOut. Used when tail calling.
4638 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
4639 SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
4640 SDValue &FPOpOut, const SDLoc &dl) const {
4641 if (SPDiff) {
4642 // Load the LR and FP stack slot for later adjusting.
4643 EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
4644 LROpOut = getReturnAddrFrameIndex(DAG);
4645 LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
4646 Chain = SDValue(LROpOut.getNode(), 1);
4647
4648 // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack
4649 // slot as the FP is never overwritten.
4650 if (Subtarget.isDarwinABI()) {
4651 FPOpOut = getFramePointerFrameIndex(DAG);
4652 FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo());
4653 Chain = SDValue(FPOpOut.getNode(), 1);
4654 }
4655 }
4656 return Chain;
4657 }
4658
4659 /// CreateCopyOfByValArgument - Make a copy of an aggregate at the address
4660 /// specified by "Src" to address "Dst" of size "Size". Alignment information
4661 /// is specified by the specific parameter attribute. The copy will be passed
4662 /// as a byval function parameter.
4663 /// Sometimes what we are copying is the end of a larger object, the part that
4664 /// does not fit in registers.
4665 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, 4666 SDValue Chain, ISD::ArgFlagsTy Flags, 4667 SelectionDAG &DAG, const SDLoc &dl) { 4668 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32); 4669 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 4670 false, false, false, MachinePointerInfo(), 4671 MachinePointerInfo()); 4672 } 4673 4674 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of 4675 /// tail calls. 4676 static void LowerMemOpCallTo( 4677 SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg, 4678 SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64, 4679 bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains, 4680 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) { 4681 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4682 if (!isTailCall) { 4683 if (isVector) { 4684 SDValue StackPtr; 4685 if (isPPC64) 4686 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 4687 else 4688 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 4689 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 4690 DAG.getConstant(ArgOffset, dl, PtrVT)); 4691 } 4692 MemOpChains.push_back( 4693 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo())); 4694 // Calculate and remember argument location. 4695 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset, 4696 TailCallArguments); 4697 } 4698 4699 static void 4700 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, 4701 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp, 4702 SDValue FPOp, 4703 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) { 4704 // Emit a sequence of copyto/copyfrom virtual registers for arguments that 4705 // might overwrite each other in case of tail call optimization. 4706 SmallVector<SDValue, 8> MemOpChains2; 4707 // Do not flag preceding copytoreg stuff together with the following stuff. 4708 InFlag = SDValue(); 4709 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, 4710 MemOpChains2, dl); 4711 if (!MemOpChains2.empty()) 4712 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2); 4713 4714 // Store the return address to the appropriate stack slot. 4715 Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl); 4716 4717 // Emit callseq_end just before tailcall node. 4718 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 4719 DAG.getIntPtrConstant(0, dl, true), InFlag, dl); 4720 InFlag = Chain.getValue(1); 4721 } 4722 4723 // Is this global address that of a function that can be called by name? (as 4724 // opposed to something that must hold a descriptor for an indirect call). 
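// (On 64-bit ELFv1, for instance, the value of a function pointer is the
// address of the function's descriptor rather than of its code; see the
// descriptor handling in PrepareCall() below.)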
4725 static bool isFunctionGlobalAddress(SDValue Callee) {
4726 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
4727 if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
4728 Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
4729 return false;
4730
4731 return G->getGlobal()->getValueType()->isFunctionTy();
4732 }
4733
4734 return false;
4735 }
4736
4737 static unsigned
4738 PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, SDValue &Chain,
4739 SDValue CallSeqStart, const SDLoc &dl, int SPDiff, bool isTailCall,
4740 bool isPatchPoint, bool hasNest,
4741 SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
4742 SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys,
4743 ImmutableCallSite CS, const PPCSubtarget &Subtarget) {
4744 bool isPPC64 = Subtarget.isPPC64();
4745 bool isSVR4ABI = Subtarget.isSVR4ABI();
4746 bool isELFv2ABI = Subtarget.isELFv2ABI();
4747
4748 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4749 NodeTys.push_back(MVT::Other); // Returns a chain
4750 NodeTys.push_back(MVT::Glue); // Returns a flag for retval copy to use.
4751
4752 unsigned CallOpc = PPCISD::CALL;
4753
4754 bool needIndirectCall = true;
4755 if (!isSVR4ABI || !isPPC64)
4756 if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) {
4757 // If this is an absolute destination address, use the munged value.
4758 Callee = SDValue(Dest, 0);
4759 needIndirectCall = false;
4760 }
4761
4762 // References to external symbols may need to be routed through the PLT;
4763 // that is the case on 32-bit ELF targets whenever we cannot prove the
4764 // callee is DSO-local.
4765 const TargetMachine &TM = DAG.getTarget();
4766 const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
4767 const GlobalValue *GV = nullptr;
4768 if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee))
4769 GV = G->getGlobal();
4770 bool Local = TM.shouldAssumeDSOLocal(*Mod, GV);
4771 bool UsePlt = !Local && Subtarget.isTargetELF() && !isPPC64;
4772
4773 if (isFunctionGlobalAddress(Callee)) {
4774 GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee);
4775 // A call to a TLS address is actually an indirect call to a
4776 // thread-specific pointer.
4777 unsigned OpFlags = 0;
4778 if (UsePlt)
4779 OpFlags = PPCII::MO_PLT;
4780
4781 // If the callee is a GlobalAddress/ExternalSymbol node (quite common,
4782 // every direct call is) turn it into a TargetGlobalAddress /
4783 // TargetExternalSymbol node so that legalize doesn't hack it.
4784 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
4785 Callee.getValueType(), 0, OpFlags);
4786 needIndirectCall = false;
4787 }
4788
4789 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
4790 unsigned char OpFlags = 0;
4791
4792 if (UsePlt)
4793 OpFlags = PPCII::MO_PLT;
4794
4795 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(),
4796 OpFlags);
4797 needIndirectCall = false;
4798 }
4799
4800 if (isPatchPoint) {
4801 // We'll form an invalid direct call when lowering a patchpoint; the full
4802 // sequence for an indirect call is complicated, and many of the
4803 // instructions introduced might have side effects (and, thus, can't be
4804 // removed later). The call itself will be removed as soon as the
4805 // argument/return lowering is complete, so the fact that it has the wrong
4806 // kind of operands should not really matter.
4807 needIndirectCall = false;
4808 }
4809
4810 if (needIndirectCall) {
4811 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair
4812 // to do the call; we can't use PPCISD::CALL.
4813 SDValue MTCTROps[] = {Chain, Callee, InFlag};
4814
4815 if (isSVR4ABI && isPPC64 && !isELFv2ABI) {
4816 // Function pointers in the 64-bit SVR4 ABI do not point to the function
4817 // entry point, but to the function descriptor (the function entry point
4818 // address is part of the function descriptor though).
4819 // The function descriptor is a three doubleword structure with the
4820 // following fields: function entry point, TOC base address and
4821 // environment pointer.
4822 // Thus for a call through a function pointer, the following actions need
4823 // to be performed:
4824 // 1. Save the TOC of the caller in the TOC save area of its stack
4825 // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
4826 // 2. Load the address of the function entry point from the function
4827 // descriptor.
4828 // 3. Load the TOC of the callee from the function descriptor into r2.
4829 // 4. Load the environment pointer from the function descriptor into
4830 // r11.
4831 // 5. Branch to the function entry point address.
4832 // 6. On return of the callee, the TOC of the caller needs to be
4833 // restored (this is done in FinishCall()).
4834 //
4835 // The loads are scheduled at the beginning of the call sequence, and the
4836 // register copies are flagged together to ensure that no other
4837 // operations can be scheduled in between. E.g. without flagging the
4838 // copies together, a TOC access in the caller could be scheduled between
4839 // the assignment of the callee TOC and the branch to the callee, which
4840 // results in the TOC access going through the TOC of the callee instead
4841 // of going through the TOC of the caller, which leads to incorrect code.
4842
4843 // Load the address of the function entry point from the function
4844 // descriptor.
4845 SDValue LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-1);
4846 if (LDChain.getValueType() == MVT::Glue)
4847 LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-2);
4848
4849 auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
4850 ? (MachineMemOperand::MODereferenceable |
4851 MachineMemOperand::MOInvariant)
4852 : MachineMemOperand::MONone;
4853
4854 MachinePointerInfo MPI(CS ? CS.getCalledValue() : nullptr);
4855 SDValue LoadFuncPtr = DAG.getLoad(MVT::i64, dl, LDChain, Callee, MPI,
4856 /* Alignment = */ 8, MMOFlags);
4857
4858 // Load environment pointer into r11.
4859 SDValue PtrOff = DAG.getIntPtrConstant(16, dl);
4860 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff);
4861 SDValue LoadEnvPtr =
4862 DAG.getLoad(MVT::i64, dl, LDChain, AddPtr, MPI.getWithOffset(16),
4863 /* Alignment = */ 8, MMOFlags);
4864
4865 SDValue TOCOff = DAG.getIntPtrConstant(8, dl);
4866 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff);
4867 SDValue TOCPtr =
4868 DAG.getLoad(MVT::i64, dl, LDChain, AddTOC, MPI.getWithOffset(8),
4869 /* Alignment = */ 8, MMOFlags);
4870
4871 setUsesTOCBasePtr(DAG);
4872 SDValue TOCVal = DAG.getCopyToReg(Chain, dl, PPC::X2, TOCPtr,
4873 InFlag);
4874 Chain = TOCVal.getValue(0);
4875 InFlag = TOCVal.getValue(1);
4876
4877 // If the function call has an explicit 'nest' parameter, it takes the
4878 // place of the environment pointer.
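// (The 'nest' parameter is LLVM's static chain argument, used e.g. for
// trampolines; on 64-bit ELF it is passed in X11, the same register that
// would otherwise hold the environment pointer, so the environment-pointer
// copy below is skipped when it is present.)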
4879 if (!hasNest) { 4880 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr, 4881 InFlag); 4882 4883 Chain = EnvVal.getValue(0); 4884 InFlag = EnvVal.getValue(1); 4885 } 4886 4887 MTCTROps[0] = Chain; 4888 MTCTROps[1] = LoadFuncPtr; 4889 MTCTROps[2] = InFlag; 4890 } 4891 4892 Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, 4893 makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2)); 4894 InFlag = Chain.getValue(1); 4895 4896 NodeTys.clear(); 4897 NodeTys.push_back(MVT::Other); 4898 NodeTys.push_back(MVT::Glue); 4899 Ops.push_back(Chain); 4900 CallOpc = PPCISD::BCTRL; 4901 Callee.setNode(nullptr); 4902 // Add use of X11 (holding environment pointer) 4903 if (isSVR4ABI && isPPC64 && !isELFv2ABI && !hasNest) 4904 Ops.push_back(DAG.getRegister(PPC::X11, PtrVT)); 4905 // Add CTR register as callee so a bctr can be emitted later. 4906 if (isTailCall) 4907 Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT)); 4908 } 4909 4910 // If this is a direct call, pass the chain and the callee. 4911 if (Callee.getNode()) { 4912 Ops.push_back(Chain); 4913 Ops.push_back(Callee); 4914 } 4915 // If this is a tail call add stack pointer delta. 4916 if (isTailCall) 4917 Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32)); 4918 4919 // Add argument registers to the end of the list so that they are known live 4920 // into the call. 4921 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 4922 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 4923 RegsToPass[i].second.getValueType())); 4924 4925 // All calls, in both the ELF V1 and V2 ABIs, need the TOC register live 4926 // into the call. 4927 if (isSVR4ABI && isPPC64 && !isPatchPoint) { 4928 setUsesTOCBasePtr(DAG); 4929 Ops.push_back(DAG.getRegister(PPC::X2, PtrVT)); 4930 } 4931 4932 return CallOpc; 4933 } 4934 4935 SDValue PPCTargetLowering::LowerCallResult( 4936 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, 4937 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 4938 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 4939 SmallVector<CCValAssign, 16> RVLocs; 4940 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 4941 *DAG.getContext()); 4942 4943 CCRetInfo.AnalyzeCallResult( 4944 Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold) 4945 ? RetCC_PPC_Cold 4946 : RetCC_PPC); 4947 4948 // Copy all of the result registers out of their specified physreg. 
4949 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
4950 CCValAssign &VA = RVLocs[i];
4951 assert(VA.isRegLoc() && "Can only return in registers!");
4952
4953 SDValue Val = DAG.getCopyFromReg(Chain, dl,
4954 VA.getLocReg(), VA.getLocVT(), InFlag);
4955 Chain = Val.getValue(1);
4956 InFlag = Val.getValue(2);
4957
4958 switch (VA.getLocInfo()) {
4959 default: llvm_unreachable("Unknown loc info!");
4960 case CCValAssign::Full: break;
4961 case CCValAssign::AExt:
4962 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
4963 break;
4964 case CCValAssign::ZExt:
4965 Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
4966 DAG.getValueType(VA.getValVT()));
4967 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
4968 break;
4969 case CCValAssign::SExt:
4970 Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
4971 DAG.getValueType(VA.getValVT()));
4972 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
4973 break;
4974 }
4975
4976 InVals.push_back(Val);
4977 }
4978
4979 return Chain;
4980 }
4981
4982 SDValue PPCTargetLowering::FinishCall(
4983 CallingConv::ID CallConv, const SDLoc &dl, bool isTailCall, bool isVarArg,
4984 bool isPatchPoint, bool hasNest, SelectionDAG &DAG,
4985 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue InFlag,
4986 SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
4987 unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
4988 SmallVectorImpl<SDValue> &InVals, ImmutableCallSite CS) const {
4989 std::vector<EVT> NodeTys;
4990 SmallVector<SDValue, 8> Ops;
4991 unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, CallSeqStart, dl,
4992 SPDiff, isTailCall, isPatchPoint, hasNest,
4993 RegsToPass, Ops, NodeTys, CS, Subtarget);
4994
4995 // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls.
4996 if (isVarArg && Subtarget.isSVR4ABI() && !Subtarget.isPPC64())
4997 Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));
4998
4999 // When performing tail call optimization the callee pops its arguments off
5000 // the stack. Account for this here so these bytes can be pushed back on in
5001 // PPCFrameLowering::eliminateCallFramePseudoInstr.
5002 int BytesCalleePops =
5003 (CallConv == CallingConv::Fast &&
5004 getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0;
5005
5006 // Add a register mask operand representing the call-preserved registers.
5007 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
5008 const uint32_t *Mask =
5009 TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
5010 assert(Mask && "Missing call preserved mask for calling convention");
5011 Ops.push_back(DAG.getRegisterMask(Mask));
5012
5013 if (InFlag.getNode())
5014 Ops.push_back(InFlag);
5015
5016 // Emit tail call.
5017 if (isTailCall) {
5018 assert(((Callee.getOpcode() == ISD::Register &&
5019 cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
5020 Callee.getOpcode() == ISD::TargetExternalSymbol ||
5021 Callee.getOpcode() == ISD::TargetGlobalAddress ||
5022 isa<ConstantSDNode>(Callee)) &&
5023 "Expecting a global address, external symbol, absolute value or register");
5024
5025 DAG.getMachineFunction().getFrameInfo().setHasTailCall();
5026 return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, Ops);
5027 }
5028
5029 // Add a NOP immediately after the branch instruction when using the 64-bit
5030 // SVR4 ABI. At link time, if the caller and callee are in different modules
5031 // and thus have different TOCs, the call will be replaced with a call to a
5032 // stub function which saves the current TOC, loads the TOC of the callee and
5033 // branches to the callee. The NOP will be replaced with a load instruction
5034 // which restores the TOC of the caller from the TOC save slot of the current
5035 // stack frame. If caller and callee belong to the same module (and have the
5036 // same TOC), the NOP will remain unchanged.
5037
5038 MachineFunction &MF = DAG.getMachineFunction();
5039 if (!isTailCall && Subtarget.isSVR4ABI() && Subtarget.isPPC64() &&
5040 !isPatchPoint) {
5041 if (CallOpc == PPCISD::BCTRL) {
5042 // This is a call through a function pointer.
5043 // Restore the caller TOC from the save area into R2.
5044 // See PrepareCall() for more information about calls through function
5045 // pointers in the 64-bit SVR4 ABI.
5046 // We are using a target-specific load with r2 hard coded, because the
5047 // result of a target-independent load would never go directly into r2,
5048 // since r2 is a reserved register (which prevents the register allocator
5049 // from allocating it), resulting in an additional register being
5050 // allocated and an unnecessary move instruction being generated.
5051 CallOpc = PPCISD::BCTRL_LOAD_TOC;
5052
5053 EVT PtrVT = getPointerTy(DAG.getDataLayout());
5054 SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT);
5055 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
5056 SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
5057 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff);
5058
5059 // The address needs to go after the chain input but before the flag (or
5060 // any other variadic arguments).
5061 Ops.insert(std::next(Ops.begin()), AddTOC);
5062 } else if (CallOpc == PPCISD::CALL &&
5063 !callsShareTOCBase(&MF.getFunction(), Callee, DAG.getTarget())) {
5064 // Otherwise insert NOP for non-local calls.
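// Illustratively (an assumption about typical linker behavior, not code we
// emit here), the resulting sequence is roughly:
//   bl callee
//   nop        # rewritten to "ld r2, N(r1)" if the linker inserts a stub,
//              # where N is the TOC save offset (40 under ELFv1, 24 under
//              # ELFv2)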
5065 CallOpc = PPCISD::CALL_NOP; 5066 } 5067 } 5068 5069 Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops); 5070 InFlag = Chain.getValue(1); 5071 5072 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 5073 DAG.getIntPtrConstant(BytesCalleePops, dl, true), 5074 InFlag, dl); 5075 if (!Ins.empty()) 5076 InFlag = Chain.getValue(1); 5077 5078 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, 5079 Ins, dl, DAG, InVals); 5080 } 5081 5082 SDValue 5083 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 5084 SmallVectorImpl<SDValue> &InVals) const { 5085 SelectionDAG &DAG = CLI.DAG; 5086 SDLoc &dl = CLI.DL; 5087 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 5088 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 5089 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 5090 SDValue Chain = CLI.Chain; 5091 SDValue Callee = CLI.Callee; 5092 bool &isTailCall = CLI.IsTailCall; 5093 CallingConv::ID CallConv = CLI.CallConv; 5094 bool isVarArg = CLI.IsVarArg; 5095 bool isPatchPoint = CLI.IsPatchPoint; 5096 ImmutableCallSite CS = CLI.CS; 5097 5098 if (isTailCall) { 5099 if (Subtarget.useLongCalls() && !(CS && CS.isMustTailCall())) 5100 isTailCall = false; 5101 else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) 5102 isTailCall = 5103 IsEligibleForTailCallOptimization_64SVR4(Callee, CallConv, CS, 5104 isVarArg, Outs, Ins, DAG); 5105 else 5106 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, 5107 Ins, DAG); 5108 if (isTailCall) { 5109 ++NumTailCalls; 5110 if (!getTargetMachine().Options.GuaranteedTailCallOpt) 5111 ++NumSiblingCalls; 5112 5113 assert(isa<GlobalAddressSDNode>(Callee) && 5114 "Callee should be an llvm::Function object."); 5115 DEBUG( 5116 const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal(); 5117 const unsigned Width = 80 - strlen("TCO caller: ") 5118 - strlen(", callee linkage: 0, 0"); 5119 dbgs() << "TCO caller: " 5120 << left_justify(DAG.getMachineFunction().getName(), Width) 5121 << ", callee linkage: " 5122 << GV->getVisibility() << ", " << GV->getLinkage() << "\n" 5123 ); 5124 } 5125 } 5126 5127 if (!isTailCall && CS && CS.isMustTailCall()) 5128 report_fatal_error("failed to perform tail call elimination on a call " 5129 "site marked musttail"); 5130 5131 // When long calls (i.e. indirect calls) are always used, calls are always 5132 // made via function pointer. If we have a function name, first translate it 5133 // into a pointer. 
5134 if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
5135 !isTailCall)
5136 Callee = LowerGlobalAddress(Callee, DAG);
5137
5138 if (Subtarget.isSVR4ABI()) {
5139 if (Subtarget.isPPC64())
5140 return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
5141 isTailCall, isPatchPoint, Outs, OutVals, Ins,
5142 dl, DAG, InVals, CS);
5143 else
5144 return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg,
5145 isTailCall, isPatchPoint, Outs, OutVals, Ins,
5146 dl, DAG, InVals, CS);
5147 }
5148
5149 return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
5150 isTailCall, isPatchPoint, Outs, OutVals, Ins,
5151 dl, DAG, InVals, CS);
5152 }
5153
5154 SDValue PPCTargetLowering::LowerCall_32SVR4(
5155 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
5156 bool isTailCall, bool isPatchPoint,
5157 const SmallVectorImpl<ISD::OutputArg> &Outs,
5158 const SmallVectorImpl<SDValue> &OutVals,
5159 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5160 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5161 ImmutableCallSite CS) const {
5162 // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
5163 // of the 32-bit SVR4 ABI stack frame layout.
5164
5165 assert((CallConv == CallingConv::C ||
5166 CallConv == CallingConv::Cold ||
5167 CallConv == CallingConv::Fast) && "Unknown calling convention!");
5168
5169 unsigned PtrByteSize = 4;
5170
5171 MachineFunction &MF = DAG.getMachineFunction();
5172
5173 // Mark this function as potentially containing a function that contains a
5174 // tail call. As a consequence, the frame pointer will be used for dynamic
5175 // allocation and for restoring the caller's stack pointer in this function's
5176 // epilog. This is done because the tail-called function might overwrite the
5177 // value in this function's (MF) stack pointer stack slot 0(SP).
5178 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5179 CallConv == CallingConv::Fast)
5180 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5181
5182 // Count how many bytes are to be pushed on the stack, including the linkage
5183 // area, parameter list area and the part of the local variable space which
5184 // contains copies of aggregates which are passed by value.
5185
5186 // Assign locations to all of the outgoing arguments.
5187 SmallVector<CCValAssign, 16> ArgLocs;
5188 PPCCCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
5189
5190 // Reserve space for the linkage area on the stack.
5191 CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
5192 PtrByteSize);
5193 if (useSoftFloat())
5194 CCInfo.PreAnalyzeCallOperands(Outs);
5195
5196 if (isVarArg) {
5197 // Handle fixed and variable vector arguments differently.
5198 // Fixed vector arguments go into registers as long as registers are
5199 // available. Variable vector arguments always go into memory.
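// For illustration (hypothetical IR, not taken from this file): given
//   declare void @f(<4 x i32>, ...)
//   call void (<4 x i32>, ...) @f(<4 x i32> %v1, <4 x i32> %v2)
// the fixed argument %v1 may be assigned a vector register by CC_PPC32_SVR4,
// while the variadic %v2 is always assigned a memory location by
// CC_PPC32_SVR4_VarArg below.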
5200 unsigned NumArgs = Outs.size();
5201
5202 for (unsigned i = 0; i != NumArgs; ++i) {
5203 MVT ArgVT = Outs[i].VT;
5204 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
5205 bool Result;
5206
5207 if (Outs[i].IsFixed) {
5208 Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
5209 CCInfo);
5210 } else {
5211 Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
5212 ArgFlags, CCInfo);
5213 }
5214
5215 if (Result) {
5216 #ifndef NDEBUG
5217 errs() << "Call operand #" << i << " has unhandled type "
5218 << EVT(ArgVT).getEVTString() << "\n";
5219 #endif
5220 llvm_unreachable(nullptr);
5221 }
5222 }
5223 } else {
5224 // All arguments are treated the same.
5225 CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
5226 }
5227 CCInfo.clearWasPPCF128();
5228
5229 // Assign locations to all of the outgoing aggregate by value arguments.
5230 SmallVector<CCValAssign, 16> ByValArgLocs;
5231 CCState CCByValInfo(CallConv, isVarArg, MF, ByValArgLocs, *DAG.getContext());
5232
5233 // Reserve stack space for the allocations in CCInfo.
5234 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
5235
5236 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
5237
5238 // Size of the linkage area, parameter list area and the part of the local
5239 // variable space where copies of aggregates which are passed by value are
5240 // stored.
5241 unsigned NumBytes = CCByValInfo.getNextStackOffset();
5242
5243 // Calculate by how many bytes the stack has to be adjusted in case of tail
5244 // call optimization.
5245 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
5246
5247 // Adjust the stack pointer for the new arguments...
5248 // These operations are automatically eliminated by the prolog/epilog pass.
5249 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5250 SDValue CallSeqStart = Chain;
5251
5252 // Load the return address and frame pointer so they can be moved somewhere
5253 // else later.
5254 SDValue LROp, FPOp;
5255 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5256
5257 // Set up a copy of the stack pointer for use loading and storing any
5258 // arguments that may not fit in the registers available for argument
5259 // passing.
5260 SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5261
5262 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5263 SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5264 SmallVector<SDValue, 8> MemOpChains;
5265
5266 bool seenFloatArg = false;
5267 // Walk the register/memloc assignments, inserting copies/loads.
5268 for (unsigned i = 0, j = 0, e = ArgLocs.size();
5269 i != e;
5270 ++i) {
5271 CCValAssign &VA = ArgLocs[i];
5272 SDValue Arg = OutVals[i];
5273 ISD::ArgFlagsTy Flags = Outs[i].Flags;
5274
5275 if (Flags.isByVal()) {
5276 // Argument is an aggregate which is passed by value, thus we need to
5277 // create a copy of it in the local variable space of the current stack
5278 // frame (which is the stack frame of the caller) and pass the address of
5279 // this copy to the callee.
5280 assert((j < ByValArgLocs.size()) && "Index out of bounds!");
5281 CCValAssign &ByValVA = ByValArgLocs[j++];
5282 assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
5283
5284 // Memory reserved in the local variable space of the caller's stack frame.
5285 unsigned LocMemOffset = ByValVA.getLocMemOffset(); 5286 5287 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 5288 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()), 5289 StackPtr, PtrOff); 5290 5291 // Create a copy of the argument in the local area of the current 5292 // stack frame. 5293 SDValue MemcpyCall = 5294 CreateCopyOfByValArgument(Arg, PtrOff, 5295 CallSeqStart.getNode()->getOperand(0), 5296 Flags, DAG, dl); 5297 5298 // This must go outside the CALLSEQ_START..END. 5299 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0, 5300 SDLoc(MemcpyCall)); 5301 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 5302 NewCallSeqStart.getNode()); 5303 Chain = CallSeqStart = NewCallSeqStart; 5304 5305 // Pass the address of the aggregate copy on the stack either in a 5306 // physical register or in the parameter list area of the current stack 5307 // frame to the callee. 5308 Arg = PtrOff; 5309 } 5310 5311 if (VA.isRegLoc()) { 5312 if (Arg.getValueType() == MVT::i1) 5313 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Arg); 5314 5315 seenFloatArg |= VA.getLocVT().isFloatingPoint(); 5316 // Put argument in a physical register. 5317 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 5318 } else { 5319 // Put argument in the parameter list area of the current stack frame. 5320 assert(VA.isMemLoc()); 5321 unsigned LocMemOffset = VA.getLocMemOffset(); 5322 5323 if (!isTailCall) { 5324 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 5325 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()), 5326 StackPtr, PtrOff); 5327 5328 MemOpChains.push_back( 5329 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo())); 5330 } else { 5331 // Calculate and remember argument location. 5332 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset, 5333 TailCallArguments); 5334 } 5335 } 5336 } 5337 5338 if (!MemOpChains.empty()) 5339 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 5340 5341 // Build a sequence of copy-to-reg nodes chained together with token chain 5342 // and flag operands which copy the outgoing args into the appropriate regs. 5343 SDValue InFlag; 5344 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 5345 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 5346 RegsToPass[i].second, InFlag); 5347 InFlag = Chain.getValue(1); 5348 } 5349 5350 // Set CR bit 6 to true if this is a vararg call with floating args passed in 5351 // registers. 5352 if (isVarArg) { 5353 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); 5354 SDValue Ops[] = { Chain, InFlag }; 5355 5356 Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET, 5357 dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1)); 5358 5359 InFlag = Chain.getValue(1); 5360 } 5361 5362 if (isTailCall) 5363 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp, 5364 TailCallArguments); 5365 5366 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, 5367 /* unused except on PPC64 ELFv1 */ false, DAG, 5368 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff, 5369 NumBytes, Ins, InVals, CS); 5370 } 5371 5372 // Copy an argument into memory, being careful to do this outside the 5373 // call sequence for the call to which the argument belongs. 
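// (A memcpy may itself be lowered to a libcall, and CALLSEQ regions cannot
// nest, so the copy is chained in ahead of this call's CALLSEQ_START by
// rebuilding CALLSEQ_START on top of the memcpy node and replacing all uses
// of the old node; see the body below.)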
5374 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
5375 SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
5376 SelectionDAG &DAG, const SDLoc &dl) const {
5377 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
5378 CallSeqStart.getNode()->getOperand(0),
5379 Flags, DAG, dl);
5380 // The MEMCPY must go outside the CALLSEQ_START..END.
5381 int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
5382 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
5383 SDLoc(MemcpyCall));
5384 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5385 NewCallSeqStart.getNode());
5386 return NewCallSeqStart;
5387 }
5388
5389 SDValue PPCTargetLowering::LowerCall_64SVR4(
5390 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
5391 bool isTailCall, bool isPatchPoint,
5392 const SmallVectorImpl<ISD::OutputArg> &Outs,
5393 const SmallVectorImpl<SDValue> &OutVals,
5394 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5395 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5396 ImmutableCallSite CS) const {
5397 bool isELFv2ABI = Subtarget.isELFv2ABI();
5398 bool isLittleEndian = Subtarget.isLittleEndian();
5399 unsigned NumOps = Outs.size();
5400 bool hasNest = false;
5401 bool IsSibCall = false;
5402
5403 EVT PtrVT = getPointerTy(DAG.getDataLayout());
5404 unsigned PtrByteSize = 8;
5405
5406 MachineFunction &MF = DAG.getMachineFunction();
5407
5408 if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
5409 IsSibCall = true;
5410
5411 // Mark this function as potentially containing a function that contains a
5412 // tail call. As a consequence, the frame pointer will be used for dynamic
5413 // allocation and for restoring the caller's stack pointer in this function's
5414 // epilog. This is done because the tail-called function might overwrite the
5415 // value in this function's (MF) stack pointer stack slot 0(SP).
5416 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5417 CallConv == CallingConv::Fast)
5418 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5419
5420 assert(!(CallConv == CallingConv::Fast && isVarArg) &&
5421 "fastcc not supported on varargs functions");
5422
5423 // Count how many bytes are to be pushed on the stack, including the linkage
5424 // area, and parameter passing area. On ELFv1, the linkage area is 48 bytes
5425 // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
5426 // area is 32 bytes reserved space for [SP][CR][LR][TOC].
5427 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
5428 unsigned NumBytes = LinkageSize;
5429 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
5430 unsigned &QFPR_idx = FPR_idx;
5431
5432 static const MCPhysReg GPR[] = {
5433 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
5434 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
5435 };
5436 static const MCPhysReg VR[] = {
5437 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
5438 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
5439 };
5440
5441 const unsigned NumGPRs = array_lengthof(GPR);
5442 const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
5443 const unsigned NumVRs = array_lengthof(VR);
5444 const unsigned NumQFPRs = NumFPRs;
5445
5446 // On ELFv2, we can avoid allocating the parameter area if all the arguments
5447 // can be passed to the callee in registers.
5448 // For the fast calling convention, there is another check below.
5449 // Note: We should keep this consistent with LowerFormalArguments_64SVR4().
5450 bool HasParameterArea = !isELFv2ABI || isVarArg || CallConv == CallingConv::Fast;
5451 if (!HasParameterArea) {
5452 unsigned ParamAreaSize = NumGPRs * PtrByteSize;
5453 unsigned AvailableFPRs = NumFPRs;
5454 unsigned AvailableVRs = NumVRs;
5455 unsigned NumBytesTmp = NumBytes;
5456 for (unsigned i = 0; i != NumOps; ++i) {
5457 if (Outs[i].Flags.isNest()) continue;
5458 if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags,
5459 PtrByteSize, LinkageSize, ParamAreaSize,
5460 NumBytesTmp, AvailableFPRs, AvailableVRs,
5461 Subtarget.hasQPX()))
5462 HasParameterArea = true;
5463 }
5464 }
5465
5466 // When using the fast calling convention, we don't provide backing for
5467 // arguments that will be in registers.
5468 unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
5469
5470 // Add up all the space actually used.
5471 for (unsigned i = 0; i != NumOps; ++i) {
5472 ISD::ArgFlagsTy Flags = Outs[i].Flags;
5473 EVT ArgVT = Outs[i].VT;
5474 EVT OrigVT = Outs[i].ArgVT;
5475
5476 if (Flags.isNest())
5477 continue;
5478
5479 if (CallConv == CallingConv::Fast) {
5480 if (Flags.isByVal())
5481 NumGPRsUsed += (Flags.getByValSize()+7)/8;
5482 else
5483 switch (ArgVT.getSimpleVT().SimpleTy) {
5484 default: llvm_unreachable("Unexpected ValueType for argument!");
5485 case MVT::i1:
5486 case MVT::i32:
5487 case MVT::i64:
5488 if (++NumGPRsUsed <= NumGPRs)
5489 continue;
5490 break;
5491 case MVT::v4i32:
5492 case MVT::v8i16:
5493 case MVT::v16i8:
5494 case MVT::v2f64:
5495 case MVT::v2i64:
5496 case MVT::v1i128:
5497 if (++NumVRsUsed <= NumVRs)
5498 continue;
5499 break;
5500 case MVT::v4f32:
5501 // When using QPX, this is handled like a FP register, otherwise, it
5502 // is an Altivec register.
5503 if (Subtarget.hasQPX()) {
5504 if (++NumFPRsUsed <= NumFPRs)
5505 continue;
5506 } else {
5507 if (++NumVRsUsed <= NumVRs)
5508 continue;
5509 }
5510 break;
5511 case MVT::f32:
5512 case MVT::f64:
5513 case MVT::v4f64: // QPX
5514 case MVT::v4i1: // QPX
5515 if (++NumFPRsUsed <= NumFPRs)
5516 continue;
5517 break;
5518 }
5519 }
5520
5521 /* Respect alignment of argument on the stack. */
5522 unsigned Align =
5523 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
5524 NumBytes = ((NumBytes + Align - 1) / Align) * Align;
5525
5526 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
5527 if (Flags.isInConsecutiveRegsLast())
5528 NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
5529 }
5530
5531 unsigned NumBytesActuallyUsed = NumBytes;
5532
5533 // In the old ELFv1 ABI, the prolog code of the callee may store up to 8 GPR
5534 // argument registers to the stack, allowing va_start to index over them in
5535 // memory if the function is varargs. Because we cannot tell if this is
5536 // needed on the caller side, we have to conservatively assume that it is
5537 // needed. As such, make sure we have at least enough stack space for the
5538 // caller to store the 8 GPRs.
5539 // In the ELFv2 ABI, we allocate the parameter area iff a callee
5540 // really requires memory operands, e.g. a vararg function.
5541 if (HasParameterArea)
5542 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
5543 else
5544 NumBytes = LinkageSize;
5545
5546 // Tail call needs the stack to be aligned.
5547 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5548 CallConv == CallingConv::Fast)
5549 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
5550
5551 int SPDiff = 0;
5552
5553 // Calculate by how many bytes the stack has to be adjusted in case of tail
5554 // call optimization.
5555 if (!IsSibCall)
5556 SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
5557
5558 // To protect arguments on the stack from being clobbered in a tail call,
5559 // force all the loads to happen before doing any other lowering.
5560 if (isTailCall)
5561 Chain = DAG.getStackArgumentTokenFactor(Chain);
5562
5563 // Adjust the stack pointer for the new arguments...
5564 // These operations are automatically eliminated by the prolog/epilog pass.
5565 if (!IsSibCall)
5566 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5567 SDValue CallSeqStart = Chain;
5568
5569 // Load the return address and frame pointer so they can be moved somewhere
5570 // else later.
5571 SDValue LROp, FPOp;
5572 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5573
5574 // Set up a copy of the stack pointer for use loading and storing any
5575 // arguments that may not fit in the registers available for argument
5576 // passing.
5577 SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
5578
5579 // Figure out which arguments are going to go in registers, and which in
5580 // memory. Also, if this is a vararg function, floating point operations
5581 // must be stored to our stack, and loaded into integer regs as well, if
5582 // any integer regs are available for argument passing.
5583 unsigned ArgOffset = LinkageSize;
5584
5585 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5586 SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5587
5588 SmallVector<SDValue, 8> MemOpChains;
5589 for (unsigned i = 0; i != NumOps; ++i) {
5590 SDValue Arg = OutVals[i];
5591 ISD::ArgFlagsTy Flags = Outs[i].Flags;
5592 EVT ArgVT = Outs[i].VT;
5593 EVT OrigVT = Outs[i].ArgVT;
5594
5595 // PtrOff will be used to store the current argument to the stack if a
5596 // register cannot be found for it.
5597 SDValue PtrOff;
5598
5599 // We re-align the argument offset for each argument, except when using the
5600 // fast calling convention, where we need to make sure we do so only when
5601 // we'll actually use a stack slot.
5602 auto ComputePtrOff = [&]() {
5603 /* Respect alignment of argument on the stack. */
5604 unsigned Align =
5605 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
5606 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
5607
5608 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
5609
5610 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
5611 };
5612
5613 if (CallConv != CallingConv::Fast) {
5614 ComputePtrOff();
5615
5616 /* Compute GPR index associated with argument offset. */
5617 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
5618 GPR_idx = std::min(GPR_idx, NumGPRs);
5619 }
5620
5621 // Promote integers to 64-bit values.
5622 if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
5623 // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
5624 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
5625 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
5626 }
5627
5628 // FIXME: memcpy is used way more than necessary. Correctness first.
5629 // Note: "by value" is code for passing a structure by value, not
5630 // basic types.
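// Illustration (an aside, assuming a big-endian target): a 2-byte aggregate
// passed in a GPR must occupy the least-significant end of the 8-byte
// register, so the copy below is placed (PtrByteSize - Size) bytes into the
// doubleword slot before the whole slot is (re)loaded.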
5631 if (Flags.isByVal()) { 5632 // Note: Size includes alignment padding, so 5633 // struct x { short a; char b; } 5634 // will have Size = 4. With #pragma pack(1), it will have Size = 3. 5635 // These are the proper values we need for right-justifying the 5636 // aggregate in a parameter register. 5637 unsigned Size = Flags.getByValSize(); 5638 5639 // An empty aggregate parameter takes up no storage and no 5640 // registers. 5641 if (Size == 0) 5642 continue; 5643 5644 if (CallConv == CallingConv::Fast) 5645 ComputePtrOff(); 5646 5647 // All aggregates smaller than 8 bytes must be passed right-justified. 5648 if (Size==1 || Size==2 || Size==4) { 5649 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32); 5650 if (GPR_idx != NumGPRs) { 5651 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 5652 MachinePointerInfo(), VT); 5653 MemOpChains.push_back(Load.getValue(1)); 5654 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5655 5656 ArgOffset += PtrByteSize; 5657 continue; 5658 } 5659 } 5660 5661 if (GPR_idx == NumGPRs && Size < 8) { 5662 SDValue AddPtr = PtrOff; 5663 if (!isLittleEndian) { 5664 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 5665 PtrOff.getValueType()); 5666 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5667 } 5668 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5669 CallSeqStart, 5670 Flags, DAG, dl); 5671 ArgOffset += PtrByteSize; 5672 continue; 5673 } 5674 // Copy entire object into memory. There are cases where gcc-generated 5675 // code assumes it is there, even if it could be put entirely into 5676 // registers. (This is not what the doc says.) 5677 5678 // FIXME: The above statement is likely due to a misunderstanding of the 5679 // documents. All arguments must be copied into the parameter area BY 5680 // THE CALLEE in the event that the callee takes the address of any 5681 // formal argument. That has not yet been implemented. However, it is 5682 // reasonable to use the stack area as a staging area for the register 5683 // load. 5684 5685 // Skip this for small aggregates, as we will use the same slot for a 5686 // right-justified copy, below. 5687 if (Size >= 8) 5688 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 5689 CallSeqStart, 5690 Flags, DAG, dl); 5691 5692 // When a register is available, pass a small aggregate right-justified. 5693 if (Size < 8 && GPR_idx != NumGPRs) { 5694 // The easiest way to get this right-justified in a register 5695 // is to copy the structure into the rightmost portion of a 5696 // local variable slot, then load the whole slot into the 5697 // register. 5698 // FIXME: The memcpy seems to produce pretty awful code for 5699 // small aggregates, particularly for packed ones. 5700 // FIXME: It would be preferable to use the slot in the 5701 // parameter save area instead of a new local variable. 5702 SDValue AddPtr = PtrOff; 5703 if (!isLittleEndian) { 5704 SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType()); 5705 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5706 } 5707 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5708 CallSeqStart, 5709 Flags, DAG, dl); 5710 5711 // Load the slot into the register. 5712 SDValue Load = 5713 DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo()); 5714 MemOpChains.push_back(Load.getValue(1)); 5715 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5716 5717 // Done with this argument. 
5718 ArgOffset += PtrByteSize; 5719 continue; 5720 } 5721 5722 // For aggregates larger than PtrByteSize, copy the pieces of the 5723 // object that fit into registers from the parameter save area. 5724 for (unsigned j=0; j<Size; j+=PtrByteSize) { 5725 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 5726 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 5727 if (GPR_idx != NumGPRs) { 5728 SDValue Load = 5729 DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo()); 5730 MemOpChains.push_back(Load.getValue(1)); 5731 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5732 ArgOffset += PtrByteSize; 5733 } else { 5734 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 5735 break; 5736 } 5737 } 5738 continue; 5739 } 5740 5741 switch (Arg.getSimpleValueType().SimpleTy) { 5742 default: llvm_unreachable("Unexpected ValueType for argument!"); 5743 case MVT::i1: 5744 case MVT::i32: 5745 case MVT::i64: 5746 if (Flags.isNest()) { 5747 // The 'nest' parameter, if any, is passed in R11. 5748 RegsToPass.push_back(std::make_pair(PPC::X11, Arg)); 5749 hasNest = true; 5750 break; 5751 } 5752 5753 // These can be scalar arguments or elements of an integer array type 5754 // passed directly. Clang may use those instead of "byval" aggregate 5755 // types to avoid forcing arguments to memory unnecessarily. 5756 if (GPR_idx != NumGPRs) { 5757 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 5758 } else { 5759 if (CallConv == CallingConv::Fast) 5760 ComputePtrOff(); 5761 5762 assert(HasParameterArea && 5763 "Parameter area must exist to pass an argument in memory."); 5764 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5765 true, isTailCall, false, MemOpChains, 5766 TailCallArguments, dl); 5767 if (CallConv == CallingConv::Fast) 5768 ArgOffset += PtrByteSize; 5769 } 5770 if (CallConv != CallingConv::Fast) 5771 ArgOffset += PtrByteSize; 5772 break; 5773 case MVT::f32: 5774 case MVT::f64: { 5775 // These can be scalar arguments or elements of a float array type 5776 // passed directly. The latter are used to implement ELFv2 homogenous 5777 // float aggregates. 5778 5779 // Named arguments go into FPRs first, and once they overflow, the 5780 // remaining arguments go into GPRs and then the parameter save area. 5781 // Unnamed arguments for vararg functions always go to GPRs and 5782 // then the parameter save area. For now, put all arguments to vararg 5783 // routines always in both locations (FPR *and* GPR or stack slot). 5784 bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs; 5785 bool NeededLoad = false; 5786 5787 // First load the argument into the next available FPR. 5788 if (FPR_idx != NumFPRs) 5789 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 5790 5791 // Next, load the argument into GPR or stack slot if needed. 5792 if (!NeedGPROrStack) 5793 ; 5794 else if (GPR_idx != NumGPRs && CallConv != CallingConv::Fast) { 5795 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8 5796 // once we support fp <-> gpr moves. 5797 5798 // In the non-vararg case, this can only ever happen in the 5799 // presence of f32 array types, since otherwise we never run 5800 // out of FPRs before running out of GPRs. 5801 SDValue ArgVal; 5802 5803 // Double values are always passed in a single GPR. 5804 if (Arg.getValueType() != MVT::f32) { 5805 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg); 5806 5807 // Non-array float values are extended and passed in a GPR. 
5808 } else if (!Flags.isInConsecutiveRegs()) {
5809 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
5810 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
5811
5812 // If we have an array of floats, we collect every odd element
5813 // together with its predecessor into one GPR.
5814 } else if (ArgOffset % PtrByteSize != 0) {
5815 SDValue Lo, Hi;
5816 Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
5817 Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
5818 if (!isLittleEndian)
5819 std::swap(Lo, Hi);
5820 ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
5821
5822 // The final element, if even, goes into the first half of a GPR.
5823 } else if (Flags.isInConsecutiveRegsLast()) {
5824 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
5825 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
5826 if (!isLittleEndian)
5827 ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
5828 DAG.getConstant(32, dl, MVT::i32));
5829
5830 // Non-final even elements are skipped; they will be handled
5831 // together with the subsequent argument on the next go-around.
5832 } else
5833 ArgVal = SDValue();
5834
5835 if (ArgVal.getNode())
5836 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
5837 } else {
5838 if (CallConv == CallingConv::Fast)
5839 ComputePtrOff();
5840
5841 // Single-precision floating-point values are mapped to the
5842 // second (rightmost) word of the stack doubleword.
5843 if (Arg.getValueType() == MVT::f32 &&
5844 !isLittleEndian && !Flags.isInConsecutiveRegs()) {
5845 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
5846 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
5847 }
5848
5849 assert(HasParameterArea &&
5850 "Parameter area must exist to pass an argument in memory.");
5851 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
5852 true, isTailCall, false, MemOpChains,
5853 TailCallArguments, dl);
5854
5855 NeededLoad = true;
5856 }
5857 // When passing an array of floats, the array occupies consecutive
5858 // space in the argument area; only round up to the next doubleword
5859 // at the end of the array. Otherwise, each float takes 8 bytes.
5860 if (CallConv != CallingConv::Fast || NeededLoad) {
5861 ArgOffset += (Arg.getValueType() == MVT::f32 &&
5862 Flags.isInConsecutiveRegs()) ? 4 : 8;
5863 if (Flags.isInConsecutiveRegsLast())
5864 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
5865 }
5866 break;
5867 }
5868 case MVT::v4f32:
5869 case MVT::v4i32:
5870 case MVT::v8i16:
5871 case MVT::v16i8:
5872 case MVT::v2f64:
5873 case MVT::v2i64:
5874 case MVT::v1i128:
5875 if (!Subtarget.hasQPX()) {
5876 // These can be scalar arguments or elements of a vector array type
5877 // passed directly. The latter are used to implement ELFv2 homogeneous
5878 // vector aggregates.
5879
5880 // For a varargs call, named arguments go into VRs or on the stack as
5881 // usual; unnamed arguments always go to the stack or the corresponding
5882 // GPRs when within range. For now, we always put the value in both
5883 // locations (or even all three).
5884 if (isVarArg) {
5885 assert(HasParameterArea &&
5886 "Parameter area must exist if we have a varargs call.");
5887 // We could elide this store in the case where the object fits
5888 // entirely in R registers. Maybe later.
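// The store-then-reload sequence below is deliberate (a sketch of the
// scheme, not mandated by the ABI text): a varargs vector is (1) stored
// to its parameter save area slot, (2) reloaded into a VR if one is
// still free, and (3) reloaded piecewise into GPRs, because the callee's
// va_arg implementation may fetch the value from any of these locations.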
5889 SDValue Store = 5890 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 5891 MemOpChains.push_back(Store); 5892 if (VR_idx != NumVRs) { 5893 SDValue Load = 5894 DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo()); 5895 MemOpChains.push_back(Load.getValue(1)); 5896 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 5897 } 5898 ArgOffset += 16; 5899 for (unsigned i=0; i<16; i+=PtrByteSize) { 5900 if (GPR_idx == NumGPRs) 5901 break; 5902 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5903 DAG.getConstant(i, dl, PtrVT)); 5904 SDValue Load = 5905 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo()); 5906 MemOpChains.push_back(Load.getValue(1)); 5907 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5908 } 5909 break; 5910 } 5911 5912 // Non-varargs Altivec params go into VRs or on the stack. 5913 if (VR_idx != NumVRs) { 5914 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 5915 } else { 5916 if (CallConv == CallingConv::Fast) 5917 ComputePtrOff(); 5918 5919 assert(HasParameterArea && 5920 "Parameter area must exist to pass an argument in memory."); 5921 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5922 true, isTailCall, true, MemOpChains, 5923 TailCallArguments, dl); 5924 if (CallConv == CallingConv::Fast) 5925 ArgOffset += 16; 5926 } 5927 5928 if (CallConv != CallingConv::Fast) 5929 ArgOffset += 16; 5930 break; 5931 } // not QPX 5932 5933 assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 && 5934 "Invalid QPX parameter type"); 5935 5936 /* fall through */ 5937 case MVT::v4f64: 5938 case MVT::v4i1: { 5939 bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32; 5940 if (isVarArg) { 5941 assert(HasParameterArea && 5942 "Parameter area must exist if we have a varargs call."); 5943 // We could elide this store in the case where the object fits 5944 // entirely in R registers. Maybe later. 5945 SDValue Store = 5946 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 5947 MemOpChains.push_back(Store); 5948 if (QFPR_idx != NumQFPRs) { 5949 SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, Store, 5950 PtrOff, MachinePointerInfo()); 5951 MemOpChains.push_back(Load.getValue(1)); 5952 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load)); 5953 } 5954 ArgOffset += (IsF32 ? 16 : 32); 5955 for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) { 5956 if (GPR_idx == NumGPRs) 5957 break; 5958 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5959 DAG.getConstant(i, dl, PtrVT)); 5960 SDValue Load = 5961 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo()); 5962 MemOpChains.push_back(Load.getValue(1)); 5963 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5964 } 5965 break; 5966 } 5967 5968 // Non-varargs QPX params go into registers or on the stack. 5969 if (QFPR_idx != NumQFPRs) { 5970 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg)); 5971 } else { 5972 if (CallConv == CallingConv::Fast) 5973 ComputePtrOff(); 5974 5975 assert(HasParameterArea && 5976 "Parameter area must exist to pass an argument in memory."); 5977 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5978 true, isTailCall, true, MemOpChains, 5979 TailCallArguments, dl); 5980 if (CallConv == CallingConv::Fast) 5981 ArgOffset += (IsF32 ? 16 : 32); 5982 } 5983 5984 if (CallConv != CallingConv::Fast) 5985 ArgOffset += (IsF32 ? 
16 : 32);
5986 break;
5987 }
5988 }
5989 }
5990
5991 assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
5992 "mismatch in size of parameter area");
5993 (void)NumBytesActuallyUsed;
5994
5995 if (!MemOpChains.empty())
5996 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
5997
5998 // Check if this is an indirect call (MTCTR/BCTRL).
5999 // See PrepareCall() for more information about calls through function
6000 // pointers in the 64-bit SVR4 ABI.
6001 if (!isTailCall && !isPatchPoint &&
6002 !isFunctionGlobalAddress(Callee) &&
6003 !isa<ExternalSymbolSDNode>(Callee)) {
6004 // Load r2 into a virtual register and store it to the TOC save area.
6005 setUsesTOCBasePtr(DAG);
6006 SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
6007 // TOC save area offset.
6008 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
6009 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
6010 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6011 Chain = DAG.getStore(
6012 Val.getValue(1), dl, Val, AddPtr,
6013 MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
6014 // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
6015 // This does not mean the MTCTR instruction must use R12; it's easier
6016 // to model this as an extra parameter, so do that.
6017 if (isELFv2ABI && !isPatchPoint)
6018 RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
6019 }
6020
6021 // Build a sequence of copy-to-reg nodes chained together with token chain
6022 // and flag operands which copy the outgoing args into the appropriate regs.
6023 SDValue InFlag;
6024 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6025 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6026 RegsToPass[i].second, InFlag);
6027 InFlag = Chain.getValue(1);
6028 }
6029
6030 if (isTailCall && !IsSibCall)
6031 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6032 TailCallArguments);
6033
6034 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, hasNest,
6035 DAG, RegsToPass, InFlag, Chain, CallSeqStart, Callee,
6036 SPDiff, NumBytes, Ins, InVals, CS);
6037 }
6038
6039 SDValue PPCTargetLowering::LowerCall_Darwin(
6040 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
6041 bool isTailCall, bool isPatchPoint,
6042 const SmallVectorImpl<ISD::OutputArg> &Outs,
6043 const SmallVectorImpl<SDValue> &OutVals,
6044 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6045 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
6046 ImmutableCallSite CS) const {
6047 unsigned NumOps = Outs.size();
6048
6049 EVT PtrVT = getPointerTy(DAG.getDataLayout());
6050 bool isPPC64 = PtrVT == MVT::i64;
6051 unsigned PtrByteSize = isPPC64 ? 8 : 4;
6052
6053 MachineFunction &MF = DAG.getMachineFunction();
6054
6055 // Mark this function as potentially containing a call that is subject to
6056 // tail call optimization. As a consequence, the frame pointer will be used
6057 // for dynamic stack allocation and for restoring the caller's stack pointer
6058 // in this function's epilogue. This is done because a tail-called function
6059 // might overwrite the value in this function's (MF) stack pointer stack slot 0(SP).
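// Concrete scenario (an illustration, not taken from the ABI documents):
// with -tailcallopt, if fastcc caller C tail-calls D, then D reuses C's
// incoming argument area and may rewrite the back-chain word at 0(SP), so
// C's epilogue cannot assume the slot still holds what its prologue wrote.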
6060 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6061 CallConv == CallingConv::Fast)
6062 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
6063
6064 // Count how many bytes are to be pushed on the stack, including the linkage
6065 // area, and parameter passing area. We start with 24/48 bytes, which is
6066 // prereserved space for [SP][CR][LR][3 x unused].
6067 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6068 unsigned NumBytes = LinkageSize;
6069
6070 // Add up all the space actually used.
6071 // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
6072 // they all go in registers, but we must reserve stack space for them for
6073 // possible use by the callee. In varargs or 64-bit calls, parameters are
6074 // assigned stack space in order, with padding so Altivec parameters are
6075 // 16-byte aligned.
6076 unsigned nAltivecParamsAtEnd = 0;
6077 for (unsigned i = 0; i != NumOps; ++i) {
6078 ISD::ArgFlagsTy Flags = Outs[i].Flags;
6079 EVT ArgVT = Outs[i].VT;
6080 // Varargs Altivec parameters are padded to a 16 byte boundary.
6081 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
6082 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
6083 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
6084 if (!isVarArg && !isPPC64) {
6085 // Non-varargs Altivec parameters go after all the non-Altivec
6086 // parameters; handle those later so we know how much padding we need.
6087 nAltivecParamsAtEnd++;
6088 continue;
6089 }
6090 // Varargs and 64-bit Altivec parameters are padded to a 16 byte boundary.
6091 NumBytes = ((NumBytes+15)/16)*16;
6092 }
6093 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
6094 }
6095
6096 // Allow for Altivec parameters at the end, if needed.
6097 if (nAltivecParamsAtEnd) {
6098 NumBytes = ((NumBytes+15)/16)*16;
6099 NumBytes += 16*nAltivecParamsAtEnd;
6100 }
6101
6102 // The prolog code of the callee may store up to 8 GPR argument registers to
6103 // the stack, allowing va_start to index over them in memory if the callee
6104 // is varargs. Because we cannot tell if this is needed on the caller side,
6105 // we have to conservatively assume that it is needed. As such, make sure we
6106 // have at least enough stack space for the caller to store the 8 GPRs.
6107 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
6108
6109 // Tail calls need the stack to be aligned.
6110 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6111 CallConv == CallingConv::Fast)
6112 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
6113
6114 // Calculate by how many bytes the stack has to be adjusted in case of tail
6115 // call optimization.
6116 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
6117
6118 // To protect arguments on the stack from being clobbered in a tail call,
6119 // force all the loads to happen before doing any other lowering.
6120 if (isTailCall)
6121 Chain = DAG.getStackArgumentTokenFactor(Chain);
6122
6123 // Adjust the stack pointer for the new arguments...
6124 // These operations are automatically eliminated by the prolog/epilog pass.
6125 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6126 SDValue CallSeqStart = Chain;
6127
6128 // Load the return address and frame pointer so they can be moved somewhere
6129 // else later.
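// Worked example for the sizing code above (lines 6064-6107; illustrative
// only): on 64-bit Darwin the linkage area is 48 bytes. For a non-varargs
// call f(int, double, <4 x i32>), the two scalars take 8 bytes each
// (48 -> 64), the vector is already 16-byte aligned there and takes 16
// more (64 -> 80), and the std::max with LinkageSize + 8 * PtrByteSize =
// 112 raises NumBytes to 112 so the callee can always home all 8 GPRs.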
6130 SDValue LROp, FPOp; 6131 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl); 6132 6133 // Set up a copy of the stack pointer for use loading and storing any 6134 // arguments that may not fit in the registers available for argument 6135 // passing. 6136 SDValue StackPtr; 6137 if (isPPC64) 6138 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 6139 else 6140 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 6141 6142 // Figure out which arguments are going to go in registers, and which in 6143 // memory. Also, if this is a vararg function, floating point operations 6144 // must be stored to our stack, and loaded into integer regs as well, if 6145 // any integer regs are available for argument passing. 6146 unsigned ArgOffset = LinkageSize; 6147 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 6148 6149 static const MCPhysReg GPR_32[] = { // 32-bit registers. 6150 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 6151 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 6152 }; 6153 static const MCPhysReg GPR_64[] = { // 64-bit registers. 6154 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 6155 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 6156 }; 6157 static const MCPhysReg VR[] = { 6158 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 6159 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 6160 }; 6161 const unsigned NumGPRs = array_lengthof(GPR_32); 6162 const unsigned NumFPRs = 13; 6163 const unsigned NumVRs = array_lengthof(VR); 6164 6165 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32; 6166 6167 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 6168 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 6169 6170 SmallVector<SDValue, 8> MemOpChains; 6171 for (unsigned i = 0; i != NumOps; ++i) { 6172 SDValue Arg = OutVals[i]; 6173 ISD::ArgFlagsTy Flags = Outs[i].Flags; 6174 6175 // PtrOff will be used to store the current argument to the stack if a 6176 // register cannot be found for it. 6177 SDValue PtrOff; 6178 6179 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType()); 6180 6181 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 6182 6183 // On PPC64, promote integers to 64-bit values. 6184 if (isPPC64 && Arg.getValueType() == MVT::i32) { 6185 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 6186 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 6187 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 6188 } 6189 6190 // FIXME memcpy is used way more than necessary. Correctness first. 6191 // Note: "by value" is code for passing a structure by value, not 6192 // basic types. 6193 if (Flags.isByVal()) { 6194 unsigned Size = Flags.getByValSize(); 6195 // Very small objects are passed right-justified. Everything else is 6196 // passed left-justified. 6197 if (Size==1 || Size==2) { 6198 EVT VT = (Size==1) ? MVT::i8 : MVT::i16; 6199 if (GPR_idx != NumGPRs) { 6200 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 6201 MachinePointerInfo(), VT); 6202 MemOpChains.push_back(Load.getValue(1)); 6203 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6204 6205 ArgOffset += PtrByteSize; 6206 } else { 6207 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 6208 PtrOff.getValueType()); 6209 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 6210 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 6211 CallSeqStart, 6212 Flags, DAG, dl); 6213 ArgOffset += PtrByteSize; 6214 } 6215 continue; 6216 } 6217 // Copy entire object into memory. 
There are cases where gcc-generated 6218 // code assumes it is there, even if it could be put entirely into 6219 // registers. (This is not what the doc says.) 6220 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 6221 CallSeqStart, 6222 Flags, DAG, dl); 6223 6224 // For small aggregates (Darwin only) and aggregates >= PtrByteSize, 6225 // copy the pieces of the object that fit into registers from the 6226 // parameter save area. 6227 for (unsigned j=0; j<Size; j+=PtrByteSize) { 6228 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 6229 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 6230 if (GPR_idx != NumGPRs) { 6231 SDValue Load = 6232 DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo()); 6233 MemOpChains.push_back(Load.getValue(1)); 6234 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6235 ArgOffset += PtrByteSize; 6236 } else { 6237 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 6238 break; 6239 } 6240 } 6241 continue; 6242 } 6243 6244 switch (Arg.getSimpleValueType().SimpleTy) { 6245 default: llvm_unreachable("Unexpected ValueType for argument!"); 6246 case MVT::i1: 6247 case MVT::i32: 6248 case MVT::i64: 6249 if (GPR_idx != NumGPRs) { 6250 if (Arg.getValueType() == MVT::i1) 6251 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg); 6252 6253 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 6254 } else { 6255 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6256 isPPC64, isTailCall, false, MemOpChains, 6257 TailCallArguments, dl); 6258 } 6259 ArgOffset += PtrByteSize; 6260 break; 6261 case MVT::f32: 6262 case MVT::f64: 6263 if (FPR_idx != NumFPRs) { 6264 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 6265 6266 if (isVarArg) { 6267 SDValue Store = 6268 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 6269 MemOpChains.push_back(Store); 6270 6271 // Float varargs are always shadowed in available integer registers 6272 if (GPR_idx != NumGPRs) { 6273 SDValue Load = 6274 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo()); 6275 MemOpChains.push_back(Load.getValue(1)); 6276 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6277 } 6278 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){ 6279 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType()); 6280 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 6281 SDValue Load = 6282 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo()); 6283 MemOpChains.push_back(Load.getValue(1)); 6284 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6285 } 6286 } else { 6287 // If we have any FPRs remaining, we may also have GPRs remaining. 6288 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 6289 // GPRs. 6290 if (GPR_idx != NumGPRs) 6291 ++GPR_idx; 6292 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 6293 !isPPC64) // PPC64 has 64-bit GPR's obviously :) 6294 ++GPR_idx; 6295 } 6296 } else 6297 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6298 isPPC64, isTailCall, false, MemOpChains, 6299 TailCallArguments, dl); 6300 if (isPPC64) 6301 ArgOffset += 8; 6302 else 6303 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8; 6304 break; 6305 case MVT::v4f32: 6306 case MVT::v4i32: 6307 case MVT::v8i16: 6308 case MVT::v16i8: 6309 if (isVarArg) { 6310 // These go aligned on the stack, or in the corresponding R registers 6311 // when within range. 
The Darwin PPC ABI doc claims they also go in 6312 // V registers; in fact gcc does this only for arguments that are 6313 // prototyped, not for those that match the ... We do it for all 6314 // arguments, seems to work. 6315 while (ArgOffset % 16 !=0) { 6316 ArgOffset += PtrByteSize; 6317 if (GPR_idx != NumGPRs) 6318 GPR_idx++; 6319 } 6320 // We could elide this store in the case where the object fits 6321 // entirely in R registers. Maybe later. 6322 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 6323 DAG.getConstant(ArgOffset, dl, PtrVT)); 6324 SDValue Store = 6325 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 6326 MemOpChains.push_back(Store); 6327 if (VR_idx != NumVRs) { 6328 SDValue Load = 6329 DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo()); 6330 MemOpChains.push_back(Load.getValue(1)); 6331 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 6332 } 6333 ArgOffset += 16; 6334 for (unsigned i=0; i<16; i+=PtrByteSize) { 6335 if (GPR_idx == NumGPRs) 6336 break; 6337 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 6338 DAG.getConstant(i, dl, PtrVT)); 6339 SDValue Load = 6340 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo()); 6341 MemOpChains.push_back(Load.getValue(1)); 6342 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6343 } 6344 break; 6345 } 6346 6347 // Non-varargs Altivec params generally go in registers, but have 6348 // stack space allocated at the end. 6349 if (VR_idx != NumVRs) { 6350 // Doesn't have GPR space allocated. 6351 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 6352 } else if (nAltivecParamsAtEnd==0) { 6353 // We are emitting Altivec params in order. 6354 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6355 isPPC64, isTailCall, true, MemOpChains, 6356 TailCallArguments, dl); 6357 ArgOffset += 16; 6358 } 6359 break; 6360 } 6361 } 6362 // If all Altivec parameters fit in registers, as they usually do, 6363 // they get stack space following the non-Altivec parameters. We 6364 // don't track this here because nobody below needs it. 6365 // If there are more Altivec parameters than fit in registers emit 6366 // the stores here. 6367 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) { 6368 unsigned j = 0; 6369 // Offset is aligned; skip 1st 12 params which go in V registers. 6370 ArgOffset = ((ArgOffset+15)/16)*16; 6371 ArgOffset += 12*16; 6372 for (unsigned i = 0; i != NumOps; ++i) { 6373 SDValue Arg = OutVals[i]; 6374 EVT ArgType = Outs[i].VT; 6375 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || 6376 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { 6377 if (++j > NumVRs) { 6378 SDValue PtrOff; 6379 // We are emitting Altivec params in order. 6380 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6381 isPPC64, isTailCall, true, MemOpChains, 6382 TailCallArguments, dl); 6383 ArgOffset += 16; 6384 } 6385 } 6386 } 6387 } 6388 6389 if (!MemOpChains.empty()) 6390 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 6391 6392 // On Darwin, R12 must contain the address of an indirect callee. This does 6393 // not mean the MTCTR instruction must use R12; it's easier to model this as 6394 // an extra parameter, so do that. 6395 if (!isTailCall && 6396 !isFunctionGlobalAddress(Callee) && 6397 !isa<ExternalSymbolSDNode>(Callee) && 6398 !isBLACompatibleAddress(Callee, DAG)) 6399 RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? 
PPC::X12 : 6400 PPC::R12), Callee)); 6401 6402 // Build a sequence of copy-to-reg nodes chained together with token chain 6403 // and flag operands which copy the outgoing args into the appropriate regs. 6404 SDValue InFlag; 6405 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 6406 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 6407 RegsToPass[i].second, InFlag); 6408 InFlag = Chain.getValue(1); 6409 } 6410 6411 if (isTailCall) 6412 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp, 6413 TailCallArguments); 6414 6415 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, 6416 /* unused except on PPC64 ELFv1 */ false, DAG, 6417 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff, 6418 NumBytes, Ins, InVals, CS); 6419 } 6420 6421 bool 6422 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv, 6423 MachineFunction &MF, bool isVarArg, 6424 const SmallVectorImpl<ISD::OutputArg> &Outs, 6425 LLVMContext &Context) const { 6426 SmallVector<CCValAssign, 16> RVLocs; 6427 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); 6428 return CCInfo.CheckReturn( 6429 Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold) 6430 ? RetCC_PPC_Cold 6431 : RetCC_PPC); 6432 } 6433 6434 SDValue 6435 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 6436 bool isVarArg, 6437 const SmallVectorImpl<ISD::OutputArg> &Outs, 6438 const SmallVectorImpl<SDValue> &OutVals, 6439 const SDLoc &dl, SelectionDAG &DAG) const { 6440 SmallVector<CCValAssign, 16> RVLocs; 6441 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 6442 *DAG.getContext()); 6443 CCInfo.AnalyzeReturn(Outs, 6444 (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold) 6445 ? RetCC_PPC_Cold 6446 : RetCC_PPC); 6447 6448 SDValue Flag; 6449 SmallVector<SDValue, 4> RetOps(1, Chain); 6450 6451 // Copy the result values into the output registers. 6452 for (unsigned i = 0; i != RVLocs.size(); ++i) { 6453 CCValAssign &VA = RVLocs[i]; 6454 assert(VA.isRegLoc() && "Can only return in registers!"); 6455 6456 SDValue Arg = OutVals[i]; 6457 6458 switch (VA.getLocInfo()) { 6459 default: llvm_unreachable("Unknown loc info!"); 6460 case CCValAssign::Full: break; 6461 case CCValAssign::AExt: 6462 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 6463 break; 6464 case CCValAssign::ZExt: 6465 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 6466 break; 6467 case CCValAssign::SExt: 6468 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 6469 break; 6470 } 6471 6472 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 6473 Flag = Chain.getValue(1); 6474 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 6475 } 6476 6477 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 6478 const MCPhysReg *I = 6479 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction()); 6480 if (I) { 6481 for (; *I; ++I) { 6482 6483 if (PPC::G8RCRegClass.contains(*I)) 6484 RetOps.push_back(DAG.getRegister(*I, MVT::i64)); 6485 else if (PPC::F8RCRegClass.contains(*I)) 6486 RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64))); 6487 else if (PPC::CRRCRegClass.contains(*I)) 6488 RetOps.push_back(DAG.getRegister(*I, MVT::i1)); 6489 else if (PPC::VRRCRegClass.contains(*I)) 6490 RetOps.push_back(DAG.getRegister(*I, MVT::Other)); 6491 else 6492 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 6493 } 6494 } 6495 6496 RetOps[0] = Chain; // Update chain. 6497 6498 // Add the flag if we have it. 
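// Shape of the result (a hedged sketch): for a function returning one i32
// under RetCC_PPC, the loop above emits a single
//   Chain = DAG.getCopyToReg(Chain, dl, PPC::R3, Arg, Flag);
// (X3 on 64-bit targets), and the node built below is
// PPCISD::RET_FLAG(Chain, Register:R3, Flag), which instruction selection
// turns into a BLR.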
6499 if (Flag.getNode())
6500 RetOps.push_back(Flag);
6501
6502 return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
6503 }
6504
6505 SDValue
6506 PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
6507 SelectionDAG &DAG) const {
6508 SDLoc dl(Op);
6509
6510 // Get the correct type for integers.
6511 EVT IntVT = Op.getValueType();
6512
6513 // Get the inputs.
6514 SDValue Chain = Op.getOperand(0);
6515 SDValue FPSIdx = getFramePointerFrameIndex(DAG);
6516 // Build a DYNAREAOFFSET node.
6517 SDValue Ops[2] = {Chain, FPSIdx};
6518 SDVTList VTs = DAG.getVTList(IntVT);
6519 return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
6520 }
6521
6522 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
6523 SelectionDAG &DAG) const {
6524 // When we pop the dynamic allocation we need to restore the SP link.
6525 SDLoc dl(Op);
6526
6527 // Get the correct type for pointers.
6528 EVT PtrVT = getPointerTy(DAG.getDataLayout());
6529
6530 // Construct the stack pointer operand.
6531 bool isPPC64 = Subtarget.isPPC64();
6532 unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
6533 SDValue StackPtr = DAG.getRegister(SP, PtrVT);
6534
6535 // Get the operands for the STACKRESTORE.
6536 SDValue Chain = Op.getOperand(0);
6537 SDValue SaveSP = Op.getOperand(1);
6538
6539 // Load the old link SP.
6540 SDValue LoadLinkSP =
6541 DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());
6542
6543 // Restore the stack pointer.
6544 Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
6545
6546 // Store the old link SP.
6547 return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
6548 }
6549
6550 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
6551 MachineFunction &MF = DAG.getMachineFunction();
6552 bool isPPC64 = Subtarget.isPPC64();
6553 EVT PtrVT = getPointerTy(MF.getDataLayout());
6554
6555 // Get the current return address save index. The users of this index
6556 // are primarily the RETURNADDR lowering.
6557 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
6558 int RASI = FI->getReturnAddrSaveIndex();
6559
6560 // If the return address save index hasn't been defined yet.
6561 if (!RASI) {
6562 // Find out the fixed offset of the return address save area.
6563 int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
6564 // Allocate the frame index for the return address save area.
6565 RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
6566 // Save the result.
6567 FI->setReturnAddrSaveIndex(RASI);
6568 }
6569 return DAG.getFrameIndex(RASI, PtrVT);
6570 }
6571
6572 SDValue
6573 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG &DAG) const {
6574 MachineFunction &MF = DAG.getMachineFunction();
6575 bool isPPC64 = Subtarget.isPPC64();
6576 EVT PtrVT = getPointerTy(MF.getDataLayout());
6577
6578 // Get the current frame pointer save index. The users of this index will
6579 // be primarily DYNALLOC instructions.
6580 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
6581 int FPSI = FI->getFramePointerSaveIndex();
6582
6583 // If the frame pointer save index hasn't been defined yet.
6584 if (!FPSI) {
6585 // Find out the fixed offset of the frame pointer save area.
6586 int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
6587 // Allocate the frame index for the frame pointer save area.
6588 FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
6589 // Save the result.
6590 FI->setFramePointerSaveIndex(FPSI); 6591 } 6592 return DAG.getFrameIndex(FPSI, PtrVT); 6593 } 6594 6595 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, 6596 SelectionDAG &DAG) const { 6597 // Get the inputs. 6598 SDValue Chain = Op.getOperand(0); 6599 SDValue Size = Op.getOperand(1); 6600 SDLoc dl(Op); 6601 6602 // Get the correct type for pointers. 6603 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6604 // Negate the size. 6605 SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT, 6606 DAG.getConstant(0, dl, PtrVT), Size); 6607 // Construct a node for the frame pointer save index. 6608 SDValue FPSIdx = getFramePointerFrameIndex(DAG); 6609 // Build a DYNALLOC node. 6610 SDValue Ops[3] = { Chain, NegSize, FPSIdx }; 6611 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other); 6612 return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops); 6613 } 6614 6615 SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op, 6616 SelectionDAG &DAG) const { 6617 MachineFunction &MF = DAG.getMachineFunction(); 6618 6619 bool isPPC64 = Subtarget.isPPC64(); 6620 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6621 6622 int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false); 6623 return DAG.getFrameIndex(FI, PtrVT); 6624 } 6625 6626 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, 6627 SelectionDAG &DAG) const { 6628 SDLoc DL(Op); 6629 return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL, 6630 DAG.getVTList(MVT::i32, MVT::Other), 6631 Op.getOperand(0), Op.getOperand(1)); 6632 } 6633 6634 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op, 6635 SelectionDAG &DAG) const { 6636 SDLoc DL(Op); 6637 return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other, 6638 Op.getOperand(0), Op.getOperand(1)); 6639 } 6640 6641 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { 6642 if (Op.getValueType().isVector()) 6643 return LowerVectorLoad(Op, DAG); 6644 6645 assert(Op.getValueType() == MVT::i1 && 6646 "Custom lowering only for i1 loads"); 6647 6648 // First, load 8 bits into 32 bits, then truncate to 1 bit. 6649 6650 SDLoc dl(Op); 6651 LoadSDNode *LD = cast<LoadSDNode>(Op); 6652 6653 SDValue Chain = LD->getChain(); 6654 SDValue BasePtr = LD->getBasePtr(); 6655 MachineMemOperand *MMO = LD->getMemOperand(); 6656 6657 SDValue NewLD = 6658 DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain, 6659 BasePtr, MVT::i8, MMO); 6660 SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD); 6661 6662 SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) }; 6663 return DAG.getMergeValues(Ops, dl); 6664 } 6665 6666 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { 6667 if (Op.getOperand(1).getValueType().isVector()) 6668 return LowerVectorStore(Op, DAG); 6669 6670 assert(Op.getOperand(1).getValueType() == MVT::i1 && 6671 "Custom lowering only for i1 stores"); 6672 6673 // First, zero extend to 32 bits, then use a truncating store to 8 bits. 
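// A minimal sketch of what this builds (assuming a 64-bit target, where
// getPointerTy() is MVT::i64):
//   Value = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Value);
//   return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
// The i8 memory VT is what lets instruction selection use a single stb;
// there is no 1-bit memory access on PPC, so the i1 is widened first.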
6674 6675 SDLoc dl(Op); 6676 StoreSDNode *ST = cast<StoreSDNode>(Op); 6677 6678 SDValue Chain = ST->getChain(); 6679 SDValue BasePtr = ST->getBasePtr(); 6680 SDValue Value = ST->getValue(); 6681 MachineMemOperand *MMO = ST->getMemOperand(); 6682 6683 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()), 6684 Value); 6685 return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO); 6686 } 6687 6688 // FIXME: Remove this once the ANDI glue bug is fixed: 6689 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { 6690 assert(Op.getValueType() == MVT::i1 && 6691 "Custom lowering only for i1 results"); 6692 6693 SDLoc DL(Op); 6694 return DAG.getNode(PPCISD::ANDIo_1_GT_BIT, DL, MVT::i1, 6695 Op.getOperand(0)); 6696 } 6697 6698 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when 6699 /// possible. 6700 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 6701 // Not FP? Not a fsel. 6702 if (!Op.getOperand(0).getValueType().isFloatingPoint() || 6703 !Op.getOperand(2).getValueType().isFloatingPoint()) 6704 return Op; 6705 6706 // We might be able to do better than this under some circumstances, but in 6707 // general, fsel-based lowering of select is a finite-math-only optimization. 6708 // For more information, see section F.3 of the 2.06 ISA specification. 6709 if (!DAG.getTarget().Options.NoInfsFPMath || 6710 !DAG.getTarget().Options.NoNaNsFPMath) 6711 return Op; 6712 // TODO: Propagate flags from the select rather than global settings. 6713 SDNodeFlags Flags; 6714 Flags.setNoInfs(true); 6715 Flags.setNoNaNs(true); 6716 6717 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 6718 6719 EVT ResVT = Op.getValueType(); 6720 EVT CmpVT = Op.getOperand(0).getValueType(); 6721 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 6722 SDValue TV = Op.getOperand(2), FV = Op.getOperand(3); 6723 SDLoc dl(Op); 6724 6725 // If the RHS of the comparison is a 0.0, we don't need to do the 6726 // subtraction at all. 6727 SDValue Sel1; 6728 if (isFloatingPointZero(RHS)) 6729 switch (CC) { 6730 default: break; // SETUO etc aren't handled by fsel. 
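// For reference, a summary of the fsel semantics relied on below (see
// section F.3 of the 2.06 ISA, cited above): fsel FRT,FRA,FRC,FRB sets
// FRT = (FRA >= 0.0) ? FRC : FRB, and a NaN in FRA quietly selects FRB.
// That NaN-unaware behavior is why this transform is gated on the
// no-infs/no-nans options, and why the cases below swap TV and FV to
// reduce every condition to a single >= 0.0 test.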
6731 case ISD::SETNE:
6732 std::swap(TV, FV);
6733 LLVM_FALLTHROUGH;
6734 case ISD::SETEQ:
6735 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
6736 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
6737 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
6738 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits
6739 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
6740 return DAG.getNode(PPCISD::FSEL, dl, ResVT,
6741 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
6742 case ISD::SETULT:
6743 case ISD::SETLT:
6744 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
6745 LLVM_FALLTHROUGH;
6746 case ISD::SETOGE:
6747 case ISD::SETGE:
6748 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
6749 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
6750 return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
6751 case ISD::SETUGT:
6752 case ISD::SETGT:
6753 std::swap(TV, FV); // fsel is natively setge, swap operands for setgt
6754 LLVM_FALLTHROUGH;
6755 case ISD::SETOLE:
6756 case ISD::SETLE:
6757 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
6758 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
6759 return DAG.getNode(PPCISD::FSEL, dl, ResVT,
6760 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
6761 }
6762
6763 SDValue Cmp;
6764 switch (CC) {
6765 default: break; // SETUO etc aren't handled by fsel.
6766 case ISD::SETNE:
6767 std::swap(TV, FV);
6768 LLVM_FALLTHROUGH;
6769 case ISD::SETEQ:
6770 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
6771 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
6772 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
6773 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
6774 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits
6775 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
6776 return DAG.getNode(PPCISD::FSEL, dl, ResVT,
6777 DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
6778 case ISD::SETULT:
6779 case ISD::SETLT:
6780 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
6781 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
6782 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
6783 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
6784 case ISD::SETOGE:
6785 case ISD::SETGE:
6786 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
6787 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
6788 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
6789 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
6790 case ISD::SETUGT:
6791 case ISD::SETGT:
6792 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
6793 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
6794 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
6795 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
6796 case ISD::SETOLE:
6797 case ISD::SETLE:
6798 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
6799 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
6800 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
6801 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
6802 }
6803 return Op;
6804 }
6805
6806 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
6807 SelectionDAG &DAG,
6808 const SDLoc &dl) const {
6809 assert(Op.getOperand(0).getValueType().isFloatingPoint());
6810 SDValue Src = Op.getOperand(0);
6811 if (Src.getValueType() ==
MVT::f32) 6812 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 6813 6814 SDValue Tmp; 6815 switch (Op.getSimpleValueType().SimpleTy) { 6816 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 6817 case MVT::i32: 6818 Tmp = DAG.getNode( 6819 Op.getOpcode() == ISD::FP_TO_SINT 6820 ? PPCISD::FCTIWZ 6821 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 6822 dl, MVT::f64, Src); 6823 break; 6824 case MVT::i64: 6825 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 6826 "i64 FP_TO_UINT is supported only with FPCVT"); 6827 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 6828 PPCISD::FCTIDUZ, 6829 dl, MVT::f64, Src); 6830 break; 6831 } 6832 6833 // Convert the FP value to an int value through memory. 6834 bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() && 6835 (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()); 6836 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64); 6837 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex(); 6838 MachinePointerInfo MPI = 6839 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI); 6840 6841 // Emit a store to the stack slot. 6842 SDValue Chain; 6843 if (i32Stack) { 6844 MachineFunction &MF = DAG.getMachineFunction(); 6845 MachineMemOperand *MMO = 6846 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4); 6847 SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr }; 6848 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 6849 DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO); 6850 } else 6851 Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI); 6852 6853 // Result is a load from the stack slot. If loading 4 bytes, make sure to 6854 // add in a bias on big endian. 6855 if (Op.getValueType() == MVT::i32 && !i32Stack) { 6856 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 6857 DAG.getConstant(4, dl, FIPtr.getValueType())); 6858 MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4); 6859 } 6860 6861 RLI.Chain = Chain; 6862 RLI.Ptr = FIPtr; 6863 RLI.MPI = MPI; 6864 } 6865 6866 /// \brief Custom lowers floating point to integer conversions to use 6867 /// the direct move instructions available in ISA 2.07 to avoid the 6868 /// need for load/store combinations. 6869 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op, 6870 SelectionDAG &DAG, 6871 const SDLoc &dl) const { 6872 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 6873 SDValue Src = Op.getOperand(0); 6874 6875 if (Src.getValueType() == MVT::f32) 6876 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 6877 6878 SDValue Tmp; 6879 switch (Op.getSimpleValueType().SimpleTy) { 6880 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 6881 case MVT::i32: 6882 Tmp = DAG.getNode( 6883 Op.getOpcode() == ISD::FP_TO_SINT 6884 ? PPCISD::FCTIWZ 6885 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 6886 dl, MVT::f64, Src); 6887 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp); 6888 break; 6889 case MVT::i64: 6890 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 6891 "i64 FP_TO_UINT is supported only with FPCVT"); 6892 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? 
PPCISD::FCTIDZ : 6893 PPCISD::FCTIDUZ, 6894 dl, MVT::f64, Src); 6895 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp); 6896 break; 6897 } 6898 return Tmp; 6899 } 6900 6901 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 6902 const SDLoc &dl) const { 6903 if (Subtarget.hasDirectMove() && Subtarget.isPPC64()) 6904 return LowerFP_TO_INTDirectMove(Op, DAG, dl); 6905 6906 ReuseLoadInfo RLI; 6907 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 6908 6909 return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI, 6910 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges); 6911 } 6912 6913 // We're trying to insert a regular store, S, and then a load, L. If the 6914 // incoming value, O, is a load, we might just be able to have our load use the 6915 // address used by O. However, we don't know if anything else will store to 6916 // that address before we can load from it. To prevent this situation, we need 6917 // to insert our load, L, into the chain as a peer of O. To do this, we give L 6918 // the same chain operand as O, we create a token factor from the chain results 6919 // of O and L, and we replace all uses of O's chain result with that token 6920 // factor (see spliceIntoChain below for this last part). 6921 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT, 6922 ReuseLoadInfo &RLI, 6923 SelectionDAG &DAG, 6924 ISD::LoadExtType ET) const { 6925 SDLoc dl(Op); 6926 if (ET == ISD::NON_EXTLOAD && 6927 (Op.getOpcode() == ISD::FP_TO_UINT || 6928 Op.getOpcode() == ISD::FP_TO_SINT) && 6929 isOperationLegalOrCustom(Op.getOpcode(), 6930 Op.getOperand(0).getValueType())) { 6931 6932 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 6933 return true; 6934 } 6935 6936 LoadSDNode *LD = dyn_cast<LoadSDNode>(Op); 6937 if (!LD || LD->getExtensionType() != ET || LD->isVolatile() || 6938 LD->isNonTemporal()) 6939 return false; 6940 if (LD->getMemoryVT() != MemVT) 6941 return false; 6942 6943 RLI.Ptr = LD->getBasePtr(); 6944 if (LD->isIndexed() && !LD->getOffset().isUndef()) { 6945 assert(LD->getAddressingMode() == ISD::PRE_INC && 6946 "Non-pre-inc AM on PPC?"); 6947 RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr, 6948 LD->getOffset()); 6949 } 6950 6951 RLI.Chain = LD->getChain(); 6952 RLI.MPI = LD->getPointerInfo(); 6953 RLI.IsDereferenceable = LD->isDereferenceable(); 6954 RLI.IsInvariant = LD->isInvariant(); 6955 RLI.Alignment = LD->getAlignment(); 6956 RLI.AAInfo = LD->getAAInfo(); 6957 RLI.Ranges = LD->getRanges(); 6958 6959 RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1); 6960 return true; 6961 } 6962 6963 // Given the head of the old chain, ResChain, insert a token factor containing 6964 // it and NewResChain, and make users of ResChain now be users of that token 6965 // factor. 6966 // TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead. 
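// Chain picture (illustrative): before splicing, O's chain result feeds
// users U1 and U2; afterwards,
//   TF = TokenFactor(ResChain, NewResChain);  U1, U2 --> TF
// The node is first created with an UNDEF operand because building it
// with ResChain directly would let ReplaceAllUsesOfValueWith rewrite TF's
// own operand and create a cycle; UpdateNodeOperands then installs
// ResChain after the use rewriting is done.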
6967 void PPCTargetLowering::spliceIntoChain(SDValue ResChain, 6968 SDValue NewResChain, 6969 SelectionDAG &DAG) const { 6970 if (!ResChain) 6971 return; 6972 6973 SDLoc dl(NewResChain); 6974 6975 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 6976 NewResChain, DAG.getUNDEF(MVT::Other)); 6977 assert(TF.getNode() != NewResChain.getNode() && 6978 "A new TF really is required here"); 6979 6980 DAG.ReplaceAllUsesOfValueWith(ResChain, TF); 6981 DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain); 6982 } 6983 6984 /// \brief Analyze profitability of direct move 6985 /// prefer float load to int load plus direct move 6986 /// when there is no integer use of int load 6987 bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const { 6988 SDNode *Origin = Op.getOperand(0).getNode(); 6989 if (Origin->getOpcode() != ISD::LOAD) 6990 return true; 6991 6992 // If there is no LXSIBZX/LXSIHZX, like Power8, 6993 // prefer direct move if the memory size is 1 or 2 bytes. 6994 MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand(); 6995 if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2) 6996 return true; 6997 6998 for (SDNode::use_iterator UI = Origin->use_begin(), 6999 UE = Origin->use_end(); 7000 UI != UE; ++UI) { 7001 7002 // Only look at the users of the loaded value. 7003 if (UI.getUse().get().getResNo() != 0) 7004 continue; 7005 7006 if (UI->getOpcode() != ISD::SINT_TO_FP && 7007 UI->getOpcode() != ISD::UINT_TO_FP) 7008 return true; 7009 } 7010 7011 return false; 7012 } 7013 7014 /// \brief Custom lowers integer to floating point conversions to use 7015 /// the direct move instructions available in ISA 2.07 to avoid the 7016 /// need for load/store combinations. 7017 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op, 7018 SelectionDAG &DAG, 7019 const SDLoc &dl) const { 7020 assert((Op.getValueType() == MVT::f32 || 7021 Op.getValueType() == MVT::f64) && 7022 "Invalid floating point type as target of conversion"); 7023 assert(Subtarget.hasFPCVT() && 7024 "Int to FP conversions with direct moves require FPCVT"); 7025 SDValue FP; 7026 SDValue Src = Op.getOperand(0); 7027 bool SinglePrec = Op.getValueType() == MVT::f32; 7028 bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32; 7029 bool Signed = Op.getOpcode() == ISD::SINT_TO_FP; 7030 unsigned ConvOp = Signed ? (SinglePrec ? PPCISD::FCFIDS : PPCISD::FCFID) : 7031 (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU); 7032 7033 if (WordInt) { 7034 FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ, 7035 dl, MVT::f64, Src); 7036 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP); 7037 } 7038 else { 7039 FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src); 7040 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP); 7041 } 7042 7043 return FP; 7044 } 7045 7046 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op, 7047 SelectionDAG &DAG) const { 7048 SDLoc dl(Op); 7049 7050 if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) { 7051 if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64) 7052 return SDValue(); 7053 7054 SDValue Value = Op.getOperand(0); 7055 // The values are now known to be -1 (false) or 1 (true). To convert this 7056 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 
7057 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
7058 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
7059
7060 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
7061
7062 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
7063
7064 if (Op.getValueType() != MVT::v4f64)
7065 Value = DAG.getNode(ISD::FP_ROUND, dl,
7066 Op.getValueType(), Value,
7067 DAG.getIntPtrConstant(1, dl));
7068 return Value;
7069 }
7070
7071 // Don't handle ppc_fp128 here; let it be lowered to a libcall.
7072 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
7073 return SDValue();
7074
7075 if (Op.getOperand(0).getValueType() == MVT::i1)
7076 return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0),
7077 DAG.getConstantFP(1.0, dl, Op.getValueType()),
7078 DAG.getConstantFP(0.0, dl, Op.getValueType()));
7079
7080 // If we have direct moves, we can do the entire conversion and skip the
7081 // store/load; however, without FPCVT we can't do most conversions.
7082 if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
7083 Subtarget.isPPC64() && Subtarget.hasFPCVT())
7084 return LowerINT_TO_FPDirectMove(Op, DAG, dl);
7085
7086 assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
7087 "UINT_TO_FP is supported only with FPCVT");
7088
7089 // If we have FCFIDS, then use it when converting to single-precision.
7090 // Otherwise, convert to double-precision and then round.
7091 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
7092 ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
7093 : PPCISD::FCFIDS)
7094 : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
7095 : PPCISD::FCFID);
7096 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
7097 ? MVT::f32
7098 : MVT::f64;
7099
7100 if (Op.getOperand(0).getValueType() == MVT::i64) {
7101 SDValue SINT = Op.getOperand(0);
7102 // When converting to single-precision, we actually need to convert
7103 // to double-precision first and then round to single-precision.
7104 // To avoid double-rounding effects during that operation, we have
7105 // to prepare the input operand. Bits that might be truncated when
7106 // converting to double-precision are replaced by a bit that won't
7107 // be lost at this stage, but is below the single-precision rounding
7108 // position.
7109 //
7110 // However, if -enable-unsafe-fp-math is in effect, accept double
7111 // rounding to avoid the extra overhead.
7112 if (Op.getValueType() == MVT::f32 &&
7113 !Subtarget.hasFPCVT() &&
7114 !DAG.getTarget().Options.UnsafeFPMath) {
7115
7116 // Twiddle input to make sure the low 11 bits are zero. (If this
7117 // is the case, we are guaranteed the value will fit into the 53 bit
7118 // mantissa of an IEEE double-precision value without rounding.)
7119 // If any of those low 11 bits were not zero originally, make sure
7120 // the bit of value 2048 (bit 11) is set instead, so that the final
7121 // rounding to single-precision gets the correct result.
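// Worked example (illustrative): SINT = 0x0010000000000401.
//   Round = SINT & 2047   -> 0x401
//   Round += 2047         -> 0xC00 (the carry reaches bit 11)
//   Round |= SINT         -> 0x0010000000000C01
//   Round &= -2048        -> 0x0010000000000800
// The low 11 bits are cleared, and bit 11 (value 2048) is set because at
// least one of them was nonzero, preserving a sticky bit for the final
// round to single-precision.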
7122 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64, 7123 SINT, DAG.getConstant(2047, dl, MVT::i64)); 7124 Round = DAG.getNode(ISD::ADD, dl, MVT::i64, 7125 Round, DAG.getConstant(2047, dl, MVT::i64)); 7126 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT); 7127 Round = DAG.getNode(ISD::AND, dl, MVT::i64, 7128 Round, DAG.getConstant(-2048, dl, MVT::i64)); 7129 7130 // However, we cannot use that value unconditionally: if the magnitude 7131 // of the input value is small, the bit-twiddling we did above might 7132 // end up visibly changing the output. Fortunately, in that case, we 7133 // don't need to twiddle bits since the original input will convert 7134 // exactly to double-precision floating-point already. Therefore, 7135 // construct a conditional to use the original value if the top 11 7136 // bits are all sign-bit copies, and use the rounded value computed 7137 // above otherwise. 7138 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64, 7139 SINT, DAG.getConstant(53, dl, MVT::i32)); 7140 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64, 7141 Cond, DAG.getConstant(1, dl, MVT::i64)); 7142 Cond = DAG.getSetCC(dl, MVT::i32, 7143 Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT); 7144 7145 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT); 7146 } 7147 7148 ReuseLoadInfo RLI; 7149 SDValue Bits; 7150 7151 MachineFunction &MF = DAG.getMachineFunction(); 7152 if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) { 7153 Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI, 7154 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges); 7155 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 7156 } else if (Subtarget.hasLFIWAX() && 7157 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) { 7158 MachineMemOperand *MMO = 7159 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 7160 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 7161 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 7162 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl, 7163 DAG.getVTList(MVT::f64, MVT::Other), 7164 Ops, MVT::i32, MMO); 7165 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 7166 } else if (Subtarget.hasFPCVT() && 7167 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) { 7168 MachineMemOperand *MMO = 7169 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 7170 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 7171 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 7172 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl, 7173 DAG.getVTList(MVT::f64, MVT::Other), 7174 Ops, MVT::i32, MMO); 7175 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 7176 } else if (((Subtarget.hasLFIWAX() && 7177 SINT.getOpcode() == ISD::SIGN_EXTEND) || 7178 (Subtarget.hasFPCVT() && 7179 SINT.getOpcode() == ISD::ZERO_EXTEND)) && 7180 SINT.getOperand(0).getValueType() == MVT::i32) { 7181 MachineFrameInfo &MFI = MF.getFrameInfo(); 7182 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 7183 7184 int FrameIdx = MFI.CreateStackObject(4, 4, false); 7185 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7186 7187 SDValue Store = 7188 DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx, 7189 MachinePointerInfo::getFixedStack( 7190 DAG.getMachineFunction(), FrameIdx)); 7191 7192 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 7193 "Expected an i32 store"); 7194 7195 RLI.Ptr = FIdx; 7196 RLI.Chain = Store; 7197 RLI.MPI = 7198 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 7199 RLI.Alignment = 4; 7200 7201 MachineMemOperand *MMO = 7202 MF.getMachineMemOperand(RLI.MPI, 
MachineMemOperand::MOLoad, 4, 7203 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 7204 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 7205 Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ? 7206 PPCISD::LFIWZX : PPCISD::LFIWAX, 7207 dl, DAG.getVTList(MVT::f64, MVT::Other), 7208 Ops, MVT::i32, MMO); 7209 } else 7210 Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT); 7211 7212 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits); 7213 7214 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) 7215 FP = DAG.getNode(ISD::FP_ROUND, dl, 7216 MVT::f32, FP, DAG.getIntPtrConstant(0, dl)); 7217 return FP; 7218 } 7219 7220 assert(Op.getOperand(0).getValueType() == MVT::i32 && 7221 "Unhandled INT_TO_FP type in custom expander!"); 7222 // Since we only generate this in 64-bit mode, we can take advantage of 7223 // 64-bit registers. In particular, sign extend the input value into the 7224 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 7225 // then lfd it and fcfid it. 7226 MachineFunction &MF = DAG.getMachineFunction(); 7227 MachineFrameInfo &MFI = MF.getFrameInfo(); 7228 EVT PtrVT = getPointerTy(MF.getDataLayout()); 7229 7230 SDValue Ld; 7231 if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) { 7232 ReuseLoadInfo RLI; 7233 bool ReusingLoad; 7234 if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI, 7235 DAG))) { 7236 int FrameIdx = MFI.CreateStackObject(4, 4, false); 7237 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7238 7239 SDValue Store = 7240 DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 7241 MachinePointerInfo::getFixedStack( 7242 DAG.getMachineFunction(), FrameIdx)); 7243 7244 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 7245 "Expected an i32 store"); 7246 7247 RLI.Ptr = FIdx; 7248 RLI.Chain = Store; 7249 RLI.MPI = 7250 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 7251 RLI.Alignment = 4; 7252 } 7253 7254 MachineMemOperand *MMO = 7255 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 7256 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 7257 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 7258 Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ? 7259 PPCISD::LFIWZX : PPCISD::LFIWAX, 7260 dl, DAG.getVTList(MVT::f64, MVT::Other), 7261 Ops, MVT::i32, MMO); 7262 if (ReusingLoad) 7263 spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG); 7264 } else { 7265 assert(Subtarget.isPPC64() && 7266 "i32->FP without LFIWAX supported only on PPC64"); 7267 7268 int FrameIdx = MFI.CreateStackObject(8, 8, false); 7269 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7270 7271 SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, 7272 Op.getOperand(0)); 7273 7274 // STD the extended value into the stack slot. 7275 SDValue Store = DAG.getStore( 7276 DAG.getEntryNode(), dl, Ext64, FIdx, 7277 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx)); 7278 7279 // Load the value as a double. 7280 Ld = DAG.getLoad( 7281 MVT::f64, dl, Store, FIdx, 7282 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx)); 7283 } 7284 7285 // FCFID it and return it. 
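// End-to-end shape of the path above, for reference (illustrative): the
// i32 is sign-extended to i64 (an extsw), stored whole into an 8-byte
// stack slot (std), reloaded as a double (lfd), and converted below with
// fcfid; without FPCVT, the extra FP_ROUND (an frsp) narrows the result
// to single precision.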
7286   SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
7287   if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
7288     FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
7289                      DAG.getIntPtrConstant(0, dl));
7290   return FP;
7291 }
7292
7293 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
7294                                             SelectionDAG &DAG) const {
7295   SDLoc dl(Op);
7296   /*
7297    The rounding mode is in bits 30:31 of the FPSCR, and has the following
7298    settings:
7299      00 Round to nearest
7300      01 Round to 0
7301      10 Round to +inf
7302      11 Round to -inf
7303
7304   FLT_ROUNDS, on the other hand, expects the following:
7305     -1 Undefined
7306      0 Round to 0
7307      1 Round to nearest
7308      2 Round to +inf
7309      3 Round to -inf
7310
7311   To perform the conversion, we do:
7312     ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
7313   */
7314
7315   MachineFunction &MF = DAG.getMachineFunction();
7316   EVT VT = Op.getValueType();
7317   EVT PtrVT = getPointerTy(MF.getDataLayout());
7318
7319   // Save FP Control Word to register
7320   EVT NodeTys[] = {
7321     MVT::f64,    // return register
7322     MVT::Glue    // unused in this context
7323   };
7324   SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None);
7325
7326   // Save FP register to stack slot
7327   int SSFI = MF.getFrameInfo().CreateStackObject(8, 8, false);
7328   SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
7329   SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain, StackSlot,
7330                                MachinePointerInfo());
7331
7332   // Load FP Control Word from low 32 bits of stack slot.
7333   SDValue Four = DAG.getConstant(4, dl, PtrVT);
7334   SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
7335   SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo());
7336
7337   // Transform as necessary
7338   SDValue CWD1 =
7339     DAG.getNode(ISD::AND, dl, MVT::i32,
7340                 CWD, DAG.getConstant(3, dl, MVT::i32));
7341   SDValue CWD2 =
7342     DAG.getNode(ISD::SRL, dl, MVT::i32,
7343                 DAG.getNode(ISD::AND, dl, MVT::i32,
7344                             DAG.getNode(ISD::XOR, dl, MVT::i32,
7345                                         CWD, DAG.getConstant(3, dl, MVT::i32)),
7346                             DAG.getConstant(3, dl, MVT::i32)),
7347                 DAG.getConstant(1, dl, MVT::i32));
7348
7349   SDValue RetVal =
7350     DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
7351
7352   return DAG.getNode((VT.getSizeInBits() < 16 ?
7353                       ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
7354 }
7355
7356 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
7357   EVT VT = Op.getValueType();
7358   unsigned BitWidth = VT.getSizeInBits();
7359   SDLoc dl(Op);
7360   assert(Op.getNumOperands() == 3 &&
7361          VT == Op.getOperand(1).getValueType() &&
7362          "Unexpected SHL!");
7363
7364   // Expand into a bunch of logical ops.  Note that these ops
7365   // depend on the PPC behavior for oversized shift amounts.
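  // Concretely, for shift amounts in [1, BitWidth):
  //   OutHi = (Hi << Amt) | (Lo >> (BitWidth - Amt)),  OutLo = Lo << Amt,
  // while for amounts in [BitWidth, 2*BitWidth) each PPC shift by an
  // out-of-range amount produces zero, so only Tmp6 = Lo << (Amt - BitWidth)
  // contributes to OutHi, and OutLo is correctly zero.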
7366 SDValue Lo = Op.getOperand(0); 7367 SDValue Hi = Op.getOperand(1); 7368 SDValue Amt = Op.getOperand(2); 7369 EVT AmtVT = Amt.getValueType(); 7370 7371 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 7372 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 7373 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt); 7374 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1); 7375 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3); 7376 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 7377 DAG.getConstant(-BitWidth, dl, AmtVT)); 7378 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5); 7379 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 7380 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt); 7381 SDValue OutOps[] = { OutLo, OutHi }; 7382 return DAG.getMergeValues(OutOps, dl); 7383 } 7384 7385 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { 7386 EVT VT = Op.getValueType(); 7387 SDLoc dl(Op); 7388 unsigned BitWidth = VT.getSizeInBits(); 7389 assert(Op.getNumOperands() == 3 && 7390 VT == Op.getOperand(1).getValueType() && 7391 "Unexpected SRL!"); 7392 7393 // Expand into a bunch of logical ops. Note that these ops 7394 // depend on the PPC behavior for oversized shift amounts. 7395 SDValue Lo = Op.getOperand(0); 7396 SDValue Hi = Op.getOperand(1); 7397 SDValue Amt = Op.getOperand(2); 7398 EVT AmtVT = Amt.getValueType(); 7399 7400 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 7401 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 7402 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 7403 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 7404 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 7405 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 7406 DAG.getConstant(-BitWidth, dl, AmtVT)); 7407 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5); 7408 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 7409 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt); 7410 SDValue OutOps[] = { OutLo, OutHi }; 7411 return DAG.getMergeValues(OutOps, dl); 7412 } 7413 7414 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { 7415 SDLoc dl(Op); 7416 EVT VT = Op.getValueType(); 7417 unsigned BitWidth = VT.getSizeInBits(); 7418 assert(Op.getNumOperands() == 3 && 7419 VT == Op.getOperand(1).getValueType() && 7420 "Unexpected SRA!"); 7421 7422 // Expand into a bunch of logical ops, followed by a select_cc. 7423 SDValue Lo = Op.getOperand(0); 7424 SDValue Hi = Op.getOperand(1); 7425 SDValue Amt = Op.getOperand(2); 7426 EVT AmtVT = Amt.getValueType(); 7427 7428 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 7429 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 7430 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 7431 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 7432 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 7433 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 7434 DAG.getConstant(-BitWidth, dl, AmtVT)); 7435 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5); 7436 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt); 7437 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT), 7438 Tmp4, Tmp6, ISD::SETLE); 7439 SDValue OutOps[] = { OutLo, OutHi }; 7440 return DAG.getMergeValues(OutOps, dl); 7441 } 7442 7443 //===----------------------------------------------------------------------===// 7444 // Vector related lowering. 
7445 // 7446 7447 /// BuildSplatI - Build a canonical splati of Val with an element size of 7448 /// SplatSize. Cast the result to VT. 7449 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, 7450 SelectionDAG &DAG, const SDLoc &dl) { 7451 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); 7452 7453 static const MVT VTys[] = { // canonical VT to use for each size. 7454 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 7455 }; 7456 7457 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 7458 7459 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 7460 if (Val == -1) 7461 SplatSize = 1; 7462 7463 EVT CanonicalVT = VTys[SplatSize-1]; 7464 7465 // Build a canonical splat for this value. 7466 return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT)); 7467 } 7468 7469 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the 7470 /// specified intrinsic ID. 7471 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG, 7472 const SDLoc &dl, EVT DestVT = MVT::Other) { 7473 if (DestVT == MVT::Other) DestVT = Op.getValueType(); 7474 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 7475 DAG.getConstant(IID, dl, MVT::i32), Op); 7476 } 7477 7478 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the 7479 /// specified intrinsic ID. 7480 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, 7481 SelectionDAG &DAG, const SDLoc &dl, 7482 EVT DestVT = MVT::Other) { 7483 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 7484 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 7485 DAG.getConstant(IID, dl, MVT::i32), LHS, RHS); 7486 } 7487 7488 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 7489 /// specified intrinsic ID. 7490 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, 7491 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl, 7492 EVT DestVT = MVT::Other) { 7493 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 7494 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 7495 DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2); 7496 } 7497 7498 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 7499 /// amount. The result has the specified value type. 7500 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT, 7501 SelectionDAG &DAG, const SDLoc &dl) { 7502 // Force LHS/RHS to be the right type. 7503 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS); 7504 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS); 7505 7506 int Ops[16]; 7507 for (unsigned i = 0; i != 16; ++i) 7508 Ops[i] = i + Amt; 7509 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops); 7510 return DAG.getNode(ISD::BITCAST, dl, VT, T); 7511 } 7512 7513 /// Do we have an efficient pattern in a .td file for this node? 7514 /// 7515 /// \param V - pointer to the BuildVectorSDNode being matched 7516 /// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves? 7517 /// 7518 /// There are some patterns where it is beneficial to keep a BUILD_VECTOR 7519 /// node as a BUILD_VECTOR node rather than expanding it. The patterns where 7520 /// the opposite is true (expansion is beneficial) are: 7521 /// - The node builds a vector out of integers that are not 32 or 64-bits 7522 /// - The node builds a vector out of constants 7523 /// - The node is a "load-and-splat" 7524 /// In all other cases, we will choose to keep the BUILD_VECTOR. 
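/// For example, a node such as (build_vector (load %p), (load %p),
/// (load %p), (load %p)) is a load-and-splat, so expanding it into a single
/// load plus a splat is cheaper than keeping the BUILD_VECTOR node.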
7525 static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V, 7526 bool HasDirectMove, 7527 bool HasP8Vector) { 7528 EVT VecVT = V->getValueType(0); 7529 bool RightType = VecVT == MVT::v2f64 || 7530 (HasP8Vector && VecVT == MVT::v4f32) || 7531 (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32)); 7532 if (!RightType) 7533 return false; 7534 7535 bool IsSplat = true; 7536 bool IsLoad = false; 7537 SDValue Op0 = V->getOperand(0); 7538 7539 // This function is called in a block that confirms the node is not a constant 7540 // splat. So a constant BUILD_VECTOR here means the vector is built out of 7541 // different constants. 7542 if (V->isConstant()) 7543 return false; 7544 for (int i = 0, e = V->getNumOperands(); i < e; ++i) { 7545 if (V->getOperand(i).isUndef()) 7546 return false; 7547 // We want to expand nodes that represent load-and-splat even if the 7548 // loaded value is a floating point truncation or conversion to int. 7549 if (V->getOperand(i).getOpcode() == ISD::LOAD || 7550 (V->getOperand(i).getOpcode() == ISD::FP_ROUND && 7551 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) || 7552 (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT && 7553 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) || 7554 (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT && 7555 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD)) 7556 IsLoad = true; 7557 // If the operands are different or the input is not a load and has more 7558 // uses than just this BV node, then it isn't a splat. 7559 if (V->getOperand(i) != Op0 || 7560 (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode()))) 7561 IsSplat = false; 7562 } 7563 return !(IsSplat && IsLoad); 7564 } 7565 7566 // If this is a case we can't handle, return null and let the default 7567 // expansion code take care of it. If we CAN select this case, and if it 7568 // selects to a single instruction, return Op. Otherwise, if we can codegen 7569 // this case more efficiently than a constant pool load, lower it to the 7570 // sequence of ops that should be used. 7571 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, 7572 SelectionDAG &DAG) const { 7573 SDLoc dl(Op); 7574 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 7575 assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR"); 7576 7577 if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) { 7578 // We first build an i32 vector, load it into a QPX register, 7579 // then convert it to a floating-point vector and compare it 7580 // to a zero vector to get the boolean result. 
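    // Constant inputs are materialized through the constant pool and loaded
    // with QVLFSb below; non-constant inputs are stored element-by-element
    // into a 16-byte stack slot and reloaded with the qvlfiwz intrinsic.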
7581 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 7582 int FrameIdx = MFI.CreateStackObject(16, 16, false); 7583 MachinePointerInfo PtrInfo = 7584 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 7585 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 7586 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7587 7588 assert(BVN->getNumOperands() == 4 && 7589 "BUILD_VECTOR for v4i1 does not have 4 operands"); 7590 7591 bool IsConst = true; 7592 for (unsigned i = 0; i < 4; ++i) { 7593 if (BVN->getOperand(i).isUndef()) continue; 7594 if (!isa<ConstantSDNode>(BVN->getOperand(i))) { 7595 IsConst = false; 7596 break; 7597 } 7598 } 7599 7600 if (IsConst) { 7601 Constant *One = 7602 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0); 7603 Constant *NegOne = 7604 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0); 7605 7606 Constant *CV[4]; 7607 for (unsigned i = 0; i < 4; ++i) { 7608 if (BVN->getOperand(i).isUndef()) 7609 CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext())); 7610 else if (isNullConstant(BVN->getOperand(i))) 7611 CV[i] = NegOne; 7612 else 7613 CV[i] = One; 7614 } 7615 7616 Constant *CP = ConstantVector::get(CV); 7617 SDValue CPIdx = DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()), 7618 16 /* alignment */); 7619 7620 SDValue Ops[] = {DAG.getEntryNode(), CPIdx}; 7621 SDVTList VTs = DAG.getVTList({MVT::v4i1, /*chain*/ MVT::Other}); 7622 return DAG.getMemIntrinsicNode( 7623 PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32, 7624 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 7625 } 7626 7627 SmallVector<SDValue, 4> Stores; 7628 for (unsigned i = 0; i < 4; ++i) { 7629 if (BVN->getOperand(i).isUndef()) continue; 7630 7631 unsigned Offset = 4*i; 7632 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 7633 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 7634 7635 unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize(); 7636 if (StoreSize > 4) { 7637 Stores.push_back( 7638 DAG.getTruncStore(DAG.getEntryNode(), dl, BVN->getOperand(i), Idx, 7639 PtrInfo.getWithOffset(Offset), MVT::i32)); 7640 } else { 7641 SDValue StoreValue = BVN->getOperand(i); 7642 if (StoreSize < 4) 7643 StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue); 7644 7645 Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, StoreValue, Idx, 7646 PtrInfo.getWithOffset(Offset))); 7647 } 7648 } 7649 7650 SDValue StoreChain; 7651 if (!Stores.empty()) 7652 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 7653 else 7654 StoreChain = DAG.getEntryNode(); 7655 7656 // Now load from v4i32 into the QPX register; this will extend it to 7657 // v4i64 but not yet convert it to a floating point. Nevertheless, this 7658 // is typed as v4f64 because the QPX register integer states are not 7659 // explicitly represented. 7660 7661 SDValue Ops[] = {StoreChain, 7662 DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32), 7663 FIdx}; 7664 SDVTList VTs = DAG.getVTList({MVT::v4f64, /*chain*/ MVT::Other}); 7665 7666 SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, 7667 dl, VTs, Ops, MVT::v4i32, PtrInfo); 7668 LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 7669 DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32), 7670 LoadedVect); 7671 7672 SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::v4f64); 7673 7674 return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ); 7675 } 7676 7677 // All other QPX vectors are handled by generic code. 
7678 if (Subtarget.hasQPX()) 7679 return SDValue(); 7680 7681 // Check if this is a splat of a constant value. 7682 APInt APSplatBits, APSplatUndef; 7683 unsigned SplatBitSize; 7684 bool HasAnyUndefs; 7685 if (! BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize, 7686 HasAnyUndefs, 0, !Subtarget.isLittleEndian()) || 7687 SplatBitSize > 32) { 7688 // BUILD_VECTOR nodes that are not constant splats of up to 32-bits can be 7689 // lowered to VSX instructions under certain conditions. 7690 // Without VSX, there is no pattern more efficient than expanding the node. 7691 if (Subtarget.hasVSX() && 7692 haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(), 7693 Subtarget.hasP8Vector())) 7694 return Op; 7695 return SDValue(); 7696 } 7697 7698 unsigned SplatBits = APSplatBits.getZExtValue(); 7699 unsigned SplatUndef = APSplatUndef.getZExtValue(); 7700 unsigned SplatSize = SplatBitSize / 8; 7701 7702 // First, handle single instruction cases. 7703 7704 // All zeros? 7705 if (SplatBits == 0) { 7706 // Canonicalize all zero vectors to be v4i32. 7707 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) { 7708 SDValue Z = DAG.getConstant(0, dl, MVT::v4i32); 7709 Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z); 7710 } 7711 return Op; 7712 } 7713 7714 // We have XXSPLTIB for constant splats one byte wide 7715 if (Subtarget.hasP9Vector() && SplatSize == 1) { 7716 // This is a splat of 1-byte elements with some elements potentially undef. 7717 // Rather than trying to match undef in the SDAG patterns, ensure that all 7718 // elements are the same constant. 7719 if (HasAnyUndefs || ISD::isBuildVectorAllOnes(BVN)) { 7720 SmallVector<SDValue, 16> Ops(16, DAG.getConstant(SplatBits, 7721 dl, MVT::i32)); 7722 SDValue NewBV = DAG.getBuildVector(MVT::v16i8, dl, Ops); 7723 if (Op.getValueType() != MVT::v16i8) 7724 return DAG.getBitcast(Op.getValueType(), NewBV); 7725 return NewBV; 7726 } 7727 7728 // BuildVectorSDNode::isConstantSplat() is actually pretty smart. It'll 7729 // detect that constant splats like v8i16: 0xABAB are really just splats 7730 // of a 1-byte constant. In this case, we need to convert the node to a 7731 // splat of v16i8 and a bitcast. 7732 if (Op.getValueType() != MVT::v16i8) 7733 return DAG.getBitcast(Op.getValueType(), 7734 DAG.getConstant(SplatBits, dl, MVT::v16i8)); 7735 7736 return Op; 7737 } 7738 7739 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw]. 7740 int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >> 7741 (32-SplatBitSize)); 7742 if (SextVal >= -16 && SextVal <= 15) 7743 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl); 7744 7745 // Two instruction sequences. 7746 7747 // If this value is in the range [-32,30] and is even, use: 7748 // VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2) 7749 // If this value is in the range [17,31] and is odd, use: 7750 // VSPLTI[bhw](val-16) - VSPLTI[bhw](-16) 7751 // If this value is in the range [-31,-17] and is odd, use: 7752 // VSPLTI[bhw](val+16) + VSPLTI[bhw](-16) 7753 // Note the last two are three-instruction sequences. 7754 if (SextVal >= -32 && SextVal <= 31) { 7755 // To avoid having these optimizations undone by constant folding, 7756 // we convert to a pseudo that will be expanded later into one of 7757 // the above forms. 7758 SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32); 7759 EVT VT = (SplatSize == 1 ? MVT::v16i8 : 7760 (SplatSize == 2 ? 
MVT::v8i16 : MVT::v4i32));
7761     SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
7762     SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
7763     if (VT == Op.getValueType())
7764       return RetVal;
7765     else
7766       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
7767   }
7768
7769   // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
7770   // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
7771   // for fneg/fabs.
7772   if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
7773     // Make -1 and vspltisw -1:
7774     SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);
7775
7776     // Make the VSLW intrinsic, computing 0x8000_0000.
7777     SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
7778                                    OnesV, DAG, dl);
7779
7780     // xor by OnesV to invert it.
7781     Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
7782     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
7783   }
7784
7785   // Check to see if this is one of a wide variety of vsplti* + binop-self cases.
7786   static const signed char SplatCsts[] = {
7787     -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
7788     -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
7789   };
7790
7791   for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
7792     // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
7793     // cases which are ambiguous (e.g. formation of 0x8000_0000).
7794     int i = SplatCsts[idx];
7795
7796     // Figure out what shift amount will be used by altivec if shifted by i in
7797     // this splat size.
7798     unsigned TypeShiftAmt = i & (SplatBitSize-1);
7799
7800     // vsplti + shl self.
7801     if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
7802       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
7803       static const unsigned IIDs[] = { // Intrinsic to use for each size.
7804         Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
7805         Intrinsic::ppc_altivec_vslw
7806       };
7807       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
7808       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
7809     }
7810
7811     // vsplti + srl self.
7812     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
7813       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
7814       static const unsigned IIDs[] = { // Intrinsic to use for each size.
7815         Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
7816         Intrinsic::ppc_altivec_vsrw
7817       };
7818       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
7819       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
7820     }
7821
7822     // vsplti + sra self (arithmetic shift, so negative splats reach here).
7823     if (SextVal == (int)(i >> TypeShiftAmt)) {
7824       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
7825       static const unsigned IIDs[] = { // Intrinsic to use for each size.
7826         Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
7827         Intrinsic::ppc_altivec_vsraw
7828       };
7829       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
7830       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
7831     }
7832
7833     // vsplti + rol self.
7834     if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
7835                          ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
7836       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
7837       static const unsigned IIDs[] = { // Intrinsic to use for each size.
7838 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0, 7839 Intrinsic::ppc_altivec_vrlw 7840 }; 7841 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 7842 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 7843 } 7844 7845 // t = vsplti c, result = vsldoi t, t, 1 7846 if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) { 7847 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 7848 unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1; 7849 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 7850 } 7851 // t = vsplti c, result = vsldoi t, t, 2 7852 if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) { 7853 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 7854 unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2; 7855 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 7856 } 7857 // t = vsplti c, result = vsldoi t, t, 3 7858 if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) { 7859 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 7860 unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3; 7861 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 7862 } 7863 } 7864 7865 return SDValue(); 7866 } 7867 7868 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 7869 /// the specified operations to build the shuffle. 7870 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 7871 SDValue RHS, SelectionDAG &DAG, 7872 const SDLoc &dl) { 7873 unsigned OpNum = (PFEntry >> 26) & 0x0F; 7874 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 7875 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 7876 7877 enum { 7878 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 7879 OP_VMRGHW, 7880 OP_VMRGLW, 7881 OP_VSPLTISW0, 7882 OP_VSPLTISW1, 7883 OP_VSPLTISW2, 7884 OP_VSPLTISW3, 7885 OP_VSLDOI4, 7886 OP_VSLDOI8, 7887 OP_VSLDOI12 7888 }; 7889 7890 if (OpNum == OP_COPY) { 7891 if (LHSID == (1*9+2)*9+3) return LHS; 7892 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 7893 return RHS; 7894 } 7895 7896 SDValue OpLHS, OpRHS; 7897 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 7898 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 7899 7900 int ShufIdxs[16]; 7901 switch (OpNum) { 7902 default: llvm_unreachable("Unknown i32 permute!"); 7903 case OP_VMRGHW: 7904 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 7905 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 7906 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 7907 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 7908 break; 7909 case OP_VMRGLW: 7910 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 7911 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 7912 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 7913 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 7914 break; 7915 case OP_VSPLTISW0: 7916 for (unsigned i = 0; i != 16; ++i) 7917 ShufIdxs[i] = (i&3)+0; 7918 break; 7919 case OP_VSPLTISW1: 7920 for (unsigned i = 0; i != 16; ++i) 7921 ShufIdxs[i] = (i&3)+4; 7922 break; 7923 case OP_VSPLTISW2: 7924 for (unsigned i = 0; i != 16; ++i) 7925 ShufIdxs[i] = (i&3)+8; 7926 break; 7927 case OP_VSPLTISW3: 7928 for (unsigned i = 0; i != 16; ++i) 7929 ShufIdxs[i] = (i&3)+12; 7930 break; 7931 case OP_VSLDOI4: 7932 return 
BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
7933   case OP_VSLDOI8:
7934     return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
7935   case OP_VSLDOI12:
7936     return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
7937   }
7938   EVT VT = OpLHS.getValueType();
7939   OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
7940   OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
7941   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
7942   return DAG.getNode(ISD::BITCAST, dl, VT, T);
7943 }
7944
7945 /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
7946 /// by the VINSERTB instruction introduced in ISA 3.0; otherwise return a
7947 /// default-constructed SDValue.
7948 SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
7949                                            SelectionDAG &DAG) const {
7950   const unsigned BytesInVector = 16;
7951   bool IsLE = Subtarget.isLittleEndian();
7952   SDLoc dl(N);
7953   SDValue V1 = N->getOperand(0);
7954   SDValue V2 = N->getOperand(1);
7955   unsigned ShiftElts = 0, InsertAtByte = 0;
7956   bool Swap = false;
7957
7958   // Shifts required to get the byte we want at element 7.
7959   unsigned LittleEndianShifts[] = {8, 7, 6, 5, 4, 3, 2, 1,
7960                                    0, 15, 14, 13, 12, 11, 10, 9};
7961   unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
7962                                 1, 2, 3, 4, 5, 6, 7, 8};
7963
7964   ArrayRef<int> Mask = N->getMask();
7965   int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
7966
7967   // For each mask element, find out if we're just inserting something
7968   // from V2 into V1 or vice versa.
7969   // Possible permutations inserting an element from V2 into V1:
7970   //   X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
7971   //   0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
7972   //   ...
7973   //   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
7974   // Inserting from V1 into V2 will be similar, except mask range will be
7975   // [16,31].
7976
7977   bool FoundCandidate = false;
7978   // If both vector operands for the shuffle are the same vector, the mask
7979   // will contain only elements from the first one and the second one will be
7980   // undef.
7981   unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
7982   // Go through the mask of bytes to find an element that's being moved
7983   // from one vector to the other.
7984   for (unsigned i = 0; i < BytesInVector; ++i) {
7985     unsigned CurrentElement = Mask[i];
7986     // If 2nd operand is undefined, we should only look for element 7 in the
7987     // Mask.
7988     if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
7989       continue;
7990
7991     bool OtherElementsInOrder = true;
7992     // Examine the other elements in the Mask to see if they're in original
7993     // order.
7994     for (unsigned j = 0; j < BytesInVector; ++j) {
7995       if (j == i)
7996         continue;
7997       // If CurrentElement is from V1 [0,15], then we expect the rest of the
7998       // Mask to be from V2 [16,31] and vice versa.  Unless the 2nd operand is
7999       // undefined, in which case we always assume we're picking from the 1st
8000       // operand.
8001       int MaskOffset =
8002           (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
8003       if (Mask[j] != OriginalOrder[j] + MaskOffset) {
8004         OtherElementsInOrder = false;
8005         break;
8006       }
8007     }
8008     // If other elements are in original order, we record the number of shifts
8009     // we need to get the element we want into element 7, and which byte in the vector we should insert into.
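    // For example (big endian), the mask <16, 1, 2, ..., 15> moves byte 0 of
    // V2 into byte 0 of the result while leaving the remaining fifteen bytes
    // of V1 in their original positions.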
8010 if (OtherElementsInOrder) { 8011 // If 2nd operand is undefined, we assume no shifts and no swapping. 8012 if (V2.isUndef()) { 8013 ShiftElts = 0; 8014 Swap = false; 8015 } else { 8016 // Only need the last 4-bits for shifts because operands will be swapped if CurrentElement is >= 2^4. 8017 ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF] 8018 : BigEndianShifts[CurrentElement & 0xF]; 8019 Swap = CurrentElement < BytesInVector; 8020 } 8021 InsertAtByte = IsLE ? BytesInVector - (i + 1) : i; 8022 FoundCandidate = true; 8023 break; 8024 } 8025 } 8026 8027 if (!FoundCandidate) 8028 return SDValue(); 8029 8030 // Candidate found, construct the proper SDAG sequence with VINSERTB, 8031 // optionally with VECSHL if shift is required. 8032 if (Swap) 8033 std::swap(V1, V2); 8034 if (V2.isUndef()) 8035 V2 = V1; 8036 if (ShiftElts) { 8037 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2, 8038 DAG.getConstant(ShiftElts, dl, MVT::i32)); 8039 return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl, 8040 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 8041 } 8042 return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2, 8043 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 8044 } 8045 8046 /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled 8047 /// by the VINSERTH instruction introduced in ISA 3.0, else just return default 8048 /// SDValue. 8049 SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N, 8050 SelectionDAG &DAG) const { 8051 const unsigned NumHalfWords = 8; 8052 const unsigned BytesInVector = NumHalfWords * 2; 8053 // Check that the shuffle is on half-words. 8054 if (!isNByteElemShuffleMask(N, 2, 1)) 8055 return SDValue(); 8056 8057 bool IsLE = Subtarget.isLittleEndian(); 8058 SDLoc dl(N); 8059 SDValue V1 = N->getOperand(0); 8060 SDValue V2 = N->getOperand(1); 8061 unsigned ShiftElts = 0, InsertAtByte = 0; 8062 bool Swap = false; 8063 8064 // Shifts required to get the half-word we want at element 3. 8065 unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5}; 8066 unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4}; 8067 8068 uint32_t Mask = 0; 8069 uint32_t OriginalOrderLow = 0x1234567; 8070 uint32_t OriginalOrderHigh = 0x89ABCDEF; 8071 // Now we look at mask elements 0,2,4,6,8,10,12,14. Pack the mask into a 8072 // 32-bit space, only need 4-bit nibbles per element. 8073 for (unsigned i = 0; i < NumHalfWords; ++i) { 8074 unsigned MaskShift = (NumHalfWords - 1 - i) * 4; 8075 Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift); 8076 } 8077 8078 // For each mask element, find out if we're just inserting something 8079 // from V2 into V1 or vice versa. Possible permutations inserting an element 8080 // from V2 into V1: 8081 // X, 1, 2, 3, 4, 5, 6, 7 8082 // 0, X, 2, 3, 4, 5, 6, 7 8083 // 0, 1, X, 3, 4, 5, 6, 7 8084 // 0, 1, 2, X, 4, 5, 6, 7 8085 // 0, 1, 2, 3, X, 5, 6, 7 8086 // 0, 1, 2, 3, 4, X, 6, 7 8087 // 0, 1, 2, 3, 4, 5, X, 7 8088 // 0, 1, 2, 3, 4, 5, 6, X 8089 // Inserting from V1 into V2 will be similar, except mask range will be [8,15]. 8090 8091 bool FoundCandidate = false; 8092 // Go through the mask of half-words to find an element that's being moved 8093 // from one vector to the other. 
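  // (For reference, an identity shuffle has byte-mask entries 0,2,4,...,14
  // at the even positions; halving them gives half-word indices 0..7, so
  // Mask packs to 0x01234567, which is OriginalOrderLow.)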
8094 for (unsigned i = 0; i < NumHalfWords; ++i) { 8095 unsigned MaskShift = (NumHalfWords - 1 - i) * 4; 8096 uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF; 8097 uint32_t MaskOtherElts = ~(0xF << MaskShift); 8098 uint32_t TargetOrder = 0x0; 8099 8100 // If both vector operands for the shuffle are the same vector, the mask 8101 // will contain only elements from the first one and the second one will be 8102 // undef. 8103 if (V2.isUndef()) { 8104 ShiftElts = 0; 8105 unsigned VINSERTHSrcElem = IsLE ? 4 : 3; 8106 TargetOrder = OriginalOrderLow; 8107 Swap = false; 8108 // Skip if not the correct element or mask of other elements don't equal 8109 // to our expected order. 8110 if (MaskOneElt == VINSERTHSrcElem && 8111 (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) { 8112 InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2; 8113 FoundCandidate = true; 8114 break; 8115 } 8116 } else { // If both operands are defined. 8117 // Target order is [8,15] if the current mask is between [0,7]. 8118 TargetOrder = 8119 (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow; 8120 // Skip if mask of other elements don't equal our expected order. 8121 if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) { 8122 // We only need the last 3 bits for the number of shifts. 8123 ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7] 8124 : BigEndianShifts[MaskOneElt & 0x7]; 8125 InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2; 8126 Swap = MaskOneElt < NumHalfWords; 8127 FoundCandidate = true; 8128 break; 8129 } 8130 } 8131 } 8132 8133 if (!FoundCandidate) 8134 return SDValue(); 8135 8136 // Candidate found, construct the proper SDAG sequence with VINSERTH, 8137 // optionally with VECSHL if shift is required. 8138 if (Swap) 8139 std::swap(V1, V2); 8140 if (V2.isUndef()) 8141 V2 = V1; 8142 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 8143 if (ShiftElts) { 8144 // Double ShiftElts because we're left shifting on v16i8 type. 8145 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2, 8146 DAG.getConstant(2 * ShiftElts, dl, MVT::i32)); 8147 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl); 8148 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2, 8149 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 8150 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 8151 } 8152 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2); 8153 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2, 8154 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 8155 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 8156 } 8157 8158 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this 8159 /// is a shuffle we can handle in a single instruction, return it. Otherwise, 8160 /// return the code it can be lowered into. Worst case, it can always be 8161 /// lowered into a vperm. 
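/// For little-endian targets, the vperm operands are swapped and the
/// permutation mask is complemented with respect to 31 (see the end of this
/// function), because vperm itself is big-endian biased.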
8162 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 8163 SelectionDAG &DAG) const { 8164 SDLoc dl(Op); 8165 SDValue V1 = Op.getOperand(0); 8166 SDValue V2 = Op.getOperand(1); 8167 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 8168 EVT VT = Op.getValueType(); 8169 bool isLittleEndian = Subtarget.isLittleEndian(); 8170 8171 unsigned ShiftElts, InsertAtByte; 8172 bool Swap = false; 8173 if (Subtarget.hasP9Vector() && 8174 PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap, 8175 isLittleEndian)) { 8176 if (Swap) 8177 std::swap(V1, V2); 8178 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 8179 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2); 8180 if (ShiftElts) { 8181 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2, 8182 DAG.getConstant(ShiftElts, dl, MVT::i32)); 8183 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl, 8184 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 8185 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 8186 } 8187 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2, 8188 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 8189 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 8190 } 8191 8192 if (Subtarget.hasP9Altivec()) { 8193 SDValue NewISDNode; 8194 if ((NewISDNode = lowerToVINSERTH(SVOp, DAG))) 8195 return NewISDNode; 8196 8197 if ((NewISDNode = lowerToVINSERTB(SVOp, DAG))) 8198 return NewISDNode; 8199 } 8200 8201 if (Subtarget.hasVSX() && 8202 PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) { 8203 if (Swap) 8204 std::swap(V1, V2); 8205 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 8206 SDValue Conv2 = 8207 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2); 8208 8209 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2, 8210 DAG.getConstant(ShiftElts, dl, MVT::i32)); 8211 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl); 8212 } 8213 8214 if (Subtarget.hasVSX() && 8215 PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) { 8216 if (Swap) 8217 std::swap(V1, V2); 8218 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1); 8219 SDValue Conv2 = 8220 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? 
V1 : V2); 8221 8222 SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2, 8223 DAG.getConstant(ShiftElts, dl, MVT::i32)); 8224 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI); 8225 } 8226 8227 if (Subtarget.hasP9Vector()) { 8228 if (PPC::isXXBRHShuffleMask(SVOp)) { 8229 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 8230 SDValue ReveHWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v8i16, Conv); 8231 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord); 8232 } else if (PPC::isXXBRWShuffleMask(SVOp)) { 8233 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 8234 SDValue ReveWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v4i32, Conv); 8235 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord); 8236 } else if (PPC::isXXBRDShuffleMask(SVOp)) { 8237 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1); 8238 SDValue ReveDWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v2i64, Conv); 8239 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord); 8240 } else if (PPC::isXXBRQShuffleMask(SVOp)) { 8241 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1); 8242 SDValue ReveQWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v1i128, Conv); 8243 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord); 8244 } 8245 } 8246 8247 if (Subtarget.hasVSX()) { 8248 if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) { 8249 int SplatIdx = PPC::getVSPLTImmediate(SVOp, 4, DAG); 8250 8251 // If the source for the shuffle is a scalar_to_vector that came from a 8252 // 32-bit load, it will have used LXVWSX so we don't need to splat again. 8253 if (Subtarget.hasP9Vector() && 8254 ((isLittleEndian && SplatIdx == 3) || 8255 (!isLittleEndian && SplatIdx == 0))) { 8256 SDValue Src = V1.getOperand(0); 8257 if (Src.getOpcode() == ISD::SCALAR_TO_VECTOR && 8258 Src.getOperand(0).getOpcode() == ISD::LOAD && 8259 Src.getOperand(0).hasOneUse()) 8260 return V1; 8261 } 8262 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 8263 SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv, 8264 DAG.getConstant(SplatIdx, dl, MVT::i32)); 8265 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat); 8266 } 8267 8268 // Left shifts of 8 bytes are actually swaps. Convert accordingly. 8269 if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) { 8270 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1); 8271 SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv); 8272 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap); 8273 } 8274 } 8275 8276 if (Subtarget.hasQPX()) { 8277 if (VT.getVectorNumElements() != 4) 8278 return SDValue(); 8279 8280 if (V2.isUndef()) V2 = V1; 8281 8282 int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp); 8283 if (AlignIdx != -1) { 8284 return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2, 8285 DAG.getConstant(AlignIdx, dl, MVT::i32)); 8286 } else if (SVOp->isSplat()) { 8287 int SplatIdx = SVOp->getSplatIndex(); 8288 if (SplatIdx >= 4) { 8289 std::swap(V1, V2); 8290 SplatIdx -= 4; 8291 } 8292 8293 return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1, 8294 DAG.getConstant(SplatIdx, dl, MVT::i32)); 8295 } 8296 8297 // Lower this into a qvgpci/qvfperm pair. 8298 8299 // Compute the qvgpci literal 8300 unsigned idx = 0; 8301 for (unsigned i = 0; i < 4; ++i) { 8302 int m = SVOp->getMaskElt(i); 8303 unsigned mm = m >= 0 ? 
(unsigned) m : i; 8304 idx |= mm << (3-i)*3; 8305 } 8306 8307 SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64, 8308 DAG.getConstant(idx, dl, MVT::i32)); 8309 return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3); 8310 } 8311 8312 // Cases that are handled by instructions that take permute immediates 8313 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 8314 // selected by the instruction selector. 8315 if (V2.isUndef()) { 8316 if (PPC::isSplatShuffleMask(SVOp, 1) || 8317 PPC::isSplatShuffleMask(SVOp, 2) || 8318 PPC::isSplatShuffleMask(SVOp, 4) || 8319 PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) || 8320 PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) || 8321 PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 || 8322 PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) || 8323 PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) || 8324 PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) || 8325 PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) || 8326 PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) || 8327 PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) || 8328 (Subtarget.hasP8Altivec() && ( 8329 PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) || 8330 PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) || 8331 PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) { 8332 return Op; 8333 } 8334 } 8335 8336 // Altivec has a variety of "shuffle immediates" that take two vector inputs 8337 // and produce a fixed permutation. If any of these match, do not lower to 8338 // VPERM. 8339 unsigned int ShuffleKind = isLittleEndian ? 2 : 0; 8340 if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) || 8341 PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) || 8342 PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 || 8343 PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) || 8344 PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) || 8345 PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) || 8346 PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) || 8347 PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) || 8348 PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) || 8349 (Subtarget.hasP8Altivec() && ( 8350 PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) || 8351 PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) || 8352 PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG)))) 8353 return Op; 8354 8355 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 8356 // perfect shuffle table to emit an optimal matching sequence. 8357 ArrayRef<int> PermMask = SVOp->getMask(); 8358 8359 unsigned PFIndexes[4]; 8360 bool isFourElementShuffle = true; 8361 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number 8362 unsigned EltNo = 8; // Start out undef. 8363 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 8364 if (PermMask[i*4+j] < 0) 8365 continue; // Undef, ignore it. 8366 8367 unsigned ByteSource = PermMask[i*4+j]; 8368 if ((ByteSource & 3) != j) { 8369 isFourElementShuffle = false; 8370 break; 8371 } 8372 8373 if (EltNo == 8) { 8374 EltNo = ByteSource/4; 8375 } else if (EltNo != ByteSource/4) { 8376 isFourElementShuffle = false; 8377 break; 8378 } 8379 } 8380 PFIndexes[i] = EltNo; 8381 } 8382 8383 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 8384 // perfect shuffle vector to determine if it is cost effective to do this as 8385 // discrete instructions, or whether we should use a vperm. 8386 // For now, we skip this for little endian until such time as we have a 8387 // little-endian perfect shuffle table. 
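  // Each PFIndexes[i] is in the range [0,8], where 8 encodes an undef
  // element, so the four indices are combined as a base-9 number to index
  // the perfect shuffle table.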
8388 if (isFourElementShuffle && !isLittleEndian) { 8389 // Compute the index in the perfect shuffle table. 8390 unsigned PFTableIndex = 8391 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 8392 8393 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 8394 unsigned Cost = (PFEntry >> 30); 8395 8396 // Determining when to avoid vperm is tricky. Many things affect the cost 8397 // of vperm, particularly how many times the perm mask needs to be computed. 8398 // For example, if the perm mask can be hoisted out of a loop or is already 8399 // used (perhaps because there are multiple permutes with the same shuffle 8400 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of 8401 // the loop requires an extra register. 8402 // 8403 // As a compromise, we only emit discrete instructions if the shuffle can be 8404 // generated in 3 or fewer operations. When we have loop information 8405 // available, if this block is within a loop, we should avoid using vperm 8406 // for 3-operation perms and use a constant pool load instead. 8407 if (Cost < 3) 8408 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 8409 } 8410 8411 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant 8412 // vector that will get spilled to the constant pool. 8413 if (V2.isUndef()) V2 = V1; 8414 8415 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except 8416 // that it is in input element units, not in bytes. Convert now. 8417 8418 // For little endian, the order of the input vectors is reversed, and 8419 // the permutation mask is complemented with respect to 31. This is 8420 // necessary to produce proper semantics with the big-endian-biased vperm 8421 // instruction. 8422 EVT EltVT = V1.getValueType().getVectorElementType(); 8423 unsigned BytesPerElement = EltVT.getSizeInBits()/8; 8424 8425 SmallVector<SDValue, 16> ResultMask; 8426 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 8427 unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i]; 8428 8429 for (unsigned j = 0; j != BytesPerElement; ++j) 8430 if (isLittleEndian) 8431 ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j), 8432 dl, MVT::i32)); 8433 else 8434 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl, 8435 MVT::i32)); 8436 } 8437 8438 SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask); 8439 if (isLittleEndian) 8440 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), 8441 V2, V1, VPermMask); 8442 else 8443 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), 8444 V1, V2, VPermMask); 8445 } 8446 8447 /// getVectorCompareInfo - Given an intrinsic, return false if it is not a 8448 /// vector comparison. If it is, return true and fill in Opc/isDot with 8449 /// information about the intrinsic. 8450 static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc, 8451 bool &isDot, const PPCSubtarget &Subtarget) { 8452 unsigned IntrinsicID = 8453 cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue(); 8454 CompareOpc = -1; 8455 isDot = false; 8456 switch (IntrinsicID) { 8457 default: 8458 return false; 8459 // Comparison predicates. 
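  // In each case below, CompareOpc is the extended-opcode field of the
  // corresponding vector compare instruction (e.g. 6 for vcmpequb), and
  // isDot marks the record ("dot") forms, which set CR6.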
8460 case Intrinsic::ppc_altivec_vcmpbfp_p: 8461 CompareOpc = 966; 8462 isDot = true; 8463 break; 8464 case Intrinsic::ppc_altivec_vcmpeqfp_p: 8465 CompareOpc = 198; 8466 isDot = true; 8467 break; 8468 case Intrinsic::ppc_altivec_vcmpequb_p: 8469 CompareOpc = 6; 8470 isDot = true; 8471 break; 8472 case Intrinsic::ppc_altivec_vcmpequh_p: 8473 CompareOpc = 70; 8474 isDot = true; 8475 break; 8476 case Intrinsic::ppc_altivec_vcmpequw_p: 8477 CompareOpc = 134; 8478 isDot = true; 8479 break; 8480 case Intrinsic::ppc_altivec_vcmpequd_p: 8481 if (Subtarget.hasP8Altivec()) { 8482 CompareOpc = 199; 8483 isDot = true; 8484 } else 8485 return false; 8486 break; 8487 case Intrinsic::ppc_altivec_vcmpneb_p: 8488 case Intrinsic::ppc_altivec_vcmpneh_p: 8489 case Intrinsic::ppc_altivec_vcmpnew_p: 8490 case Intrinsic::ppc_altivec_vcmpnezb_p: 8491 case Intrinsic::ppc_altivec_vcmpnezh_p: 8492 case Intrinsic::ppc_altivec_vcmpnezw_p: 8493 if (Subtarget.hasP9Altivec()) { 8494 switch (IntrinsicID) { 8495 default: 8496 llvm_unreachable("Unknown comparison intrinsic."); 8497 case Intrinsic::ppc_altivec_vcmpneb_p: 8498 CompareOpc = 7; 8499 break; 8500 case Intrinsic::ppc_altivec_vcmpneh_p: 8501 CompareOpc = 71; 8502 break; 8503 case Intrinsic::ppc_altivec_vcmpnew_p: 8504 CompareOpc = 135; 8505 break; 8506 case Intrinsic::ppc_altivec_vcmpnezb_p: 8507 CompareOpc = 263; 8508 break; 8509 case Intrinsic::ppc_altivec_vcmpnezh_p: 8510 CompareOpc = 327; 8511 break; 8512 case Intrinsic::ppc_altivec_vcmpnezw_p: 8513 CompareOpc = 391; 8514 break; 8515 } 8516 isDot = true; 8517 } else 8518 return false; 8519 break; 8520 case Intrinsic::ppc_altivec_vcmpgefp_p: 8521 CompareOpc = 454; 8522 isDot = true; 8523 break; 8524 case Intrinsic::ppc_altivec_vcmpgtfp_p: 8525 CompareOpc = 710; 8526 isDot = true; 8527 break; 8528 case Intrinsic::ppc_altivec_vcmpgtsb_p: 8529 CompareOpc = 774; 8530 isDot = true; 8531 break; 8532 case Intrinsic::ppc_altivec_vcmpgtsh_p: 8533 CompareOpc = 838; 8534 isDot = true; 8535 break; 8536 case Intrinsic::ppc_altivec_vcmpgtsw_p: 8537 CompareOpc = 902; 8538 isDot = true; 8539 break; 8540 case Intrinsic::ppc_altivec_vcmpgtsd_p: 8541 if (Subtarget.hasP8Altivec()) { 8542 CompareOpc = 967; 8543 isDot = true; 8544 } else 8545 return false; 8546 break; 8547 case Intrinsic::ppc_altivec_vcmpgtub_p: 8548 CompareOpc = 518; 8549 isDot = true; 8550 break; 8551 case Intrinsic::ppc_altivec_vcmpgtuh_p: 8552 CompareOpc = 582; 8553 isDot = true; 8554 break; 8555 case Intrinsic::ppc_altivec_vcmpgtuw_p: 8556 CompareOpc = 646; 8557 isDot = true; 8558 break; 8559 case Intrinsic::ppc_altivec_vcmpgtud_p: 8560 if (Subtarget.hasP8Altivec()) { 8561 CompareOpc = 711; 8562 isDot = true; 8563 } else 8564 return false; 8565 break; 8566 8567 // VSX predicate comparisons use the same infrastructure 8568 case Intrinsic::ppc_vsx_xvcmpeqdp_p: 8569 case Intrinsic::ppc_vsx_xvcmpgedp_p: 8570 case Intrinsic::ppc_vsx_xvcmpgtdp_p: 8571 case Intrinsic::ppc_vsx_xvcmpeqsp_p: 8572 case Intrinsic::ppc_vsx_xvcmpgesp_p: 8573 case Intrinsic::ppc_vsx_xvcmpgtsp_p: 8574 if (Subtarget.hasVSX()) { 8575 switch (IntrinsicID) { 8576 case Intrinsic::ppc_vsx_xvcmpeqdp_p: 8577 CompareOpc = 99; 8578 break; 8579 case Intrinsic::ppc_vsx_xvcmpgedp_p: 8580 CompareOpc = 115; 8581 break; 8582 case Intrinsic::ppc_vsx_xvcmpgtdp_p: 8583 CompareOpc = 107; 8584 break; 8585 case Intrinsic::ppc_vsx_xvcmpeqsp_p: 8586 CompareOpc = 67; 8587 break; 8588 case Intrinsic::ppc_vsx_xvcmpgesp_p: 8589 CompareOpc = 83; 8590 break; 8591 case Intrinsic::ppc_vsx_xvcmpgtsp_p: 8592 CompareOpc = 75; 
8593 break; 8594 } 8595 isDot = true; 8596 } else 8597 return false; 8598 break; 8599 8600 // Normal Comparisons. 8601 case Intrinsic::ppc_altivec_vcmpbfp: 8602 CompareOpc = 966; 8603 break; 8604 case Intrinsic::ppc_altivec_vcmpeqfp: 8605 CompareOpc = 198; 8606 break; 8607 case Intrinsic::ppc_altivec_vcmpequb: 8608 CompareOpc = 6; 8609 break; 8610 case Intrinsic::ppc_altivec_vcmpequh: 8611 CompareOpc = 70; 8612 break; 8613 case Intrinsic::ppc_altivec_vcmpequw: 8614 CompareOpc = 134; 8615 break; 8616 case Intrinsic::ppc_altivec_vcmpequd: 8617 if (Subtarget.hasP8Altivec()) 8618 CompareOpc = 199; 8619 else 8620 return false; 8621 break; 8622 case Intrinsic::ppc_altivec_vcmpneb: 8623 case Intrinsic::ppc_altivec_vcmpneh: 8624 case Intrinsic::ppc_altivec_vcmpnew: 8625 case Intrinsic::ppc_altivec_vcmpnezb: 8626 case Intrinsic::ppc_altivec_vcmpnezh: 8627 case Intrinsic::ppc_altivec_vcmpnezw: 8628 if (Subtarget.hasP9Altivec()) 8629 switch (IntrinsicID) { 8630 default: 8631 llvm_unreachable("Unknown comparison intrinsic."); 8632 case Intrinsic::ppc_altivec_vcmpneb: 8633 CompareOpc = 7; 8634 break; 8635 case Intrinsic::ppc_altivec_vcmpneh: 8636 CompareOpc = 71; 8637 break; 8638 case Intrinsic::ppc_altivec_vcmpnew: 8639 CompareOpc = 135; 8640 break; 8641 case Intrinsic::ppc_altivec_vcmpnezb: 8642 CompareOpc = 263; 8643 break; 8644 case Intrinsic::ppc_altivec_vcmpnezh: 8645 CompareOpc = 327; 8646 break; 8647 case Intrinsic::ppc_altivec_vcmpnezw: 8648 CompareOpc = 391; 8649 break; 8650 } 8651 else 8652 return false; 8653 break; 8654 case Intrinsic::ppc_altivec_vcmpgefp: 8655 CompareOpc = 454; 8656 break; 8657 case Intrinsic::ppc_altivec_vcmpgtfp: 8658 CompareOpc = 710; 8659 break; 8660 case Intrinsic::ppc_altivec_vcmpgtsb: 8661 CompareOpc = 774; 8662 break; 8663 case Intrinsic::ppc_altivec_vcmpgtsh: 8664 CompareOpc = 838; 8665 break; 8666 case Intrinsic::ppc_altivec_vcmpgtsw: 8667 CompareOpc = 902; 8668 break; 8669 case Intrinsic::ppc_altivec_vcmpgtsd: 8670 if (Subtarget.hasP8Altivec()) 8671 CompareOpc = 967; 8672 else 8673 return false; 8674 break; 8675 case Intrinsic::ppc_altivec_vcmpgtub: 8676 CompareOpc = 518; 8677 break; 8678 case Intrinsic::ppc_altivec_vcmpgtuh: 8679 CompareOpc = 582; 8680 break; 8681 case Intrinsic::ppc_altivec_vcmpgtuw: 8682 CompareOpc = 646; 8683 break; 8684 case Intrinsic::ppc_altivec_vcmpgtud: 8685 if (Subtarget.hasP8Altivec()) 8686 CompareOpc = 711; 8687 else 8688 return false; 8689 break; 8690 } 8691 return true; 8692 } 8693 8694 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 8695 /// lower, do it, otherwise return null. 8696 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 8697 SelectionDAG &DAG) const { 8698 unsigned IntrinsicID = 8699 cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 8700 8701 SDLoc dl(Op); 8702 8703 if (IntrinsicID == Intrinsic::thread_pointer) { 8704 // Reads the thread pointer register, used for __builtin_thread_pointer. 8705 if (Subtarget.isPPC64()) 8706 return DAG.getRegister(PPC::X13, MVT::i64); 8707 return DAG.getRegister(PPC::R2, MVT::i32); 8708 } 8709 8710 // We are looking for absolute values here. 
8711 // The idea is to try to fit one of two patterns: 8712 // max (a, (0-a)) OR max ((0-a), a) 8713 if (Subtarget.hasP9Vector() && 8714 (IntrinsicID == Intrinsic::ppc_altivec_vmaxsw || 8715 IntrinsicID == Intrinsic::ppc_altivec_vmaxsh || 8716 IntrinsicID == Intrinsic::ppc_altivec_vmaxsb)) { 8717 SDValue V1 = Op.getOperand(1); 8718 SDValue V2 = Op.getOperand(2); 8719 if (V1.getSimpleValueType() == V2.getSimpleValueType() && 8720 (V1.getSimpleValueType() == MVT::v4i32 || 8721 V1.getSimpleValueType() == MVT::v8i16 || 8722 V1.getSimpleValueType() == MVT::v16i8)) { 8723 if ( V1.getOpcode() == ISD::SUB && 8724 ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) && 8725 V1.getOperand(1) == V2 ) { 8726 // Generate the abs instruction with the operands 8727 return DAG.getNode(ISD::ABS, dl, V2.getValueType(),V2); 8728 } 8729 8730 if ( V2.getOpcode() == ISD::SUB && 8731 ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) && 8732 V2.getOperand(1) == V1 ) { 8733 // Generate the abs instruction with the operands 8734 return DAG.getNode(ISD::ABS, dl, V1.getValueType(),V1); 8735 } 8736 } 8737 } 8738 8739 // If this is a lowered altivec predicate compare, CompareOpc is set to the 8740 // opcode number of the comparison. 8741 int CompareOpc; 8742 bool isDot; 8743 if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget)) 8744 return SDValue(); // Don't custom lower most intrinsics. 8745 8746 // If this is a non-dot comparison, make the VCMP node and we are done. 8747 if (!isDot) { 8748 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(), 8749 Op.getOperand(1), Op.getOperand(2), 8750 DAG.getConstant(CompareOpc, dl, MVT::i32)); 8751 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp); 8752 } 8753 8754 // Create the PPCISD altivec 'dot' comparison node. 8755 SDValue Ops[] = { 8756 Op.getOperand(2), // LHS 8757 Op.getOperand(3), // RHS 8758 DAG.getConstant(CompareOpc, dl, MVT::i32) 8759 }; 8760 EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue }; 8761 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 8762 8763 // Now that we have the comparison, emit a copy from the CR to a GPR. 8764 // This is flagged to the above dot comparison. 8765 SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32, 8766 DAG.getRegister(PPC::CR6, MVT::i32), 8767 CompNode.getValue(1)); 8768 8769 // Unpack the result based on how the target uses it. 8770 unsigned BitNo; // Bit # of CR6. 8771 bool InvertBit; // Invert result? 8772 switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) { 8773 default: // Can't happen, don't crash on invalid number though. 8774 case 0: // Return the value of the EQ bit of CR6. 8775 BitNo = 0; InvertBit = false; 8776 break; 8777 case 1: // Return the inverted value of the EQ bit of CR6. 8778 BitNo = 0; InvertBit = true; 8779 break; 8780 case 2: // Return the value of the LT bit of CR6. 8781 BitNo = 2; InvertBit = false; 8782 break; 8783 case 3: // Return the inverted value of the LT bit of CR6. 8784 BitNo = 2; InvertBit = true; 8785 break; 8786 } 8787 8788 // Shift the bit into the low position. 8789 Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags, 8790 DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32)); 8791 // Isolate the bit. 8792 Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags, 8793 DAG.getConstant(1, dl, MVT::i32)); 8794 8795 // If we are supposed to, toggle the bit. 
  // Unpack the result based on how the target uses it.
  unsigned BitNo;   // Bit # of CR6.
  bool InvertBit;   // Invert result?
  switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
  default:  // Can't happen, don't crash on invalid number though.
  case 0:   // Return the value of the EQ bit of CR6.
    BitNo = 0; InvertBit = false;
    break;
  case 1:   // Return the inverted value of the EQ bit of CR6.
    BitNo = 0; InvertBit = true;
    break;
  case 2:   // Return the value of the LT bit of CR6.
    BitNo = 2; InvertBit = false;
    break;
  case 3:   // Return the inverted value of the LT bit of CR6.
    BitNo = 2; InvertBit = true;
    break;
  }

  // Shift the bit into the low position.
  Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
                      DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
  // Isolate the bit.
  Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
                      DAG.getConstant(1, dl, MVT::i32));

  // If we are supposed to, toggle the bit.
  if (InvertBit)
    Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
                        DAG.getConstant(1, dl, MVT::i32));
  return Flags;
}

SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
                                               SelectionDAG &DAG) const {
  // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to
  // the beginning of the argument list.
  int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
  SDLoc DL(Op);
  switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) {
  case Intrinsic::ppc_cfence: {
    assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
    assert(Subtarget.isPPC64() && "Only 64-bit is supported for now.");
    return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other,
                                      DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
                                                  Op.getOperand(ArgStart + 1)),
                                      Op.getOperand(0)),
                   0);
  }
  default:
    break;
  }
  return SDValue();
}

SDValue PPCTargetLowering::LowerREM(SDValue Op, SelectionDAG &DAG) const {
  // Check for a DIV with the same operands as this REM.
  for (auto UI : Op.getOperand(1)->uses()) {
    if ((Op.getOpcode() == ISD::SREM && UI->getOpcode() == ISD::SDIV) ||
        (Op.getOpcode() == ISD::UREM && UI->getOpcode() == ISD::UDIV))
      if (UI->getOperand(0) == Op.getOperand(0) &&
          UI->getOperand(1) == Op.getOperand(1))
        return SDValue();
  }
  return Op;
}

// Lower scalar BSWAP64 to xxbrd.
SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // MTVSRDD
  Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0),
                   Op.getOperand(0));
  // XXBRD
  Op = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v2i64, Op);
  // MFVSRD
  int VectorIndex = 0;
  if (Subtarget.isLittleEndian())
    VectorIndex = 1;
  Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op,
                   DAG.getTargetConstant(VectorIndex, dl, MVT::i32));
  return Op;
}

// ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be
// compared to a value that is atomically loaded (atomic loads zero-extend).
SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op,
                                                SelectionDAG &DAG) const {
  assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP &&
         "Expecting an atomic compare-and-swap here.");
  SDLoc dl(Op);
  auto *AtomicNode = cast<AtomicSDNode>(Op.getNode());
  EVT MemVT = AtomicNode->getMemoryVT();
  if (MemVT.getSizeInBits() >= 32)
    return Op;

  SDValue CmpOp = Op.getOperand(2);
  // If this is already correctly zero-extended, leave it alone.
  auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits());
  if (DAG.MaskedValueIsZero(CmpOp, HighBits))
    return Op;
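  // Worked example (the values here are illustrative): for an i8 cmpxchg,
  // MaskVal below is (1 << 8) - 1 == 0xFF. A compare operand whose high 24
  // bits are not already known to be zero, say 0x1FF, is ANDed down to 0xFF
  // so it can be compared against the zero-extended value produced by the
  // atomic load.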
  // Clear the high bits of the compare operand.
  unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1;
  SDValue NewCmpOp =
      DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp,
                  DAG.getConstant(MaskVal, dl, MVT::i32));

  // Replace the existing compare operand with the properly zero-extended one.
  SmallVector<SDValue, 4> Ops;
  for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
    Ops.push_back(AtomicNode->getOperand(i));
  Ops[2] = NewCmpOp;
  MachineMemOperand *MMO = AtomicNode->getMemOperand();
  SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other);
  auto NodeTy =
    (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16;
  return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO);
}

SDValue PPCTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // For v2i64 (VSX), we can pattern-match the v2i32 case (using fp <-> int
  // instructions), but for smaller types, we need to first extend up to v2i32
  // before going any further.
  if (Op.getValueType() == MVT::v2i64) {
    EVT ExtVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    if (ExtVT != MVT::v2i32) {
      Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0));
      Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, Op,
                       DAG.getValueType(EVT::getVectorVT(*DAG.getContext(),
                                        ExtVT.getVectorElementType(), 4)));
      Op = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Op);
      Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v2i64, Op,
                       DAG.getValueType(MVT::v2i32));
    }

    return Op;
  }

  return SDValue();
}

SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // Create a stack slot that is 16-byte aligned.
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = MFI.CreateStackObject(16, 16, false);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  // Store the input value into Value#0 of the stack slot.
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
                               MachinePointerInfo());
  // Load it out.
  return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
                                                  SelectionDAG &DAG) const {
  assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
         "Should only be called for ISD::INSERT_VECTOR_ELT");

  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));
  // We have legal lowering for constant indices but not for variable ones.
  if (!C)
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types.
  if (VT == MVT::v8i16 || VT == MVT::v16i8) {
    SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2);
    unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8;
    unsigned InsertAtElement = C->getZExtValue();
    unsigned InsertAtByte = InsertAtElement * BytesInEachElement;
    if (Subtarget.isLittleEndian()) {
      InsertAtByte = (16 - BytesInEachElement) - InsertAtByte;
    }
    return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz,
                       DAG.getConstant(InsertAtByte, dl, MVT::i32));
  }
  return Op;
}
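// A quick check of the little-endian adjustment above (illustrative): for a
// v8i16 insert at element 2, BytesInEachElement is 2 and InsertAtByte starts
// out as 4; on a little-endian target the final byte number becomes
// (16 - 2) - 4 = 10, since VECINSERT numbers bytes within the vector register
// in big-endian order.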
SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDNode *N = Op.getNode();

  assert(N->getOperand(0).getValueType() == MVT::v4i1 &&
         "Unknown extract_vector_elt type");

  SDValue Value = N->getOperand(0);

  // The first part of this is like the store lowering except that we don't
  // need to track the chain.

  // The values are now known to be -1 (false) or 1 (true). To convert this
  // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
  // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
  Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);

  // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
  // understand how to form the extending load.
  SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);

  Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);

  // Now convert to an integer and store.
  Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
                      DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
                      Value);

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = MFI.CreateStackObject(16, 16, false);
  MachinePointerInfo PtrInfo =
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  SDValue StoreChain = DAG.getEntryNode();
  SDValue Ops[] = {StoreChain,
                   DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
                   Value, FIdx};
  SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);

  StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
                                       dl, VTs, Ops, MVT::v4i32, PtrInfo);

  // Extract the value requested.
  unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
  SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
  Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);

  SDValue IntVal =
      DAG.getLoad(MVT::i32, dl, StoreChain, Idx, PtrInfo.getWithOffset(Offset));

  if (!Subtarget.useCRBits())
    return IntVal;

  return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal);
}

/// Lowering for QPX v4i1 loads
SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc dl(Op);
  LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
  SDValue LoadChain = LN->getChain();
  SDValue BasePtr = LN->getBasePtr();

  if (Op.getValueType() == MVT::v4f64 ||
      Op.getValueType() == MVT::v4f32) {
    EVT MemVT = LN->getMemoryVT();
    unsigned Alignment = LN->getAlignment();

    // If this load is properly aligned, then it is legal.
    if (Alignment >= MemVT.getStoreSize())
      return Op;
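    // What follows is a scalarization sketch: a misaligned v4f64 load, for
    // instance one only known to be 8-byte aligned, is split into four scalar
    // loads at offsets 0, Stride, 2*Stride and 3*Stride (Stride being the
    // scalar store size), which are then recombined with a BUILD_VECTOR and a
    // TokenFactor over the four load chains.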
    EVT ScalarVT = Op.getValueType().getScalarType(),
        ScalarMemVT = MemVT.getScalarType();
    unsigned Stride = ScalarMemVT.getStoreSize();

    SDValue Vals[4], LoadChains[4];
    for (unsigned Idx = 0; Idx < 4; ++Idx) {
      SDValue Load;
      if (ScalarVT != ScalarMemVT)
        Load = DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain,
                              BasePtr,
                              LN->getPointerInfo().getWithOffset(Idx * Stride),
                              ScalarMemVT, MinAlign(Alignment, Idx * Stride),
                              LN->getMemOperand()->getFlags(), LN->getAAInfo());
      else
        Load = DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr,
                           LN->getPointerInfo().getWithOffset(Idx * Stride),
                           MinAlign(Alignment, Idx * Stride),
                           LN->getMemOperand()->getFlags(), LN->getAAInfo());

      if (Idx == 0 && LN->isIndexed()) {
        assert(LN->getAddressingMode() == ISD::PRE_INC &&
               "Unknown addressing mode on vector load");
        Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(),
                                  LN->getAddressingMode());
      }

      Vals[Idx] = Load;
      LoadChains[Idx] = Load.getValue(1);

      BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
                            DAG.getConstant(Stride, dl,
                                            BasePtr.getValueType()));
    }

    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
    SDValue Value = DAG.getBuildVector(Op.getValueType(), dl, Vals);

    if (LN->isIndexed()) {
      SDValue RetOps[] = { Value, Vals[0].getValue(1), TF };
      return DAG.getMergeValues(RetOps, dl);
    }

    SDValue RetOps[] = { Value, TF };
    return DAG.getMergeValues(RetOps, dl);
  }

  assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower");
  assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported");

  // To lower v4i1 from a byte array, we load the byte elements of the
  // vector and then reuse the BUILD_VECTOR logic.

  SDValue VectElmts[4], VectElmtChains[4];
  for (unsigned i = 0; i < 4; ++i) {
    SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
    Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);

    VectElmts[i] = DAG.getExtLoad(
        ISD::EXTLOAD, dl, MVT::i32, LoadChain, Idx,
        LN->getPointerInfo().getWithOffset(i), MVT::i8,
        /* Alignment = */ 1, LN->getMemOperand()->getFlags(), LN->getAAInfo());
    VectElmtChains[i] = VectElmts[i].getValue(1);
  }

  LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains);
  SDValue Value = DAG.getBuildVector(MVT::v4i1, dl, VectElmts);

  SDValue RVals[] = { Value, LoadChain };
  return DAG.getMergeValues(RVals, dl);
}

/// Lowering for QPX v4i1 stores
SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc dl(Op);
  StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
  SDValue StoreChain = SN->getChain();
  SDValue BasePtr = SN->getBasePtr();
  SDValue Value = SN->getValue();

  if (Value.getValueType() == MVT::v4f64 ||
      Value.getValueType() == MVT::v4f32) {
    EVT MemVT = SN->getMemoryVT();
    unsigned Alignment = SN->getAlignment();

    // If this store is properly aligned, then it is legal.
    if (Alignment >= MemVT.getStoreSize())
      return Op;

    EVT ScalarVT = Value.getValueType().getScalarType(),
        ScalarMemVT = MemVT.getScalarType();
    unsigned Stride = ScalarMemVT.getStoreSize();

    SDValue Stores[4];
    for (unsigned Idx = 0; Idx < 4; ++Idx) {
      SDValue Ex = DAG.getNode(
          ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value,
          DAG.getConstant(Idx, dl, getVectorIdxTy(DAG.getDataLayout())));
      SDValue Store;
      if (ScalarVT != ScalarMemVT)
        Store =
            DAG.getTruncStore(StoreChain, dl, Ex, BasePtr,
                              SN->getPointerInfo().getWithOffset(Idx * Stride),
                              ScalarMemVT, MinAlign(Alignment, Idx * Stride),
                              SN->getMemOperand()->getFlags(), SN->getAAInfo());
      else
        Store = DAG.getStore(StoreChain, dl, Ex, BasePtr,
                             SN->getPointerInfo().getWithOffset(Idx * Stride),
                             MinAlign(Alignment, Idx * Stride),
                             SN->getMemOperand()->getFlags(), SN->getAAInfo());

      if (Idx == 0 && SN->isIndexed()) {
        assert(SN->getAddressingMode() == ISD::PRE_INC &&
               "Unknown addressing mode on vector store");
        Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(),
                                    SN->getAddressingMode());
      }

      BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
                            DAG.getConstant(Stride, dl,
                                            BasePtr.getValueType()));
      Stores[Idx] = Store;
    }

    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);

    if (SN->isIndexed()) {
      SDValue RetOps[] = { TF, Stores[0].getValue(1) };
      return DAG.getMergeValues(RetOps, dl);
    }

    return TF;
  }

  assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported");
  assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower");

  // The values are now known to be -1 (false) or 1 (true). To convert this
  // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
  // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
  Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);

  // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
  // understand how to form the extending load.
  SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);

  Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);

  // Now convert to an integer and store.
  Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
                      DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
                      Value);

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = MFI.CreateStackObject(16, 16, false);
  MachinePointerInfo PtrInfo =
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  SDValue Ops[] = {StoreChain,
                   DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
                   Value, FIdx};
  SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);

  StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
                                       dl, VTs, Ops, MVT::v4i32, PtrInfo);

  // Move data into the byte array.
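  // Arithmetic check of the conversion above (illustrative): the boolean
  // lanes arrive as +/-1.0, and fma(-1.0, 0.5, 0.5) == 0.0 while
  // fma(+1.0, 0.5, 0.5) == 1.0, so after qvfctiwu each i32 lane of the stack
  // slot holds 0 or 1. Below, each 4-byte lane is loaded back and truncated
  // to a single byte of the v4i1 in-memory form.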
  SDValue Loads[4], LoadChains[4];
  for (unsigned i = 0; i < 4; ++i) {
    unsigned Offset = 4*i;
    SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
    Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);

    Loads[i] = DAG.getLoad(MVT::i32, dl, StoreChain, Idx,
                           PtrInfo.getWithOffset(Offset));
    LoadChains[i] = Loads[i].getValue(1);
  }

  StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);

  SDValue Stores[4];
  for (unsigned i = 0; i < 4; ++i) {
    SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
    Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);

    Stores[i] = DAG.getTruncStore(
        StoreChain, dl, Loads[i], Idx, SN->getPointerInfo().getWithOffset(i),
        MVT::i8, /* Alignment = */ 1, SN->getMemOperand()->getFlags(),
        SN->getAAInfo());
  }

  StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);

  return StoreChain;
}
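// A sketch of why the v4i32 path of LowerMUL below works: writing each 32-bit
// lane as a = a_hi*2^16 + a_lo and b = b_hi*2^16 + b_lo,
//   a*b mod 2^32 = a_lo*b_lo + ((a_hi*b_lo + a_lo*b_hi) << 16),
// since the a_hi*b_hi term is shifted out entirely. vmulouh produces the
// a_lo*b_lo products, and vmsumuhm applied to the halfword-rotated RHS
// produces the a_hi*b_lo + a_lo*b_hi sums, which are shifted up 16 bits and
// added in.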
SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  if (Op.getValueType() == MVT::v4i32) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    SDValue Zero  = BuildSplatI(  0, 1, MVT::v4i32, DAG, dl);
    SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl); // +16 as shift amt.

    SDValue RHSSwap =   // = vrlw RHS, 16
      BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);

    // Shrinkify inputs to v8i16.
    LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
    RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
    RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);

    // Low parts multiplied together, generating 32-bit results (we ignore the
    // top parts).
    SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
                                      LHS, RHS, DAG, dl, MVT::v4i32);

    SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
                                      LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
    // Shift the high parts up 16 bits.
    HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
                              Neg16, DAG, dl);
    return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
  } else if (Op.getValueType() == MVT::v8i16) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl);

    return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
                            LHS, RHS, Zero, DAG, dl);
  } else if (Op.getValueType() == MVT::v16i8) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
    bool isLittleEndian = Subtarget.isLittleEndian();

    // Multiply the even 8-bit parts, producing 16-bit sums.
    SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
                                         LHS, RHS, DAG, dl, MVT::v8i16);
    EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);

    // Multiply the odd 8-bit parts, producing 16-bit sums.
    SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
                                        LHS, RHS, DAG, dl, MVT::v8i16);
    OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);

    // Merge the results together.  Because vmuleub and vmuloub are
    // instructions with a big-endian bias, we must reverse the
    // element numbering and reverse the meaning of "odd" and "even"
    // when generating little endian code.
    int Ops[16];
    for (unsigned i = 0; i != 8; ++i) {
      if (isLittleEndian) {
        Ops[i*2  ] = 2*i;
        Ops[i*2+1] = 2*i+16;
      } else {
        Ops[i*2  ] = 2*i+1;
        Ops[i*2+1] = 2*i+1+16;
      }
    }
    if (isLittleEndian)
      return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
    else
      return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
  } else {
    llvm_unreachable("Unknown mul to lower!");
  }
}

/// LowerOperation - Provide custom lowering hooks for some operations.
///
SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Wasn't expecting to be able to lower this!");
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
  case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
  case ISD::SETCC:              return LowerSETCC(Op, DAG);
  case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);

  case ISD::VAARG:
    return LowerVAARG(Op, DAG);

  case ISD::VACOPY:
    return LowerVACOPY(Op, DAG);

  case ISD::STACKRESTORE:
    return LowerSTACKRESTORE(Op, DAG);

  case ISD::DYNAMIC_STACKALLOC:
    return LowerDYNAMIC_STACKALLOC(Op, DAG);

  case ISD::GET_DYNAMIC_AREA_OFFSET:
    return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);

  case ISD::EH_DWARF_CFA:
    return LowerEH_DWARF_CFA(Op, DAG);

  case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
  case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);

  case ISD::LOAD:               return LowerLOAD(Op, DAG);
  case ISD::STORE:              return LowerSTORE(Op, DAG);
  case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
  case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
  case ISD::FP_TO_UINT:
  case ISD::FP_TO_SINT:         return LowerFP_TO_INT(Op, DAG,
                                                      SDLoc(Op));
  case ISD::UINT_TO_FP:
  case ISD::SINT_TO_FP:         return LowerINT_TO_FP(Op, DAG);
  case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);

  // Lower 64-bit shifts.
  case ISD::SHL_PARTS:          return LowerSHL_PARTS(Op, DAG);
  case ISD::SRL_PARTS:          return LowerSRL_PARTS(Op, DAG);
  case ISD::SRA_PARTS:          return LowerSRA_PARTS(Op, DAG);

  // Vector-related lowering.
  case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
  case ISD::SIGN_EXTEND_INREG:  return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::MUL:                return LowerMUL(Op, DAG);

  // For counter-based loop handling.
  case ISD::INTRINSIC_W_CHAIN:  return SDValue();

  // Frame & Return address.
  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);

  case ISD::INTRINSIC_VOID:
    return LowerINTRINSIC_VOID(Op, DAG);
  case ISD::SREM:
  case ISD::UREM:
    return LowerREM(Op, DAG);
  case ISD::BSWAP:
    return LowerBSWAP(Op, DAG);
  case ISD::ATOMIC_CMP_SWAP:
    return LowerATOMIC_CMP_SWAP(Op, DAG);
  }
}

void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Do not know how to custom type legalize this operation!");
  case ISD::READCYCLECOUNTER: {
    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
    SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0));

    Results.push_back(RTB);
    Results.push_back(RTB.getValue(1));
    Results.push_back(RTB.getValue(2));
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
        Intrinsic::ppc_is_decremented_ctr_nonzero)
      break;

    assert(N->getValueType(0) == MVT::i1 &&
           "Unexpected result type for CTR decrement intrinsic");
    EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
                                 N->getValueType(0));
    SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
    SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
                                 N->getOperand(1));

    Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt));
    Results.push_back(NewInt.getValue(1));
    break;
  }
  case ISD::VAARG: {
    if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
      return;

    EVT VT = N->getValueType(0);

    if (VT == MVT::i64) {
      SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);

      Results.push_back(NewNode);
      Results.push_back(NewNode.getValue(1));
    }
    return;
  }
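  // Background for the FP_ROUND_INREG case below: ppcf128 is a double-double,
  // i.e. a pair of f64 values (Hi, Lo) whose sum represents the number.
  // Rounding such a value "in register" amounts to adding the two halves,
  // which FADDRTZ does in round-to-zero mode; the low half of the rebuilt
  // pair is a don't-care, hence the BUILD_PAIR of FPreg with itself.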
  case ISD::FP_ROUND_INREG: {
    assert(N->getValueType(0) == MVT::ppcf128);
    assert(N->getOperand(0).getValueType() == MVT::ppcf128);
    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
                             MVT::f64, N->getOperand(0),
                             DAG.getIntPtrConstant(0, dl));
    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
                             MVT::f64, N->getOperand(0),
                             DAG.getIntPtrConstant(1, dl));

    // Add the two halves of the long double in round-to-zero mode.
    SDValue FPreg = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi);

    // We know the low half is about to be thrown away, so just use something
    // convenient.
    Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128,
                                  FPreg, FPreg));
    return;
  }
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    // LowerFP_TO_INT() can only handle f32 and f64.
    if (N->getOperand(0).getValueType() == MVT::ppcf128)
      return;
    Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
    return;
  }
}

//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

static Instruction *callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *Func = Intrinsic::getDeclaration(M, Id);
  return Builder.CreateCall(Func, {});
}

// The mappings for emitLeading/TrailingFence are taken from
// http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                 Instruction *Inst,
                                                 AtomicOrdering Ord) const {
  if (Ord == AtomicOrdering::SequentiallyConsistent)
    return callIntrinsic(Builder, Intrinsic::ppc_sync);
  if (isReleaseOrStronger(Ord))
    return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
  return nullptr;
}

Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                  Instruction *Inst,
                                                  AtomicOrdering Ord) const {
  if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
    // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
    // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
    // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
    if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
      return Builder.CreateCall(
          Intrinsic::getDeclaration(
              Builder.GetInsertBlock()->getParent()->getParent(),
              Intrinsic::ppc_cfence, {Inst->getType()}),
          {Inst});
    // FIXME: Can use isync for rmw operation.
    return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
  }
  return nullptr;
}
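// An illustrative summary of the mapping implemented by the two hooks above,
// following the cited C++11 mapping tables: a seq_cst operation gets a
// leading 'sync'; a release (or stronger) operation gets a leading 'lwsync';
// an acquire (or stronger) atomic load gets a trailing 'lwsync', except that
// a 64-bit acquire load uses llvm.ppc.cfence (CFENCE8) instead.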
MachineBasicBlock *
PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
                                    unsigned AtomicSize,
                                    unsigned BinOpcode,
                                    unsigned CmpOpcode,
                                    unsigned CmpPred) const {
  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  auto LoadMnemonic = PPC::LDARX;
  auto StoreMnemonic = PPC::STDCX;
  switch (AtomicSize) {
  default:
    llvm_unreachable("Unexpected size of atomic entity");
  case 1:
    LoadMnemonic = PPC::LBARX;
    StoreMnemonic = PPC::STBCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "Call this only with size >= 4 unless partword atomics are available");
    break;
  case 2:
    LoadMnemonic = PPC::LHARX;
    StoreMnemonic = PPC::STHCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "Call this only with size >= 4 unless partword atomics are available");
    break;
  case 4:
    LoadMnemonic = PPC::LWARX;
    StoreMnemonic = PPC::STWCX;
    break;
  case 8:
    LoadMnemonic = PPC::LDARX;
    StoreMnemonic = PPC::STDCX;
    break;
  }

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineFunction::iterator It = ++BB->getIterator();

  unsigned dest = MI.getOperand(0).getReg();
  unsigned ptrA = MI.getOperand(1).getReg();
  unsigned ptrB = MI.getOperand(2).getReg();
  unsigned incr = MI.getOperand(3).getReg();
  DebugLoc dl = MI.getDebugLoc();

  MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *loop2MBB =
    CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
  MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, loopMBB);
  if (CmpOpcode)
    F->insert(It, loop2MBB);
  F->insert(It, exitMBB);
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  MachineRegisterInfo &RegInfo = F->getRegInfo();
  unsigned TmpReg = (!BinOpcode) ? incr :
    RegInfo.createVirtualRegister(AtomicSize == 8 ? &PPC::G8RCRegClass
                                                  : &PPC::GPRCRegClass);

  //  thisMBB:
  //   ...
  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  //  loopMBB:
  //   l[wd]arx dest, ptr
  //   add r0, dest, incr
  //   st[wd]cx. r0, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB

  // For max/min...
  //  loopMBB:
  //   l[wd]arx dest, ptr
  //   cmpl?[wd] incr, dest
  //   bgt exitMBB
  //  loop2MBB:
  //   st[wd]cx. incr, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB

  BB = loopMBB;
  BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
    .addReg(ptrA).addReg(ptrB);
  if (BinOpcode)
    BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
  if (CmpOpcode) {
    // Signed comparisons of byte or halfword values must be sign-extended.
    if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
      unsigned ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
      BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
              ExtReg).addReg(dest);
      BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
        .addReg(incr).addReg(ExtReg);
    } else
      BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
        .addReg(incr).addReg(dest);

    BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(exitMBB);
    BB = loop2MBB;
  }
  BuildMI(BB, dl, TII->get(StoreMnemonic))
    .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
  BuildMI(BB, dl, TII->get(PPC::BCC))
    .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
  BB = exitMBB;
  return BB;
}
MachineBasicBlock *
PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr &MI,
                                            MachineBasicBlock *BB,
                                            bool is8bit, // operation
                                            unsigned BinOpcode,
                                            unsigned CmpOpcode,
                                            unsigned CmpPred) const {
  // If we support part-word atomic mnemonics, just use them.
  if (Subtarget.hasPartwordAtomics())
    return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode,
                            CmpOpcode, CmpPred);

  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  // In 64 bit mode we have to use 64 bits for addresses, even though the
  // lwarx/stwcx are 32 bits.  With the 32-bit atomics we can use address
  // registers without caring whether they're 32 or 64, but here we're
  // doing actual arithmetic on the addresses.
  bool is64bit = Subtarget.isPPC64();
  bool isLittleEndian = Subtarget.isLittleEndian();
  unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineFunction::iterator It = ++BB->getIterator();

  unsigned dest = MI.getOperand(0).getReg();
  unsigned ptrA = MI.getOperand(1).getReg();
  unsigned ptrB = MI.getOperand(2).getReg();
  unsigned incr = MI.getOperand(3).getReg();
  DebugLoc dl = MI.getDebugLoc();

  MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *loop2MBB =
    CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
  MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, loopMBB);
  if (CmpOpcode)
    F->insert(It, loop2MBB);
  F->insert(It, exitMBB);
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  MachineRegisterInfo &RegInfo = F->getRegInfo();
  const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass
                                          : &PPC::GPRCRegClass;
  unsigned PtrReg = RegInfo.createVirtualRegister(RC);
  unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
  unsigned ShiftReg =
    isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(RC);
  unsigned Incr2Reg = RegInfo.createVirtualRegister(RC);
  unsigned MaskReg = RegInfo.createVirtualRegister(RC);
  unsigned Mask2Reg = RegInfo.createVirtualRegister(RC);
  unsigned Mask3Reg = RegInfo.createVirtualRegister(RC);
  unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC);
  unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC);
  unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC);
  unsigned TmpDestReg = RegInfo.createVirtualRegister(RC);
  unsigned Ptr1Reg;
  unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC);

  //  thisMBB:
  //   ...
  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  // The 4-byte load must be aligned, while a char or short may be
  // anywhere in the word.  Hence all this nasty bookkeeping code.
  //   add ptr1, ptrA, ptrB [copy if ptrA==0]
  //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
  //   xori shift, shift1, 24 [16]
  //   rlwinm ptr, ptr1, 0, 0, 29
  //   slw incr2, incr, shift
  //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
  //   slw mask, mask2, shift
  //  loopMBB:
  //   lwarx tmpDest, ptr
  //   add tmp, tmpDest, incr2
  //   andc tmp2, tmpDest, mask
  //   and tmp3, tmp, mask
  //   or tmp4, tmp3, tmp2
  //   stwcx. tmp4, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB
  //   srw dest, tmpDest, shift
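  // Worked example of the bookkeeping above (big-endian case, illustrative):
  // for a halfword at address p with p % 4 == 2, the rlwinm yields
  // shift1 = (p & 2) << 3 = 16, and the xori with 16 gives shift = 0, i.e.
  // the halfword occupies bits 15..0 of the aligned word. On little-endian
  // targets no xori is needed, because the in-word byte order already
  // matches the shift count.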
  if (ptrA != ZeroReg) {
    Ptr1Reg = RegInfo.createVirtualRegister(RC);
    BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
      .addReg(ptrA).addReg(ptrB);
  } else {
    Ptr1Reg = ptrB;
  }
  BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
    .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
  if (!isLittleEndian)
    BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
      .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
  if (is64bit)
    BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
      .addReg(Ptr1Reg).addImm(0).addImm(61);
  else
    BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
      .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
  BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg)
    .addReg(incr).addReg(ShiftReg);
  if (is8bit)
    BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
  else {
    BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
    BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg).addReg(Mask3Reg).addImm(65535);
  }
  BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
    .addReg(Mask2Reg).addReg(ShiftReg);

  BB = loopMBB;
  BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
    .addReg(ZeroReg).addReg(PtrReg);
  if (BinOpcode)
    BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
      .addReg(Incr2Reg).addReg(TmpDestReg);
  BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg)
    .addReg(TmpDestReg).addReg(MaskReg);
  BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg)
    .addReg(TmpReg).addReg(MaskReg);
  if (CmpOpcode) {
    // For unsigned comparisons, we can directly compare the shifted values.
    // For signed comparisons we shift and sign extend.
    unsigned SReg = RegInfo.createVirtualRegister(RC);
    BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), SReg)
      .addReg(TmpDestReg).addReg(MaskReg);
    unsigned ValueReg = SReg;
    unsigned CmpReg = Incr2Reg;
    if (CmpOpcode == PPC::CMPW) {
      ValueReg = RegInfo.createVirtualRegister(RC);
      BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
        .addReg(SReg).addReg(ShiftReg);
      unsigned ValueSReg = RegInfo.createVirtualRegister(RC);
      BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
        .addReg(ValueReg);
      ValueReg = ValueSReg;
      CmpReg = incr;
    }
    BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
      .addReg(CmpReg).addReg(ValueReg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(exitMBB);
    BB = loop2MBB;
  }
  BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg)
    .addReg(Tmp3Reg).addReg(Tmp2Reg);
  BuildMI(BB, dl, TII->get(PPC::STWCX))
    .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg);
  BuildMI(BB, dl, TII->get(PPC::BCC))
    .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
  BB = exitMBB;
  BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg)
    .addReg(ShiftReg);
  return BB;
}

llvm::MachineBasicBlock *
PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
                                    MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();

  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  const BasicBlock *BB = MBB->getBasicBlock();
  MachineFunction::iterator I = ++MBB->getIterator();

  // Memory Reference
  MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
  MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();

  unsigned DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
  assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
  unsigned mainDstReg = MRI.createVirtualRegister(RC);
  unsigned restoreDstReg = MRI.createVirtualRegister(RC);

  MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");
  // For v = setjmp(buf), we generate
  //
  // thisMBB:
  //  SjLjSetup mainMBB
  //  bl mainMBB
  //  v_restore = 1
  //  b sinkMBB
  //
  // mainMBB:
  //  buf[LabelOffset] = LR
  //  v_main = 0
  //
  // sinkMBB:
  //  v = phi(main, restore)
  //

  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
  MF->insert(I, mainMBB);
  MF->insert(I, sinkMBB);

  MachineInstrBuilder MIB;

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // Note that the structure of the jmp_buf used here is not compatible
  // with that used by libc, and is not designed to be. Specifically, it
  // stores only those 'reserved' registers that LLVM does not otherwise
  // understand how to spill. Also, by convention, by the time this
  // intrinsic is called, Clang has already stored the frame address in the
  // first slot of the buffer and stack address in the third. Following the
  // X86 target code, we'll store the jump address in the second slot. We also
  // need to save the TOC pointer (R2) to handle jumps between shared
  // libraries, and that will be stored in the fourth slot. The thread
  // identifier (R13) is not affected.

  // thisMBB:
  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  const int64_t TOCOffset   = 3 * PVT.getStoreSize();
  const int64_t BPOffset    = 4 * PVT.getStoreSize();

  // Prepare the IP in a register.
  const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
  unsigned LabelReg = MRI.createVirtualRegister(PtrRC);
  unsigned BufReg = MI.getOperand(1).getReg();

  if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) {
    setUsesTOCBasePtr(*MBB->getParent());
    MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
            .addReg(PPC::X2)
            .addImm(TOCOffset)
            .addReg(BufReg);
    MIB.setMemRefs(MMOBegin, MMOEnd);
  }
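  // Buffer layout used here, in pointer-sized slots (summarizing the note
  // above): slot 0 holds the frame address (stored by Clang), slot 1
  // (LabelOffset) the resume IP, slot 2 the stack pointer (also stored by
  // Clang), slot 3 (TOCOffset) the TOC pointer R2, and slot 4 (BPOffset) the
  // base pointer.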
  // Naked functions never have a base pointer, and so we use r1. For all
  // other functions, this decision must be delayed until during PEI.
  unsigned BaseReg;
  if (MF->getFunction().hasFnAttribute(Attribute::Naked))
    BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
  else
    BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;

  MIB = BuildMI(*thisMBB, MI, DL,
                TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
          .addReg(BaseReg)
          .addImm(BPOffset)
          .addReg(BufReg);
  MIB.setMemRefs(MMOBegin, MMOEnd);

  // Setup
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
  MIB.addRegMask(TRI->getNoPreservedMask());

  BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);

  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
          .addMBB(mainMBB);
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);

  thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
  thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());

  // mainMBB:
  //  mainDstReg = 0
  MIB =
    BuildMI(mainMBB, DL,
            TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);

  // Store IP
  if (Subtarget.isPPC64()) {
    MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
            .addReg(LabelReg)
            .addImm(LabelOffset)
            .addReg(BufReg);
  } else {
    MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
            .addReg(LabelReg)
            .addImm(LabelOffset)
            .addReg(BufReg);
  }

  MIB.setMemRefs(MMOBegin, MMOEnd);

  BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
  mainMBB->addSuccessor(sinkMBB);

  // sinkMBB:
  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
          TII->get(PPC::PHI), DstReg)
    .addReg(mainDstReg).addMBB(mainMBB)
    .addReg(restoreDstReg).addMBB(thisMBB);

  MI.eraseFromParent();
  return sinkMBB;
}
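// Usage note for the block structure above: the path through mainMBB loads 0
// into the result (a direct call to setjmp returns 0), while the tail of
// thisMBB, reached when a longjmp resumes at the address captured after the
// BCL, loads 1; the PHI in sinkMBB merges the two, matching the usual setjmp
// return convention.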
MachineBasicBlock *
PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
                                     MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  // Memory Reference
  MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
  MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();

  MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");

  const TargetRegisterClass *RC =
    (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
  unsigned Tmp = MRI.createVirtualRegister(RC);
  // Since FP is only updated here but NOT referenced, it's treated as GPR.
  unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
  unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
  unsigned BP =
    (PVT == MVT::i64)
        ? PPC::X30
        : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
                                                            : PPC::R30);

  MachineInstrBuilder MIB;

  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  const int64_t SPOffset    = 2 * PVT.getStoreSize();
  const int64_t TOCOffset   = 3 * PVT.getStoreSize();
  const int64_t BPOffset    = 4 * PVT.getStoreSize();

  unsigned BufReg = MI.getOperand(0).getReg();

  // Reload FP (the jumped-to function may not have had a
  // frame pointer, and if so, then its r31 will be restored
  // as necessary).
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
            .addImm(0)
            .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
            .addImm(0)
            .addReg(BufReg);
  }
  MIB.setMemRefs(MMOBegin, MMOEnd);

  // Reload IP
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
            .addImm(LabelOffset)
            .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
            .addImm(LabelOffset)
            .addReg(BufReg);
  }
  MIB.setMemRefs(MMOBegin, MMOEnd);

  // Reload SP
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
            .addImm(SPOffset)
            .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
            .addImm(SPOffset)
            .addReg(BufReg);
  }
  MIB.setMemRefs(MMOBegin, MMOEnd);

  // Reload BP
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
            .addImm(BPOffset)
            .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
            .addImm(BPOffset)
            .addReg(BufReg);
  }
  MIB.setMemRefs(MMOBegin, MMOEnd);

  // Reload TOC
  if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
    setUsesTOCBasePtr(*MBB->getParent());
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
            .addImm(TOCOffset)
            .addReg(BufReg);

    MIB.setMemRefs(MMOBegin, MMOEnd);
  }

  // Jump
  BuildMI(*MBB, MI, DL,
          TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
  BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));

  MI.eraseFromParent();
  return MBB;
}

MachineBasicBlock *
PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                               MachineBasicBlock *BB) const {
  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT) {
    if (Subtarget.isPPC64() && Subtarget.isSVR4ABI() &&
        MI.getOpcode() == TargetOpcode::PATCHPOINT) {
      // Call lowering should have added an r2 operand to indicate a dependence
      // on the TOC base pointer value. It can't however, because there is no
      // way to mark the dependence as implicit there, and so the stackmap code
      // will confuse it with a regular operand. Instead, add the dependence
      // here.
      setUsesTOCBasePtr(*BB->getParent());
      MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
    }

    return emitPatchPoint(MI, BB);
  }

  if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
      MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
    return emitEHSjLjSetJmp(MI, BB);
  } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
             MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
    return emitEHSjLjLongJmp(MI, BB);
  }

  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  // To "insert" these instructions we actually have to insert their
  // control-flow patterns.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  MachineFunction *F = BB->getParent();

  if (Subtarget.hasISEL() &&
      (MI.getOpcode() == PPC::SELECT_CC_I4 ||
       MI.getOpcode() == PPC::SELECT_CC_I8 ||
       MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8)) {
    SmallVector<MachineOperand, 2> Cond;
    if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
        MI.getOpcode() == PPC::SELECT_CC_I8)
      Cond.push_back(MI.getOperand(4));
    else
      Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
    Cond.push_back(MI.getOperand(1));

    DebugLoc dl = MI.getDebugLoc();
    TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
                      MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
  } else if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
             MI.getOpcode() == PPC::SELECT_CC_I8 ||
             MI.getOpcode() == PPC::SELECT_CC_F4 ||
             MI.getOpcode() == PPC::SELECT_CC_F8 ||
             MI.getOpcode() == PPC::SELECT_CC_QFRC ||
             MI.getOpcode() == PPC::SELECT_CC_QSRC ||
             MI.getOpcode() == PPC::SELECT_CC_QBRC ||
             MI.getOpcode() == PPC::SELECT_CC_VRRC ||
             MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
             MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
             MI.getOpcode() == PPC::SELECT_CC_VSRC ||
             MI.getOpcode() == PPC::SELECT_I4 ||
             MI.getOpcode() == PPC::SELECT_I8 ||
             MI.getOpcode() == PPC::SELECT_F4 ||
             MI.getOpcode() == PPC::SELECT_F8 ||
             MI.getOpcode() == PPC::SELECT_QFRC ||
             MI.getOpcode() == PPC::SELECT_QSRC ||
             MI.getOpcode() == PPC::SELECT_QBRC ||
             MI.getOpcode() == PPC::SELECT_VRRC ||
             MI.getOpcode() == PPC::SELECT_VSFRC ||
             MI.getOpcode() == PPC::SELECT_VSSRC ||
             MI.getOpcode() == PPC::SELECT_VSRC) {
    // The incoming instruction knows the destination vreg to set, the
    // condition code register to branch on, the true/false values to
    // select between, and a branch opcode to use.

    // thisMBB:
    // ...
    //  TrueVal = ...
    //  cmpTY ccX, r1, r2
    //  bCC copy1MBB
    //  fallthrough --> copy0MBB
    MachineBasicBlock *thisMBB = BB;
    MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
    DebugLoc dl = MI.getDebugLoc();
    F->insert(It, copy0MBB);
    F->insert(It, sinkMBB);

    // Transfer the remainder of BB and its successor edges to sinkMBB.
    sinkMBB->splice(sinkMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

    // Next, add the true and fallthrough blocks as its successors.
    BB->addSuccessor(copy0MBB);
    BB->addSuccessor(sinkMBB);

    if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 ||
        MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 ||
        MI.getOpcode() == PPC::SELECT_QFRC ||
        MI.getOpcode() == PPC::SELECT_QSRC ||
        MI.getOpcode() == PPC::SELECT_QBRC ||
        MI.getOpcode() == PPC::SELECT_VRRC ||
        MI.getOpcode() == PPC::SELECT_VSFRC ||
        MI.getOpcode() == PPC::SELECT_VSSRC ||
        MI.getOpcode() == PPC::SELECT_VSRC) {
      BuildMI(BB, dl, TII->get(PPC::BC))
        .addReg(MI.getOperand(1).getReg())
        .addMBB(sinkMBB);
    } else {
      unsigned SelectPred = MI.getOperand(4).getImm();
      BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(SelectPred)
        .addReg(MI.getOperand(1).getReg())
        .addMBB(sinkMBB);
    }

    //  copy0MBB:
    //   %FalseValue = ...
    //   # fallthrough to sinkMBB
    BB = copy0MBB;

    // Update machine-CFG edges
    BB->addSuccessor(sinkMBB);

    //  sinkMBB:
    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
    //  ...
    BB = sinkMBB;
    BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg())
      .addReg(MI.getOperand(3).getReg())
      .addMBB(copy0MBB)
      .addReg(MI.getOperand(2).getReg())
      .addMBB(thisMBB);
  } else if (MI.getOpcode() == PPC::ReadTB) {
    // To read the 64-bit time-base register on a 32-bit target, we read the
    // two halves. Should the counter have wrapped while it was being read, we
    // need to try again.
    // ...
    // readLoop:
    //   mfspr Rx, TBU   # load from TBU
    //   mfspr Ry, TB    # load from TB
    //   mfspr Rz, TBU   # load from TBU
    //   cmpw crX, Rx, Rz  # check if 'old'='new'
    //   bne readLoop      # branch if they're not equal
    // ...

    MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
    DebugLoc dl = MI.getDebugLoc();
    F->insert(It, readMBB);
    F->insert(It, sinkMBB);

    // Transfer the remainder of BB and its successor edges to sinkMBB.
    sinkMBB->splice(sinkMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

    BB->addSuccessor(readMBB);
    BB = readMBB;

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    unsigned ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
    unsigned LoReg = MI.getOperand(0).getReg();
    unsigned HiReg = MI.getOperand(1).getReg();

    BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
    BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
    BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269);

    unsigned CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);

    BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
      .addReg(HiReg).addReg(ReadAgainReg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(PPC::PRED_NE).addReg(CmpReg).addMBB(readMBB);

    BB->addSuccessor(readMBB);
    BB->addSuccessor(sinkMBB);
  }
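  // Dispatch note for the chain below (summary of behavior, not new logic):
  // plain read-modify-write pseudos pass their arithmetic opcode as
  // BinOpcode, ATOMIC_SWAP passes 0, and the min/max variants pass
  // BinOpcode == 0 plus a compare opcode and predicate. For example,
  // ATOMIC_LOAD_MIN_I32 uses CMPW with PRED_GE, so the loop exits early,
  // skipping the store, whenever the new value is not smaller than the
  // current one.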
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::AND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::OR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE);

  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0);
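  // Sketch of the compare-and-swap expansion below: loop1MBB load-reserves
  // and compares against oldval; on a mismatch it branches to midMBB, whose
  // conditional store of the unchanged value serves to clear the outstanding
  // reservation before exiting. On a match, loop2MBB store-conditionals
  // newval and retries from loop1MBB if the reservation was lost.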
&& "No support partword atomics."); 10338 break; 10339 case PPC::ATOMIC_CMP_SWAP_I16: 10340 LoadMnemonic = PPC::LHARX; 10341 StoreMnemonic = PPC::STHCX; 10342 assert(Subtarget.hasPartwordAtomics() && "No support partword atomics."); 10343 break; 10344 case PPC::ATOMIC_CMP_SWAP_I32: 10345 LoadMnemonic = PPC::LWARX; 10346 StoreMnemonic = PPC::STWCX; 10347 break; 10348 case PPC::ATOMIC_CMP_SWAP_I64: 10349 LoadMnemonic = PPC::LDARX; 10350 StoreMnemonic = PPC::STDCX; 10351 break; 10352 } 10353 unsigned dest = MI.getOperand(0).getReg(); 10354 unsigned ptrA = MI.getOperand(1).getReg(); 10355 unsigned ptrB = MI.getOperand(2).getReg(); 10356 unsigned oldval = MI.getOperand(3).getReg(); 10357 unsigned newval = MI.getOperand(4).getReg(); 10358 DebugLoc dl = MI.getDebugLoc(); 10359 10360 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 10361 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 10362 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 10363 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 10364 F->insert(It, loop1MBB); 10365 F->insert(It, loop2MBB); 10366 F->insert(It, midMBB); 10367 F->insert(It, exitMBB); 10368 exitMBB->splice(exitMBB->begin(), BB, 10369 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 10370 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 10371 10372 // thisMBB: 10373 // ... 10374 // fallthrough --> loopMBB 10375 BB->addSuccessor(loop1MBB); 10376 10377 // loop1MBB: 10378 // l[bhwd]arx dest, ptr 10379 // cmp[wd] dest, oldval 10380 // bne- midMBB 10381 // loop2MBB: 10382 // st[bhwd]cx. newval, ptr 10383 // bne- loopMBB 10384 // b exitBB 10385 // midMBB: 10386 // st[bhwd]cx. dest, ptr 10387 // exitBB: 10388 BB = loop1MBB; 10389 BuildMI(BB, dl, TII->get(LoadMnemonic), dest) 10390 .addReg(ptrA).addReg(ptrB); 10391 BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0) 10392 .addReg(oldval).addReg(dest); 10393 BuildMI(BB, dl, TII->get(PPC::BCC)) 10394 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 10395 BB->addSuccessor(loop2MBB); 10396 BB->addSuccessor(midMBB); 10397 10398 BB = loop2MBB; 10399 BuildMI(BB, dl, TII->get(StoreMnemonic)) 10400 .addReg(newval).addReg(ptrA).addReg(ptrB); 10401 BuildMI(BB, dl, TII->get(PPC::BCC)) 10402 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 10403 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 10404 BB->addSuccessor(loop1MBB); 10405 BB->addSuccessor(exitMBB); 10406 10407 BB = midMBB; 10408 BuildMI(BB, dl, TII->get(StoreMnemonic)) 10409 .addReg(dest).addReg(ptrA).addReg(ptrB); 10410 BB->addSuccessor(exitMBB); 10411 10412 // exitMBB: 10413 // ... 10414 BB = exitMBB; 10415 } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 || 10416 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) { 10417 // We must use 64-bit registers for addresses when targeting 64-bit, 10418 // since we're actually doing arithmetic on them. Other registers 10419 // can be 32-bit. 
10420 bool is64bit = Subtarget.isPPC64(); 10421 bool isLittleEndian = Subtarget.isLittleEndian(); 10422 bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8; 10423 10424 unsigned dest = MI.getOperand(0).getReg(); 10425 unsigned ptrA = MI.getOperand(1).getReg(); 10426 unsigned ptrB = MI.getOperand(2).getReg(); 10427 unsigned oldval = MI.getOperand(3).getReg(); 10428 unsigned newval = MI.getOperand(4).getReg(); 10429 DebugLoc dl = MI.getDebugLoc(); 10430 10431 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 10432 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 10433 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 10434 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 10435 F->insert(It, loop1MBB); 10436 F->insert(It, loop2MBB); 10437 F->insert(It, midMBB); 10438 F->insert(It, exitMBB); 10439 exitMBB->splice(exitMBB->begin(), BB, 10440 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 10441 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 10442 10443 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10444 const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass 10445 : &PPC::GPRCRegClass; 10446 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 10447 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 10448 unsigned ShiftReg = 10449 isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(RC); 10450 unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC); 10451 unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC); 10452 unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC); 10453 unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC); 10454 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 10455 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 10456 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 10457 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 10458 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 10459 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 10460 unsigned Ptr1Reg; 10461 unsigned TmpReg = RegInfo.createVirtualRegister(RC); 10462 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 10463 // thisMBB: 10464 // ... 10465 // fallthrough --> loopMBB 10466 BB->addSuccessor(loop1MBB); 10467 10468 // The 4-byte load must be aligned, while a char or short may be 10469 // anywhere in the word. Hence all this nasty bookkeeping code. 10470 // add ptr1, ptrA, ptrB [copy if ptrA==0] 10471 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 10472 // xori shift, shift1, 24 [16] 10473 // rlwinm ptr, ptr1, 0, 0, 29 10474 // slw newval2, newval, shift 10475 // slw oldval2, oldval,shift 10476 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 10477 // slw mask, mask2, shift 10478 // and newval3, newval2, mask 10479 // and oldval3, oldval2, mask 10480 // loop1MBB: 10481 // lwarx tmpDest, ptr 10482 // and tmp, tmpDest, mask 10483 // cmpw tmp, oldval3 10484 // bne- midMBB 10485 // loop2MBB: 10486 // andc tmp2, tmpDest, mask 10487 // or tmp4, tmp2, newval3 10488 // stwcx. tmp4, ptr 10489 // bne- loop1MBB 10490 // b exitBB 10491 // midMBB: 10492 // stwcx. tmpDest, ptr 10493 // exitBB: 10494 // srw dest, tmpDest, shift 10495 if (ptrA != ZeroReg) { 10496 Ptr1Reg = RegInfo.createVirtualRegister(RC); 10497 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 10498 .addReg(ptrA).addReg(ptrB); 10499 } else { 10500 Ptr1Reg = ptrB; 10501 } 10502 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 10503 .addImm(3).addImm(27).addImm(is8bit ? 
28 : 27); 10504 if (!isLittleEndian) 10505 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 10506 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16); 10507 if (is64bit) 10508 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 10509 .addReg(Ptr1Reg).addImm(0).addImm(61); 10510 else 10511 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 10512 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 10513 BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg) 10514 .addReg(newval).addReg(ShiftReg); 10515 BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg) 10516 .addReg(oldval).addReg(ShiftReg); 10517 if (is8bit) 10518 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 10519 else { 10520 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 10521 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg) 10522 .addReg(Mask3Reg).addImm(65535); 10523 } 10524 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 10525 .addReg(Mask2Reg).addReg(ShiftReg); 10526 BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg) 10527 .addReg(NewVal2Reg).addReg(MaskReg); 10528 BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg) 10529 .addReg(OldVal2Reg).addReg(MaskReg); 10530 10531 BB = loop1MBB; 10532 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 10533 .addReg(ZeroReg).addReg(PtrReg); 10534 BuildMI(BB, dl, TII->get(PPC::AND),TmpReg) 10535 .addReg(TmpDestReg).addReg(MaskReg); 10536 BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0) 10537 .addReg(TmpReg).addReg(OldVal3Reg); 10538 BuildMI(BB, dl, TII->get(PPC::BCC)) 10539 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 10540 BB->addSuccessor(loop2MBB); 10541 BB->addSuccessor(midMBB); 10542 10543 BB = loop2MBB; 10544 BuildMI(BB, dl, TII->get(PPC::ANDC),Tmp2Reg) 10545 .addReg(TmpDestReg).addReg(MaskReg); 10546 BuildMI(BB, dl, TII->get(PPC::OR),Tmp4Reg) 10547 .addReg(Tmp2Reg).addReg(NewVal3Reg); 10548 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg) 10549 .addReg(ZeroReg).addReg(PtrReg); 10550 BuildMI(BB, dl, TII->get(PPC::BCC)) 10551 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 10552 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 10553 BB->addSuccessor(loop1MBB); 10554 BB->addSuccessor(exitMBB); 10555 10556 BB = midMBB; 10557 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg) 10558 .addReg(ZeroReg).addReg(PtrReg); 10559 BB->addSuccessor(exitMBB); 10560 10561 // exitMBB: 10562 // ... 10563 BB = exitMBB; 10564 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW),dest).addReg(TmpReg) 10565 .addReg(ShiftReg); 10566 } else if (MI.getOpcode() == PPC::FADDrtz) { 10567 // This pseudo performs an FADD with rounding mode temporarily forced 10568 // to round-to-zero. We emit this via custom inserter since the FPSCR 10569 // is not modeled at the SelectionDAG level. 10570 unsigned Dest = MI.getOperand(0).getReg(); 10571 unsigned Src1 = MI.getOperand(1).getReg(); 10572 unsigned Src2 = MI.getOperand(2).getReg(); 10573 DebugLoc dl = MI.getDebugLoc(); 10574 10575 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10576 unsigned MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); 10577 10578 // Save FPSCR value. 10579 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg); 10580 10581 // Set rounding mode to round-to-zero. 10582 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31); 10583 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30); 10584 10585 // Perform addition. 10586 BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2); 10587 10588 // Restore FPSCR value. 
10589 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg); 10590 } else if (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT || 10591 MI.getOpcode() == PPC::ANDIo_1_GT_BIT || 10592 MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 || 10593 MI.getOpcode() == PPC::ANDIo_1_GT_BIT8) { 10594 unsigned Opcode = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 || 10595 MI.getOpcode() == PPC::ANDIo_1_GT_BIT8) 10596 ? PPC::ANDIo8 10597 : PPC::ANDIo; 10598 bool isEQ = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT || 10599 MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8); 10600 10601 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10602 unsigned Dest = RegInfo.createVirtualRegister(Opcode == PPC::ANDIo ? 10603 &PPC::GPRCRegClass : 10604 &PPC::G8RCRegClass); 10605 10606 DebugLoc dl = MI.getDebugLoc(); 10607 BuildMI(*BB, MI, dl, TII->get(Opcode), Dest) 10608 .addReg(MI.getOperand(1).getReg()) 10609 .addImm(1); 10610 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), 10611 MI.getOperand(0).getReg()) 10612 .addReg(isEQ ? PPC::CR0EQ : PPC::CR0GT); 10613 } else if (MI.getOpcode() == PPC::TCHECK_RET) { 10614 DebugLoc Dl = MI.getDebugLoc(); 10615 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10616 unsigned CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 10617 BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg); 10618 return BB; 10619 } else { 10620 llvm_unreachable("Unexpected instr type to insert"); 10621 } 10622 10623 MI.eraseFromParent(); // The pseudo instruction is gone now. 10624 return BB; 10625 } 10626 10627 //===----------------------------------------------------------------------===// 10628 // Target Optimization Hooks 10629 //===----------------------------------------------------------------------===// 10630 10631 static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) { 10632 // For the estimates, convergence is quadratic, so we essentially double the 10633 // number of digits correct after every iteration. For both FRE and FRSQRTE, 10634 // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(), 10635 // this is 2^-14. IEEE float has 23 digits and double has 52 digits. 10636 int RefinementSteps = Subtarget.hasRecipPrec() ? 
1 : 3; // Three iterations take 2^-5 to 2^-40, enough for f32's 24-bit significand (two would only reach 2^-20); one iteration takes 2^-14 to 2^-28. 10637 if (VT.getScalarType() == MVT::f64) 10638 RefinementSteps++; // One more iteration reaches 2^-80 (or 2^-56), covering f64's 53 bits. 10639 return RefinementSteps; 10640 } 10641 10642 SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, 10643 int Enabled, int &RefinementSteps, 10644 bool &UseOneConstNR, 10645 bool Reciprocal) const { 10646 EVT VT = Operand.getValueType(); 10647 if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) || 10648 (VT == MVT::f64 && Subtarget.hasFRSQRTE()) || 10649 (VT == MVT::v4f32 && Subtarget.hasAltivec()) || 10650 (VT == MVT::v2f64 && Subtarget.hasVSX()) || 10651 (VT == MVT::v4f32 && Subtarget.hasQPX()) || 10652 (VT == MVT::v4f64 && Subtarget.hasQPX())) { 10653 if (RefinementSteps == ReciprocalEstimate::Unspecified) 10654 RefinementSteps = getEstimateRefinementSteps(VT, Subtarget); 10655 10656 UseOneConstNR = true; 10657 return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand); 10658 } 10659 return SDValue(); 10660 } 10661 10662 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG, 10663 int Enabled, 10664 int &RefinementSteps) const { 10665 EVT VT = Operand.getValueType(); 10666 if ((VT == MVT::f32 && Subtarget.hasFRES()) || 10667 (VT == MVT::f64 && Subtarget.hasFRE()) || 10668 (VT == MVT::v4f32 && Subtarget.hasAltivec()) || 10669 (VT == MVT::v2f64 && Subtarget.hasVSX()) || 10670 (VT == MVT::v4f32 && Subtarget.hasQPX()) || 10671 (VT == MVT::v4f64 && Subtarget.hasQPX())) { 10672 if (RefinementSteps == ReciprocalEstimate::Unspecified) 10673 RefinementSteps = getEstimateRefinementSteps(VT, Subtarget); 10674 return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand); 10675 } 10676 return SDValue(); 10677 } 10678 10679 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const { 10680 // Note: This functionality is used only when unsafe-fp-math is enabled, and 10681 // on cores with reciprocal estimates (which are used when unsafe-fp-math is 10682 // enabled for division), this functionality is redundant with the default 10683 // combiner logic (once the division -> reciprocal/multiply transformation 10684 // has taken place). As a result, this matters more for older cores than for 10685 // newer ones. 10686 10687 // Combine multiple FDIVs with the same divisor into multiple FMULs by the 10688 // reciprocal if there are two or more FDIVs (for embedded cores with only 10689 // one FP pipeline) or three or more FDIVs (for generic OOO cores). 10690 switch (Subtarget.getDarwinDirective()) { 10691 default: 10692 return 3; 10693 case PPC::DIR_440: 10694 case PPC::DIR_A2: 10695 case PPC::DIR_E500mc: 10696 case PPC::DIR_E5500: 10697 return 2; 10698 } 10699 } 10700 10701 // isConsecutiveLSLoc needs to work even if all adds have not yet been 10702 // collapsed, and so we need to look through chains of them. 10703 static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base, 10704 int64_t& Offset, SelectionDAG &DAG) { 10705 if (DAG.isBaseWithConstantOffset(Loc)) { 10706 Base = Loc.getOperand(0); 10707 Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue(); 10708 10709 // The base might itself be a base plus an offset, and if so, accumulate 10710 // that as well.
10711 getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG); 10712 } 10713 } 10714 10715 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base, 10716 unsigned Bytes, int Dist, 10717 SelectionDAG &DAG) { 10718 if (VT.getSizeInBits() / 8 != Bytes) 10719 return false; 10720 10721 SDValue BaseLoc = Base->getBasePtr(); 10722 if (Loc.getOpcode() == ISD::FrameIndex) { 10723 if (BaseLoc.getOpcode() != ISD::FrameIndex) 10724 return false; 10725 const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 10726 int FI = cast<FrameIndexSDNode>(Loc)->getIndex(); 10727 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex(); 10728 int FS = MFI.getObjectSize(FI); 10729 int BFS = MFI.getObjectSize(BFI); 10730 if (FS != BFS || FS != (int)Bytes) return false; 10731 return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes); 10732 } 10733 10734 SDValue Base1 = Loc, Base2 = BaseLoc; 10735 int64_t Offset1 = 0, Offset2 = 0; 10736 getBaseWithConstantOffset(Loc, Base1, Offset1, DAG); 10737 getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG); 10738 if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes)) 10739 return true; 10740 10741 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 10742 const GlobalValue *GV1 = nullptr; 10743 const GlobalValue *GV2 = nullptr; 10744 Offset1 = 0; 10745 Offset2 = 0; 10746 bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1); 10747 bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2); 10748 if (isGA1 && isGA2 && GV1 == GV2) 10749 return Offset1 == (Offset2 + Dist*Bytes); 10750 return false; 10751 } 10752 10753 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does 10754 // not enforce equality of the chain operands. 10755 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base, 10756 unsigned Bytes, int Dist, 10757 SelectionDAG &DAG) { 10758 if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) { 10759 EVT VT = LS->getMemoryVT(); 10760 SDValue Loc = LS->getBasePtr(); 10761 return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG); 10762 } 10763 10764 if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) { 10765 EVT VT; 10766 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 10767 default: return false; 10768 case Intrinsic::ppc_qpx_qvlfd: 10769 case Intrinsic::ppc_qpx_qvlfda: 10770 VT = MVT::v4f64; 10771 break; 10772 case Intrinsic::ppc_qpx_qvlfs: 10773 case Intrinsic::ppc_qpx_qvlfsa: 10774 VT = MVT::v4f32; 10775 break; 10776 case Intrinsic::ppc_qpx_qvlfcd: 10777 case Intrinsic::ppc_qpx_qvlfcda: 10778 VT = MVT::v2f64; 10779 break; 10780 case Intrinsic::ppc_qpx_qvlfcs: 10781 case Intrinsic::ppc_qpx_qvlfcsa: 10782 VT = MVT::v2f32; 10783 break; 10784 case Intrinsic::ppc_qpx_qvlfiwa: 10785 case Intrinsic::ppc_qpx_qvlfiwz: 10786 case Intrinsic::ppc_altivec_lvx: 10787 case Intrinsic::ppc_altivec_lvxl: 10788 case Intrinsic::ppc_vsx_lxvw4x: 10789 case Intrinsic::ppc_vsx_lxvw4x_be: 10790 VT = MVT::v4i32; 10791 break; 10792 case Intrinsic::ppc_vsx_lxvd2x: 10793 case Intrinsic::ppc_vsx_lxvd2x_be: 10794 VT = MVT::v2f64; 10795 break; 10796 case Intrinsic::ppc_altivec_lvebx: 10797 VT = MVT::i8; 10798 break; 10799 case Intrinsic::ppc_altivec_lvehx: 10800 VT = MVT::i16; 10801 break; 10802 case Intrinsic::ppc_altivec_lvewx: 10803 VT = MVT::i32; 10804 break; 10805 } 10806 10807 return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG); 10808 } 10809 10810 if (N->getOpcode() == ISD::INTRINSIC_VOID) { 10811 EVT VT; 10812 switch 
(cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 10813 default: return false; 10814 case Intrinsic::ppc_qpx_qvstfd: 10815 case Intrinsic::ppc_qpx_qvstfda: 10816 VT = MVT::v4f64; 10817 break; 10818 case Intrinsic::ppc_qpx_qvstfs: 10819 case Intrinsic::ppc_qpx_qvstfsa: 10820 VT = MVT::v4f32; 10821 break; 10822 case Intrinsic::ppc_qpx_qvstfcd: 10823 case Intrinsic::ppc_qpx_qvstfcda: 10824 VT = MVT::v2f64; 10825 break; 10826 case Intrinsic::ppc_qpx_qvstfcs: 10827 case Intrinsic::ppc_qpx_qvstfcsa: 10828 VT = MVT::v2f32; 10829 break; 10830 case Intrinsic::ppc_qpx_qvstfiw: 10831 case Intrinsic::ppc_qpx_qvstfiwa: 10832 case Intrinsic::ppc_altivec_stvx: 10833 case Intrinsic::ppc_altivec_stvxl: 10834 case Intrinsic::ppc_vsx_stxvw4x: 10835 VT = MVT::v4i32; 10836 break; 10837 case Intrinsic::ppc_vsx_stxvd2x: 10838 VT = MVT::v2f64; 10839 break; 10840 case Intrinsic::ppc_vsx_stxvw4x_be: 10841 VT = MVT::v4i32; 10842 break; 10843 case Intrinsic::ppc_vsx_stxvd2x_be: 10844 VT = MVT::v2f64; 10845 break; 10846 case Intrinsic::ppc_altivec_stvebx: 10847 VT = MVT::i8; 10848 break; 10849 case Intrinsic::ppc_altivec_stvehx: 10850 VT = MVT::i16; 10851 break; 10852 case Intrinsic::ppc_altivec_stvewx: 10853 VT = MVT::i32; 10854 break; 10855 } 10856 10857 return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG); 10858 } 10859 10860 return false; 10861 } 10862 10863 // Return true if there is a nearby consecutive load to the one provided 10864 // (regardless of alignment). We search up and down the chain, looking through 10865 // token factors and other loads (but nothing else). As a result, a true result 10866 // indicates that it is safe to create a new consecutive load adjacent to the 10867 // load provided. 10868 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) { 10869 SDValue Chain = LD->getChain(); 10870 EVT VT = LD->getMemoryVT(); 10871 10872 SmallSet<SDNode *, 16> LoadRoots; 10873 SmallVector<SDNode *, 8> Queue(1, Chain.getNode()); 10874 SmallSet<SDNode *, 16> Visited; 10875 10876 // First, search up the chain, branching to follow all token-factor operands. 10877 // If we find a consecutive load, then we're done; otherwise, record all 10878 // nodes just above the top-level loads and token factors. 10879 while (!Queue.empty()) { 10880 SDNode *ChainNext = Queue.pop_back_val(); 10881 if (!Visited.insert(ChainNext).second) 10882 continue; 10883 10884 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) { 10885 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG)) 10886 return true; 10887 10888 if (!Visited.count(ChainLD->getChain().getNode())) 10889 Queue.push_back(ChainLD->getChain().getNode()); 10890 } else if (ChainNext->getOpcode() == ISD::TokenFactor) { 10891 for (const SDUse &O : ChainNext->ops()) 10892 if (!Visited.count(O.getNode())) 10893 Queue.push_back(O.getNode()); 10894 } else 10895 LoadRoots.insert(ChainNext); 10896 } 10897 10898 // Second, search down the chain, starting from the top-level nodes recorded 10899 // in the first phase. These top-level nodes are the nodes just above all 10900 // loads and token factors. Starting with their uses, recursively look through 10901 // all loads (just the chain uses) and token factors to find a consecutive 10902 // load.
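// The two phases are complementary: the upward walk only visits nodes that
// the given load depends on, while a consecutive load may instead be a
// sibling that merely shares a chain ancestor, which is what the downward
// walk from the recorded roots discovers.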
10903 Visited.clear(); 10904 Queue.clear(); 10905 10906 for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(), 10907 IE = LoadRoots.end(); I != IE; ++I) { 10908 Queue.push_back(*I); 10909 10910 while (!Queue.empty()) { 10911 SDNode *LoadRoot = Queue.pop_back_val(); 10912 if (!Visited.insert(LoadRoot).second) 10913 continue; 10914 10915 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot)) 10916 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG)) 10917 return true; 10918 10919 for (SDNode::use_iterator UI = LoadRoot->use_begin(), 10920 UE = LoadRoot->use_end(); UI != UE; ++UI) 10921 if (((isa<MemSDNode>(*UI) && 10922 cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) || 10923 UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI)) 10924 Queue.push_back(*UI); 10925 } 10926 } 10927 10928 return false; 10929 } 10930 10931 /// This function is called when we have proved that a SETCC node can be replaced 10932 /// by subtraction (and other supporting instructions) so that the result of 10933 /// comparison is kept in a GPR instead of CR. This function is purely for 10934 /// codegen purposes and has some flags to guide the codegen process. 10935 static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement, 10936 bool Swap, SDLoc &DL, SelectionDAG &DAG) { 10937 assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected."); 10938 10939 // Zero extend the operands to the largest legal integer. Originally, they 10940 // must be of a strictly smaller size. 10941 auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0), 10942 DAG.getConstant(Size, DL, MVT::i32)); 10943 auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1), 10944 DAG.getConstant(Size, DL, MVT::i32)); 10945 10946 // Swap if needed. Depends on the condition code. 10947 if (Swap) 10948 std::swap(Op0, Op1); 10949 10950 // Subtract extended integers. 10951 auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1); 10952 10953 // Move the sign bit to the least significant position and zero out the rest. 10954 // Now the least significant bit carries the result of original comparison. 10955 auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode, 10956 DAG.getConstant(Size - 1, DL, MVT::i32)); 10957 auto Final = Shifted; 10958 10959 // Complement the result if needed. Based on the condition code. 10960 if (Complement) 10961 Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted, 10962 DAG.getConstant(1, DL, MVT::i64)); 10963 10964 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final); 10965 } 10966 10967 SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N, 10968 DAGCombinerInfo &DCI) const { 10969 assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected."); 10970 10971 SelectionDAG &DAG = DCI.DAG; 10972 SDLoc DL(N); 10973 10974 // Size of integers being compared has a critical role in the following 10975 // analysis, so we prefer to do this when all types are legal. 
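// For example, with 32-bit operands on a 64-bit target (Size == 64),
// "setult a, b" becomes (zext(a) - zext(b)) >> 63: after zero extension the
// subtraction cannot wrap, so the sign bit of the 64-bit difference is set
// exactly when a <u b. SETUGT subtracts in the opposite order, SETUGE
// complements the resulting bit, and SETULE does both (see
// generateEquivalentSub above).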
10976 if (!DCI.isAfterLegalizeVectorOps()) 10977 return SDValue(); 10978 10979 // If all users of SETCC extend its value to a legal integer type 10980 // then we replace SETCC with a subtraction. 10981 for (SDNode::use_iterator UI = N->use_begin(), 10982 UE = N->use_end(); UI != UE; ++UI) { 10983 if (UI->getOpcode() != ISD::ZERO_EXTEND) 10984 return SDValue(); 10985 } 10986 10987 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); 10988 auto OpSize = N->getOperand(0).getValueSizeInBits(); 10989 10990 unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits(); 10991 10992 if (OpSize < Size) { 10993 switch (CC) { 10994 default: break; 10995 case ISD::SETULT: 10996 return generateEquivalentSub(N, Size, false, false, DL, DAG); 10997 case ISD::SETULE: 10998 return generateEquivalentSub(N, Size, true, true, DL, DAG); 10999 case ISD::SETUGT: 11000 return generateEquivalentSub(N, Size, false, true, DL, DAG); 11001 case ISD::SETUGE: 11002 return generateEquivalentSub(N, Size, true, false, DL, DAG); 11003 } 11004 } 11005 11006 return SDValue(); 11007 } 11008 11009 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N, 11010 DAGCombinerInfo &DCI) const { 11011 SelectionDAG &DAG = DCI.DAG; 11012 SDLoc dl(N); 11013 11014 assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits"); 11015 // If we're tracking CR bits, we need to be careful that we don't have: 11016 // trunc(binary-ops(zext(x), zext(y))) 11017 // or 11018 // trunc(binary-ops(binary-ops(zext(x), zext(y)), ...) 11019 // such that we're unnecessarily moving things into GPRs when it would be 11020 // better to keep them in CR bits. 11021 11022 // Note that trunc here can be an actual i1 trunc, or can be the effective 11023 // truncation that comes from a setcc or select_cc. 11024 if (N->getOpcode() == ISD::TRUNCATE && 11025 N->getValueType(0) != MVT::i1) 11026 return SDValue(); 11027 11028 if (N->getOperand(0).getValueType() != MVT::i32 && 11029 N->getOperand(0).getValueType() != MVT::i64) 11030 return SDValue(); 11031 11032 if (N->getOpcode() == ISD::SETCC || 11033 N->getOpcode() == ISD::SELECT_CC) { 11034 // If we're looking at a comparison, then we need to make sure that the 11035 // high bits (all except for the first) don't affect the result. 11036 ISD::CondCode CC = 11037 cast<CondCodeSDNode>(N->getOperand( 11038 N->getOpcode() == ISD::SETCC ? 2 : 4))->get(); 11039 unsigned OpBits = N->getOperand(0).getValueSizeInBits(); 11040 11041 if (ISD::isSignedIntSetCC(CC)) { 11042 if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits || 11043 DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits) 11044 return SDValue(); 11045 } else if (ISD::isUnsignedIntSetCC(CC)) { 11046 if (!DAG.MaskedValueIsZero(N->getOperand(0), 11047 APInt::getHighBitsSet(OpBits, OpBits-1)) || 11048 !DAG.MaskedValueIsZero(N->getOperand(1), 11049 APInt::getHighBitsSet(OpBits, OpBits-1))) 11050 return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI) 11051 : SDValue()); 11052 } else { 11053 // This is neither a signed nor an unsigned comparison; just make sure 11054 // that the high bits are equal. 11055 KnownBits Op1Known, Op2Known; 11056 DAG.computeKnownBits(N->getOperand(0), Op1Known); 11057 DAG.computeKnownBits(N->getOperand(1), Op2Known); 11058 11059 // We don't really care about what is known about the first bit (if 11060 // anything), so clear it in all masks prior to comparing them.
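// (Bit 0 holds the value actually being compared, so it may legitimately
// differ; only the bits above it must be provably identical for the
// narrowed comparison to behave the same way.)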
11061 Op1Known.Zero.clearBit(0); Op1Known.One.clearBit(0); 11062 Op2Known.Zero.clearBit(0); Op2Known.One.clearBit(0); 11063 11064 if (Op1Known.Zero != Op2Known.Zero || Op1Known.One != Op2Known.One) 11065 return SDValue(); 11066 } 11067 } 11068 11069 // We now know that the higher-order bits are irrelevant, we just need to 11070 // make sure that all of the intermediate operations are bit operations, and 11071 // all inputs are extensions. 11072 if (N->getOperand(0).getOpcode() != ISD::AND && 11073 N->getOperand(0).getOpcode() != ISD::OR && 11074 N->getOperand(0).getOpcode() != ISD::XOR && 11075 N->getOperand(0).getOpcode() != ISD::SELECT && 11076 N->getOperand(0).getOpcode() != ISD::SELECT_CC && 11077 N->getOperand(0).getOpcode() != ISD::TRUNCATE && 11078 N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND && 11079 N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND && 11080 N->getOperand(0).getOpcode() != ISD::ANY_EXTEND) 11081 return SDValue(); 11082 11083 if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) && 11084 N->getOperand(1).getOpcode() != ISD::AND && 11085 N->getOperand(1).getOpcode() != ISD::OR && 11086 N->getOperand(1).getOpcode() != ISD::XOR && 11087 N->getOperand(1).getOpcode() != ISD::SELECT && 11088 N->getOperand(1).getOpcode() != ISD::SELECT_CC && 11089 N->getOperand(1).getOpcode() != ISD::TRUNCATE && 11090 N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND && 11091 N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND && 11092 N->getOperand(1).getOpcode() != ISD::ANY_EXTEND) 11093 return SDValue(); 11094 11095 SmallVector<SDValue, 4> Inputs; 11096 SmallVector<SDValue, 8> BinOps, PromOps; 11097 SmallPtrSet<SDNode *, 16> Visited; 11098 11099 for (unsigned i = 0; i < 2; ++i) { 11100 if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 11101 N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 11102 N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 11103 N->getOperand(i).getOperand(0).getValueType() == MVT::i1) || 11104 isa<ConstantSDNode>(N->getOperand(i))) 11105 Inputs.push_back(N->getOperand(i)); 11106 else 11107 BinOps.push_back(N->getOperand(i)); 11108 11109 if (N->getOpcode() == ISD::TRUNCATE) 11110 break; 11111 } 11112 11113 // Visit all inputs, collect all binary operations (and, or, xor and 11114 // select) that are all fed by extensions. 11115 while (!BinOps.empty()) { 11116 SDValue BinOp = BinOps.back(); 11117 BinOps.pop_back(); 11118 11119 if (!Visited.insert(BinOp.getNode()).second) 11120 continue; 11121 11122 PromOps.push_back(BinOp); 11123 11124 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 11125 // The condition of the select is not promoted. 
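// (ISD::SELECT is (Cond, T, F), so operand 0 is the i1 condition;
// ISD::SELECT_CC is (LHS, RHS, T, F, CC), so only operands 2 and 3 carry
// values that participate in the promotion.)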
11126 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 11127 continue; 11128 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 11129 continue; 11130 11131 if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 11132 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 11133 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 11134 BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) || 11135 isa<ConstantSDNode>(BinOp.getOperand(i))) { 11136 Inputs.push_back(BinOp.getOperand(i)); 11137 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 11138 BinOp.getOperand(i).getOpcode() == ISD::OR || 11139 BinOp.getOperand(i).getOpcode() == ISD::XOR || 11140 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 11141 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC || 11142 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 11143 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 11144 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 11145 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) { 11146 BinOps.push_back(BinOp.getOperand(i)); 11147 } else { 11148 // We have an input that is not an extension or another binary 11149 // operation; we'll abort this transformation. 11150 return SDValue(); 11151 } 11152 } 11153 } 11154 11155 // Make sure that this is a self-contained cluster of operations (which 11156 // is not quite the same thing as saying that everything has only one 11157 // use). 11158 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 11159 if (isa<ConstantSDNode>(Inputs[i])) 11160 continue; 11161 11162 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(), 11163 UE = Inputs[i].getNode()->use_end(); 11164 UI != UE; ++UI) { 11165 SDNode *User = *UI; 11166 if (User != N && !Visited.count(User)) 11167 return SDValue(); 11168 11169 // Make sure that we're not going to promote the non-output-value 11170 // operand(s) or SELECT or SELECT_CC. 11171 // FIXME: Although we could sometimes handle this, and it does occur in 11172 // practice that one of the condition inputs to the select is also one of 11173 // the outputs, we currently can't deal with this. 11174 if (User->getOpcode() == ISD::SELECT) { 11175 if (User->getOperand(0) == Inputs[i]) 11176 return SDValue(); 11177 } else if (User->getOpcode() == ISD::SELECT_CC) { 11178 if (User->getOperand(0) == Inputs[i] || 11179 User->getOperand(1) == Inputs[i]) 11180 return SDValue(); 11181 } 11182 } 11183 } 11184 11185 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 11186 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(), 11187 UE = PromOps[i].getNode()->use_end(); 11188 UI != UE; ++UI) { 11189 SDNode *User = *UI; 11190 if (User != N && !Visited.count(User)) 11191 return SDValue(); 11192 11193 // Make sure that we're not going to promote the non-output-value 11194 // operand(s) or SELECT or SELECT_CC. 11195 // FIXME: Although we could sometimes handle this, and it does occur in 11196 // practice that one of the condition inputs to the select is also one of 11197 // the outputs, we currently can't deal with this. 11198 if (User->getOpcode() == ISD::SELECT) { 11199 if (User->getOperand(0) == PromOps[i]) 11200 return SDValue(); 11201 } else if (User->getOpcode() == ISD::SELECT_CC) { 11202 if (User->getOperand(0) == PromOps[i] || 11203 User->getOperand(1) == PromOps[i]) 11204 return SDValue(); 11205 } 11206 } 11207 } 11208 11209 // Replace all inputs with the extension operand. 
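// Note that ReplaceAllUsesOfValueWith can CSE and delete nodes, which is
// why the pending operations are wrapped in HandleSDNodes below before
// being rewritten.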
11210 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 11211 // Constants may have users outside the cluster of to-be-promoted nodes, 11212 // and so we need to replace those as we do the promotions. 11213 if (isa<ConstantSDNode>(Inputs[i])) 11214 continue; 11215 else 11216 DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0)); 11217 } 11218 11219 std::list<HandleSDNode> PromOpHandles; 11220 for (auto &PromOp : PromOps) 11221 PromOpHandles.emplace_back(PromOp); 11222 11223 // Replace all operations (these are all the same, but have a different 11224 // (i1) return type). DAG.getNode will validate that the types of 11225 // a binary operator match, so go through the list in reverse so that 11226 // we've likely promoted both operands first. Any intermediate truncations or 11227 // extensions disappear. 11228 while (!PromOpHandles.empty()) { 11229 SDValue PromOp = PromOpHandles.back().getValue(); 11230 PromOpHandles.pop_back(); 11231 11232 if (PromOp.getOpcode() == ISD::TRUNCATE || 11233 PromOp.getOpcode() == ISD::SIGN_EXTEND || 11234 PromOp.getOpcode() == ISD::ZERO_EXTEND || 11235 PromOp.getOpcode() == ISD::ANY_EXTEND) { 11236 if (!isa<ConstantSDNode>(PromOp.getOperand(0)) && 11237 PromOp.getOperand(0).getValueType() != MVT::i1) { 11238 // The operand is not yet ready (see comment below). 11239 PromOpHandles.emplace_front(PromOp); 11240 continue; 11241 } 11242 11243 SDValue RepValue = PromOp.getOperand(0); 11244 if (isa<ConstantSDNode>(RepValue)) 11245 RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue); 11246 11247 DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue); 11248 continue; 11249 } 11250 11251 unsigned C; 11252 switch (PromOp.getOpcode()) { 11253 default: C = 0; break; 11254 case ISD::SELECT: C = 1; break; 11255 case ISD::SELECT_CC: C = 2; break; 11256 } 11257 11258 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 11259 PromOp.getOperand(C).getValueType() != MVT::i1) || 11260 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 11261 PromOp.getOperand(C+1).getValueType() != MVT::i1)) { 11262 // The to-be-promoted operands of this node have not yet been 11263 // promoted (this should be rare because we're going through the 11264 // list backward, but if one of the operands has several users in 11265 // this cluster of to-be-promoted nodes, it is possible). 11266 PromOpHandles.emplace_front(PromOp); 11267 continue; 11268 } 11269 11270 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 11271 PromOp.getNode()->op_end()); 11272 11273 // If there are any constant inputs, make sure they're replaced now. 11274 for (unsigned i = 0; i < 2; ++i) 11275 if (isa<ConstantSDNode>(Ops[C+i])) 11276 Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]); 11277 11278 DAG.ReplaceAllUsesOfValueWith(PromOp, 11279 DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops)); 11280 } 11281 11282 // Now we're left with the initial truncation itself. 11283 if (N->getOpcode() == ISD::TRUNCATE) 11284 return N->getOperand(0); 11285 11286 // Otherwise, this is a comparison. The operands to be compared have just 11287 // changed type (to i1), but everything else is the same. 11288 return SDValue(N, 0); 11289 } 11290 11291 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N, 11292 DAGCombinerInfo &DCI) const { 11293 SelectionDAG &DAG = DCI.DAG; 11294 SDLoc dl(N); 11295 11296 // If we're tracking CR bits, we need to be careful that we don't have: 11297 // zext(binary-ops(trunc(x), trunc(y))) 11298 // or 11299 // zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...) 
11300 // such that we're unnecessarily moving things into CR bits that can more 11301 // efficiently stay in GPRs. Note that if we're not certain that the high 11302 // bits are set as required by the final extension, we still may need to do 11303 // some masking to get the proper behavior. 11304 11305 // This same functionality is important on PPC64 when dealing with 11306 // 32-to-64-bit extensions; these occur often when 32-bit values are used as 11307 // the return values of functions. Because it is so similar, it is handled 11308 // here as well. 11309 11310 if (N->getValueType(0) != MVT::i32 && 11311 N->getValueType(0) != MVT::i64) 11312 return SDValue(); 11313 11314 if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) || 11315 (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64()))) 11316 return SDValue(); 11317 11318 if (N->getOperand(0).getOpcode() != ISD::AND && 11319 N->getOperand(0).getOpcode() != ISD::OR && 11320 N->getOperand(0).getOpcode() != ISD::XOR && 11321 N->getOperand(0).getOpcode() != ISD::SELECT && 11322 N->getOperand(0).getOpcode() != ISD::SELECT_CC) 11323 return SDValue(); 11324 11325 SmallVector<SDValue, 4> Inputs; 11326 SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps; 11327 SmallPtrSet<SDNode *, 16> Visited; 11328 11329 // Visit all inputs, collect all binary operations (and, or, xor and 11330 // select) that are all fed by truncations. 11331 while (!BinOps.empty()) { 11332 SDValue BinOp = BinOps.back(); 11333 BinOps.pop_back(); 11334 11335 if (!Visited.insert(BinOp.getNode()).second) 11336 continue; 11337 11338 PromOps.push_back(BinOp); 11339 11340 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 11341 // The condition of the select is not promoted. 11342 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 11343 continue; 11344 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 11345 continue; 11346 11347 if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 11348 isa<ConstantSDNode>(BinOp.getOperand(i))) { 11349 Inputs.push_back(BinOp.getOperand(i)); 11350 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 11351 BinOp.getOperand(i).getOpcode() == ISD::OR || 11352 BinOp.getOperand(i).getOpcode() == ISD::XOR || 11353 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 11354 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) { 11355 BinOps.push_back(BinOp.getOperand(i)); 11356 } else { 11357 // We have an input that is not a truncation or another binary 11358 // operation; we'll abort this transformation. 11359 return SDValue(); 11360 } 11361 } 11362 } 11363 11364 // The operands of a select that must be truncated when the select is 11365 // promoted because the operand is actually part of the to-be-promoted set. 11366 DenseMap<SDNode *, EVT> SelectTruncOp[2]; 11367 11368 // Make sure that this is a self-contained cluster of operations (which 11369 // is not quite the same thing as saying that everything has only one 11370 // use). 11371 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 11372 if (isa<ConstantSDNode>(Inputs[i])) 11373 continue; 11374 11375 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(), 11376 UE = Inputs[i].getNode()->use_end(); 11377 UI != UE; ++UI) { 11378 SDNode *User = *UI; 11379 if (User != N && !Visited.count(User)) 11380 return SDValue(); 11381 11382 // If we're going to promote the non-output-value operand(s) or SELECT or 11383 // SELECT_CC, record them for truncation. 
11384 if (User->getOpcode() == ISD::SELECT) { 11385 if (User->getOperand(0) == Inputs[i]) 11386 SelectTruncOp[0].insert(std::make_pair(User, 11387 User->getOperand(0).getValueType())); 11388 } else if (User->getOpcode() == ISD::SELECT_CC) { 11389 if (User->getOperand(0) == Inputs[i]) 11390 SelectTruncOp[0].insert(std::make_pair(User, 11391 User->getOperand(0).getValueType())); 11392 if (User->getOperand(1) == Inputs[i]) 11393 SelectTruncOp[1].insert(std::make_pair(User, 11394 User->getOperand(1).getValueType())); 11395 } 11396 } 11397 } 11398 11399 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 11400 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(), 11401 UE = PromOps[i].getNode()->use_end(); 11402 UI != UE; ++UI) { 11403 SDNode *User = *UI; 11404 if (User != N && !Visited.count(User)) 11405 return SDValue(); 11406 11407 // If we're going to promote the non-output-value operand(s) or SELECT or 11408 // SELECT_CC, record them for truncation. 11409 if (User->getOpcode() == ISD::SELECT) { 11410 if (User->getOperand(0) == PromOps[i]) 11411 SelectTruncOp[0].insert(std::make_pair(User, 11412 User->getOperand(0).getValueType())); 11413 } else if (User->getOpcode() == ISD::SELECT_CC) { 11414 if (User->getOperand(0) == PromOps[i]) 11415 SelectTruncOp[0].insert(std::make_pair(User, 11416 User->getOperand(0).getValueType())); 11417 if (User->getOperand(1) == PromOps[i]) 11418 SelectTruncOp[1].insert(std::make_pair(User, 11419 User->getOperand(1).getValueType())); 11420 } 11421 } 11422 } 11423 11424 unsigned PromBits = N->getOperand(0).getValueSizeInBits(); 11425 bool ReallyNeedsExt = false; 11426 if (N->getOpcode() != ISD::ANY_EXTEND) { 11427 // If all of the inputs are not already sign/zero extended, then 11428 // we'll still need to do that at the end. 11429 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 11430 if (isa<ConstantSDNode>(Inputs[i])) 11431 continue; 11432 11433 unsigned OpBits = 11434 Inputs[i].getOperand(0).getValueSizeInBits(); 11435 assert(PromBits < OpBits && "Truncation not to a smaller bit count?"); 11436 11437 if ((N->getOpcode() == ISD::ZERO_EXTEND && 11438 !DAG.MaskedValueIsZero(Inputs[i].getOperand(0), 11439 APInt::getHighBitsSet(OpBits, 11440 OpBits-PromBits))) || 11441 (N->getOpcode() == ISD::SIGN_EXTEND && 11442 DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) < 11443 (OpBits-(PromBits-1)))) { 11444 ReallyNeedsExt = true; 11445 break; 11446 } 11447 } 11448 } 11449 11450 // Replace all inputs, either with the truncation operand, or a 11451 // truncation or extension to the final output type. 11452 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 11453 // Constant inputs need to be replaced with the to-be-promoted nodes that 11454 // use them because they might have users outside of the cluster of 11455 // promoted nodes. 
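// (Those out-of-cluster uses are why constants are instead rewritten
// per-operation in the promotion loop below.)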
11456 if (isa<ConstantSDNode>(Inputs[i])) 11457 continue; 11458 11459 SDValue InSrc = Inputs[i].getOperand(0); 11460 if (Inputs[i].getValueType() == N->getValueType(0)) 11461 DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc); 11462 else if (N->getOpcode() == ISD::SIGN_EXTEND) 11463 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 11464 DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0))); 11465 else if (N->getOpcode() == ISD::ZERO_EXTEND) 11466 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 11467 DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0))); 11468 else 11469 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 11470 DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0))); 11471 } 11472 11473 std::list<HandleSDNode> PromOpHandles; 11474 for (auto &PromOp : PromOps) 11475 PromOpHandles.emplace_back(PromOp); 11476 11477 // Replace all operations (these are all the same, but have a different 11478 // (promoted) return type). DAG.getNode will validate that the types of 11479 // a binary operator match, so go through the list in reverse so that 11480 // we've likely promoted both operands first. 11481 while (!PromOpHandles.empty()) { 11482 SDValue PromOp = PromOpHandles.back().getValue(); 11483 PromOpHandles.pop_back(); 11484 11485 unsigned C; 11486 switch (PromOp.getOpcode()) { 11487 default: C = 0; break; 11488 case ISD::SELECT: C = 1; break; 11489 case ISD::SELECT_CC: C = 2; break; 11490 } 11491 11492 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 11493 PromOp.getOperand(C).getValueType() != N->getValueType(0)) || 11494 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 11495 PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) { 11496 // The to-be-promoted operands of this node have not yet been 11497 // promoted (this should be rare because we're going through the 11498 // list backward, but if one of the operands has several users in 11499 // this cluster of to-be-promoted nodes, it is possible). 11500 PromOpHandles.emplace_front(PromOp); 11501 continue; 11502 } 11503 11504 // For SELECT and SELECT_CC nodes, we do a similar check for any 11505 // to-be-promoted comparison inputs. 11506 if (PromOp.getOpcode() == ISD::SELECT || 11507 PromOp.getOpcode() == ISD::SELECT_CC) { 11508 if ((SelectTruncOp[0].count(PromOp.getNode()) && 11509 PromOp.getOperand(0).getValueType() != N->getValueType(0)) || 11510 (SelectTruncOp[1].count(PromOp.getNode()) && 11511 PromOp.getOperand(1).getValueType() != N->getValueType(0))) { 11512 PromOpHandles.emplace_front(PromOp); 11513 continue; 11514 } 11515 } 11516 11517 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 11518 PromOp.getNode()->op_end()); 11519 11520 // If this node has constant inputs, then they'll need to be promoted here. 11521 for (unsigned i = 0; i < 2; ++i) { 11522 if (!isa<ConstantSDNode>(Ops[C+i])) 11523 continue; 11524 if (Ops[C+i].getValueType() == N->getValueType(0)) 11525 continue; 11526 11527 if (N->getOpcode() == ISD::SIGN_EXTEND) 11528 Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 11529 else if (N->getOpcode() == ISD::ZERO_EXTEND) 11530 Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 11531 else 11532 Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 11533 } 11534 11535 // If we've promoted the comparison inputs of a SELECT or SELECT_CC, 11536 // truncate them again to the original value type. 
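// (Only the value operands of the select change type; a condition input
// that was promoted along with the cluster must be narrowed back so the
// node's operand types stay consistent.)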
11537 if (PromOp.getOpcode() == ISD::SELECT || 11538 PromOp.getOpcode() == ISD::SELECT_CC) { 11539 auto SI0 = SelectTruncOp[0].find(PromOp.getNode()); 11540 if (SI0 != SelectTruncOp[0].end()) 11541 Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]); 11542 auto SI1 = SelectTruncOp[1].find(PromOp.getNode()); 11543 if (SI1 != SelectTruncOp[1].end()) 11544 Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]); 11545 } 11546 11547 DAG.ReplaceAllUsesOfValueWith(PromOp, 11548 DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops)); 11549 } 11550 11551 // Now we're left with the initial extension itself. 11552 if (!ReallyNeedsExt) 11553 return N->getOperand(0); 11554 11555 // To zero extend, just mask off everything except for the first bit (in the 11556 // i1 case). 11557 if (N->getOpcode() == ISD::ZERO_EXTEND) 11558 return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0), 11559 DAG.getConstant(APInt::getLowBitsSet( 11560 N->getValueSizeInBits(0), PromBits), 11561 dl, N->getValueType(0))); 11562 11563 assert(N->getOpcode() == ISD::SIGN_EXTEND && 11564 "Invalid extension type"); 11565 EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout()); 11566 SDValue ShiftCst = 11567 DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy); 11568 return DAG.getNode( 11569 ISD::SRA, dl, N->getValueType(0), 11570 DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst), 11571 ShiftCst); 11572 } 11573 11574 /// \brief Reduces the number of fp-to-int conversions when building a vector. 11575 /// 11576 /// If this vector is built out of floating to integer conversions, 11577 /// transform it to a vector built out of floating point values followed by a 11578 /// single floating to integer conversion of the vector. 11579 /// Namely (build_vector (fptosi $A), (fptosi $B), ...) 11580 /// becomes (fptosi (build_vector ($A, $B, ...))) 11581 SDValue PPCTargetLowering:: 11582 combineElementTruncationToVectorTruncation(SDNode *N, 11583 DAGCombinerInfo &DCI) const { 11584 assert(N->getOpcode() == ISD::BUILD_VECTOR && 11585 "Should be called with a BUILD_VECTOR node"); 11586 11587 SelectionDAG &DAG = DCI.DAG; 11588 SDLoc dl(N); 11589 11590 SDValue FirstInput = N->getOperand(0); 11591 assert(FirstInput.getOpcode() == PPCISD::MFVSR && 11592 "The input operand must be an fp-to-int conversion."); 11593 11594 // This combine happens after legalization so the fp_to_[su]i nodes are 11595 // already converted to PPCISD nodes. 11596 unsigned FirstConversion = FirstInput.getOperand(0).getOpcode(); 11597 if (FirstConversion == PPCISD::FCTIDZ || 11598 FirstConversion == PPCISD::FCTIDUZ || 11599 FirstConversion == PPCISD::FCTIWZ || 11600 FirstConversion == PPCISD::FCTIWUZ) { 11601 bool IsSplat = true; 11602 bool Is32Bit = FirstConversion == PPCISD::FCTIWZ || 11603 FirstConversion == PPCISD::FCTIWUZ; 11604 EVT SrcVT = FirstInput.getOperand(0).getValueType(); 11605 SmallVector<SDValue, 4> Ops; 11606 EVT TargetVT = N->getValueType(0); 11607 for (int i = 0, e = N->getNumOperands(); i < e; ++i) { 11608 if (N->getOperand(i).getOpcode() != PPCISD::MFVSR) 11609 return SDValue(); 11610 unsigned NextConversion = N->getOperand(i).getOperand(0).getOpcode(); 11611 if (NextConversion != FirstConversion) 11612 return SDValue(); 11613 if (N->getOperand(i) != FirstInput) 11614 IsSplat = false; 11615 } 11616 11617 // If this is a splat, we leave it as-is since there will be only a single 11618 // fp-to-int conversion followed by a splat of the integer.
This is better 11619 // for 32-bit and smaller ints and neutral for 64-bit ints. 11620 if (IsSplat) 11621 return SDValue(); 11622 11623 // Now that we know we have the right type of node, get its operands 11624 for (int i = 0, e = N->getNumOperands(); i < e; ++i) { 11625 SDValue In = N->getOperand(i).getOperand(0); 11626 // For 32-bit values, we need to add an FP_ROUND node. 11627 if (Is32Bit) { 11628 if (In.isUndef()) 11629 Ops.push_back(DAG.getUNDEF(SrcVT)); 11630 else { 11631 SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl, 11632 MVT::f32, In.getOperand(0), 11633 DAG.getIntPtrConstant(1, dl)); 11634 Ops.push_back(Trunc); 11635 } 11636 } else 11637 Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0)); 11638 } 11639 11640 unsigned Opcode; 11641 if (FirstConversion == PPCISD::FCTIDZ || 11642 FirstConversion == PPCISD::FCTIWZ) 11643 Opcode = ISD::FP_TO_SINT; 11644 else 11645 Opcode = ISD::FP_TO_UINT; 11646 11647 EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32; 11648 SDValue BV = DAG.getBuildVector(NewVT, dl, Ops); 11649 return DAG.getNode(Opcode, dl, TargetVT, BV); 11650 } 11651 return SDValue(); 11652 } 11653 11654 /// \brief Reduce the number of loads when building a vector. 11655 /// 11656 /// Building a vector out of multiple loads can be converted to a load 11657 /// of the vector type if the loads are consecutive. If the loads are 11658 /// consecutive but in descending order, a shuffle is added at the end 11659 /// to reorder the vector. 11660 static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) { 11661 assert(N->getOpcode() == ISD::BUILD_VECTOR && 11662 "Should be called with a BUILD_VECTOR node"); 11663 11664 SDLoc dl(N); 11665 bool InputsAreConsecutiveLoads = true; 11666 bool InputsAreReverseConsecutive = true; 11667 unsigned ElemSize = N->getValueType(0).getScalarSizeInBits() / 8; 11668 SDValue FirstInput = N->getOperand(0); 11669 bool IsRoundOfExtLoad = false; 11670 11671 if (FirstInput.getOpcode() == ISD::FP_ROUND && 11672 FirstInput.getOperand(0).getOpcode() == ISD::LOAD) { 11673 LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0)); 11674 IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD; 11675 } 11676 // Not a build vector of (possibly fp_rounded) loads. 11677 if (!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) 11678 return SDValue(); 11679 11680 for (int i = 1, e = N->getNumOperands(); i < e; ++i) { 11681 // If any inputs are fp_round(extload), they all must be. 11682 if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND) 11683 return SDValue(); 11684 11685 SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) : 11686 N->getOperand(i); 11687 if (NextInput.getOpcode() != ISD::LOAD) 11688 return SDValue(); 11689 11690 SDValue PreviousInput = 11691 IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1); 11692 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput); 11693 LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput); 11694 11695 // If any inputs are fp_round(extload), they all must be. 11696 if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD) 11697 return SDValue(); 11698 11699 if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG)) 11700 InputsAreConsecutiveLoads = false; 11701 if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG)) 11702 InputsAreReverseConsecutive = false; 11703 11704 // Exit early if the loads are neither consecutive nor reverse consecutive. 
11705 if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive) 11706 return SDValue(); 11707 } 11708 11709 assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) && 11710 "The loads cannot be both consecutive and reverse consecutive."); 11711 11712 SDValue FirstLoadOp = 11713 IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput; 11714 SDValue LastLoadOp = 11715 IsRoundOfExtLoad ? N->getOperand(N->getNumOperands()-1).getOperand(0) : 11716 N->getOperand(N->getNumOperands()-1); 11717 11718 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp); 11719 LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp); 11720 if (InputsAreConsecutiveLoads) { 11721 assert(LD1 && "Input needs to be a LoadSDNode."); 11722 return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(), 11723 LD1->getBasePtr(), LD1->getPointerInfo(), 11724 LD1->getAlignment()); 11725 } 11726 if (InputsAreReverseConsecutive) { 11727 assert(LDL && "Input needs to be a LoadSDNode."); 11728 SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(), 11729 LDL->getBasePtr(), LDL->getPointerInfo(), 11730 LDL->getAlignment()); 11731 SmallVector<int, 16> Ops; 11732 for (int i = N->getNumOperands() - 1; i >= 0; i--) 11733 Ops.push_back(i); 11734 11735 return DAG.getVectorShuffle(N->getValueType(0), dl, Load, 11736 DAG.getUNDEF(N->getValueType(0)), Ops); 11737 } 11738 return SDValue(); 11739 } 11740 11741 // This function adds the vector_shuffle needed to get 11742 // the elements of the vector extract in the correct position 11743 // as specified by the CorrectElems encoding. 11744 static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG, 11745 SDValue Input, uint64_t Elems, 11746 uint64_t CorrectElems) { 11747 SDLoc dl(N); 11748 11749 unsigned NumElems = Input.getValueType().getVectorNumElements(); 11750 SmallVector<int, 16> ShuffleMask(NumElems, -1); 11751 11752 // Knowing the element indices being extracted from the original 11753 // vector and the order in which they're being inserted, just put 11754 // them at element indices required for the instruction. 11755 for (unsigned i = 0; i < N->getNumOperands(); i++) { 11756 if (DAG.getDataLayout().isLittleEndian()) 11757 ShuffleMask[CorrectElems & 0xF] = Elems & 0xF; 11758 else 11759 ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4; 11760 CorrectElems = CorrectElems >> 8; 11761 Elems = Elems >> 8; 11762 } 11763 11764 SDValue Shuffle = 11765 DAG.getVectorShuffle(Input.getValueType(), dl, Input, 11766 DAG.getUNDEF(Input.getValueType()), ShuffleMask); 11767 11768 EVT Ty = N->getValueType(0); 11769 SDValue BV = DAG.getNode(PPCISD::SExtVElems, dl, Ty, Shuffle); 11770 return BV; 11771 } 11772 11773 // Look for build vector patterns where input operands come from sign 11774 // extended vector_extract elements of specific indices. If the correct indices 11775 // aren't used, add a vector shuffle to fix up the indices and create a new 11776 // PPCISD::SExtVElems node which selects the vector sign extend instructions 11777 // during instruction selection. 11778 static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) { 11779 // This array encodes the indices that the vector sign extend instructions 11780 // extract from when extending from one type to another for both BE and LE. 11781 // The right nibble of each byte corresponds to the LE indices, 11782 // and the left nibble of each byte corresponds to the BE indices.
  // For example: 0x3074B8FC  byte->word
  //   For LE: the allowed indices are: 0x0,0x4,0x8,0xC
  //   For BE: the allowed indices are: 0x3,0x7,0xB,0xF
  // For example: 0x000070F8  byte->double word
  //   For LE: the allowed indices are: 0x0,0x8
  //   For BE: the allowed indices are: 0x7,0xF
  uint64_t TargetElems[] = {
      0x3074B8FC, // b->w
      0x000070F8, // b->d
      0x10325476, // h->w
      0x00003074, // h->d
      0x00001032, // w->d
  };

  uint64_t Elems = 0;
  int Index;
  SDValue Input;

  auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
    if (!Op)
      return false;
    if (Op.getOpcode() != ISD::SIGN_EXTEND)
      return false;

    SDValue Extract = Op.getOperand(0);
    if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return false;

    ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
    if (!ExtOp)
      return false;

    Index = ExtOp->getZExtValue();
    if (Input && Input != Extract.getOperand(0))
      return false;

    if (!Input)
      Input = Extract.getOperand(0);

    Elems = Elems << 8;
    Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
    Elems |= Index;

    return true;
  };

  // If the build vector operands aren't sign-extended vector extracts
  // of the same input vector, then return.
  for (unsigned i = 0; i < N->getNumOperands(); i++) {
    if (!isSExtOfVecExtract(N->getOperand(i))) {
      return SDValue();
    }
  }

  // If the vector extract indices are not correct, add the appropriate
  // vector_shuffle.
  int TgtElemArrayIdx;
  int InputSize = Input.getValueType().getScalarSizeInBits();
  int OutputSize = N->getValueType(0).getScalarSizeInBits();
  if (InputSize + OutputSize == 40)
    TgtElemArrayIdx = 0;
  else if (InputSize + OutputSize == 72)
    TgtElemArrayIdx = 1;
  else if (InputSize + OutputSize == 48)
    TgtElemArrayIdx = 2;
  else if (InputSize + OutputSize == 80)
    TgtElemArrayIdx = 3;
  else if (InputSize + OutputSize == 96)
    TgtElemArrayIdx = 4;
  else
    return SDValue();

  uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
  CorrectElems = DAG.getDataLayout().isLittleEndian()
                     ? CorrectElems & 0x0F0F0F0F0F0F0F0F
                     : CorrectElems & 0xF0F0F0F0F0F0F0F0;
  if (Elems != CorrectElems) {
    return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
  }

  // Regular lowering will catch cases where a shuffle is not needed.
  return SDValue();
}

SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         "Should be called with a BUILD_VECTOR node");

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);

  if (!Subtarget.hasVSX())
    return SDValue();

  // The target independent DAG combiner will leave a build_vector of
  // float-to-int conversions intact. We can generate MUCH better code for
  // a float-to-int conversion of a vector of floats.
  SDValue FirstInput = N->getOperand(0);
  if (FirstInput.getOpcode() == PPCISD::MFVSR) {
    SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
    if (Reduced)
      return Reduced;
  }

  // If we're building a vector out of consecutive loads, just load that
  // vector type.
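  // For example, (build_vector (load a), (load a+4), (load a+8), (load a+12))
  // of type v4i32 becomes a single v4i32 load from a (or a load plus a
  // reversing shuffle if the addresses descend); see
  // combineBVOfConsecutiveLoads above.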
  SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG);
  if (Reduced)
    return Reduced;

  // If we're building a vector out of extended elements from another vector,
  // we have P9 vector integer extend instructions.
  if (Subtarget.hasP9Altivec()) {
    Reduced = combineBVOfVecSExt(N, DAG);
    if (Reduced)
      return Reduced;
  }

  if (N->getValueType(0) != MVT::v2f64)
    return SDValue();

  // Looking for:
  // (build_vector ([su]int_to_fp (extractelt 0)), ([su]int_to_fp (extractelt 1)))
  if (FirstInput.getOpcode() != ISD::SINT_TO_FP &&
      FirstInput.getOpcode() != ISD::UINT_TO_FP)
    return SDValue();
  if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
      N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
    return SDValue();
  if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
    return SDValue();

  SDValue Ext1 = FirstInput.getOperand(0);
  SDValue Ext2 = N->getOperand(1).getOperand(0);
  if (Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    return SDValue();

  ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
  ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
  if (!Ext1Op || !Ext2Op)
    return SDValue();
  if (Ext1.getValueType() != MVT::i32 ||
      Ext2.getValueType() != MVT::i32)
    if (Ext1.getOperand(0) != Ext2.getOperand(0))
      return SDValue();

  int FirstElem = Ext1Op->getZExtValue();
  int SecondElem = Ext2Op->getZExtValue();
  int SubvecIdx;
  if (FirstElem == 0 && SecondElem == 1)
    SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
  else if (FirstElem == 2 && SecondElem == 3)
    SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
  else
    return SDValue();

  SDValue SrcVec = Ext1.getOperand(0);
  auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
    PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
  return DAG.getNode(NodeType, dl, MVT::v2f64,
                     SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
}

SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::SINT_TO_FP ||
          N->getOpcode() == ISD::UINT_TO_FP) &&
         "Need an int -> FP conversion node here");

  if (useSoftFloat() || !Subtarget.has64BitSupport())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Op(N, 0);

  // Don't handle ppc_fp128 here or conversions that are out-of-range capable
  // from the hardware.
  if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
    return SDValue();
  if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
      Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
    return SDValue();

  SDValue FirstOperand(Op.getOperand(0));
  bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
    (FirstOperand.getValueType() == MVT::i8 ||
     FirstOperand.getValueType() == MVT::i16);
  if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
    bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
    bool DstDouble = Op.getValueType() == MVT::f64;
    unsigned ConvOp = Signed ?
      (DstDouble ? PPCISD::FCFID  : PPCISD::FCFIDS) :
      (DstDouble ?
       PPCISD::FCFIDU : PPCISD::FCFIDUS);
    SDValue WidthConst =
      DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
                            dl, false);
    LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
    SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
    SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
                                         DAG.getVTList(MVT::f64, MVT::Other),
                                         Ops, MVT::i8, LDN->getMemOperand());

    // For signed conversion, we need to sign-extend the value in the VSR.
    if (Signed) {
      SDValue ExtOps[] = { Ld, WidthConst };
      SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
      return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
    } else
      return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
  }

  // For i32 intermediate values, unfortunately, the conversion functions
  // leave the upper 32 bits of the value undefined. Within the set of
  // scalar instructions, we have no method for zero- or sign-extending the
  // value. Thus, we cannot handle i32 intermediate values here.
  if (Op.getOperand(0).getValueType() == MVT::i32)
    return SDValue();

  assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
         "UINT_TO_FP is supported only with FPCVT");

  // If we have FCFIDS, then use it when converting to single-precision.
  // Otherwise, convert to double-precision and then round.
  unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                       ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
                                                            : PPCISD::FCFIDS)
                       : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
                                                            : PPCISD::FCFID);
  MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                  ? MVT::f32
                  : MVT::f64;

  // If we're converting from a float to an int and back to a float again,
  // then we don't need the store/load pair at all.
  if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
       Subtarget.hasFPCVT()) ||
      (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
    SDValue Src = Op.getOperand(0).getOperand(0);
    if (Src.getValueType() == MVT::f32) {
      Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
      DCI.AddToWorklist(Src.getNode());
    } else if (Src.getValueType() != MVT::f64) {
      // Make sure that we don't pick up a ppc_fp128 source value.
      return SDValue();
    }

    unsigned FCTOp =
      Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
                                                        PPCISD::FCTIDUZ;

    SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
    SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);

    if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
      FP = DAG.getNode(ISD::FP_ROUND, dl,
                       MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
      DCI.AddToWorklist(FP.getNode());
    }

    return FP;
  }

  return SDValue();
}

// expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
// builtins) into loads with swaps.
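// On little endian subtargets, lxvd2x loads the two doublewords in
// big endian element order, so the load is followed by an xxswapd to put the
// elements in the expected order. Schematically, the combine below produces
// (v2f64 (XXSWAPD (LXVD2X addr))), plus a bitcast when the requested type is
// not v2f64.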
12055 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N, 12056 DAGCombinerInfo &DCI) const { 12057 SelectionDAG &DAG = DCI.DAG; 12058 SDLoc dl(N); 12059 SDValue Chain; 12060 SDValue Base; 12061 MachineMemOperand *MMO; 12062 12063 switch (N->getOpcode()) { 12064 default: 12065 llvm_unreachable("Unexpected opcode for little endian VSX load"); 12066 case ISD::LOAD: { 12067 LoadSDNode *LD = cast<LoadSDNode>(N); 12068 Chain = LD->getChain(); 12069 Base = LD->getBasePtr(); 12070 MMO = LD->getMemOperand(); 12071 // If the MMO suggests this isn't a load of a full vector, leave 12072 // things alone. For a built-in, we have to make the change for 12073 // correctness, so if there is a size problem that will be a bug. 12074 if (MMO->getSize() < 16) 12075 return SDValue(); 12076 break; 12077 } 12078 case ISD::INTRINSIC_W_CHAIN: { 12079 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N); 12080 Chain = Intrin->getChain(); 12081 // Similarly to the store case below, Intrin->getBasePtr() doesn't get 12082 // us what we want. Get operand 2 instead. 12083 Base = Intrin->getOperand(2); 12084 MMO = Intrin->getMemOperand(); 12085 break; 12086 } 12087 } 12088 12089 MVT VecTy = N->getValueType(0).getSimpleVT(); 12090 12091 // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is 12092 // aligned and the type is a vector with elements up to 4 bytes 12093 if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment()%16) 12094 && VecTy.getScalarSizeInBits() <= 32 ) { 12095 return SDValue(); 12096 } 12097 12098 SDValue LoadOps[] = { Chain, Base }; 12099 SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl, 12100 DAG.getVTList(MVT::v2f64, MVT::Other), 12101 LoadOps, MVT::v2f64, MMO); 12102 12103 DCI.AddToWorklist(Load.getNode()); 12104 Chain = Load.getValue(1); 12105 SDValue Swap = DAG.getNode( 12106 PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load); 12107 DCI.AddToWorklist(Swap.getNode()); 12108 12109 // Add a bitcast if the resulting load type doesn't match v2f64. 12110 if (VecTy != MVT::v2f64) { 12111 SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap); 12112 DCI.AddToWorklist(N.getNode()); 12113 // Package {bitcast value, swap's chain} to match Load's shape. 12114 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other), 12115 N, Swap.getValue(1)); 12116 } 12117 12118 return Swap; 12119 } 12120 12121 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for 12122 // builtins) into stores with swaps. 12123 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N, 12124 DAGCombinerInfo &DCI) const { 12125 SelectionDAG &DAG = DCI.DAG; 12126 SDLoc dl(N); 12127 SDValue Chain; 12128 SDValue Base; 12129 unsigned SrcOpnd; 12130 MachineMemOperand *MMO; 12131 12132 switch (N->getOpcode()) { 12133 default: 12134 llvm_unreachable("Unexpected opcode for little endian VSX store"); 12135 case ISD::STORE: { 12136 StoreSDNode *ST = cast<StoreSDNode>(N); 12137 Chain = ST->getChain(); 12138 Base = ST->getBasePtr(); 12139 MMO = ST->getMemOperand(); 12140 SrcOpnd = 1; 12141 // If the MMO suggests this isn't a store of a full vector, leave 12142 // things alone. For a built-in, we have to make the change for 12143 // correctness, so if there is a size problem that will be a bug. 12144 if (MMO->getSize() < 16) 12145 return SDValue(); 12146 break; 12147 } 12148 case ISD::INTRINSIC_VOID: { 12149 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N); 12150 Chain = Intrin->getChain(); 12151 // Intrin->getBasePtr() oddly does not get what we want. 
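    // For these store intrinsics the operands are (chain, intrinsic id,
    // value, pointer), so the pointer is operand 3 and the stored value
    // is operand 2.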
    Base = Intrin->getOperand(3);
    MMO = Intrin->getMemOperand();
    SrcOpnd = 2;
    break;
  }
  }

  SDValue Src = N->getOperand(SrcOpnd);
  MVT VecTy = Src.getValueType().getSimpleVT();

  // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the store is
  // aligned and the type is a vector with elements up to 4 bytes.
  if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment()%16)
      && VecTy.getScalarSizeInBits() <= 32) {
    return SDValue();
  }

  // All stores are done as v2f64 and possible bit cast.
  if (VecTy != MVT::v2f64) {
    Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
    DCI.AddToWorklist(Src.getNode());
  }

  SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
                             DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
  DCI.AddToWorklist(Swap.getNode());
  Chain = Swap.getValue(1);
  SDValue StoreOps[] = { Chain, Swap, Base };
  SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
                                          DAG.getVTList(MVT::Other),
                                          StoreOps, VecTy, MMO);
  DCI.AddToWorklist(Store.getNode());
  return Store;
}

SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default: break;
  case ISD::SHL:
    return combineSHL(N, DCI);
  case ISD::SRA:
    return combineSRA(N, DCI);
  case ISD::SRL:
    return combineSRL(N, DCI);
  case PPCISD::SHL:
    if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
      return N->getOperand(0);
    break;
  case PPCISD::SRL:
    if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
      return N->getOperand(0);
    break;
  case PPCISD::SRA:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->isNullValue() ||   //  0 >>s V -> 0.
          C->isAllOnesValue())  // -1 >>s V -> -1.
        return N->getOperand(0);
    }
    break;
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
    return DAGCombineExtBoolTrunc(N, DCI);
  case ISD::TRUNCATE:
  case ISD::SETCC:
  case ISD::SELECT_CC:
    return DAGCombineTruncBoolExt(N, DCI);
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    return combineFPToIntToFP(N, DCI);
  case ISD::STORE: {
    EVT Op1VT = N->getOperand(1).getValueType();
    bool ValidTypeForStoreFltAsInt = (Op1VT == MVT::i32) ||
      (Subtarget.hasP9Vector() && (Op1VT == MVT::i8 || Op1VT == MVT::i16));

    // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)).
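    // That is, convert in an FPR with fctiwz and store the FPR directly with
    // stfiwx (or stxsix for the P9 sub-word cases), avoiding a move of the
    // converted value through a GPR.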
    if (Subtarget.hasSTFIWX() && !cast<StoreSDNode>(N)->isTruncatingStore() &&
        N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
        ValidTypeForStoreFltAsInt &&
        N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) {
      SDValue Val = N->getOperand(1).getOperand(0);
      if (Val.getValueType() == MVT::f32) {
        Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
        DCI.AddToWorklist(Val.getNode());
      }
      Val = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Val);
      DCI.AddToWorklist(Val.getNode());

      if (Op1VT == MVT::i32) {
        SDValue Ops[] = {
          N->getOperand(0), Val, N->getOperand(2),
          DAG.getValueType(N->getOperand(1).getValueType())
        };

        Val = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
                DAG.getVTList(MVT::Other), Ops,
                cast<StoreSDNode>(N)->getMemoryVT(),
                cast<StoreSDNode>(N)->getMemOperand());
      } else {
        unsigned WidthInBytes =
          N->getOperand(1).getValueType() == MVT::i8 ? 1 : 2;
        SDValue WidthConst = DAG.getIntPtrConstant(WidthInBytes, dl, false);

        SDValue Ops[] = {
          N->getOperand(0), Val, N->getOperand(2), WidthConst,
          DAG.getValueType(N->getOperand(1).getValueType())
        };
        Val = DAG.getMemIntrinsicNode(PPCISD::STXSIX, dl,
                                      DAG.getVTList(MVT::Other), Ops,
                                      cast<StoreSDNode>(N)->getMemoryVT(),
                                      cast<StoreSDNode>(N)->getMemOperand());
      }

      DCI.AddToWorklist(Val.getNode());
      return Val;
    }

    // Turn STORE (BSWAP) -> sthbrx/stwbrx.
    if (cast<StoreSDNode>(N)->isUnindexed() &&
        N->getOperand(1).getOpcode() == ISD::BSWAP &&
        N->getOperand(1).getNode()->hasOneUse() &&
        (N->getOperand(1).getValueType() == MVT::i32 ||
         N->getOperand(1).getValueType() == MVT::i16 ||
         (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
          N->getOperand(1).getValueType() == MVT::i64))) {
      SDValue BSwapOp = N->getOperand(1).getOperand(0);
      // Do an any-extend to 32-bits if this is a half-word input.
      if (BSwapOp.getValueType() == MVT::i16)
        BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);

      // If the type of the BSWAP operand is wider than the stored memory
      // width, it needs to be shifted right before STBRX.
      EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
      if (Op1VT.bitsGT(mVT)) {
        int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
        BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
                              DAG.getConstant(Shift, dl, MVT::i32));
        // Need to truncate if this is a bswap of i64 stored as i32/i16.
        if (Op1VT == MVT::i64)
          BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
      }

      SDValue Ops[] = {
        N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
      };
      return
        DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
                                Ops, cast<StoreSDNode>(N)->getMemoryVT(),
                                cast<StoreSDNode>(N)->getMemOperand());
    }

    // STORE Constant:i32<0> -> STORE<trunc to i32> Constant:i64<0>
    // so that we can increase the chance of CSE'ing the constant
    // construction.
    EVT VT = N->getOperand(1).getValueType();
    if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
        isa<ConstantSDNode>(N->getOperand(1)) && VT == MVT::i32) {
      // Need to sign-extend to 64 bits to handle negative values.
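      // For example, a store of i32 -1 becomes a truncating store of the i64
      // constant 0xFFFFFFFFFFFFFFFF, which can then share its materialization
      // with other uses of i64 -1.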
      EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
      uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
                                    MemVT.getSizeInBits());
      SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);

      // DAG.getTruncStore() can't be used here because it doesn't accept
      // the general (base + offset) addressing mode.
      // So we use UpdateNodeOperands and setTruncatingStore instead.
      DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
                             N->getOperand(3));
      cast<StoreSDNode>(N)->setTruncatingStore(true);
      return SDValue(N, 0);
    }

    // For little endian, VSX stores require generating xxswapd/stxvd2x.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
    if (VT.isSimple()) {
      MVT StoreVT = VT.getSimpleVT();
      if (Subtarget.needsSwapsForVSXMemOps() &&
          (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
           StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
        return expandVSXStoreForLE(N, DCI);
    }
    break;
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(N);
    EVT VT = LD->getValueType(0);

    // For little endian, VSX loads require generating lxvd2x/xxswapd.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
    if (VT.isSimple()) {
      MVT LoadVT = VT.getSimpleVT();
      if (Subtarget.needsSwapsForVSXMemOps() &&
          (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
           LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
        return expandVSXLoadForLE(N, DCI);
    }

    // We sometimes end up with a 64-bit integer load, from which we extract
    // two single-precision floating-point numbers. This happens with
    // std::complex<float>, and other similar structures, because of the way we
    // canonicalize structure copies. However, if we lack direct moves,
    // then the final bitcasts from the extracted integer values to the
    // floating-point numbers turn into store/load pairs. Even with direct
    // moves, just loading the two floating-point numbers is likely better.
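    // The lambda below matches that pattern and replaces it with two f32
    // loads, at offsets 0 and 4 from the original base pointer.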
12358 auto ReplaceTwoFloatLoad = [&]() { 12359 if (VT != MVT::i64) 12360 return false; 12361 12362 if (LD->getExtensionType() != ISD::NON_EXTLOAD || 12363 LD->isVolatile()) 12364 return false; 12365 12366 // We're looking for a sequence like this: 12367 // t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64 12368 // t16: i64 = srl t13, Constant:i32<32> 12369 // t17: i32 = truncate t16 12370 // t18: f32 = bitcast t17 12371 // t19: i32 = truncate t13 12372 // t20: f32 = bitcast t19 12373 12374 if (!LD->hasNUsesOfValue(2, 0)) 12375 return false; 12376 12377 auto UI = LD->use_begin(); 12378 while (UI.getUse().getResNo() != 0) ++UI; 12379 SDNode *Trunc = *UI++; 12380 while (UI.getUse().getResNo() != 0) ++UI; 12381 SDNode *RightShift = *UI; 12382 if (Trunc->getOpcode() != ISD::TRUNCATE) 12383 std::swap(Trunc, RightShift); 12384 12385 if (Trunc->getOpcode() != ISD::TRUNCATE || 12386 Trunc->getValueType(0) != MVT::i32 || 12387 !Trunc->hasOneUse()) 12388 return false; 12389 if (RightShift->getOpcode() != ISD::SRL || 12390 !isa<ConstantSDNode>(RightShift->getOperand(1)) || 12391 RightShift->getConstantOperandVal(1) != 32 || 12392 !RightShift->hasOneUse()) 12393 return false; 12394 12395 SDNode *Trunc2 = *RightShift->use_begin(); 12396 if (Trunc2->getOpcode() != ISD::TRUNCATE || 12397 Trunc2->getValueType(0) != MVT::i32 || 12398 !Trunc2->hasOneUse()) 12399 return false; 12400 12401 SDNode *Bitcast = *Trunc->use_begin(); 12402 SDNode *Bitcast2 = *Trunc2->use_begin(); 12403 12404 if (Bitcast->getOpcode() != ISD::BITCAST || 12405 Bitcast->getValueType(0) != MVT::f32) 12406 return false; 12407 if (Bitcast2->getOpcode() != ISD::BITCAST || 12408 Bitcast2->getValueType(0) != MVT::f32) 12409 return false; 12410 12411 if (Subtarget.isLittleEndian()) 12412 std::swap(Bitcast, Bitcast2); 12413 12414 // Bitcast has the second float (in memory-layout order) and Bitcast2 12415 // has the first one. 12416 12417 SDValue BasePtr = LD->getBasePtr(); 12418 if (LD->isIndexed()) { 12419 assert(LD->getAddressingMode() == ISD::PRE_INC && 12420 "Non-pre-inc AM on PPC?"); 12421 BasePtr = 12422 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 12423 LD->getOffset()); 12424 } 12425 12426 auto MMOFlags = 12427 LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile; 12428 SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr, 12429 LD->getPointerInfo(), LD->getAlignment(), 12430 MMOFlags, LD->getAAInfo()); 12431 SDValue AddPtr = 12432 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), 12433 BasePtr, DAG.getIntPtrConstant(4, dl)); 12434 SDValue FloatLoad2 = DAG.getLoad( 12435 MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr, 12436 LD->getPointerInfo().getWithOffset(4), 12437 MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo()); 12438 12439 if (LD->isIndexed()) { 12440 // Note that DAGCombine should re-form any pre-increment load(s) from 12441 // what is produced here if that makes sense. 12442 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr); 12443 } 12444 12445 DCI.CombineTo(Bitcast2, FloatLoad); 12446 DCI.CombineTo(Bitcast, FloatLoad2); 12447 12448 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 
2 : 1), 12449 SDValue(FloatLoad2.getNode(), 1)); 12450 return true; 12451 }; 12452 12453 if (ReplaceTwoFloatLoad()) 12454 return SDValue(N, 0); 12455 12456 EVT MemVT = LD->getMemoryVT(); 12457 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext()); 12458 unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty); 12459 Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext()); 12460 unsigned ScalarABIAlignment = DAG.getDataLayout().getABITypeAlignment(STy); 12461 if (LD->isUnindexed() && VT.isVector() && 12462 ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) && 12463 // P8 and later hardware should just use LOAD. 12464 !Subtarget.hasP8Vector() && (VT == MVT::v16i8 || VT == MVT::v8i16 || 12465 VT == MVT::v4i32 || VT == MVT::v4f32)) || 12466 (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) && 12467 LD->getAlignment() >= ScalarABIAlignment)) && 12468 LD->getAlignment() < ABIAlignment) { 12469 // This is a type-legal unaligned Altivec or QPX load. 12470 SDValue Chain = LD->getChain(); 12471 SDValue Ptr = LD->getBasePtr(); 12472 bool isLittleEndian = Subtarget.isLittleEndian(); 12473 12474 // This implements the loading of unaligned vectors as described in 12475 // the venerable Apple Velocity Engine overview. Specifically: 12476 // https://developer.apple.com/hardwaredrivers/ve/alignment.html 12477 // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html 12478 // 12479 // The general idea is to expand a sequence of one or more unaligned 12480 // loads into an alignment-based permutation-control instruction (lvsl 12481 // or lvsr), a series of regular vector loads (which always truncate 12482 // their input address to an aligned address), and a series of 12483 // permutations. The results of these permutations are the requested 12484 // loaded values. The trick is that the last "extra" load is not taken 12485 // from the address you might suspect (sizeof(vector) bytes after the 12486 // last requested load), but rather sizeof(vector) - 1 bytes after the 12487 // last requested vector. The point of this is to avoid a page fault if 12488 // the base address happened to be aligned. This works because if the 12489 // base address is aligned, then adding less than a full vector length 12490 // will cause the last vector in the sequence to be (re)loaded. 12491 // Otherwise, the next vector will be fetched as you might suspect was 12492 // necessary. 12493 12494 // We might be able to reuse the permutation generation from 12495 // a different base address offset from this one by an aligned amount. 12496 // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this 12497 // optimization later. 12498 Intrinsic::ID Intr, IntrLD, IntrPerm; 12499 MVT PermCntlTy, PermTy, LDTy; 12500 if (Subtarget.hasAltivec()) { 12501 Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr : 12502 Intrinsic::ppc_altivec_lvsl; 12503 IntrLD = Intrinsic::ppc_altivec_lvx; 12504 IntrPerm = Intrinsic::ppc_altivec_vperm; 12505 PermCntlTy = MVT::v16i8; 12506 PermTy = MVT::v4i32; 12507 LDTy = MVT::v4i32; 12508 } else { 12509 Intr = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld : 12510 Intrinsic::ppc_qpx_qvlpcls; 12511 IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd : 12512 Intrinsic::ppc_qpx_qvlfs; 12513 IntrPerm = Intrinsic::ppc_qpx_qvfperm; 12514 PermCntlTy = MVT::v4f64; 12515 PermTy = MVT::v4f64; 12516 LDTy = MemVT.getSimpleVT(); 12517 } 12518 12519 SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy); 12520 12521 // Create the new MMO for the new base load. 
It is like the original MMO, 12522 // but represents an area in memory almost twice the vector size centered 12523 // on the original address. If the address is unaligned, we might start 12524 // reading up to (sizeof(vector)-1) bytes below the address of the 12525 // original unaligned load. 12526 MachineFunction &MF = DAG.getMachineFunction(); 12527 MachineMemOperand *BaseMMO = 12528 MF.getMachineMemOperand(LD->getMemOperand(), 12529 -(long)MemVT.getStoreSize()+1, 12530 2*MemVT.getStoreSize()-1); 12531 12532 // Create the new base load. 12533 SDValue LDXIntID = 12534 DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout())); 12535 SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr }; 12536 SDValue BaseLoad = 12537 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 12538 DAG.getVTList(PermTy, MVT::Other), 12539 BaseLoadOps, LDTy, BaseMMO); 12540 12541 // Note that the value of IncOffset (which is provided to the next 12542 // load's pointer info offset value, and thus used to calculate the 12543 // alignment), and the value of IncValue (which is actually used to 12544 // increment the pointer value) are different! This is because we 12545 // require the next load to appear to be aligned, even though it 12546 // is actually offset from the base pointer by a lesser amount. 12547 int IncOffset = VT.getSizeInBits() / 8; 12548 int IncValue = IncOffset; 12549 12550 // Walk (both up and down) the chain looking for another load at the real 12551 // (aligned) offset (the alignment of the other load does not matter in 12552 // this case). If found, then do not use the offset reduction trick, as 12553 // that will prevent the loads from being later combined (as they would 12554 // otherwise be duplicates). 12555 if (!findConsecutiveLoad(LD, DAG)) 12556 --IncValue; 12557 12558 SDValue Increment = 12559 DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout())); 12560 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 12561 12562 MachineMemOperand *ExtraMMO = 12563 MF.getMachineMemOperand(LD->getMemOperand(), 12564 1, 2*MemVT.getStoreSize()-1); 12565 SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr }; 12566 SDValue ExtraLoad = 12567 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 12568 DAG.getVTList(PermTy, MVT::Other), 12569 ExtraLoadOps, LDTy, ExtraMMO); 12570 12571 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 12572 BaseLoad.getValue(1), ExtraLoad.getValue(1)); 12573 12574 // Because vperm has a big-endian bias, we must reverse the order 12575 // of the input vectors and complement the permute control vector 12576 // when generating little endian code. We have already handled the 12577 // latter by using lvsr instead of lvsl, so just reverse BaseLoad 12578 // and ExtraLoad here. 12579 SDValue Perm; 12580 if (isLittleEndian) 12581 Perm = BuildIntrinsicOp(IntrPerm, 12582 ExtraLoad, BaseLoad, PermCntl, DAG, dl); 12583 else 12584 Perm = BuildIntrinsicOp(IntrPerm, 12585 BaseLoad, ExtraLoad, PermCntl, DAG, dl); 12586 12587 if (VT != PermTy) 12588 Perm = Subtarget.hasAltivec() ? 12589 DAG.getNode(ISD::BITCAST, dl, VT, Perm) : 12590 DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX 12591 DAG.getTargetConstant(1, dl, MVT::i64)); 12592 // second argument is 1 because this rounding 12593 // is always exact. 12594 12595 // The output of the permutation is our loaded result, the TokenFactor is 12596 // our new chain. 
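      // To summarize, for one unaligned Altivec load the expansion is roughly:
      //   PermCntl  = lvsl/lvsr(Ptr)
      //   BaseLoad  = lvx(Ptr)
      //   ExtraLoad = lvx(Ptr + 15)  // or Ptr + 16 if a consecutive load
      //                              // was found above
      //   Result    = vperm(BaseLoad, ExtraLoad, PermCntl)
      // with the two load operands swapped on little endian.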
12597 DCI.CombineTo(N, Perm, TF); 12598 return SDValue(N, 0); 12599 } 12600 } 12601 break; 12602 case ISD::INTRINSIC_WO_CHAIN: { 12603 bool isLittleEndian = Subtarget.isLittleEndian(); 12604 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 12605 Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr 12606 : Intrinsic::ppc_altivec_lvsl); 12607 if ((IID == Intr || 12608 IID == Intrinsic::ppc_qpx_qvlpcld || 12609 IID == Intrinsic::ppc_qpx_qvlpcls) && 12610 N->getOperand(1)->getOpcode() == ISD::ADD) { 12611 SDValue Add = N->getOperand(1); 12612 12613 int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ? 12614 5 /* 32 byte alignment */ : 4 /* 16 byte alignment */; 12615 12616 if (DAG.MaskedValueIsZero(Add->getOperand(1), 12617 APInt::getAllOnesValue(Bits /* alignment */) 12618 .zext(Add.getScalarValueSizeInBits()))) { 12619 SDNode *BasePtr = Add->getOperand(0).getNode(); 12620 for (SDNode::use_iterator UI = BasePtr->use_begin(), 12621 UE = BasePtr->use_end(); 12622 UI != UE; ++UI) { 12623 if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 12624 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) { 12625 // We've found another LVSL/LVSR, and this address is an aligned 12626 // multiple of that one. The results will be the same, so use the 12627 // one we've just found instead. 12628 12629 return SDValue(*UI, 0); 12630 } 12631 } 12632 } 12633 12634 if (isa<ConstantSDNode>(Add->getOperand(1))) { 12635 SDNode *BasePtr = Add->getOperand(0).getNode(); 12636 for (SDNode::use_iterator UI = BasePtr->use_begin(), 12637 UE = BasePtr->use_end(); UI != UE; ++UI) { 12638 if (UI->getOpcode() == ISD::ADD && 12639 isa<ConstantSDNode>(UI->getOperand(1)) && 12640 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() - 12641 cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) % 12642 (1ULL << Bits) == 0) { 12643 SDNode *OtherAdd = *UI; 12644 for (SDNode::use_iterator VI = OtherAdd->use_begin(), 12645 VE = OtherAdd->use_end(); VI != VE; ++VI) { 12646 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 12647 cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) { 12648 return SDValue(*VI, 0); 12649 } 12650 } 12651 } 12652 } 12653 } 12654 } 12655 } 12656 12657 break; 12658 case ISD::INTRINSIC_W_CHAIN: 12659 // For little endian, VSX loads require generating lxvd2x/xxswapd. 12660 // Not needed on ISA 3.0 based CPUs since we have a non-permuting load. 12661 if (Subtarget.needsSwapsForVSXMemOps()) { 12662 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 12663 default: 12664 break; 12665 case Intrinsic::ppc_vsx_lxvw4x: 12666 case Intrinsic::ppc_vsx_lxvd2x: 12667 return expandVSXLoadForLE(N, DCI); 12668 } 12669 } 12670 break; 12671 case ISD::INTRINSIC_VOID: 12672 // For little endian, VSX stores require generating xxswapd/stxvd2x. 12673 // Not needed on ISA 3.0 based CPUs since we have a non-permuting store. 12674 if (Subtarget.needsSwapsForVSXMemOps()) { 12675 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 12676 default: 12677 break; 12678 case Intrinsic::ppc_vsx_stxvw4x: 12679 case Intrinsic::ppc_vsx_stxvd2x: 12680 return expandVSXStoreForLE(N, DCI); 12681 } 12682 } 12683 break; 12684 case ISD::BSWAP: 12685 // Turn BSWAP (LOAD) -> lhbrx/lwbrx. 
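    // e.g. (i32 (bswap (load addr))) becomes a single lwbrx from addr (or
    // ldbrx for i64 when available), replacing both the load and the swap.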
12686 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) && 12687 N->getOperand(0).hasOneUse() && 12688 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 || 12689 (Subtarget.hasLDBRX() && Subtarget.isPPC64() && 12690 N->getValueType(0) == MVT::i64))) { 12691 SDValue Load = N->getOperand(0); 12692 LoadSDNode *LD = cast<LoadSDNode>(Load); 12693 // Create the byte-swapping load. 12694 SDValue Ops[] = { 12695 LD->getChain(), // Chain 12696 LD->getBasePtr(), // Ptr 12697 DAG.getValueType(N->getValueType(0)) // VT 12698 }; 12699 SDValue BSLoad = 12700 DAG.getMemIntrinsicNode(PPCISD::LBRX, dl, 12701 DAG.getVTList(N->getValueType(0) == MVT::i64 ? 12702 MVT::i64 : MVT::i32, MVT::Other), 12703 Ops, LD->getMemoryVT(), LD->getMemOperand()); 12704 12705 // If this is an i16 load, insert the truncate. 12706 SDValue ResVal = BSLoad; 12707 if (N->getValueType(0) == MVT::i16) 12708 ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad); 12709 12710 // First, combine the bswap away. This makes the value produced by the 12711 // load dead. 12712 DCI.CombineTo(N, ResVal); 12713 12714 // Next, combine the load away, we give it a bogus result value but a real 12715 // chain result. The result value is dead because the bswap is dead. 12716 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1)); 12717 12718 // Return N so it doesn't get rechecked! 12719 return SDValue(N, 0); 12720 } 12721 break; 12722 case PPCISD::VCMP: 12723 // If a VCMPo node already exists with exactly the same operands as this 12724 // node, use its result instead of this node (VCMPo computes both a CR6 and 12725 // a normal output). 12726 // 12727 if (!N->getOperand(0).hasOneUse() && 12728 !N->getOperand(1).hasOneUse() && 12729 !N->getOperand(2).hasOneUse()) { 12730 12731 // Scan all of the users of the LHS, looking for VCMPo's that match. 12732 SDNode *VCMPoNode = nullptr; 12733 12734 SDNode *LHSN = N->getOperand(0).getNode(); 12735 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end(); 12736 UI != E; ++UI) 12737 if (UI->getOpcode() == PPCISD::VCMPo && 12738 UI->getOperand(1) == N->getOperand(1) && 12739 UI->getOperand(2) == N->getOperand(2) && 12740 UI->getOperand(0) == N->getOperand(0)) { 12741 VCMPoNode = *UI; 12742 break; 12743 } 12744 12745 // If there is no VCMPo node, or if the flag value has a single use, don't 12746 // transform this. 12747 if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1)) 12748 break; 12749 12750 // Look at the (necessarily single) use of the flag value. If it has a 12751 // chain, this transformation is more complex. Note that multiple things 12752 // could use the value result, which we should ignore. 12753 SDNode *FlagUser = nullptr; 12754 for (SDNode::use_iterator UI = VCMPoNode->use_begin(); 12755 FlagUser == nullptr; ++UI) { 12756 assert(UI != VCMPoNode->use_end() && "Didn't find user!"); 12757 SDNode *User = *UI; 12758 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) { 12759 if (User->getOperand(i) == SDValue(VCMPoNode, 1)) { 12760 FlagUser = User; 12761 break; 12762 } 12763 } 12764 } 12765 12766 // If the user is a MFOCRF instruction, we know this is safe. 12767 // Otherwise we give up for right now. 
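    // Returning the existing VCMPo's result lets the combiner replace this
    // VCMP with it, so only one compare instruction is emitted for both uses.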
12768 if (FlagUser->getOpcode() == PPCISD::MFOCRF) 12769 return SDValue(VCMPoNode, 0); 12770 } 12771 break; 12772 case ISD::BRCOND: { 12773 SDValue Cond = N->getOperand(1); 12774 SDValue Target = N->getOperand(2); 12775 12776 if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN && 12777 cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() == 12778 Intrinsic::ppc_is_decremented_ctr_nonzero) { 12779 12780 // We now need to make the intrinsic dead (it cannot be instruction 12781 // selected). 12782 DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0)); 12783 assert(Cond.getNode()->hasOneUse() && 12784 "Counter decrement has more than one use"); 12785 12786 return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other, 12787 N->getOperand(0), Target); 12788 } 12789 } 12790 break; 12791 case ISD::BR_CC: { 12792 // If this is a branch on an altivec predicate comparison, lower this so 12793 // that we don't have to do a MFOCRF: instead, branch directly on CR6. This 12794 // lowering is done pre-legalize, because the legalizer lowers the predicate 12795 // compare down to code that is difficult to reassemble. 12796 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 12797 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3); 12798 12799 // Sometimes the promoted value of the intrinsic is ANDed by some non-zero 12800 // value. If so, pass-through the AND to get to the intrinsic. 12801 if (LHS.getOpcode() == ISD::AND && 12802 LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN && 12803 cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() == 12804 Intrinsic::ppc_is_decremented_ctr_nonzero && 12805 isa<ConstantSDNode>(LHS.getOperand(1)) && 12806 !isNullConstant(LHS.getOperand(1))) 12807 LHS = LHS.getOperand(0); 12808 12809 if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN && 12810 cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() == 12811 Intrinsic::ppc_is_decremented_ctr_nonzero && 12812 isa<ConstantSDNode>(RHS)) { 12813 assert((CC == ISD::SETEQ || CC == ISD::SETNE) && 12814 "Counter decrement comparison is not EQ or NE"); 12815 12816 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 12817 bool isBDNZ = (CC == ISD::SETEQ && Val) || 12818 (CC == ISD::SETNE && !Val); 12819 12820 // We now need to make the intrinsic dead (it cannot be instruction 12821 // selected). 12822 DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0)); 12823 assert(LHS.getNode()->hasOneUse() && 12824 "Counter decrement has more than one use"); 12825 12826 return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other, 12827 N->getOperand(0), N->getOperand(4)); 12828 } 12829 12830 int CompareOpc; 12831 bool isDot; 12832 12833 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN && 12834 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) && 12835 getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) { 12836 assert(isDot && "Can't compare against a vector result!"); 12837 12838 // If this is a comparison against something other than 0/1, then we know 12839 // that the condition is never/always true. 12840 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 12841 if (Val != 0 && Val != 1) { 12842 if (CC == ISD::SETEQ) // Cond never true, remove branch. 12843 return N->getOperand(0); 12844 // Always !=, turn it into an unconditional branch. 
12845 return DAG.getNode(ISD::BR, dl, MVT::Other, 12846 N->getOperand(0), N->getOperand(4)); 12847 } 12848 12849 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0); 12850 12851 // Create the PPCISD altivec 'dot' comparison node. 12852 SDValue Ops[] = { 12853 LHS.getOperand(2), // LHS of compare 12854 LHS.getOperand(3), // RHS of compare 12855 DAG.getConstant(CompareOpc, dl, MVT::i32) 12856 }; 12857 EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue }; 12858 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 12859 12860 // Unpack the result based on how the target uses it. 12861 PPC::Predicate CompOpc; 12862 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) { 12863 default: // Can't happen, don't crash on invalid number though. 12864 case 0: // Branch on the value of the EQ bit of CR6. 12865 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE; 12866 break; 12867 case 1: // Branch on the inverted value of the EQ bit of CR6. 12868 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ; 12869 break; 12870 case 2: // Branch on the value of the LT bit of CR6. 12871 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE; 12872 break; 12873 case 3: // Branch on the inverted value of the LT bit of CR6. 12874 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT; 12875 break; 12876 } 12877 12878 return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0), 12879 DAG.getConstant(CompOpc, dl, MVT::i32), 12880 DAG.getRegister(PPC::CR6, MVT::i32), 12881 N->getOperand(4), CompNode.getValue(1)); 12882 } 12883 break; 12884 } 12885 case ISD::BUILD_VECTOR: 12886 return DAGCombineBuildVector(N, DCI); 12887 } 12888 12889 return SDValue(); 12890 } 12891 12892 SDValue 12893 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, 12894 SelectionDAG &DAG, 12895 std::vector<SDNode *> *Created) const { 12896 // fold (sdiv X, pow2) 12897 EVT VT = N->getValueType(0); 12898 if (VT == MVT::i64 && !Subtarget.isPPC64()) 12899 return SDValue(); 12900 if ((VT != MVT::i32 && VT != MVT::i64) || 12901 !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2())) 12902 return SDValue(); 12903 12904 SDLoc DL(N); 12905 SDValue N0 = N->getOperand(0); 12906 12907 bool IsNegPow2 = (-Divisor).isPowerOf2(); 12908 unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros(); 12909 SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT); 12910 12911 SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt); 12912 if (Created) 12913 Created->push_back(Op.getNode()); 12914 12915 if (IsNegPow2) { 12916 Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op); 12917 if (Created) 12918 Created->push_back(Op.getNode()); 12919 } 12920 12921 return Op; 12922 } 12923 12924 //===----------------------------------------------------------------------===// 12925 // Inline Assembly Support 12926 //===----------------------------------------------------------------------===// 12927 12928 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 12929 KnownBits &Known, 12930 const APInt &DemandedElts, 12931 const SelectionDAG &DAG, 12932 unsigned Depth) const { 12933 Known.resetAll(); 12934 switch (Op.getOpcode()) { 12935 default: break; 12936 case PPCISD::LBRX: { 12937 // lhbrx is known to have the top bits cleared out. 
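    // A 16-bit byte-reversing load zero-fills the rest of the register, so
    // bits 16-31 of the i32 result are known zero (hence Known.Zero =
    // 0xFFFF0000 below).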
12938 if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16) 12939 Known.Zero = 0xFFFF0000; 12940 break; 12941 } 12942 case ISD::INTRINSIC_WO_CHAIN: { 12943 switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) { 12944 default: break; 12945 case Intrinsic::ppc_altivec_vcmpbfp_p: 12946 case Intrinsic::ppc_altivec_vcmpeqfp_p: 12947 case Intrinsic::ppc_altivec_vcmpequb_p: 12948 case Intrinsic::ppc_altivec_vcmpequh_p: 12949 case Intrinsic::ppc_altivec_vcmpequw_p: 12950 case Intrinsic::ppc_altivec_vcmpequd_p: 12951 case Intrinsic::ppc_altivec_vcmpgefp_p: 12952 case Intrinsic::ppc_altivec_vcmpgtfp_p: 12953 case Intrinsic::ppc_altivec_vcmpgtsb_p: 12954 case Intrinsic::ppc_altivec_vcmpgtsh_p: 12955 case Intrinsic::ppc_altivec_vcmpgtsw_p: 12956 case Intrinsic::ppc_altivec_vcmpgtsd_p: 12957 case Intrinsic::ppc_altivec_vcmpgtub_p: 12958 case Intrinsic::ppc_altivec_vcmpgtuh_p: 12959 case Intrinsic::ppc_altivec_vcmpgtuw_p: 12960 case Intrinsic::ppc_altivec_vcmpgtud_p: 12961 Known.Zero = ~1U; // All bits but the low one are known to be zero. 12962 break; 12963 } 12964 } 12965 } 12966 } 12967 12968 unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { 12969 switch (Subtarget.getDarwinDirective()) { 12970 default: break; 12971 case PPC::DIR_970: 12972 case PPC::DIR_PWR4: 12973 case PPC::DIR_PWR5: 12974 case PPC::DIR_PWR5X: 12975 case PPC::DIR_PWR6: 12976 case PPC::DIR_PWR6X: 12977 case PPC::DIR_PWR7: 12978 case PPC::DIR_PWR8: 12979 case PPC::DIR_PWR9: { 12980 if (!ML) 12981 break; 12982 12983 const PPCInstrInfo *TII = Subtarget.getInstrInfo(); 12984 12985 // For small loops (between 5 and 8 instructions), align to a 32-byte 12986 // boundary so that the entire loop fits in one instruction-cache line. 12987 uint64_t LoopSize = 0; 12988 for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I) 12989 for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) { 12990 LoopSize += TII->getInstSizeInBytes(*J); 12991 if (LoopSize > 32) 12992 break; 12993 } 12994 12995 if (LoopSize > 16 && LoopSize <= 32) 12996 return 5; 12997 12998 break; 12999 } 13000 } 13001 13002 return TargetLowering::getPrefLoopAlignment(ML); 13003 } 13004 13005 /// getConstraintType - Given a constraint, return the type of 13006 /// constraint it is for this target. 13007 PPCTargetLowering::ConstraintType 13008 PPCTargetLowering::getConstraintType(StringRef Constraint) const { 13009 if (Constraint.size() == 1) { 13010 switch (Constraint[0]) { 13011 default: break; 13012 case 'b': 13013 case 'r': 13014 case 'f': 13015 case 'd': 13016 case 'v': 13017 case 'y': 13018 return C_RegisterClass; 13019 case 'Z': 13020 // FIXME: While Z does indicate a memory constraint, it specifically 13021 // indicates an r+r address (used in conjunction with the 'y' modifier 13022 // in the replacement string). Currently, we're forcing the base 13023 // register to be r0 in the asm printer (which is interpreted as zero) 13024 // and forming the complete address in the second register. This is 13025 // suboptimal. 13026 return C_Memory; 13027 } 13028 } else if (Constraint == "wc") { // individual CR bits. 13029 return C_RegisterClass; 13030 } else if (Constraint == "wa" || Constraint == "wd" || 13031 Constraint == "wf" || Constraint == "ws") { 13032 return C_RegisterClass; // VSX registers. 13033 } 13034 return TargetLowering::getConstraintType(Constraint); 13035 } 13036 13037 /// Examine constraint type and operand type and determine a weight value. 
13038 /// This object must already have been set up with the operand type 13039 /// and the current alternative constraint selected. 13040 TargetLowering::ConstraintWeight 13041 PPCTargetLowering::getSingleConstraintMatchWeight( 13042 AsmOperandInfo &info, const char *constraint) const { 13043 ConstraintWeight weight = CW_Invalid; 13044 Value *CallOperandVal = info.CallOperandVal; 13045 // If we don't have a value, we can't do a match, 13046 // but allow it at the lowest weight. 13047 if (!CallOperandVal) 13048 return CW_Default; 13049 Type *type = CallOperandVal->getType(); 13050 13051 // Look at the constraint type. 13052 if (StringRef(constraint) == "wc" && type->isIntegerTy(1)) 13053 return CW_Register; // an individual CR bit. 13054 else if ((StringRef(constraint) == "wa" || 13055 StringRef(constraint) == "wd" || 13056 StringRef(constraint) == "wf") && 13057 type->isVectorTy()) 13058 return CW_Register; 13059 else if (StringRef(constraint) == "ws" && type->isDoubleTy()) 13060 return CW_Register; 13061 13062 switch (*constraint) { 13063 default: 13064 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 13065 break; 13066 case 'b': 13067 if (type->isIntegerTy()) 13068 weight = CW_Register; 13069 break; 13070 case 'f': 13071 if (type->isFloatTy()) 13072 weight = CW_Register; 13073 break; 13074 case 'd': 13075 if (type->isDoubleTy()) 13076 weight = CW_Register; 13077 break; 13078 case 'v': 13079 if (type->isVectorTy()) 13080 weight = CW_Register; 13081 break; 13082 case 'y': 13083 weight = CW_Register; 13084 break; 13085 case 'Z': 13086 weight = CW_Memory; 13087 break; 13088 } 13089 return weight; 13090 } 13091 13092 std::pair<unsigned, const TargetRegisterClass *> 13093 PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, 13094 StringRef Constraint, 13095 MVT VT) const { 13096 if (Constraint.size() == 1) { 13097 // GCC RS6000 Constraint Letters 13098 switch (Constraint[0]) { 13099 case 'b': // R1-R31 13100 if (VT == MVT::i64 && Subtarget.isPPC64()) 13101 return std::make_pair(0U, &PPC::G8RC_NOX0RegClass); 13102 return std::make_pair(0U, &PPC::GPRC_NOR0RegClass); 13103 case 'r': // R0-R31 13104 if (VT == MVT::i64 && Subtarget.isPPC64()) 13105 return std::make_pair(0U, &PPC::G8RCRegClass); 13106 return std::make_pair(0U, &PPC::GPRCRegClass); 13107 // 'd' and 'f' constraints are both defined to be "the floating point 13108 // registers", where one is for 32-bit and the other for 64-bit. We don't 13109 // really care overly much here so just give them all the same reg classes. 13110 case 'd': 13111 case 'f': 13112 if (VT == MVT::f32 || VT == MVT::i32) 13113 return std::make_pair(0U, &PPC::F4RCRegClass); 13114 if (VT == MVT::f64 || VT == MVT::i64) 13115 return std::make_pair(0U, &PPC::F8RCRegClass); 13116 if (VT == MVT::v4f64 && Subtarget.hasQPX()) 13117 return std::make_pair(0U, &PPC::QFRCRegClass); 13118 if (VT == MVT::v4f32 && Subtarget.hasQPX()) 13119 return std::make_pair(0U, &PPC::QSRCRegClass); 13120 break; 13121 case 'v': 13122 if (VT == MVT::v4f64 && Subtarget.hasQPX()) 13123 return std::make_pair(0U, &PPC::QFRCRegClass); 13124 if (VT == MVT::v4f32 && Subtarget.hasQPX()) 13125 return std::make_pair(0U, &PPC::QSRCRegClass); 13126 if (Subtarget.hasAltivec()) 13127 return std::make_pair(0U, &PPC::VRRCRegClass); 13128 break; 13129 case 'y': // crrc 13130 return std::make_pair(0U, &PPC::CRRCRegClass); 13131 } 13132 } else if (Constraint == "wc" && Subtarget.useCRBits()) { 13133 // An individual CR bit. 
13134 return std::make_pair(0U, &PPC::CRBITRCRegClass); 13135 } else if ((Constraint == "wa" || Constraint == "wd" || 13136 Constraint == "wf") && Subtarget.hasVSX()) { 13137 return std::make_pair(0U, &PPC::VSRCRegClass); 13138 } else if (Constraint == "ws" && Subtarget.hasVSX()) { 13139 if (VT == MVT::f32 && Subtarget.hasP8Vector()) 13140 return std::make_pair(0U, &PPC::VSSRCRegClass); 13141 else 13142 return std::make_pair(0U, &PPC::VSFRCRegClass); 13143 } 13144 13145 std::pair<unsigned, const TargetRegisterClass *> R = 13146 TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 13147 13148 // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers 13149 // (which we call X[0-9]+). If a 64-bit value has been requested, and a 13150 // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent 13151 // register. 13152 // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use 13153 // the AsmName field from *RegisterInfo.td, then this would not be necessary. 13154 if (R.first && VT == MVT::i64 && Subtarget.isPPC64() && 13155 PPC::GPRCRegClass.contains(R.first)) 13156 return std::make_pair(TRI->getMatchingSuperReg(R.first, 13157 PPC::sub_32, &PPC::G8RCRegClass), 13158 &PPC::G8RCRegClass); 13159 13160 // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same. 13161 if (!R.second && StringRef("{cc}").equals_lower(Constraint)) { 13162 R.first = PPC::CR0; 13163 R.second = &PPC::CRRCRegClass; 13164 } 13165 13166 return R; 13167 } 13168 13169 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 13170 /// vector. If it is invalid, don't add anything to Ops. 13171 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 13172 std::string &Constraint, 13173 std::vector<SDValue>&Ops, 13174 SelectionDAG &DAG) const { 13175 SDValue Result; 13176 13177 // Only support length 1 constraints. 13178 if (Constraint.length() > 1) return; 13179 13180 char Letter = Constraint[0]; 13181 switch (Letter) { 13182 default: break; 13183 case 'I': 13184 case 'J': 13185 case 'K': 13186 case 'L': 13187 case 'M': 13188 case 'N': 13189 case 'O': 13190 case 'P': { 13191 ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op); 13192 if (!CST) return; // Must be an immediate to match. 13193 SDLoc dl(Op); 13194 int64_t Value = CST->getSExtValue(); 13195 EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative 13196 // numbers are printed as such. 13197 switch (Letter) { 13198 default: llvm_unreachable("Unknown constraint letter!"); 13199 case 'I': // "I" is a signed 16-bit constant. 13200 if (isInt<16>(Value)) 13201 Result = DAG.getTargetConstant(Value, dl, TCVT); 13202 break; 13203 case 'J': // "J" is a constant with only the high-order 16 bits nonzero. 13204 if (isShiftedUInt<16, 16>(Value)) 13205 Result = DAG.getTargetConstant(Value, dl, TCVT); 13206 break; 13207 case 'L': // "L" is a signed 16-bit constant shifted left 16 bits. 13208 if (isShiftedInt<16, 16>(Value)) 13209 Result = DAG.getTargetConstant(Value, dl, TCVT); 13210 break; 13211 case 'K': // "K" is a constant with only the low-order 16 bits nonzero. 13212 if (isUInt<16>(Value)) 13213 Result = DAG.getTargetConstant(Value, dl, TCVT); 13214 break; 13215 case 'M': // "M" is a constant that is greater than 31. 13216 if (Value > 31) 13217 Result = DAG.getTargetConstant(Value, dl, TCVT); 13218 break; 13219 case 'N': // "N" is a positive constant that is an exact power of two. 
      if (Value > 0 && isPowerOf2_64(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'O':  // "O" is the constant zero.
      if (Value == 0)
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
      if (isInt<16>(-Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    }
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  // Handle standard constraint letters.
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS, Instruction *I) const {
  // PPC does not allow r+i addressing modes for vectors!
  if (Ty->isVectorTy() && AM.BaseOffs != 0)
    return false;

  // PPC allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // PPC only supports r+r.
  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    // No other scales are supported.
    return false;
  }

  return true;
}

SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // Make sure the function does not optimize away the store of the RA to
  // the stack.
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setLRStoreRequired();
  bool isPPC64 = Subtarget.isPPC64();
  auto PtrVT = getPointerTy(MF.getDataLayout());

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset =
        DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
                        isPPC64 ? MVT::i64 : MVT::i32);
    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Just load the return address off the stack.

// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS,
                                              Instruction *I) const {
  // PPC does not allow r+i addressing modes for vectors!
  if (Ty->isVectorTy() && AM.BaseOffs != 0)
    return false;

  // PPC allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // PPC only supports r+r addressing for scaled modes.
  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    // No other scales are supported.
    return false;
  }

  return true;
}
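
// Summarizing the rules above with illustrative PPC assembly (examples only,
// not emitted here):
//   lwz  r3, 8(r4)     ; r+i with a signed 16-bit offset  -> legal
//   lwzx r3, r4, r5    ; r+r                               -> legal
//   r4 + r5 + 8        ; r+r+i                             -> rejected
//   @global + 4        ; global as base                    -> rejected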

SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // Make sure the function does not optimize away the store of the RA to
  // the stack.
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setLRStoreRequired();
  bool isPPC64 = Subtarget.isPPC64();
  auto PtrVT = getPointerTy(MF.getDataLayout());

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset =
        DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
                        isPPC64 ? MVT::i64 : MVT::i32);
    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Just load the return address off the stack.
  SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
  return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
                     MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;

  // Naked functions never have a frame pointer, and so we use r1. For all
  // other functions, this decision must be delayed until during PEI.
  unsigned FrameReg;
  if (MF.getFunction().hasFnAttribute(Attribute::Naked))
    FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
  else
    FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;

  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
                                         PtrVT);
  while (Depth--)
    FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
                            FrameAddr, MachinePointerInfo());
  return FrameAddr;
}
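
// For reference: these two hooks back __builtin_return_address(N) and
// __builtin_frame_address(N). For N > 0, the return address is recovered by
// walking N frames via LowerFRAMEADDR and then loading from
// frame + getReturnSaveOffset(), exactly as the Depth > 0 path above does.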

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
unsigned PPCTargetLowering::getRegisterByName(const char* RegName, EVT VT,
                                              SelectionDAG &DAG) const {
  bool isPPC64 = Subtarget.isPPC64();
  bool isDarwinABI = Subtarget.isDarwinABI();

  if ((isPPC64 && VT != MVT::i64 && VT != MVT::i32) ||
      (!isPPC64 && VT != MVT::i32))
    report_fatal_error("Invalid register global variable type");

  bool is64Bit = isPPC64 && VT == MVT::i64;
  unsigned Reg = StringSwitch<unsigned>(RegName)
                   .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
                   .Case("r2", (isDarwinABI || isPPC64) ? 0 : PPC::R2)
                   .Case("r13", (!isPPC64 && isDarwinABI) ? 0 :
                                  (is64Bit ? PPC::X13 : PPC::R13))
                   .Default(0);

  if (Reg)
    return Reg;
  report_fatal_error("Invalid register name for global variable");
}

bool
PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The PowerPC target isn't yet aware of offsets.
  return false;
}
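
// Illustrative use (hypothetical source, not part of this file): a named
// register global such as
//   register unsigned long SP asm("r1");
// reaches getRegisterByName with RegName == "r1" and maps to X1 on 64-bit
// subtargets or R1 on 32-bit ones; names outside the r1/r2/r13 table above
// are reported as fatal errors.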

bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           MachineFunction &MF,
                                           unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::ppc_qpx_qvlfd:
  case Intrinsic::ppc_qpx_qvlfs:
  case Intrinsic::ppc_qpx_qvlfcd:
  case Intrinsic::ppc_qpx_qvlfcs:
  case Intrinsic::ppc_qpx_qvlfiwa:
  case Intrinsic::ppc_qpx_qvlfiwz:
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::ppc_altivec_lvebx:
  case Intrinsic::ppc_altivec_lvehx:
  case Intrinsic::ppc_altivec_lvewx:
  case Intrinsic::ppc_vsx_lxvd2x:
  case Intrinsic::ppc_vsx_lxvw4x: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_lvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_lvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_lvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_lxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfd:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfs:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcd:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcs:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = -VT.getStoreSize()+1;
    Info.size = 2*VT.getStoreSize()-1;
    Info.align = 1;
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::ppc_qpx_qvlfda:
  case Intrinsic::ppc_qpx_qvlfsa:
  case Intrinsic::ppc_qpx_qvlfcda:
  case Intrinsic::ppc_qpx_qvlfcsa:
  case Intrinsic::ppc_qpx_qvlfiwaa:
  case Intrinsic::ppc_qpx_qvlfiwza: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_qpx_qvlfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = 1;
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfd:
  case Intrinsic::ppc_qpx_qvstfs:
  case Intrinsic::ppc_qpx_qvstfcd:
  case Intrinsic::ppc_qpx_qvstfcs:
  case Intrinsic::ppc_qpx_qvstfiw:
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
  case Intrinsic::ppc_altivec_stvebx:
  case Intrinsic::ppc_altivec_stvehx:
  case Intrinsic::ppc_altivec_stvewx:
  case Intrinsic::ppc_vsx_stxvd2x:
  case Intrinsic::ppc_vsx_stxvw4x: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_stvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_stvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_stvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfd:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfs:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcd:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcs:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = -VT.getStoreSize()+1;
    Info.size = 2*VT.getStoreSize()-1;
    Info.align = 1;
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfda:
  case Intrinsic::ppc_qpx_qvstfsa:
  case Intrinsic::ppc_qpx_qvstfcda:
  case Intrinsic::ppc_qpx_qvstfcsa:
  case Intrinsic::ppc_qpx_qvstfiwa: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_qpx_qvstfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = 1;
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  default:
    break;
  }

  return false;
}
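
// Worked numbers for the conservative windows above (illustration): for
// ppc_altivec_lvx the memVT is v4i32, so VT.getStoreSize() == 16. The memory
// operand is therefore described as offset = -16 + 1 = -15 and
// size = 2*16 - 1 = 31 bytes, which covers every byte the instruction can
// touch given that lvx ignores the low four bits of the address.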

/// getOptimalMemOpType - Returns the target-specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
/// lowering. If DstAlign is zero, the destination alignment is
/// unconstrained and can satisfy any requirement. Similarly, if SrcAlign is
/// zero, there is no need to check it against an alignment requirement,
/// probably because the source does not need to be loaded. If 'IsMemset' is
/// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
/// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
/// source is constant so it does not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
                                           unsigned DstAlign, unsigned SrcAlign,
                                           bool IsMemset, bool ZeroMemset,
                                           bool MemcpyStrSrc,
                                           MachineFunction &MF) const {
  if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
    const Function &F = MF.getFunction();
    // When expanding a memset, require at least two QPX instructions to cover
    // the cost of loading the value to be stored from the constant pool.
    if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) &&
        (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) &&
        !F.hasFnAttribute(Attribute::NoImplicitFloat)) {
      return MVT::v4f64;
    }

    // We should use Altivec/VSX loads and stores when available. For unaligned
    // addresses, unaligned VSX loads are only fast starting with the P8.
    if (Subtarget.hasAltivec() && Size >= 16 &&
        (((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) ||
         ((IsMemset && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
      return MVT::v4i32;
  }

  if (Subtarget.isPPC64()) {
    return MVT::i64;
  }

  return MVT::i32;
}
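
// Example outcome (illustrative): a 64-byte memcpy with 16-byte-aligned
// source and destination on an Altivec-capable subtarget is expanded with
// v4i32 chunks (four vector load/store pairs), while the same copy on a
// plain PPC64 core without Altivec falls back to i64 chunks.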

/// \brief Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  return !(BitSize == 0 || BitSize > 64);
}

bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Generally speaking, zexts are not free, but they are free when they can be
  // folded with other operations.
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  // FIXME: Add other cases...
  //  - 32-bit shifts with a zext to i64
  //  - zext after ctlz, bswap, etc.
  //  - zext after and by a constant mask

  return TargetLowering::isZExtFree(Val, VT2);
}

bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
  assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
         "invalid fpext types");
  return true;
}

bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                       unsigned,
                                                       unsigned,
                                                       bool *Fast) const {
  if (DisablePPCUnaligned)
    return false;

  // PowerPC supports unaligned memory access for simple non-vector types.
  // Although accessing unaligned addresses is not as efficient as accessing
  // aligned addresses, it is generally more efficient than manual expansion,
  // and generally only traps for software emulation when crossing page
  // boundaries.

  if (!VT.isSimple())
    return false;

  if (VT.getSimpleVT().isVector()) {
    if (Subtarget.hasVSX()) {
      if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
          VT != MVT::v4f32 && VT != MVT::v4i32)
        return false;
    } else {
      return false;
    }
  }

  if (VT == MVT::ppcf128)
    return false;

  if (Fast)
    *Fast = true;

  return true;
}
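
// Concretely (illustration): an unaligned i32 or f64 access is reported as
// both legal and fast by the hook above; v4f32/v4i32/v2f64/v2i64 accesses
// are allowed only when VSX is available; and ppcf128 accesses must always
// be expanded into aligned operations.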

bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  default:
    break;
  }

  return false;
}

const MCPhysReg *
PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
  // LR is a callee-save register, but we must treat it as clobbered by any call
  // site. Hence we include LR in the scratch registers, which are in turn added
  // as implicit-defs for stackmaps and patchpoints. The same reasoning applies
  // to CTR, which is used by any indirect call.
  static const MCPhysReg ScratchRegs[] = {
    PPC::X12, PPC::LR8, PPC::CTR8, 0
  };

  return ScratchRegs;
}

unsigned PPCTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
}

unsigned PPCTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
}

bool
PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
    EVT VT, unsigned DefinedValues) const {
  if (VT == MVT::v2i64)
    return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves

  if (Subtarget.hasVSX() || Subtarget.hasQPX())
    return true;

  return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
}

Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
  if (DisableILPPref || Subtarget.enableMachineScheduler())
    return TargetLowering::getSchedulingPreference(N);

  return Sched::ILP;
}

// Create a fast isel object.
FastISel *
PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
                                  const TargetLibraryInfo *LibInfo) const {
  return PPC::createFastISel(FuncInfo, LibInfo);
}

void PPCTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  if (Subtarget.isDarwinABI()) return;
  if (!Subtarget.isPPC64()) return;

  // Update IsSplitCSR in PPCFunctionInfo.
  PPCFunctionInfo *PFI = Entry->getParent()->getInfo<PPCFunctionInfo>();
  PFI->setIsSplitCSR(true);
}

void PPCTargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return;

  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  for (const MCPhysReg *I = IStart; *I; ++I) {
    const TargetRegisterClass *RC = nullptr;
    if (PPC::G8RCRegClass.contains(*I))
      RC = &PPC::G8RCRegClass;
    else if (PPC::F8RCRegClass.contains(*I))
      RC = &PPC::F8RCRegClass;
    else if (PPC::CRRCRegClass.contains(*I))
      RC = &PPC::CRRCRegClass;
    else if (PPC::VRRCRegClass.contains(*I))
      RC = &PPC::VRRCRegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");

    unsigned NewVR = MRI->createVirtualRegister(RC);
    // Create copy from CSR to a virtual register.
    // FIXME: this currently does not emit CFI pseudo-instructions, it works
    // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
    // nounwind. If we want to generalize this later, we may need to emit
    // CFI pseudo-instructions.
    assert(Entry->getParent()->getFunction().hasFnAttribute(
             Attribute::NoUnwind) &&
           "Function should be nounwind in insertCopiesSplitCSR!");
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
      .addReg(*I);

    // Insert the copy-back instructions right before the terminator.
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)
        .addReg(NewVR);
  }
}

// Override to enable LOAD_STACK_GUARD lowering on Linux.
bool PPCTargetLowering::useLoadStackGuardNode() const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::useLoadStackGuardNode();
  return true;
}

// Override to disable global variable loading on Linux.
void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::insertSSPDeclarations(M);
}

bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  if (!VT.isSimple() || !Subtarget.hasVSX())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  default:
    // For FP types that are currently not supported by the PPC backend,
    // return false. Examples: f16, f80.
    return false;
  case MVT::f32:
  case MVT::f64:
  case MVT::ppcf128:
    return Imm.isPosZero();
  }
}

// For vector shift operation op, fold
// (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
                                  SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();
  unsigned OpSizeInBits = VT.getScalarSizeInBits();
  unsigned Opcode = N->getOpcode();
  unsigned TargetOpcode;

  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected shift operation");
  case ISD::SHL:
    TargetOpcode = PPCISD::SHL;
    break;
  case ISD::SRL:
    TargetOpcode = PPCISD::SRL;
    break;
  case ISD::SRA:
    TargetOpcode = PPCISD::SRA;
    break;
  }

  if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
      N1->getOpcode() == ISD::AND)
    if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
      if (Mask->getZExtValue() == OpSizeInBits - 1)
        return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));

  return SDValue();
}

SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}

SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}

SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}

bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  // Only duplicate to increase tail-calls for the 64-bit SysV ABIs.
  if (!Subtarget.isSVR4ABI() || !Subtarget.isPPC64())
    return false;

  // If not a tail call then no need to proceed.
  if (!CI->isTailCall())
    return false;

  // If tail calls are disabled for the caller then we are done.
  const Function *Caller = CI->getParent()->getParent();
  auto Attr = Caller->getFnAttribute("disable-tail-calls");
  if (Attr.getValueAsString() == "true")
    return false;

  // If sibling calls have been disabled and tail-calls aren't guaranteed
  // there is no reason to duplicate.
  auto &TM = getTargetMachine();
  if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
    return false;

  // Can't tail call a function called indirectly, or if it has variadic args.
  const Function *Callee = CI->getCalledFunction();
  if (!Callee || Callee->isVarArg())
    return false;

  // Make sure the callee and caller calling conventions are eligible for TCO.
  if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
                                           CI->getCallingConv()))
    return false;

  // If the function is local then we have a good chance at tail-calling it.
  return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
}
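
// Example of the shift-modulo fold above (illustrative IR, not from this
// file): for a legal vector shift such as
//   %m = and <4 x i32> %y, <i32 31, i32 31, i32 31, i32 31>
//   %r = shl <4 x i32> %x, %m
// stripModuloOnShift drops the redundant mask and emits PPCISD::SHL of %x
// by %y directly, since the hardware vector shift already operates modulo
// the element width.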