//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCCCState.h"
#include "PPCCallingConv.h"
#include "PPCFrameLowering.h"
#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCRegisterInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "ppc-lowering"
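// (This DEBUG_TYPE string is what -debug-only=ppc-lowering matches when
// filtering LLVM_DEBUG output from this file.)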
DEBUG_TYPE "ppc-lowering" 101 102 static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc", 103 cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden); 104 105 static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref", 106 cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden); 107 108 static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned", 109 cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden); 110 111 static cl::opt<bool> DisableSCO("disable-ppc-sco", 112 cl::desc("disable sibling call optimization on ppc"), cl::Hidden); 113 114 static cl::opt<bool> EnableQuadPrecision("enable-ppc-quad-precision", 115 cl::desc("enable quad precision float support on ppc"), cl::Hidden); 116 117 STATISTIC(NumTailCalls, "Number of tail calls"); 118 STATISTIC(NumSiblingCalls, "Number of sibling calls"); 119 120 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int); 121 122 // FIXME: Remove this once the bug has been fixed! 123 extern cl::opt<bool> ANDIGlueBug; 124 125 PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM, 126 const PPCSubtarget &STI) 127 : TargetLowering(TM), Subtarget(STI) { 128 // Use _setjmp/_longjmp instead of setjmp/longjmp. 129 setUseUnderscoreSetJmp(true); 130 setUseUnderscoreLongJmp(true); 131 132 // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all 133 // arguments are at least 4/8 bytes aligned. 134 bool isPPC64 = Subtarget.isPPC64(); 135 setMinStackArgumentAlignment(isPPC64 ? 8:4); 136 137 // Set up the register classes. 138 addRegisterClass(MVT::i32, &PPC::GPRCRegClass); 139 if (!useSoftFloat()) { 140 if (hasSPE()) { 141 addRegisterClass(MVT::f32, &PPC::SPE4RCRegClass); 142 addRegisterClass(MVT::f64, &PPC::SPERCRegClass); 143 } else { 144 addRegisterClass(MVT::f32, &PPC::F4RCRegClass); 145 addRegisterClass(MVT::f64, &PPC::F8RCRegClass); 146 } 147 } 148 149 // Match BITREVERSE to customized fast code sequence in the td file. 150 setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); 151 setOperationAction(ISD::BITREVERSE, MVT::i64, Legal); 152 153 // Sub-word ATOMIC_CMP_SWAP need to ensure that the input is zero-extended. 154 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom); 155 156 // PowerPC has an i16 but no i8 (or i1) SEXTLOAD. 157 for (MVT VT : MVT::integer_valuetypes()) { 158 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); 159 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand); 160 } 161 162 setTruncStoreAction(MVT::f64, MVT::f32, Expand); 163 164 // PowerPC has pre-inc load and store's. 165 setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal); 166 setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal); 167 setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal); 168 setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal); 169 setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal); 170 setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal); 171 setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal); 172 setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal); 173 setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal); 174 setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal); 175 if (!Subtarget.hasSPE()) { 176 setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal); 177 setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal); 178 setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal); 179 setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal); 180 } 181 182 // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry. 
  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType (ISD::SINT_TO_FP, MVT::i1,
                         isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
  // PPC (the libcall is not available).
  setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions unless we are on P9. On P9 we may
  // use a hardware instruction to compute the remainder. The instructions
  // are not legalized directly because in the cases where the result of both
  // the remainder and the division is required it is more efficient to
  // compute the remainder from the result of the division rather than use
  // the remainder instruction.
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::SREM, MVT::i32, Custom);
    setOperationAction(ISD::UREM, MVT::i32, Custom);
    setOperationAction(ISD::SREM, MVT::i64, Custom);
    setOperationAction(ISD::UREM, MVT::i64, Custom);
  } else {
    setOperationAction(ISD::SREM, MVT::i32, Expand);
    setOperationAction(ISD::UREM, MVT::i32, Expand);
    setOperationAction(ISD::SREM, MVT::i64, Expand);
    setOperationAction(ISD::UREM, MVT::i64, Expand);
  }

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
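  // (Marking these Expand stops the legalizer from forming them while
  // lowering SREM/UREM; when no hardware remainder is used, the remainder is
  // instead computed as a divide, a multiply, and a subtract.)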
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FPOW , MVT::f64, Expand);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);
  setOperationAction(ISD::FPOW , MVT::f32, Expand);
  if (Subtarget.hasSPE()) {
    setOperationAction(ISD::FMA , MVT::f64, Expand);
    setOperationAction(ISD::FMA , MVT::f32, Expand);
  } else {
    setOperationAction(ISD::FMA , MVT::f64, Legal);
    setOperationAction(ISD::FMA , MVT::f32, Legal);
  }

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP, but we can use the vector BSWAP instruction
  // xxbrd to speed up scalar BSWAP64.
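  // (Custom lowering moves the i64 into a vector register, byte-reverses it
  // there, and moves it back; with plain Expand, BSWAP becomes a sequence of
  // shifts and masks.)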
  // CTPOP and CTTZ were introduced in P8 and P9, respectively.
  setOperationAction(ISD::BSWAP, MVT::i32 , Expand);
  if (Subtarget.hasP9Vector())
    setOperationAction(ISD::BSWAP, MVT::i64 , Custom);
  else
    setOperationAction(ISD::BSWAP, MVT::i64 , Expand);
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::CTTZ , MVT::i32 , Legal);
    setOperationAction(ISD::CTTZ , MVT::i64 , Legal);
  } else {
    setOperationAction(ISD::CTTZ , MVT::i32 , Expand);
    setOperationAction(ISD::CTTZ , MVT::i64 , Expand);
  }

  if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
    setOperationAction(ISD::CTPOP, MVT::i32 , Legal);
    setOperationAction(ISD::CTPOP, MVT::i64 , Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32 , Expand);
    setOperationAction(ISD::CTPOP, MVT::i64 , Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32 , Expand);
  setOperationAction(ISD::ROTR, MVT::i64 , Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have Select.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  if (Subtarget.hasSPE()) {
    // SPE has built-in conversions.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
  } else {
    // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

    // PowerPC does not have [U|S]INT_TO_FP.
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
  }

  if (Subtarget.hasDirectMove() && isPPC64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i64, Legal);
    setOperationAction(ISD::BITCAST, MVT::f64, Legal);
  } else {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
  }

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling but a light-weight setjmp/longjmp replacement to
  // support continuations, user-level threading, etc. As a result, no other
  // SjLj exception interfaces are implemented; please don't build your own
  // exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
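  // (The two nodes below are matched to pseudo instructions and expanded
  // late, in emitEHSjLjSetJmp and emitEHSjLjLongJmp.)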
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART , MVT::Other, Custom);

  if (Subtarget.isSVR4ABI()) {
    if (isPPC64) {
      // VAARG always uses double-word chunks, so promote anything smaller.
      setOperationAction(ISD::VAARG, MVT::i1, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i1, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i8, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i8, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i16, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i16, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i32, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i32, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::Other, Expand);
    } else {
      // VAARG is custom lowered with the 32-bit SVR4 ABI.
      setOperationAction(ISD::VAARG, MVT::Other, Custom);
      setOperationAction(ISD::VAARG, MVT::i64, Custom);
    }
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  if (Subtarget.isSVR4ABI() && !isPPC64)
    // VACOPY is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VACOPY , MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY , MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE , MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
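  // (The CTR loops pass rewrites countable loops to test the i1 result of
  // the llvm.ppc.is.decremented.ctr.nonzero intrinsic, which is lowered here
  // and eventually matched to the BDNZ/BDZ branches.)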
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  // Comparisons that require checking two conditions.
  if (Subtarget.hasSPE()) {
    setCondCodeAction(ISD::SETO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::f64, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
  }
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget.has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    if (Subtarget.hasSPE())
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
    else
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
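    // (The *_PARTS nodes shift a value split across a register pair; custom
    // lowering emits an explicit shift/or sequence instead of a libcall.)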
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget.hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::vector_valuetypes()) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);
      setOperationAction(ISD::ABS, VT, Custom);

      // Vector instructions introduced in P8.
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      } else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // Vector instructions introduced in P9.
      if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
        setOperationAction(ISD::CTTZ, VT, Legal);
      else
        setOperationAction(ISD::CTTZ, VT, Expand);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND , VT, Promote);
      AddPromotedToType (ISD::AND , VT, MVT::v4i32);
      setOperationAction(ISD::OR , VT, Promote);
      AddPromotedToType (ISD::OR , VT, MVT::v4i32);
      setOperationAction(ISD::XOR , VT, Promote);
      AddPromotedToType (ISD::XOR , VT, MVT::v4i32);
      setOperationAction(ISD::LOAD , VT, Promote);
      AddPromotedToType (ISD::LOAD , VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType (ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType (ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL , VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL, VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      for (MVT InnerVT : MVT::vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND , MVT::v4i32, Legal);
    setOperationAction(ISD::OR , MVT::v4i32, Legal);
    setOperationAction(ISD::XOR , MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD , MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE , MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    // Without hasP8Altivec set, v2i64 SMAX isn't available.
    // But ABS custom lowering requires SMAX support.
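    // (Custom ABS lowering computes smax(x, 0 - x), so without a native
    // v2i64 SMAX we must fall back to the generic expansion.)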
    if (!Subtarget.hasP8Altivec())
      setOperationAction(ISD::ABS, MVT::v2i64, Expand);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
      }
      if (Subtarget.hasDirectMove() && isPPC64) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
      }
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);

      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        // 128-bit shifts can be accomplished via 3 instructions for SHL and
        // SRL, but not for SRA because of the instructions available:
        // VS{RL} and VS{RL}O. However, due to direct move costs, it's not
        // worth doing.
        setOperationAction(ISD::SHL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRA, MVT::v1i128, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      } else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      setOperationAction(ISD::UINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i16, Custom);

      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v2f64, Legal);

      if (Subtarget.hasDirectMove())
        setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }

    if (Subtarget.hasP9Vector()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

      // 128-bit shifts can be accomplished via 3 instructions for SHL and
      // SRL, but not for SRA because of the instructions available:
      // VS{RL} and VS{RL}O.
      setOperationAction(ISD::SHL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRA, MVT::v1i128, Expand);

      if (EnableQuadPrecision) {
        addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
        setOperationAction(ISD::FADD, MVT::f128, Legal);
        setOperationAction(ISD::FSUB, MVT::f128, Legal);
        setOperationAction(ISD::FDIV, MVT::f128, Legal);
        setOperationAction(ISD::FMUL, MVT::f128, Legal);
        setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
        // No extending loads to f128 on PPC.
        for (MVT FPT : MVT::fp_valuetypes())
          setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
        setOperationAction(ISD::FMA, MVT::f128, Legal);
        setCondCodeAction(ISD::SETULT, MVT::f128, Expand);
        setCondCodeAction(ISD::SETUGT, MVT::f128, Expand);
        setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand);
        setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
        setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
        setCondCodeAction(ISD::SETONE, MVT::f128, Expand);

        setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
        setOperationAction(ISD::FRINT, MVT::f128, Legal);
        setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
        setOperationAction(ISD::FCEIL, MVT::f128, Legal);
        setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
        setOperationAction(ISD::FROUND, MVT::f128, Legal);

        setOperationAction(ISD::SELECT, MVT::f128, Expand);
        setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
        setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
        setTruncStoreAction(MVT::f128, MVT::f64, Expand);
        setTruncStoreAction(MVT::f128, MVT::f32, Expand);
        setOperationAction(ISD::BITCAST, MVT::i128, Custom);
        // No implementation for these ops for PowerPC.
        setOperationAction(ISD::FSIN , MVT::f128, Expand);
        setOperationAction(ISD::FCOS , MVT::f128, Expand);
        setOperationAction(ISD::FPOW, MVT::f128, Expand);
        setOperationAction(ISD::FPOWI, MVT::f128, Expand);
        setOperationAction(ISD::FREM, MVT::f128, Expand);
      }
    }

    if (Subtarget.hasP9Altivec()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    }
  }

  if (Subtarget.hasQPX()) {
    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FREM, MVT::v4f64, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);

    setOperationAction(ISD::LOAD , MVT::v4f64, Custom);
    setOperationAction(ISD::STORE , MVT::v4f64, Custom);

    setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f64, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f64, Expand);
    setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f64, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f64, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f64, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);

    setOperationAction(ISD::FP_TO_SINT , MVT::v4f64, Legal);
    setOperationAction(ISD::FP_TO_UINT , MVT::v4f64, Expand);

    setOperationAction(ISD::FP_ROUND , MVT::v4f32, Legal);
    setOperationAction(ISD::FP_ROUND_INREG , MVT::v4f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);

    setOperationAction(ISD::FNEG , MVT::v4f64, Legal);
    setOperationAction(ISD::FABS , MVT::v4f64, Legal);
    setOperationAction(ISD::FSIN , MVT::v4f64, Expand);
    setOperationAction(ISD::FCOS , MVT::v4f64, Expand);
    setOperationAction(ISD::FPOW , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG2 , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG10 , MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP , MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP2 , MVT::v4f64, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);

    addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FREM, MVT::v4f32, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);

    setOperationAction(ISD::LOAD , MVT::v4f32, Custom);
    setOperationAction(ISD::STORE , MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f32, Expand);
    setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f32, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f32, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    setOperationAction(ISD::FP_TO_SINT , MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT , MVT::v4f32, Expand);

    setOperationAction(ISD::FNEG , MVT::v4f32, Legal);
    setOperationAction(ISD::FABS , MVT::v4f32, Legal);
    setOperationAction(ISD::FSIN , MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS , MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2 , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10 , MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP , MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2 , MVT::v4f32, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);

    setOperationAction(ISD::AND , MVT::v4i1, Legal);
    setOperationAction(ISD::OR , MVT::v4i1, Legal);
    setOperationAction(ISD::XOR , MVT::v4i1, Legal);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);

    setOperationAction(ISD::LOAD , MVT::v4i1, Custom);
    setOperationAction(ISD::STORE , MVT::v4i1, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4i1, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4i1, Expand);
    setOperationAction(ISD::CONCAT_VECTORS , MVT::v4i1, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4i1, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4i1, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);

    addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);

    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);

    // These need to set FE_INEXACT, and so cannot be vectorized here.
    setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);

    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    } else {
      setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);

      setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    }
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasAltivec()) {
    // Altivec instructions set fields to all zeros or all ones.
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  }

  if (!isPPC64) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);
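  // (R1 is the architected stack pointer in both ABIs; X1 is simply its
  // 64-bit alias.)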

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::BUILD_VECTOR);
  if (Subtarget.hasFPCVT())
    setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  if (Subtarget.useCRBits())
    setTargetDAGCombine(ISD::BRCOND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);

  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  setTargetDAGCombine(ISD::TRUNCATE);

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine(ISD::TRUNCATE);
    setTargetDAGCombine(ISD::SETCC);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  // Use reciprocal estimates.
  if (TM.Options.UnsafeFPMath) {
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::FSQRT);
  }

  if (Subtarget.hasP9Altivec()) {
    setTargetDAGCombine(ISD::ABS);
  }

  // Darwin long double math library functions have $LDBL128 appended.
  if (Subtarget.isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  if (EnableQuadPrecision) {
    setLibcallName(RTLIB::LOG_F128, "logf128");
    setLibcallName(RTLIB::LOG2_F128, "log2f128");
    setLibcallName(RTLIB::LOG10_F128, "log10f128");
    setLibcallName(RTLIB::EXP_F128, "expf128");
    setLibcallName(RTLIB::EXP2_F128, "exp2f128");
    setLibcallName(RTLIB::SIN_F128, "sinf128");
    setLibcallName(RTLIB::COS_F128, "cosf128");
    setLibcallName(RTLIB::POW_F128, "powf128");
    setLibcallName(RTLIB::FMIN_F128, "fminf128");
    setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
    setLibcallName(RTLIB::POWI_F128, "__powikf2");
    setLibcallName(RTLIB::REM_F128, "fmodf128");
  }

  // With 32 condition bits, we don't need to sink (and duplicate) compares
  // aggressively in CodeGenPrep.
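  // (The eight 4-bit CR fields supply those 32 individually allocatable
  // condition bits once useCRBits() is enabled.)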
  if (Subtarget.useCRBits()) {
    setHasMultipleConditionRegisters();
    setJumpIsExpensive();
  }

  setMinFunctionAlignment(2);
  if (Subtarget.isDarwin())
    setPrefFunctionAlignment(4);

  switch (Subtarget.getDarwinDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_A2:
  case PPC::DIR_E500:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9:
    setPrefFunctionAlignment(4);
    setPrefLoopAlignment(4);
    break;
  }

  if (Subtarget.enableMachineScheduler())
    setSchedulingPreference(Sched::Source);
  else
    setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties(STI.getRegisterInfo());

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getDarwinDirective() == PPC::DIR_E500mc ||
      Subtarget.getDarwinDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;
  } else if (Subtarget.getDarwinDirective() == PPC::DIR_A2) {
    // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of the function call, even when warm, can be
    // over one hundred cycles.
    MaxStoresPerMemset = 128;
    MaxStoresPerMemcpy = 128;
    MaxStoresPerMemmove = 128;
    MaxLoadsPerMemcmp = 128;
  } else {
    MaxLoadsPerMemcmp = 8;
    MaxLoadsPerMemcmpOptSize = 4;
  }
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
                             unsigned MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
      MaxAlign = 32;
    else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      unsigned EltAlign = 0;
      getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {
  // Darwin passes everything on a 4-byte boundary.
  if (Subtarget.isDarwin())
    return 4;

  // 16-byte and wider vectors are passed on a 16-byte boundary. Everything
  // else is passed on an 8-byte boundary on PPC64 and a 4-byte boundary on
  // PPC32.
  unsigned Align = Subtarget.isPPC64() ? 8 : 4;
  if (Subtarget.hasAltivec() || Subtarget.hasQPX())
    getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ? 32 : 16);
  return Align;
}

unsigned PPCTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
                                                          CallingConv::ID CC,
                                                          EVT VT) const {
  if (Subtarget.hasSPE() && VT == MVT::f64)
    return 2;
  return PPCTargetLowering::getNumRegisters(Context, VT);
}

MVT PPCTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                     CallingConv::ID CC,
                                                     EVT VT) const {
  if (Subtarget.hasSPE() && VT == MVT::f64)
    return MVT::i32;
  return PPCTargetLowering::getRegisterType(Context, VT);
}

bool PPCTargetLowering::useSoftFloat() const {
  return Subtarget.useSoftFloat();
}

bool PPCTargetLowering::hasSPE() const {
  return Subtarget.hasSPE();
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((PPCISD::NodeType)Opcode) {
  case PPCISD::FIRST_NUMBER: break;
  case PPCISD::FSEL: return "PPCISD::FSEL";
  case PPCISD::FCFID: return "PPCISD::FCFID";
  case PPCISD::FCFIDU: return "PPCISD::FCFIDU";
  case PPCISD::FCFIDS: return "PPCISD::FCFIDS";
  case PPCISD::FCFIDUS: return "PPCISD::FCFIDUS";
  case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
  case PPCISD::FCTIDUZ: return "PPCISD::FCTIDUZ";
  case PPCISD::FCTIWUZ: return "PPCISD::FCTIWUZ";
  case PPCISD::FP_TO_UINT_IN_VSR:
    return "PPCISD::FP_TO_UINT_IN_VSR";
  case PPCISD::FP_TO_SINT_IN_VSR:
    return "PPCISD::FP_TO_SINT_IN_VSR";
  case PPCISD::FRE: return "PPCISD::FRE";
  case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE";
  case PPCISD::STFIWX: return "PPCISD::STFIWX";
  case PPCISD::VMADDFP: return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM: return "PPCISD::VPERM";
  case PPCISD::XXSPLT: return "PPCISD::XXSPLT";
  case PPCISD::VECINSERT: return "PPCISD::VECINSERT";
  case PPCISD::XXREVERSE: return "PPCISD::XXREVERSE";
  case PPCISD::XXPERMDI: return "PPCISD::XXPERMDI";
  case PPCISD::VECSHL: return "PPCISD::VECSHL";
  case PPCISD::CMPB: return "PPCISD::CMPB";
  case PPCISD::Hi: return "PPCISD::Hi";
  case PPCISD::Lo: return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY";
  case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
  case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
  case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC";
  case PPCISD::DYNAREAOFFSET: return "PPCISD::DYNAREAOFFSET";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL: return "PPCISD::SRL";
  case PPCISD::SRA: return "PPCISD::SRA";
  case PPCISD::SHL: return "PPCISD::SHL";
  case PPCISD::SRA_ADDZE: return "PPCISD::SRA_ADDZE";
  case PPCISD::CALL: return "PPCISD::CALL";
  case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP";
  case PPCISD::MTCTR: return "PPCISD::MTCTR";
  case PPCISD::BCTRL: return "PPCISD::BCTRL";
  case PPCISD::BCTRL_LOAD_TOC: return "PPCISD::BCTRL_LOAD_TOC";
  case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG";
  case PPCISD::READ_TIME_BASE: return "PPCISD::READ_TIME_BASE";
  case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP";
  case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
  case PPCISD::MFOCRF: return "PPCISD::MFOCRF";
  case PPCISD::MFVSR: return "PPCISD::MFVSR";
  case PPCISD::MTVSRA: return "PPCISD::MTVSRA";
  case PPCISD::MTVSRZ: return "PPCISD::MTVSRZ";
"PPCISD::MTVSRZ"; 1302 case PPCISD::SINT_VEC_TO_FP: return "PPCISD::SINT_VEC_TO_FP"; 1303 case PPCISD::UINT_VEC_TO_FP: return "PPCISD::UINT_VEC_TO_FP"; 1304 case PPCISD::ANDIo_1_EQ_BIT: return "PPCISD::ANDIo_1_EQ_BIT"; 1305 case PPCISD::ANDIo_1_GT_BIT: return "PPCISD::ANDIo_1_GT_BIT"; 1306 case PPCISD::VCMP: return "PPCISD::VCMP"; 1307 case PPCISD::VCMPo: return "PPCISD::VCMPo"; 1308 case PPCISD::LBRX: return "PPCISD::LBRX"; 1309 case PPCISD::STBRX: return "PPCISD::STBRX"; 1310 case PPCISD::LFIWAX: return "PPCISD::LFIWAX"; 1311 case PPCISD::LFIWZX: return "PPCISD::LFIWZX"; 1312 case PPCISD::LXSIZX: return "PPCISD::LXSIZX"; 1313 case PPCISD::STXSIX: return "PPCISD::STXSIX"; 1314 case PPCISD::VEXTS: return "PPCISD::VEXTS"; 1315 case PPCISD::SExtVElems: return "PPCISD::SExtVElems"; 1316 case PPCISD::LXVD2X: return "PPCISD::LXVD2X"; 1317 case PPCISD::STXVD2X: return "PPCISD::STXVD2X"; 1318 case PPCISD::ST_VSR_SCAL_INT: 1319 return "PPCISD::ST_VSR_SCAL_INT"; 1320 case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH"; 1321 case PPCISD::BDNZ: return "PPCISD::BDNZ"; 1322 case PPCISD::BDZ: return "PPCISD::BDZ"; 1323 case PPCISD::MFFS: return "PPCISD::MFFS"; 1324 case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ"; 1325 case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN"; 1326 case PPCISD::CR6SET: return "PPCISD::CR6SET"; 1327 case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET"; 1328 case PPCISD::PPC32_GOT: return "PPCISD::PPC32_GOT"; 1329 case PPCISD::PPC32_PICGOT: return "PPCISD::PPC32_PICGOT"; 1330 case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA"; 1331 case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L"; 1332 case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS"; 1333 case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA"; 1334 case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L"; 1335 case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR"; 1336 case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR"; 1337 case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA"; 1338 case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L"; 1339 case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR"; 1340 case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR"; 1341 case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA"; 1342 case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L"; 1343 case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT"; 1344 case PPCISD::SC: return "PPCISD::SC"; 1345 case PPCISD::CLRBHRB: return "PPCISD::CLRBHRB"; 1346 case PPCISD::MFBHRBE: return "PPCISD::MFBHRBE"; 1347 case PPCISD::RFEBB: return "PPCISD::RFEBB"; 1348 case PPCISD::XXSWAPD: return "PPCISD::XXSWAPD"; 1349 case PPCISD::SWAP_NO_CHAIN: return "PPCISD::SWAP_NO_CHAIN"; 1350 case PPCISD::VABSD: return "PPCISD::VABSD"; 1351 case PPCISD::QVFPERM: return "PPCISD::QVFPERM"; 1352 case PPCISD::QVGPCI: return "PPCISD::QVGPCI"; 1353 case PPCISD::QVALIGNI: return "PPCISD::QVALIGNI"; 1354 case PPCISD::QVESPLATI: return "PPCISD::QVESPLATI"; 1355 case PPCISD::QBFLT: return "PPCISD::QBFLT"; 1356 case PPCISD::QVLFSb: return "PPCISD::QVLFSb"; 1357 case PPCISD::BUILD_FP128: return "PPCISD::BUILD_FP128"; 1358 case PPCISD::EXTSWSLI: return "PPCISD::EXTSWSLI"; 1359 } 1360 return nullptr; 1361 } 1362 1363 EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C, 1364 EVT VT) const { 1365 if (!VT.isVector()) 1366 return Subtarget.useCRBits() ? 
MVT::i1 : MVT::i32; 1367 1368 if (Subtarget.hasQPX()) 1369 return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements()); 1370 1371 return VT.changeVectorElementTypeToInteger(); 1372 } 1373 1374 bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const { 1375 assert(VT.isFloatingPoint() && "Non-floating-point FMA?"); 1376 return true; 1377 } 1378 1379 //===----------------------------------------------------------------------===// 1380 // Node matching predicates, for use by the tblgen matching code. 1381 //===----------------------------------------------------------------------===// 1382 1383 /// isFloatingPointZero - Return true if this is 0.0 or -0.0. 1384 static bool isFloatingPointZero(SDValue Op) { 1385 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 1386 return CFP->getValueAPF().isZero(); 1387 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { 1388 // Maybe this has already been legalized into the constant pool? 1389 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1))) 1390 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 1391 return CFP->getValueAPF().isZero(); 1392 } 1393 return false; 1394 } 1395 1396 /// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return 1397 /// true if Op is undef or if it matches the specified value. 1398 static bool isConstantOrUndef(int Op, int Val) { 1399 return Op < 0 || Op == Val; 1400 } 1401 1402 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a 1403 /// VPKUHUM instruction. 1404 /// The ShuffleKind distinguishes between big-endian operations with 1405 /// two different inputs (0), either-endian operations with two identical 1406 /// inputs (1), and little-endian operations with two different inputs (2). 1407 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td). 1408 bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, 1409 SelectionDAG &DAG) { 1410 bool IsLE = DAG.getDataLayout().isLittleEndian(); 1411 if (ShuffleKind == 0) { 1412 if (IsLE) 1413 return false; 1414 for (unsigned i = 0; i != 16; ++i) 1415 if (!isConstantOrUndef(N->getMaskElt(i), i*2+1)) 1416 return false; 1417 } else if (ShuffleKind == 2) { 1418 if (!IsLE) 1419 return false; 1420 for (unsigned i = 0; i != 16; ++i) 1421 if (!isConstantOrUndef(N->getMaskElt(i), i*2)) 1422 return false; 1423 } else if (ShuffleKind == 1) { 1424 unsigned j = IsLE ? 0 : 1; 1425 for (unsigned i = 0; i != 8; ++i) 1426 if (!isConstantOrUndef(N->getMaskElt(i), i*2+j) || 1427 !isConstantOrUndef(N->getMaskElt(i+8), i*2+j)) 1428 return false; 1429 } 1430 return true; 1431 } 1432 1433 /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a 1434 /// VPKUWUM instruction. 1435 /// The ShuffleKind distinguishes between big-endian operations with 1436 /// two different inputs (0), either-endian operations with two identical 1437 /// inputs (1), and little-endian operations with two different inputs (2). 1438 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td). 
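/// For example (illustrative, not from the original source): on a big-endian
/// target with ShuffleKind 0, vpkuwum keeps the low halfword of each word
/// from both inputs, so the matching v16i8 mask is
/// <2,3, 6,7, 10,11, 14,15, 18,19, 22,23, 26,27, 30,31>.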
1439 bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, 1440 SelectionDAG &DAG) { 1441 bool IsLE = DAG.getDataLayout().isLittleEndian(); 1442 if (ShuffleKind == 0) { 1443 if (IsLE) 1444 return false; 1445 for (unsigned i = 0; i != 16; i += 2) 1446 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+2) || 1447 !isConstantOrUndef(N->getMaskElt(i+1), i*2+3)) 1448 return false; 1449 } else if (ShuffleKind == 2) { 1450 if (!IsLE) 1451 return false; 1452 for (unsigned i = 0; i != 16; i += 2) 1453 if (!isConstantOrUndef(N->getMaskElt(i ), i*2) || 1454 !isConstantOrUndef(N->getMaskElt(i+1), i*2+1)) 1455 return false; 1456 } else if (ShuffleKind == 1) { 1457 unsigned j = IsLE ? 0 : 2; 1458 for (unsigned i = 0; i != 8; i += 2) 1459 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+j) || 1460 !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) || 1461 !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) || 1462 !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1)) 1463 return false; 1464 } 1465 return true; 1466 } 1467 1468 /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a 1469 /// VPKUDUM instruction, AND the VPKUDUM instruction exists for the 1470 /// current subtarget. 1471 /// 1472 /// The ShuffleKind distinguishes between big-endian operations with 1473 /// two different inputs (0), either-endian operations with two identical 1474 /// inputs (1), and little-endian operations with two different inputs (2). 1475 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td). 1476 bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, 1477 SelectionDAG &DAG) { 1478 const PPCSubtarget& Subtarget = 1479 static_cast<const PPCSubtarget&>(DAG.getSubtarget()); 1480 if (!Subtarget.hasP8Vector()) 1481 return false; 1482 1483 bool IsLE = DAG.getDataLayout().isLittleEndian(); 1484 if (ShuffleKind == 0) { 1485 if (IsLE) 1486 return false; 1487 for (unsigned i = 0; i != 16; i += 4) 1488 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+4) || 1489 !isConstantOrUndef(N->getMaskElt(i+1), i*2+5) || 1490 !isConstantOrUndef(N->getMaskElt(i+2), i*2+6) || 1491 !isConstantOrUndef(N->getMaskElt(i+3), i*2+7)) 1492 return false; 1493 } else if (ShuffleKind == 2) { 1494 if (!IsLE) 1495 return false; 1496 for (unsigned i = 0; i != 16; i += 4) 1497 if (!isConstantOrUndef(N->getMaskElt(i ), i*2) || 1498 !isConstantOrUndef(N->getMaskElt(i+1), i*2+1) || 1499 !isConstantOrUndef(N->getMaskElt(i+2), i*2+2) || 1500 !isConstantOrUndef(N->getMaskElt(i+3), i*2+3)) 1501 return false; 1502 } else if (ShuffleKind == 1) { 1503 unsigned j = IsLE ? 0 : 4; 1504 for (unsigned i = 0; i != 8; i += 4) 1505 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+j) || 1506 !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) || 1507 !isConstantOrUndef(N->getMaskElt(i+2), i*2+j+2) || 1508 !isConstantOrUndef(N->getMaskElt(i+3), i*2+j+3) || 1509 !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) || 1510 !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1) || 1511 !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) || 1512 !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3)) 1513 return false; 1514 } 1515 return true; 1516 } 1517 1518 /// isVMerge - Common function, used to match vmrg* shuffles. 
1519 /// 1520 static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize, 1521 unsigned LHSStart, unsigned RHSStart) { 1522 if (N->getValueType(0) != MVT::v16i8) 1523 return false; 1524 assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) && 1525 "Unsupported merge size!"); 1526 1527 for (unsigned i = 0; i != 8/UnitSize; ++i) // Step over units 1528 for (unsigned j = 0; j != UnitSize; ++j) { // Step over bytes within unit 1529 if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j), 1530 LHSStart+j+i*UnitSize) || 1531 !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j), 1532 RHSStart+j+i*UnitSize)) 1533 return false; 1534 } 1535 return true; 1536 } 1537 1538 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for 1539 /// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes). 1540 /// The ShuffleKind distinguishes between big-endian merges with two 1541 /// different inputs (0), either-endian merges with two identical inputs (1), 1542 /// and little-endian merges with two different inputs (2). For the latter, 1543 /// the input operands are swapped (see PPCInstrAltivec.td). 1544 bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, 1545 unsigned ShuffleKind, SelectionDAG &DAG) { 1546 if (DAG.getDataLayout().isLittleEndian()) { 1547 if (ShuffleKind == 1) // unary 1548 return isVMerge(N, UnitSize, 0, 0); 1549 else if (ShuffleKind == 2) // swapped 1550 return isVMerge(N, UnitSize, 0, 16); 1551 else 1552 return false; 1553 } else { 1554 if (ShuffleKind == 1) // unary 1555 return isVMerge(N, UnitSize, 8, 8); 1556 else if (ShuffleKind == 0) // normal 1557 return isVMerge(N, UnitSize, 8, 24); 1558 else 1559 return false; 1560 } 1561 } 1562 1563 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for 1564 /// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes). 1565 /// The ShuffleKind distinguishes between big-endian merges with two 1566 /// different inputs (0), either-endian merges with two identical inputs (1), 1567 /// and little-endian merges with two different inputs (2). For the latter, 1568 /// the input operands are swapped (see PPCInstrAltivec.td). 1569 bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, 1570 unsigned ShuffleKind, SelectionDAG &DAG) { 1571 if (DAG.getDataLayout().isLittleEndian()) { 1572 if (ShuffleKind == 1) // unary 1573 return isVMerge(N, UnitSize, 8, 8); 1574 else if (ShuffleKind == 2) // swapped 1575 return isVMerge(N, UnitSize, 8, 24); 1576 else 1577 return false; 1578 } else { 1579 if (ShuffleKind == 1) // unary 1580 return isVMerge(N, UnitSize, 0, 0); 1581 else if (ShuffleKind == 0) // normal 1582 return isVMerge(N, UnitSize, 0, 16); 1583 else 1584 return false; 1585 } 1586 } 1587 1588 /** 1589 * Common function used to match vmrgew and vmrgow shuffles 1590 * 1591 * The indexOffset determines whether to look for even or odd words in 1592 * the shuffle mask. This is based on the endianness of the target 1593 * machine.
* - Little Endian: 1595 * - Use offset of 0 to check for odd elements 1596 * - Use offset of 4 to check for even elements 1597 * - Big Endian: 1598 * - Use offset of 0 to check for even elements 1599 * - Use offset of 4 to check for odd elements 1600 * A detailed description of the vector element ordering for little endian and 1601 * big endian can be found at 1602 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html 1603 * Targeting your applications - what little endian and big endian IBM XL C/C++ 1604 * compiler differences mean to you 1605 * 1606 * The mask to the shuffle vector instruction specifies the indices of the 1607 * elements from the two input vectors to place in the result. The elements are 1608 * numbered in array-access order, starting with the first vector. These vectors 1609 * are always of type v16i8, thus each vector will contain 16 elements of size 1610 * 8 bits. More info on the shuffle vector can be found in the 1611 * http://llvm.org/docs/LangRef.html#shufflevector-instruction 1612 * Language Reference. 1613 * 1614 * The RHSStartValue indicates whether the same input vectors are used (unary) 1615 * or two different input vectors are used, based on the following: 1616 * - If the instruction uses the same vector for both inputs, the range of the 1617 * indices will be 0 to 15. In this case, the RHSStart value passed should 1618 * be 0. 1619 * - If the instruction has two different vectors then the range of the 1620 * indices will be 0 to 31. In this case, the RHSStart value passed should 1621 * be 16 (indices 0-15 specify elements in the first vector while indices 16 1622 * to 31 specify elements in the second vector). 1623 * 1624 * \param[in] N The shuffle vector SD Node to analyze 1625 * \param[in] IndexOffset Specifies whether to look for even or odd elements 1626 * \param[in] RHSStartValue Specifies the starting index for the righthand input 1627 * vector to the shuffle_vector instruction 1628 * \return true iff this shuffle vector represents an even or odd word merge 1629 */ 1630 static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset, 1631 unsigned RHSStartValue) { 1632 if (N->getValueType(0) != MVT::v16i8) 1633 return false; 1634 1635 for (unsigned i = 0; i < 2; ++i) 1636 for (unsigned j = 0; j < 4; ++j) 1637 if (!isConstantOrUndef(N->getMaskElt(i*4+j), 1638 i*RHSStartValue+j+IndexOffset) || 1639 !isConstantOrUndef(N->getMaskElt(i*4+j+8), 1640 i*RHSStartValue+j+IndexOffset+8)) 1641 return false; 1642 return true; 1643 } 1644 1645 /** 1646 * Determine if the specified shuffle mask is suitable for the vmrgew or 1647 * vmrgow instructions. 1648 * 1649 * \param[in] N The shuffle vector SD Node to analyze 1650 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false) 1651 * \param[in] ShuffleKind Identify the type of merge: 1652 * - 0 = big-endian merge with two different inputs; 1653 * - 1 = either-endian merge with two identical inputs; 1654 * - 2 = little-endian merge with two different inputs (inputs are swapped for 1655 * little-endian merges). 1656 * \param[in] DAG The current SelectionDAG 1657 * \return true iff this shuffle mask is suitable for a vmrgew or vmrgow instruction 1658 */ 1659 bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven, 1660 unsigned ShuffleKind, SelectionDAG &DAG) { 1661 if (DAG.getDataLayout().isLittleEndian()) { 1662 unsigned indexOffset = CheckEven ?
4 : 0; 1663 if (ShuffleKind == 1) // Unary 1664 return isVMerge(N, indexOffset, 0); 1665 else if (ShuffleKind == 2) // swapped 1666 return isVMerge(N, indexOffset, 16); 1667 else 1668 return false; 1669 } 1670 else { 1671 unsigned indexOffset = CheckEven ? 0 : 4; 1672 if (ShuffleKind == 1) // Unary 1673 return isVMerge(N, indexOffset, 0); 1674 else if (ShuffleKind == 0) // Normal 1675 return isVMerge(N, indexOffset, 16); 1676 else 1677 return false; 1678 } 1679 return false; 1680 } 1681 1682 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift 1683 /// amount, otherwise return -1. 1684 /// The ShuffleKind distinguishes between big-endian operations with two 1685 /// different inputs (0), either-endian operations with two identical inputs 1686 /// (1), and little-endian operations with two different inputs (2). For the 1687 /// latter, the input operands are swapped (see PPCInstrAltivec.td). 1688 int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind, 1689 SelectionDAG &DAG) { 1690 if (N->getValueType(0) != MVT::v16i8) 1691 return -1; 1692 1693 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 1694 1695 // Find the first non-undef value in the shuffle mask. 1696 unsigned i; 1697 for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i) 1698 /*search*/; 1699 1700 if (i == 16) return -1; // all undef. 1701 1702 // Otherwise, check to see if the rest of the elements are consecutively 1703 // numbered from this value. 1704 unsigned ShiftAmt = SVOp->getMaskElt(i); 1705 if (ShiftAmt < i) return -1; 1706 1707 ShiftAmt -= i; 1708 bool isLE = DAG.getDataLayout().isLittleEndian(); 1709 1710 if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) { 1711 // Check the rest of the elements to see if they are consecutive. 1712 for (++i; i != 16; ++i) 1713 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i)) 1714 return -1; 1715 } else if (ShuffleKind == 1) { 1716 // Check the rest of the elements to see if they are consecutive. 1717 for (++i; i != 16; ++i) 1718 if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15)) 1719 return -1; 1720 } else 1721 return -1; 1722 1723 if (isLE) 1724 ShiftAmt = 16 - ShiftAmt; 1725 1726 return ShiftAmt; 1727 } 1728 1729 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand 1730 /// specifies a splat of a single element that is suitable for input to 1731 /// VSPLTB/VSPLTH/VSPLTW. 1732 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) { 1733 assert(N->getValueType(0) == MVT::v16i8 && 1734 (EltSize == 1 || EltSize == 2 || EltSize == 4)); 1735 1736 // The consecutive indices need to specify an element, not part of two 1737 // different elements. So abandon ship early if this isn't the case. 1738 if (N->getMaskElt(0) % EltSize != 0) 1739 return false; 1740 1741 // This is a splat operation if each element of the permute is the same, and 1742 // if the value doesn't reference the second vector. 1743 unsigned ElementBase = N->getMaskElt(0); 1744 1745 // FIXME: Handle UNDEF elements too! 1746 if (ElementBase >= 16) 1747 return false; 1748 1749 // Check that the indices are consecutive, in the case of a multi-byte element 1750 // splatted with a v16i8 mask. 
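// For example (illustrative): a v4i32 splat of element 1 reaches this point
// as the v16i8 mask <4,5,6,7, 4,5,6,7, 4,5,6,7, 4,5,6,7>; the loop below
// verifies that the first EltSize entries are consecutive, and the loop
// after it verifies that every later unit repeats those entries.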
1751 for (unsigned i = 1; i != EltSize; ++i) 1752 if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase)) 1753 return false; 1754 1755 for (unsigned i = EltSize, e = 16; i != e; i += EltSize) { 1756 if (N->getMaskElt(i) < 0) continue; 1757 for (unsigned j = 0; j != EltSize; ++j) 1758 if (N->getMaskElt(i+j) != N->getMaskElt(j)) 1759 return false; 1760 } 1761 return true; 1762 } 1763 1764 /// Check that the mask is shuffling N byte elements. Within each N byte 1765 /// element of the mask, the indices could be either in increasing or 1766 /// decreasing order as long as they are consecutive. 1767 /// \param[in] N the shuffle vector SD Node to analyze 1768 /// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/ 1769 /// Word/DoubleWord/QuadWord). 1770 /// \param[in] StepLen the index delta between adjacent mask entries within an 1771 /// element: 1 if the mask is in increasing order, -1 if decreasing. 1772 /// \return true iff the mask is shuffling N byte elements. 1773 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width, 1774 int StepLen) { 1775 assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) && 1776 "Unexpected element width."); 1777 assert((StepLen == 1 || StepLen == -1) && "Unexpected step length."); 1778 1779 unsigned NumOfElem = 16 / Width; 1780 unsigned MaskVal[16]; // Width is never greater than 16 1781 for (unsigned i = 0; i < NumOfElem; ++i) { 1782 MaskVal[0] = N->getMaskElt(i * Width); 1783 if ((StepLen == 1) && (MaskVal[0] % Width)) { 1784 return false; 1785 } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) { 1786 return false; 1787 } 1788 1789 for (unsigned int j = 1; j < Width; ++j) { 1790 MaskVal[j] = N->getMaskElt(i * Width + j); 1791 if (MaskVal[j] != MaskVal[j-1] + StepLen) { 1792 return false; 1793 } 1794 } 1795 } 1796 1797 return true; 1798 } 1799 1800 bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, 1801 unsigned &InsertAtByte, bool &Swap, bool IsLE) { 1802 if (!isNByteElemShuffleMask(N, 4, 1)) 1803 return false; 1804 1805 // Now we look at mask elements 0,4,8,12 1806 unsigned M0 = N->getMaskElt(0) / 4; 1807 unsigned M1 = N->getMaskElt(4) / 4; 1808 unsigned M2 = N->getMaskElt(8) / 4; 1809 unsigned M3 = N->getMaskElt(12) / 4; 1810 unsigned LittleEndianShifts[] = { 2, 1, 0, 3 }; 1811 unsigned BigEndianShifts[] = { 3, 0, 1, 2 }; 1812 1813 // Below, let H and L be arbitrary elements of the shuffle mask 1814 // where H is in the range [4,7] and L is in the range [0,3]. 1815 // H, 1, 2, 3 or L, 5, 6, 7 1816 if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) || 1817 (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) { 1818 ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3]; 1819 InsertAtByte = IsLE ? 12 : 0; 1820 Swap = M0 < 4; 1821 return true; 1822 } 1823 // 0, H, 2, 3 or 4, L, 6, 7 1824 if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) || 1825 (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) { 1826 ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3]; 1827 InsertAtByte = IsLE ? 8 : 4; 1828 Swap = M1 < 4; 1829 return true; 1830 } 1831 // 0, 1, H, 3 or 4, 5, L, 7 1832 if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) || 1833 (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) { 1834 ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3]; 1835 InsertAtByte = IsLE ?
4 : 8; 1836 Swap = M2 < 4; 1837 return true; 1838 } 1839 // 0, 1, 2, H or 4, 5, 6, L 1840 if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) || 1841 (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) { 1842 ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3]; 1843 InsertAtByte = IsLE ? 0 : 12; 1844 Swap = M3 < 4; 1845 return true; 1846 } 1847 1848 // If both vector operands for the shuffle are the same vector, the mask will 1849 // contain only elements from the first one and the second one will be undef. 1850 if (N->getOperand(1).isUndef()) { 1851 ShiftElts = 0; 1852 Swap = true; 1853 unsigned XXINSERTWSrcElem = IsLE ? 2 : 1; 1854 if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) { 1855 InsertAtByte = IsLE ? 12 : 0; 1856 return true; 1857 } 1858 if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) { 1859 InsertAtByte = IsLE ? 8 : 4; 1860 return true; 1861 } 1862 if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) { 1863 InsertAtByte = IsLE ? 4 : 8; 1864 return true; 1865 } 1866 if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) { 1867 InsertAtByte = IsLE ? 0 : 12; 1868 return true; 1869 } 1870 } 1871 1872 return false; 1873 } 1874 1875 bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, 1876 bool &Swap, bool IsLE) { 1877 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8"); 1878 // Ensure each byte index of the word is consecutive. 1879 if (!isNByteElemShuffleMask(N, 4, 1)) 1880 return false; 1881 1882 // Now we look at mask elements 0,4,8,12, which are the beginning of words. 1883 unsigned M0 = N->getMaskElt(0) / 4; 1884 unsigned M1 = N->getMaskElt(4) / 4; 1885 unsigned M2 = N->getMaskElt(8) / 4; 1886 unsigned M3 = N->getMaskElt(12) / 4; 1887 1888 // If both vector operands for the shuffle are the same vector, the mask will 1889 // contain only elements from the first one and the second one will be undef. 1890 if (N->getOperand(1).isUndef()) { 1891 assert(M0 < 4 && "Indexing into an undef vector?"); 1892 if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4) 1893 return false; 1894 1895 ShiftElts = IsLE ? (4 - M0) % 4 : M0; 1896 Swap = false; 1897 return true; 1898 } 1899 1900 // Ensure each word index of the ShuffleVector Mask is consecutive. 1901 if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8) 1902 return false; 1903 1904 if (IsLE) { 1905 if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) { 1906 // Input vectors don't need to be swapped if the leading element 1907 // of the result is one of the 3 left elements of the second vector 1908 // (or if there is no shift to be done at all). 1909 Swap = false; 1910 ShiftElts = (8 - M0) % 8; 1911 } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) { 1912 // Input vectors need to be swapped if the leading element 1913 // of the result is one of the 3 left elements of the first vector 1914 // (or if we're shifting by 4 - thereby simply swapping the vectors). 1915 Swap = true; 1916 ShiftElts = (4 - M0) % 4; 1917 } 1918 1919 return true; 1920 } else { // BE 1921 if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) { 1922 // Input vectors don't need to be swapped if the leading element 1923 // of the result is one of the 4 elements of the first vector. 1924 Swap = false; 1925 ShiftElts = M0; 1926 } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) { 1927 // Input vectors need to be swapped if the leading element 1928 // of the result is one of the 4 elements of the right vector. 
1929 Swap = true; 1930 ShiftElts = M0 - 4; 1931 } 1932 1933 return true; 1934 } 1935 } 1936 1937 bool static isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) { 1938 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8"); 1939 1940 if (!isNByteElemShuffleMask(N, Width, -1)) 1941 return false; 1942 1943 for (int i = 0; i < 16; i += Width) 1944 if (N->getMaskElt(i) != i + Width - 1) 1945 return false; 1946 1947 return true; 1948 } 1949 1950 bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) { 1951 return isXXBRShuffleMaskHelper(N, 2); 1952 } 1953 1954 bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) { 1955 return isXXBRShuffleMaskHelper(N, 4); 1956 } 1957 1958 bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) { 1959 return isXXBRShuffleMaskHelper(N, 8); 1960 } 1961 1962 bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) { 1963 return isXXBRShuffleMaskHelper(N, 16); 1964 } 1965 1966 /// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap 1967 /// if the inputs to the instruction should be swapped and set \p DM to the 1968 /// value for the immediate. 1969 /// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI 1970 /// AND element 0 of the result comes from the first input (LE) or second input 1971 /// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered. 1972 /// \return true iff the given mask of shuffle node \p N is a XXPERMDI shuffle 1973 /// mask. 1974 bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM, 1975 bool &Swap, bool IsLE) { 1976 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8"); 1977 1978 // Ensure each byte index of the double word is consecutive. 1979 if (!isNByteElemShuffleMask(N, 8, 1)) 1980 return false; 1981 1982 unsigned M0 = N->getMaskElt(0) / 8; 1983 unsigned M1 = N->getMaskElt(8) / 8; 1984 assert(((M0 | M1) < 4) && "A mask element out of bounds?"); 1985 1986 // If both vector operands for the shuffle are the same vector, the mask will 1987 // contain only elements from the first one and the second one will be undef. 1988 if (N->getOperand(1).isUndef()) { 1989 if ((M0 | M1) < 2) { 1990 DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1); 1991 Swap = false; 1992 return true; 1993 } else 1994 return false; 1995 } 1996 1997 if (IsLE) { 1998 if (M0 > 1 && M1 < 2) { 1999 Swap = false; 2000 } else if (M0 < 2 && M1 > 1) { 2001 M0 = (M0 + 2) % 4; 2002 M1 = (M1 + 2) % 4; 2003 Swap = true; 2004 } else 2005 return false; 2006 2007 // Note: if control flow comes here that means Swap is already set above 2008 DM = (((~M1) & 1) << 1) + ((~M0) & 1); 2009 return true; 2010 } else { // BE 2011 if (M0 < 2 && M1 > 1) { 2012 Swap = false; 2013 } else if (M0 > 1 && M1 < 2) { 2014 M0 = (M0 + 2) % 4; 2015 M1 = (M1 + 2) % 4; 2016 Swap = true; 2017 } else 2018 return false; 2019 2020 // Note: if control flow comes here that means Swap is already set above 2021 DM = (M0 << 1) + (M1 & 1); 2022 return true; 2023 } 2024 } 2025 2026 2027 /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the 2028 /// specified isSplatShuffleMask VECTOR_SHUFFLE mask. 
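/// Worked example (illustrative): for EltSize == 4 and a mask splatting
/// word 1 (mask element 0 == 4), big endian returns 4/4 == 1, while little
/// endian returns (16/4) - 1 - (4/4) == 2, reflecting the reversed element
/// numbering.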
2029 unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize, 2030 SelectionDAG &DAG) { 2031 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 2032 assert(isSplatShuffleMask(SVOp, EltSize)); 2033 if (DAG.getDataLayout().isLittleEndian()) 2034 return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize); 2035 else 2036 return SVOp->getMaskElt(0) / EltSize; 2037 } 2038 2039 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed 2040 /// by using a vspltis[bhw] instruction of the specified element size, return 2041 /// the constant being splatted. The ByteSize field indicates the number of 2042 /// bytes of each element [124] -> [bhw]. 2043 SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { 2044 SDValue OpVal(nullptr, 0); 2045 2046 // If ByteSize of the splat is bigger than the element size of the 2047 // build_vector, then we have a case where we are checking for a splat where 2048 // multiple elements of the buildvector are folded together into a single 2049 // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8). 2050 unsigned EltSize = 16/N->getNumOperands(); 2051 if (EltSize < ByteSize) { 2052 unsigned Multiple = ByteSize/EltSize; // Number of BV entries per spltval. 2053 SDValue UniquedVals[4]; 2054 assert(Multiple > 1 && Multiple <= 4 && "How can this happen?"); 2055 2056 // See if all of the elements in the buildvector agree across. 2057 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 2058 if (N->getOperand(i).isUndef()) continue; 2059 // If the element isn't a constant, bail fully out. 2060 if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue(); 2061 2062 if (!UniquedVals[i&(Multiple-1)].getNode()) 2063 UniquedVals[i&(Multiple-1)] = N->getOperand(i); 2064 else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i)) 2065 return SDValue(); // no match. 2066 } 2067 2068 // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains 2069 // either constant or undef values that are identical for each chunk. See 2070 // if these chunks can form into a larger vspltis*. 2071 2072 // Check to see if all of the leading entries are either 0 or -1. If 2073 // neither, then this won't fit into the immediate field. 2074 bool LeadingZero = true; 2075 bool LeadingOnes = true; 2076 for (unsigned i = 0; i != Multiple-1; ++i) { 2077 if (!UniquedVals[i].getNode()) continue; // Must have been undefs. 2078 2079 LeadingZero &= isNullConstant(UniquedVals[i]); 2080 LeadingOnes &= isAllOnesConstant(UniquedVals[i]); 2081 } 2082 // Finally, check the least significant entry. 2083 if (LeadingZero) { 2084 if (!UniquedVals[Multiple-1].getNode()) 2085 return DAG.getTargetConstant(0, SDLoc(N), MVT::i32); // 0,0,0,undef 2086 int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue(); 2087 if (Val < 16) // 0,0,0,4 -> vspltisw(4) 2088 return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32); 2089 } 2090 if (LeadingOnes) { 2091 if (!UniquedVals[Multiple-1].getNode()) 2092 return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef 2093 int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue(); 2094 if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2) 2095 return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32); 2096 } 2097 2098 return SDValue(); 2099 } 2100 2101 // Check to see if this buildvec has a single non-undef value in its elements.
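// For example (illustrative): a v16i8 build_vector whose sixteen operands
// are all the constant 5, queried with ByteSize == 1, falls through to the
// scan below and yields a target constant 5 (materializable as vspltisb 5).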
2102 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 2103 if (N->getOperand(i).isUndef()) continue; 2104 if (!OpVal.getNode()) 2105 OpVal = N->getOperand(i); 2106 else if (OpVal != N->getOperand(i)) 2107 return SDValue(); 2108 } 2109 2110 if (!OpVal.getNode()) return SDValue(); // All UNDEF: use implicit def. 2111 2112 unsigned ValSizeInBytes = EltSize; 2113 uint64_t Value = 0; 2114 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) { 2115 Value = CN->getZExtValue(); 2116 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) { 2117 assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!"); 2118 Value = FloatToBits(CN->getValueAPF().convertToFloat()); 2119 } 2120 2121 // If the splat value is larger than the element value, then we can never do 2122 // this splat. The only case that we could fit the replicated bits into our 2123 // immediate field for would be zero, and we prefer to use vxor for it. 2124 if (ValSizeInBytes < ByteSize) return SDValue(); 2125 2126 // If the element value is larger than the splat value, check if it consists 2127 // of a repeated bit pattern of size ByteSize. 2128 if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8)) 2129 return SDValue(); 2130 2131 // Properly sign extend the value. 2132 int MaskVal = SignExtend32(Value, ByteSize * 8); 2133 2134 // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros. 2135 if (MaskVal == 0) return SDValue(); 2136 2137 // Finally, if this value fits in a 5 bit sext field, return it 2138 if (SignExtend32<5>(MaskVal) == MaskVal) 2139 return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32); 2140 return SDValue(); 2141 } 2142 2143 /// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift 2144 /// amount, otherwise return -1. 2145 int PPC::isQVALIGNIShuffleMask(SDNode *N) { 2146 EVT VT = N->getValueType(0); 2147 if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1) 2148 return -1; 2149 2150 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 2151 2152 // Find the first non-undef value in the shuffle mask. 2153 unsigned i; 2154 for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i) 2155 /*search*/; 2156 2157 if (i == 4) return -1; // all undef. 2158 2159 // Otherwise, check to see if the rest of the elements are consecutively 2160 // numbered from this value. 2161 unsigned ShiftAmt = SVOp->getMaskElt(i); 2162 if (ShiftAmt < i) return -1; 2163 ShiftAmt -= i; 2164 2165 // Check the rest of the elements to see if they are consecutive. 2166 for (++i; i != 4; ++i) 2167 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i)) 2168 return -1; 2169 2170 return ShiftAmt; 2171 } 2172 2173 //===----------------------------------------------------------------------===// 2174 // Addressing Mode Selection 2175 //===----------------------------------------------------------------------===// 2176 2177 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit 2178 /// or 64-bit immediate, and if the value can be accurately represented as a 2179 /// sign extension from a 16-bit value. If so, this returns true and the 2180 /// immediate. 
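/// For example (illustrative): an i32 constant of -32768 round-trips through
/// int16_t and returns true, while 32768 truncates to -32768, fails the
/// comparison, and returns false.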
2181 bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) { 2182 if (!isa<ConstantSDNode>(N)) 2183 return false; 2184 2185 Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue(); 2186 if (N->getValueType(0) == MVT::i32) 2187 return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue(); 2188 else 2189 return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue(); 2190 } 2191 bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) { 2192 return isIntS16Immediate(Op.getNode(), Imm); 2193 } 2194 2195 /// SelectAddressRegReg - Given the specified address, check to see if it 2196 /// can be represented as an indexed [r+r] operation. Returns false if it 2197 /// can be more efficiently represented with [r+imm]. 2198 bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base, 2199 SDValue &Index, 2200 SelectionDAG &DAG) const { 2201 int16_t imm = 0; 2202 if (N.getOpcode() == ISD::ADD) { 2203 if (isIntS16Immediate(N.getOperand(1), imm)) 2204 return false; // r+i 2205 if (N.getOperand(1).getOpcode() == PPCISD::Lo) 2206 return false; // r+i 2207 2208 Base = N.getOperand(0); 2209 Index = N.getOperand(1); 2210 return true; 2211 } else if (N.getOpcode() == ISD::OR) { 2212 if (isIntS16Immediate(N.getOperand(1), imm)) 2213 return false; // r+i; the [r+imm] form can fold it. 2214 2215 // If this is an or of disjoint bitfields, we can codegen this as an add 2216 // (for better address arithmetic) if the LHS and RHS of the OR are provably 2217 // disjoint. 2218 KnownBits LHSKnown, RHSKnown; 2219 DAG.computeKnownBits(N.getOperand(0), LHSKnown); 2220 2221 if (LHSKnown.Zero.getBoolValue()) { 2222 DAG.computeKnownBits(N.getOperand(1), RHSKnown); 2223 // If all of the bits are known zero on the LHS or RHS, the add won't 2224 // carry. 2225 if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) { 2226 Base = N.getOperand(0); 2227 Index = N.getOperand(1); 2228 return true; 2229 } 2230 } 2231 } 2232 2233 return false; 2234 } 2235 2236 // If we happen to be doing an i64 load or store into a stack slot that has 2237 // less than a 4-byte alignment, then the frame-index elimination may need to 2238 // use an indexed load or store instruction (because the offset may not be a 2239 // multiple of 4). The extra register needed to hold the offset comes from the 2240 // register scavenger, and it is possible that the scavenger will need to use 2241 // an emergency spill slot. As a result, we need to make sure that a spill slot 2242 // is allocated when doing an i64 load/store into a less-than-4-byte-aligned 2243 // stack slot. 2244 static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) { 2245 // FIXME: This does not handle the LWA case. 2246 if (VT != MVT::i64) 2247 return; 2248 2249 // NOTE: We'll exclude negative FIs here, which come from argument 2250 // lowering, because there are no known test cases triggering this problem 2251 // using packed structures (or similar). We can remove this exclusion if 2252 // we find such a test case. The reason why this is so test-case driven is 2253 // because this entire 'fixup' is only to prevent crashes (from the 2254 // register scavenger) on not-really-valid inputs. For example, if we have: 2255 // %a = alloca i1 2256 // %b = bitcast i1* %a to i64* 2257 // store i64* %a, i64 %b 2258 // then the store should really be marked as 'align 1', but is not. If it 2259 // were marked as 'align 1' then the indexed form would have been 2260 // instruction-selected initially, and the problem this 'fixup' is preventing 2261 // won't happen regardless.
2262 if (FrameIdx < 0) 2263 return; 2264 2265 MachineFunction &MF = DAG.getMachineFunction(); 2266 MachineFrameInfo &MFI = MF.getFrameInfo(); 2267 2268 unsigned Align = MFI.getObjectAlignment(FrameIdx); 2269 if (Align >= 4) 2270 return; 2271 2272 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2273 FuncInfo->setHasNonRISpills(); 2274 } 2275 2276 /// Returns true if the address N can be represented by a base register plus 2277 /// a signed 16-bit displacement [r+imm], and if it is not better 2278 /// represented as reg+reg. If \p Alignment is non-zero, only accept 2279 /// displacements that are multiples of that value. 2280 bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp, 2281 SDValue &Base, 2282 SelectionDAG &DAG, 2283 unsigned Alignment) const { 2284 // FIXME dl should come from parent load or store, not from address 2285 SDLoc dl(N); 2286 // If this can be more profitably realized as r+r, fail. 2287 if (SelectAddressRegReg(N, Disp, Base, DAG)) 2288 return false; 2289 2290 if (N.getOpcode() == ISD::ADD) { 2291 int16_t imm = 0; 2292 if (isIntS16Immediate(N.getOperand(1), imm) && 2293 (!Alignment || (imm % Alignment) == 0)) { 2294 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 2295 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 2296 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 2297 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 2298 } else { 2299 Base = N.getOperand(0); 2300 } 2301 return true; // [r+i] 2302 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) { 2303 // Match LOAD (ADD (X, Lo(G))). 2304 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue() 2305 && "Cannot handle constant offsets yet!"); 2306 Disp = N.getOperand(1).getOperand(0); // The global address. 2307 assert(Disp.getOpcode() == ISD::TargetGlobalAddress || 2308 Disp.getOpcode() == ISD::TargetGlobalTLSAddress || 2309 Disp.getOpcode() == ISD::TargetConstantPool || 2310 Disp.getOpcode() == ISD::TargetJumpTable); 2311 Base = N.getOperand(0); 2312 return true; // [&g+r] 2313 } 2314 } else if (N.getOpcode() == ISD::OR) { 2315 int16_t imm = 0; 2316 if (isIntS16Immediate(N.getOperand(1), imm) && 2317 (!Alignment || (imm % Alignment) == 0)) { 2318 // If this is an or of disjoint bitfields, we can codegen this as an add 2319 // (for better address arithmetic) if the LHS and RHS of the OR are 2320 // provably disjoint. 2321 KnownBits LHSKnown; 2322 DAG.computeKnownBits(N.getOperand(0), LHSKnown); 2323 2324 if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) { 2325 // If all of the bits are known zero on the LHS or RHS, the add won't 2326 // carry. 2327 if (FrameIndexSDNode *FI = 2328 dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 2329 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 2330 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 2331 } else { 2332 Base = N.getOperand(0); 2333 } 2334 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 2335 return true; 2336 } 2337 } 2338 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { 2339 // Loading from a constant address. 2340 2341 // If this address fits entirely in a 16-bit sext immediate field, codegen 2342 // this as "d, 0" 2343 int16_t Imm; 2344 if (isIntS16Immediate(CN, Imm) && (!Alignment || (Imm % Alignment) == 0)) { 2345 Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0)); 2346 Base = DAG.getRegister(Subtarget.isPPC64() ? 
PPC::ZERO8 : PPC::ZERO, 2347 CN->getValueType(0)); 2348 return true; 2349 } 2350 2351 // Handle 32-bit sext immediates with LIS + addr mode. 2352 if ((CN->getValueType(0) == MVT::i32 || 2353 (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) && 2354 (!Alignment || (CN->getZExtValue() % Alignment) == 0)) { 2355 int Addr = (int)CN->getZExtValue(); 2356 2357 // Otherwise, break this down into an LIS + disp. 2358 Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32); 2359 2360 Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl, 2361 MVT::i32); 2362 unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8; 2363 Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0); 2364 return true; 2365 } 2366 } 2367 2368 Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout())); 2369 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) { 2370 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 2371 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 2372 } else 2373 Base = N; 2374 return true; // [r+0] 2375 } 2376 2377 /// SelectAddressRegRegOnly - Given the specified address, force it to be 2378 /// represented as an indexed [r+r] operation. 2379 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base, 2380 SDValue &Index, 2381 SelectionDAG &DAG) const { 2382 // Check to see if we can easily represent this as an [r+r] address. This 2383 // will fail if it thinks that the address is more profitably represented as 2384 // reg+imm, e.g. where imm = 0. 2385 if (SelectAddressRegReg(N, Base, Index, DAG)) 2386 return true; 2387 2388 // If the address is the result of an add, we will utilize the fact that the 2389 // address calculation includes an implicit add. However, we can reduce 2390 // register pressure if we do not materialize a constant just for use as the 2391 // index register. We only get rid of the add if it is not an add of a 2392 // value and a 16-bit signed constant and both have a single use. 2393 int16_t imm = 0; 2394 if (N.getOpcode() == ISD::ADD && 2395 (!isIntS16Immediate(N.getOperand(1), imm) || 2396 !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) { 2397 Base = N.getOperand(0); 2398 Index = N.getOperand(1); 2399 return true; 2400 } 2401 2402 // Otherwise, do it the hard way, using R0 as the base register. 2403 Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO, 2404 N.getValueType()); 2405 Index = N; 2406 return true; 2407 } 2408 2409 /// Returns true if we should use a direct load into vector instruction 2410 /// (such as lxsd or lfd), instead of a load into gpr + direct move sequence. 2411 static bool usePartialVectorLoads(SDNode *N) { 2412 if (!N->hasOneUse()) 2413 return false; 2414 2415 // If there are any other uses other than scalar to vector, then we should 2416 // keep it as a scalar load -> direct move pattern to prevent multiple 2417 // loads. Currently, only check for i64 since we have lxsd/lfd to do this 2418 // efficiently, but no update equivalent.
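// Illustrative IR for the pattern being matched (names hypothetical):
//   %v = load i64, i64* %p
//   %vec = insertelement <2 x i64> undef, i64 %v, i32 0
// The insertelement typically becomes a SCALAR_TO_VECTOR node, and a single
// lxsd/lfd can then load directly into a vector register.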
2419 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 2420 EVT MemVT = LD->getMemoryVT(); 2421 if (MemVT.isSimple() && MemVT.getSimpleVT().SimpleTy == MVT::i64) { 2422 SDNode *User = *(LD->use_begin()); 2423 if (User->getOpcode() == ISD::SCALAR_TO_VECTOR) 2424 return true; 2425 } 2426 } 2427 2428 return false; 2429 } 2430 2431 /// getPreIndexedAddressParts - Returns true, and sets the base pointer, 2432 /// offset pointer, and addressing mode by reference, if this node's address 2433 /// can be legally represented as a pre-indexed load/store address. 2434 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 2435 SDValue &Offset, 2436 ISD::MemIndexedMode &AM, 2437 SelectionDAG &DAG) const { 2438 if (DisablePPCPreinc) return false; 2439 2440 bool isLoad = true; 2441 SDValue Ptr; 2442 EVT VT; 2443 unsigned Alignment; 2444 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 2445 Ptr = LD->getBasePtr(); 2446 VT = LD->getMemoryVT(); 2447 Alignment = LD->getAlignment(); 2448 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 2449 Ptr = ST->getBasePtr(); 2450 VT = ST->getMemoryVT(); 2451 Alignment = ST->getAlignment(); 2452 isLoad = false; 2453 } else 2454 return false; 2455 2456 // Do not generate pre-inc forms for specific loads that feed scalar_to_vector 2457 // instructions because we can fold these into a more efficient instruction 2458 // instead (such as LXSD). 2459 if (isLoad && usePartialVectorLoads(N)) { 2460 return false; 2461 } 2462 2463 // PowerPC doesn't have preinc load/store instructions for vectors (except 2464 // for QPX, which does have preinc r+r forms). 2465 if (VT.isVector()) { 2466 if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) { 2467 return false; 2468 } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) { 2469 AM = ISD::PRE_INC; 2470 return true; 2471 } 2472 } 2473 2474 if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) { 2475 // Common code will reject creating a pre-inc form if the base pointer 2476 // is a frame index, or if N is a store and the base pointer is either 2477 // the same as or a predecessor of the value being stored. Check for 2478 // those situations here, and try with swapped Base/Offset instead. 2479 bool Swap = false; 2480 2481 if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base)) 2482 Swap = true; 2483 else if (!isLoad) { 2484 SDValue Val = cast<StoreSDNode>(N)->getValue(); 2485 if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode())) 2486 Swap = true; 2487 } 2488 2489 if (Swap) 2490 std::swap(Base, Offset); 2491 2492 AM = ISD::PRE_INC; 2493 return true; 2494 } 2495 2496 // LDU/STU can only handle immediates that are a multiple of 4. 2497 if (VT != MVT::i64) { 2498 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 0)) 2499 return false; 2500 } else { 2501 // LDU/STU need an address with at least 4-byte alignment. 2502 if (Alignment < 4) 2503 return false; 2504 2505 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 4)) 2506 return false; 2507 } 2508 2509 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 2510 // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of 2511 // sext i32 to i64 when addr mode is r+i.
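// That is (sketch): a pre-increment form of this load with a constant offset
// would need a nonexistent 'lwau' instruction; the r+r update form (lwaux)
// remains available through the SelectAddressRegReg path above.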
2512 if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 && 2513 LD->getExtensionType() == ISD::SEXTLOAD && 2514 isa<ConstantSDNode>(Offset)) 2515 return false; 2516 } 2517 2518 AM = ISD::PRE_INC; 2519 return true; 2520 } 2521 2522 //===----------------------------------------------------------------------===// 2523 // LowerOperation implementation 2524 //===----------------------------------------------------------------------===// 2525 2526 /// Return true if we should reference labels using a PICBase, set the HiOpFlags 2527 /// and LoOpFlags to the target MO flags. 2528 static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget, 2529 unsigned &HiOpFlags, unsigned &LoOpFlags, 2530 const GlobalValue *GV = nullptr) { 2531 HiOpFlags = PPCII::MO_HA; 2532 LoOpFlags = PPCII::MO_LO; 2533 2534 // Don't use the pic base if not in PIC relocation model. 2535 if (IsPIC) { 2536 HiOpFlags |= PPCII::MO_PIC_FLAG; 2537 LoOpFlags |= PPCII::MO_PIC_FLAG; 2538 } 2539 2540 // If this is a reference to a global value that requires a non-lazy-ptr, make 2541 // sure that instruction lowering adds it. 2542 if (GV && Subtarget.hasLazyResolverStub(GV)) { 2543 HiOpFlags |= PPCII::MO_NLP_FLAG; 2544 LoOpFlags |= PPCII::MO_NLP_FLAG; 2545 2546 if (GV->hasHiddenVisibility()) { 2547 HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 2548 LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 2549 } 2550 } 2551 } 2552 2553 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC, 2554 SelectionDAG &DAG) { 2555 SDLoc DL(HiPart); 2556 EVT PtrVT = HiPart.getValueType(); 2557 SDValue Zero = DAG.getConstant(0, DL, PtrVT); 2558 2559 SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero); 2560 SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero); 2561 2562 // With PIC, the first instruction is actually "GR+hi(&G)". 2563 if (isPIC) 2564 Hi = DAG.getNode(ISD::ADD, DL, PtrVT, 2565 DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi); 2566 2567 // Generate non-pic code that has direct accesses to the constant pool. 2568 // The address of the global is just (hi(&g)+lo(&g)). 2569 return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo); 2570 } 2571 2572 static void setUsesTOCBasePtr(MachineFunction &MF) { 2573 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2574 FuncInfo->setUsesTOCBasePtr(); 2575 } 2576 2577 static void setUsesTOCBasePtr(SelectionDAG &DAG) { 2578 setUsesTOCBasePtr(DAG.getMachineFunction()); 2579 } 2580 2581 static SDValue getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, bool Is64Bit, 2582 SDValue GA) { 2583 EVT VT = Is64Bit ? MVT::i64 : MVT::i32; 2584 SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT) : 2585 DAG.getNode(PPCISD::GlobalBaseReg, dl, VT); 2586 2587 SDValue Ops[] = { GA, Reg }; 2588 return DAG.getMemIntrinsicNode( 2589 PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT, 2590 MachinePointerInfo::getGOT(DAG.getMachineFunction()), 0, 2591 MachineMemOperand::MOLoad); 2592 } 2593 2594 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op, 2595 SelectionDAG &DAG) const { 2596 EVT PtrVT = Op.getValueType(); 2597 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 2598 const Constant *C = CP->getConstVal(); 2599 2600 // 64-bit SVR4 ABI code is always position-independent. 2601 // The actual address of the GlobalValue is stored in the TOC. 
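// A TOC_ENTRY node here becomes a load from the TOC; for the medium code
// model this is roughly (sketch, register and label choice illustrative):
//   addis 3, 2, .LC0@toc@ha
//   ld    3, .LC0@toc@l(3)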
2602 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2603 setUsesTOCBasePtr(DAG); 2604 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0); 2605 return getTOCEntry(DAG, SDLoc(CP), true, GA); 2606 } 2607 2608 unsigned MOHiFlag, MOLoFlag; 2609 bool IsPIC = isPositionIndependent(); 2610 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2611 2612 if (IsPIC && Subtarget.isSVR4ABI()) { 2613 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 2614 PPCII::MO_PIC_FLAG); 2615 return getTOCEntry(DAG, SDLoc(CP), false, GA); 2616 } 2617 2618 SDValue CPIHi = 2619 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag); 2620 SDValue CPILo = 2621 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag); 2622 return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG); 2623 } 2624 2625 // For 64-bit PowerPC, prefer the more compact relative encodings. 2626 // This trades 32 bits per jump table entry for one or two instructions 2627 // on the jump site. 2628 unsigned PPCTargetLowering::getJumpTableEncoding() const { 2629 if (isJumpTableRelative()) 2630 return MachineJumpTableInfo::EK_LabelDifference32; 2631 2632 return TargetLowering::getJumpTableEncoding(); 2633 } 2634 2635 bool PPCTargetLowering::isJumpTableRelative() const { 2636 if (Subtarget.isPPC64()) 2637 return true; 2638 return TargetLowering::isJumpTableRelative(); 2639 } 2640 2641 SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table, 2642 SelectionDAG &DAG) const { 2643 if (!Subtarget.isPPC64()) 2644 return TargetLowering::getPICJumpTableRelocBase(Table, DAG); 2645 2646 switch (getTargetMachine().getCodeModel()) { 2647 case CodeModel::Small: 2648 case CodeModel::Medium: 2649 return TargetLowering::getPICJumpTableRelocBase(Table, DAG); 2650 default: 2651 return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(), 2652 getPointerTy(DAG.getDataLayout())); 2653 } 2654 } 2655 2656 const MCExpr * 2657 PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF, 2658 unsigned JTI, 2659 MCContext &Ctx) const { 2660 if (!Subtarget.isPPC64()) 2661 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); 2662 2663 switch (getTargetMachine().getCodeModel()) { 2664 case CodeModel::Small: 2665 case CodeModel::Medium: 2666 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); 2667 default: 2668 return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx); 2669 } 2670 } 2671 2672 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 2673 EVT PtrVT = Op.getValueType(); 2674 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 2675 2676 // 64-bit SVR4 ABI code is always position-independent. 2677 // The actual address of the GlobalValue is stored in the TOC. 
2678 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2679 setUsesTOCBasePtr(DAG); 2680 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 2681 return getTOCEntry(DAG, SDLoc(JT), true, GA); 2682 } 2683 2684 unsigned MOHiFlag, MOLoFlag; 2685 bool IsPIC = isPositionIndependent(); 2686 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2687 2688 if (IsPIC && Subtarget.isSVR4ABI()) { 2689 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, 2690 PPCII::MO_PIC_FLAG); 2691 return getTOCEntry(DAG, SDLoc(GA), false, GA); 2692 } 2693 2694 SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag); 2695 SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag); 2696 return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG); 2697 } 2698 2699 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op, 2700 SelectionDAG &DAG) const { 2701 EVT PtrVT = Op.getValueType(); 2702 BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op); 2703 const BlockAddress *BA = BASDN->getBlockAddress(); 2704 2705 // 64-bit SVR4 ABI code is always position-independent. 2706 // The actual BlockAddress is stored in the TOC. 2707 if (Subtarget.isSVR4ABI() && 2708 (Subtarget.isPPC64() || isPositionIndependent())) { 2709 if (Subtarget.isPPC64()) 2710 setUsesTOCBasePtr(DAG); 2711 SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()); 2712 return getTOCEntry(DAG, SDLoc(BASDN), Subtarget.isPPC64(), GA); 2713 } 2714 2715 unsigned MOHiFlag, MOLoFlag; 2716 bool IsPIC = isPositionIndependent(); 2717 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2718 SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag); 2719 SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag); 2720 return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG); 2721 } 2722 2723 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, 2724 SelectionDAG &DAG) const { 2725 // FIXME: TLS addresses currently use medium model code sequences, 2726 // which is the most useful form. Eventually support for small and 2727 // large models could be added if users need it, at the cost of 2728 // additional complexity. 2729 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2730 if (DAG.getTarget().useEmulatedTLS()) 2731 return LowerToTLSEmulatedModel(GA, DAG); 2732 2733 SDLoc dl(GA); 2734 const GlobalValue *GV = GA->getGlobal(); 2735 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2736 bool is64bit = Subtarget.isPPC64(); 2737 const Module *M = DAG.getMachineFunction().getFunction().getParent(); 2738 PICLevel::Level picLevel = M->getPICLevel(); 2739 2740 TLSModel::Model Model = getTargetMachine().getTLSModel(GV); 2741 2742 if (Model == TLSModel::LocalExec) { 2743 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2744 PPCII::MO_TPREL_HA); 2745 SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2746 PPCII::MO_TPREL_LO); 2747 SDValue TLSReg = is64bit ? 
DAG.getRegister(PPC::X13, MVT::i64) 2748 : DAG.getRegister(PPC::R2, MVT::i32); 2749 2750 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg); 2751 return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi); 2752 } 2753 2754 if (Model == TLSModel::InitialExec) { 2755 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2756 SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2757 PPCII::MO_TLS); 2758 SDValue GOTPtr; 2759 if (is64bit) { 2760 setUsesTOCBasePtr(DAG); 2761 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2762 GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, 2763 PtrVT, GOTReg, TGA); 2764 } else 2765 GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT); 2766 SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, 2767 PtrVT, TGA, GOTPtr); 2768 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS); 2769 } 2770 2771 if (Model == TLSModel::GeneralDynamic) { 2772 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2773 SDValue GOTPtr; 2774 if (is64bit) { 2775 setUsesTOCBasePtr(DAG); 2776 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2777 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT, 2778 GOTReg, TGA); 2779 } else { 2780 if (picLevel == PICLevel::SmallPIC) 2781 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2782 else 2783 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2784 } 2785 return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT, 2786 GOTPtr, TGA, TGA); 2787 } 2788 2789 if (Model == TLSModel::LocalDynamic) { 2790 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2791 SDValue GOTPtr; 2792 if (is64bit) { 2793 setUsesTOCBasePtr(DAG); 2794 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2795 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT, 2796 GOTReg, TGA); 2797 } else { 2798 if (picLevel == PICLevel::SmallPIC) 2799 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2800 else 2801 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2802 } 2803 SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl, 2804 PtrVT, GOTPtr, TGA, TGA); 2805 SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl, 2806 PtrVT, TLSAddr, TGA); 2807 return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA); 2808 } 2809 2810 llvm_unreachable("Unknown TLS model!"); 2811 } 2812 2813 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op, 2814 SelectionDAG &DAG) const { 2815 EVT PtrVT = Op.getValueType(); 2816 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); 2817 SDLoc DL(GSDN); 2818 const GlobalValue *GV = GSDN->getGlobal(); 2819 2820 // 64-bit SVR4 ABI code is always position-independent. 2821 // The actual address of the GlobalValue is stored in the TOC. 
2822 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2823 setUsesTOCBasePtr(DAG); 2824 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset()); 2825 return getTOCEntry(DAG, DL, true, GA); 2826 } 2827 2828 unsigned MOHiFlag, MOLoFlag; 2829 bool IsPIC = isPositionIndependent(); 2830 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV); 2831 2832 if (IsPIC && Subtarget.isSVR4ABI()) { 2833 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 2834 GSDN->getOffset(), 2835 PPCII::MO_PIC_FLAG); 2836 return getTOCEntry(DAG, DL, false, GA); 2837 } 2838 2839 SDValue GAHi = 2840 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag); 2841 SDValue GALo = 2842 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag); 2843 2844 SDValue Ptr = LowerLabelRef(GAHi, GALo, IsPIC, DAG); 2845 2846 // If the global reference is actually to a non-lazy-pointer, we have to do an 2847 // extra load to get the address of the global. 2848 if (MOHiFlag & PPCII::MO_NLP_FLAG) 2849 Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo()); 2850 return Ptr; 2851 } 2852 2853 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 2854 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 2855 SDLoc dl(Op); 2856 2857 if (Op.getValueType() == MVT::v2i64) { 2858 // When the operands themselves are v2i64 values, we need to do something 2859 // special because VSX has no underlying comparison operations for these. 2860 if (Op.getOperand(0).getValueType() == MVT::v2i64) { 2861 // Equality can be handled by casting to the legal type for Altivec 2862 // comparisons, everything else needs to be expanded. 2863 if (CC == ISD::SETEQ || CC == ISD::SETNE) { 2864 return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, 2865 DAG.getSetCC(dl, MVT::v4i32, 2866 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)), 2867 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)), 2868 CC)); 2869 } 2870 2871 return SDValue(); 2872 } 2873 2874 // We handle most of these in the usual way. 2875 return Op; 2876 } 2877 2878 // If we're comparing for equality to zero, expose the fact that this is 2879 // implemented as a ctlz/srl pair on ppc, so that the dag combiner can 2880 // fold the new nodes. 2881 if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG)) 2882 return V; 2883 2884 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2885 // Leave comparisons against 0 and -1 alone for now, since they're usually 2886 // optimized. FIXME: revisit this when we can custom lower all setcc 2887 // optimizations. 2888 if (C->isAllOnesValue() || C->isNullValue()) 2889 return SDValue(); 2890 } 2891 2892 // If we have an integer seteq/setne, turn it into a compare against zero 2893 // by xor'ing the rhs with the lhs, which is faster than setting a 2894 // condition register, reading it back out, and masking the correct bit. The 2895 // normal approach here uses sub to do this instead of xor. Using xor exposes 2896 // the result to other bit-twiddling opportunities. 
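  // For example (an illustrative sketch): (seteq %a, %b) becomes
  // (seteq (xor %a, %b), 0), so a later combine that also needs
  // (xor %a, %b) can reuse the same node rather than a subtraction.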
  EVT LHSVT = Op.getOperand(0).getValueType();
  if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    EVT VT = Op.getValueType();
    SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
                              Op.getOperand(1));
    return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
  }
  return SDValue();
}

SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc dl(Node);

  assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");

  // gpr_index
  SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
                                    VAListPtr, MachinePointerInfo(SV), MVT::i8);
  InChain = GprIndex.getValue(1);

  if (VT == MVT::i64) {
    // Check if GprIndex is even.
    SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
                                 DAG.getConstant(1, dl, MVT::i32));
    SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
                                DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
    SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
                                          DAG.getConstant(1, dl, MVT::i32));
    // Align GprIndex to be even if it isn't.
    GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
                           GprIndex);
  }

  // The fpr index is stored 1 byte after the gpr index.
  SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
                               DAG.getConstant(1, dl, MVT::i32));

  // fpr
  SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
                                    FprPtr, MachinePointerInfo(SV), MVT::i8);
  InChain = FprIndex.getValue(1);

  SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
                                       DAG.getConstant(8, dl, MVT::i32));

  SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
                                        DAG.getConstant(4, dl, MVT::i32));

  // areas
  SDValue OverflowArea =
      DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
  InChain = OverflowArea.getValue(1);

  SDValue RegSaveArea =
      DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
  InChain = RegSaveArea.getValue(1);

  // Select overflow_area if index >= 8.
  SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
                            DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);

  // Adjustment constant: gpr_index * 4 (or fpr_index * 8).
  SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
                                    VT.isInteger() ? GprIndex : FprIndex,
                                    DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
                                                    MVT::i32));

  // OurReg = RegSaveArea + RegConstant
  SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
                               RegConstant);

  // Floating-point types start 32 bytes into the RegSaveArea, past the
  // eight 4-byte GPR slots.
  if (VT.isFloatingPoint())
    OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
                         DAG.getConstant(32, dl, MVT::i32));

  // Increase {f,g}pr_index by 1 (or 2 if VT is i64).
  SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
                                   VT.isInteger() ? GprIndex : FprIndex,
                                   DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
                                                   MVT::i32));

  InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
                              VT.isInteger() ?
VAListPtr : FprPtr, 2987 MachinePointerInfo(SV), MVT::i8); 2988 2989 // determine if we should load from reg_save_area or overflow_area 2990 SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea); 2991 2992 // increase overflow_area by 4/8 if gpr/fpr > 8 2993 SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea, 2994 DAG.getConstant(VT.isInteger() ? 4 : 8, 2995 dl, MVT::i32)); 2996 2997 OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea, 2998 OverflowAreaPlusN); 2999 3000 InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr, 3001 MachinePointerInfo(), MVT::i32); 3002 3003 return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo()); 3004 } 3005 3006 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const { 3007 assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only"); 3008 3009 // We have to copy the entire va_list struct: 3010 // 2*sizeof(char) + 2 Byte alignment + 2*sizeof(char*) = 12 Byte 3011 return DAG.getMemcpy(Op.getOperand(0), Op, 3012 Op.getOperand(1), Op.getOperand(2), 3013 DAG.getConstant(12, SDLoc(Op), MVT::i32), 8, false, true, 3014 false, MachinePointerInfo(), MachinePointerInfo()); 3015 } 3016 3017 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op, 3018 SelectionDAG &DAG) const { 3019 return Op.getOperand(0); 3020 } 3021 3022 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, 3023 SelectionDAG &DAG) const { 3024 SDValue Chain = Op.getOperand(0); 3025 SDValue Trmp = Op.getOperand(1); // trampoline 3026 SDValue FPtr = Op.getOperand(2); // nested function 3027 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 3028 SDLoc dl(Op); 3029 3030 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3031 bool isPPC64 = (PtrVT == MVT::i64); 3032 Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext()); 3033 3034 TargetLowering::ArgListTy Args; 3035 TargetLowering::ArgListEntry Entry; 3036 3037 Entry.Ty = IntPtrTy; 3038 Entry.Node = Trmp; Args.push_back(Entry); 3039 3040 // TrampSize == (isPPC64 ? 48 : 40); 3041 Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl, 3042 isPPC64 ? MVT::i64 : MVT::i32); 3043 Args.push_back(Entry); 3044 3045 Entry.Node = FPtr; Args.push_back(Entry); 3046 Entry.Node = Nest; Args.push_back(Entry); 3047 3048 // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg) 3049 TargetLowering::CallLoweringInfo CLI(DAG); 3050 CLI.setDebugLoc(dl).setChain(Chain).setLibCallee( 3051 CallingConv::C, Type::getVoidTy(*DAG.getContext()), 3052 DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args)); 3053 3054 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 3055 return CallResult.second; 3056 } 3057 3058 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { 3059 MachineFunction &MF = DAG.getMachineFunction(); 3060 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3061 EVT PtrVT = getPointerTy(MF.getDataLayout()); 3062 3063 SDLoc dl(Op); 3064 3065 if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) { 3066 // vastart just stores the address of the VarArgsFrameIndex slot into the 3067 // memory location argument. 3068 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 3069 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 3070 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 3071 MachinePointerInfo(SV)); 3072 } 3073 3074 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct. 
3075 // We suppose the given va_list is already allocated. 3076 // 3077 // typedef struct { 3078 // char gpr; /* index into the array of 8 GPRs 3079 // * stored in the register save area 3080 // * gpr=0 corresponds to r3, 3081 // * gpr=1 to r4, etc. 3082 // */ 3083 // char fpr; /* index into the array of 8 FPRs 3084 // * stored in the register save area 3085 // * fpr=0 corresponds to f1, 3086 // * fpr=1 to f2, etc. 3087 // */ 3088 // char *overflow_arg_area; 3089 // /* location on stack that holds 3090 // * the next overflow argument 3091 // */ 3092 // char *reg_save_area; 3093 // /* where r3:r10 and f1:f8 (if saved) 3094 // * are stored 3095 // */ 3096 // } va_list[1]; 3097 3098 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32); 3099 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32); 3100 SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(), 3101 PtrVT); 3102 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 3103 PtrVT); 3104 3105 uint64_t FrameOffset = PtrVT.getSizeInBits()/8; 3106 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT); 3107 3108 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; 3109 SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT); 3110 3111 uint64_t FPROffset = 1; 3112 SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT); 3113 3114 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 3115 3116 // Store first byte : number of int regs 3117 SDValue firstStore = 3118 DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1), 3119 MachinePointerInfo(SV), MVT::i8); 3120 uint64_t nextOffset = FPROffset; 3121 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1), 3122 ConstFPROffset); 3123 3124 // Store second byte : number of float regs 3125 SDValue secondStore = 3126 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, 3127 MachinePointerInfo(SV, nextOffset), MVT::i8); 3128 nextOffset += StackOffset; 3129 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset); 3130 3131 // Store second word : arguments given on stack 3132 SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, 3133 MachinePointerInfo(SV, nextOffset)); 3134 nextOffset += FrameOffset; 3135 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset); 3136 3137 // Store third word : arguments given in registers 3138 return DAG.getStore(thirdStore, dl, FR, nextPtr, 3139 MachinePointerInfo(SV, nextOffset)); 3140 } 3141 3142 #include "PPCGenCallingConv.inc" 3143 3144 // Function whose sole purpose is to kill compiler warnings 3145 // stemming from unused functions included from PPCGenCallingConv.inc. 3146 CCAssignFn *PPCTargetLowering::useFastISelCCs(unsigned Flag) const { 3147 return Flag ? 
CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS;
}

bool llvm::CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                      CCValAssign::LocInfo &LocInfo,
                                      ISD::ArgFlagsTy &ArgFlags,
                                      CCState &State) {
  return true;
}

bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
                                             MVT &LocVT,
                                             CCValAssign::LocInfo &LocInfo,
                                             ISD::ArgFlagsTy &ArgFlags,
                                             CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  const unsigned NumArgRegs = array_lengthof(ArgRegs);

  unsigned RegNum = State.getFirstUnallocated(ArgRegs);

  // Skip one register if the first unallocated register has an even register
  // number and there are still argument registers available which have not
  // been allocated yet. RegNum is actually an index into ArgRegs, which means
  // we need to skip a register if RegNum is odd.
  if (RegNum != NumArgRegs && RegNum % 2 == 1) {
    State.AllocateReg(ArgRegs[RegNum]);
  }

  // Always return false here, as this function only makes sure that the first
  // unallocated register has an odd register number and does not actually
  // allocate a register for the current argument.
  return false;
}

bool
llvm::CC_PPC32_SVR4_Custom_SkipLastArgRegsPPCF128(unsigned &ValNo, MVT &ValVT,
                                                  MVT &LocVT,
                                                  CCValAssign::LocInfo &LocInfo,
                                                  ISD::ArgFlagsTy &ArgFlags,
                                                  CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  const unsigned NumArgRegs = array_lengthof(ArgRegs);

  unsigned RegNum = State.getFirstUnallocated(ArgRegs);
  int RegsLeft = NumArgRegs - RegNum;

  // Skip the remaining registers if there are not enough of them left for a
  // long double (4 GPRs in soft-float mode), so that the long double argument
  // is passed on the stack instead.
  if (RegNum != NumArgRegs && RegsLeft < 4) {
    for (int i = 0; i < RegsLeft; i++) {
      State.AllocateReg(ArgRegs[RegNum + i]);
    }
  }

  return false;
}

bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
                                               MVT &LocVT,
                                               CCValAssign::LocInfo &LocInfo,
                                               ISD::ArgFlagsTy &ArgFlags,
                                               CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
    PPC::F8
  };

  const unsigned NumArgRegs = array_lengthof(ArgRegs);

  unsigned RegNum = State.getFirstUnallocated(ArgRegs);

  // If there is only one floating-point register left, we need to put both
  // f64 values of a split ppc_fp128 value on the stack.
  if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) {
    State.AllocateReg(ArgRegs[RegNum]);
  }

  // Always return false here, as this function only makes sure that the two
  // f64 values a ppc_fp128 value is split into are either both passed in
  // registers or both passed on the stack, and does not actually allocate a
  // register for the current argument.
  return false;
}

/// FPR - The set of FP registers that should be allocated for arguments
/// on Darwin.
3239 static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, 3240 PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10, 3241 PPC::F11, PPC::F12, PPC::F13}; 3242 3243 /// QFPR - The set of QPX registers that should be allocated for arguments. 3244 static const MCPhysReg QFPR[] = { 3245 PPC::QF1, PPC::QF2, PPC::QF3, PPC::QF4, PPC::QF5, PPC::QF6, PPC::QF7, 3246 PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13}; 3247 3248 /// CalculateStackSlotSize - Calculates the size reserved for this argument on 3249 /// the stack. 3250 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, 3251 unsigned PtrByteSize) { 3252 unsigned ArgSize = ArgVT.getStoreSize(); 3253 if (Flags.isByVal()) 3254 ArgSize = Flags.getByValSize(); 3255 3256 // Round up to multiples of the pointer size, except for array members, 3257 // which are always packed. 3258 if (!Flags.isInConsecutiveRegs()) 3259 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3260 3261 return ArgSize; 3262 } 3263 3264 /// CalculateStackSlotAlignment - Calculates the alignment of this argument 3265 /// on the stack. 3266 static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT, 3267 ISD::ArgFlagsTy Flags, 3268 unsigned PtrByteSize) { 3269 unsigned Align = PtrByteSize; 3270 3271 // Altivec parameters are padded to a 16 byte boundary. 3272 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 3273 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 3274 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 3275 ArgVT == MVT::v1i128 || ArgVT == MVT::f128) 3276 Align = 16; 3277 // QPX vector types stored in double-precision are padded to a 32 byte 3278 // boundary. 3279 else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1) 3280 Align = 32; 3281 3282 // ByVal parameters are aligned as requested. 3283 if (Flags.isByVal()) { 3284 unsigned BVAlign = Flags.getByValAlign(); 3285 if (BVAlign > PtrByteSize) { 3286 if (BVAlign % PtrByteSize != 0) 3287 llvm_unreachable( 3288 "ByVal alignment is not a multiple of the pointer size"); 3289 3290 Align = BVAlign; 3291 } 3292 } 3293 3294 // Array members are always packed to their original alignment. 3295 if (Flags.isInConsecutiveRegs()) { 3296 // If the array member was split into multiple registers, the first 3297 // needs to be aligned to the size of the full type. (Except for 3298 // ppcf128, which is only aligned as its f64 components.) 3299 if (Flags.isSplit() && OrigVT != MVT::ppcf128) 3300 Align = OrigVT.getStoreSize(); 3301 else 3302 Align = ArgVT.getStoreSize(); 3303 } 3304 3305 return Align; 3306 } 3307 3308 /// CalculateStackSlotUsed - Return whether this argument will use its 3309 /// stack slot (instead of being passed in registers). ArgOffset, 3310 /// AvailableFPRs, and AvailableVRs must hold the current argument 3311 /// position, and will be updated to account for this argument. 3312 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, 3313 ISD::ArgFlagsTy Flags, 3314 unsigned PtrByteSize, 3315 unsigned LinkageSize, 3316 unsigned ParamAreaSize, 3317 unsigned &ArgOffset, 3318 unsigned &AvailableFPRs, 3319 unsigned &AvailableVRs, bool HasQPX) { 3320 bool UseMemory = false; 3321 3322 // Respect alignment of argument on the stack. 3323 unsigned Align = 3324 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 3325 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 3326 // If there's no space left in the argument save area, we must 3327 // use memory (this check also catches zero-sized arguments). 
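  // Worked example, assuming the 64-bit ELFv2 values LinkageSize == 32 and
  // PtrByteSize == 8 (so ParamAreaSize == 64): after eight doubleword
  // arguments, ArgOffset == 32 + 64 == 96, so a ninth integer argument
  // trips the check below and must be assigned to memory.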
3328 if (ArgOffset >= LinkageSize + ParamAreaSize) 3329 UseMemory = true; 3330 3331 // Allocate argument on the stack. 3332 ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 3333 if (Flags.isInConsecutiveRegsLast()) 3334 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3335 // If we overran the argument save area, we must use memory 3336 // (this check catches arguments passed partially in memory) 3337 if (ArgOffset > LinkageSize + ParamAreaSize) 3338 UseMemory = true; 3339 3340 // However, if the argument is actually passed in an FPR or a VR, 3341 // we don't use memory after all. 3342 if (!Flags.isByVal()) { 3343 if (ArgVT == MVT::f32 || ArgVT == MVT::f64 || 3344 // QPX registers overlap with the scalar FP registers. 3345 (HasQPX && (ArgVT == MVT::v4f32 || 3346 ArgVT == MVT::v4f64 || 3347 ArgVT == MVT::v4i1))) 3348 if (AvailableFPRs > 0) { 3349 --AvailableFPRs; 3350 return false; 3351 } 3352 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 3353 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 3354 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 3355 ArgVT == MVT::v1i128 || ArgVT == MVT::f128) 3356 if (AvailableVRs > 0) { 3357 --AvailableVRs; 3358 return false; 3359 } 3360 } 3361 3362 return UseMemory; 3363 } 3364 3365 /// EnsureStackAlignment - Round stack frame size up from NumBytes to 3366 /// ensure minimum alignment required for target. 3367 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering, 3368 unsigned NumBytes) { 3369 unsigned TargetAlign = Lowering->getStackAlignment(); 3370 unsigned AlignMask = TargetAlign - 1; 3371 NumBytes = (NumBytes + AlignMask) & ~AlignMask; 3372 return NumBytes; 3373 } 3374 3375 SDValue PPCTargetLowering::LowerFormalArguments( 3376 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3377 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3378 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3379 if (Subtarget.isSVR4ABI()) { 3380 if (Subtarget.isPPC64()) 3381 return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, 3382 dl, DAG, InVals); 3383 else 3384 return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, 3385 dl, DAG, InVals); 3386 } else { 3387 return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, 3388 dl, DAG, InVals); 3389 } 3390 } 3391 3392 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4( 3393 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3394 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3395 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3396 3397 // 32-bit SVR4 ABI Stack Frame Layout: 3398 // +-----------------------------------+ 3399 // +--> | Back chain | 3400 // | +-----------------------------------+ 3401 // | | Floating-point register save area | 3402 // | +-----------------------------------+ 3403 // | | General register save area | 3404 // | +-----------------------------------+ 3405 // | | CR save word | 3406 // | +-----------------------------------+ 3407 // | | VRSAVE save word | 3408 // | +-----------------------------------+ 3409 // | | Alignment padding | 3410 // | +-----------------------------------+ 3411 // | | Vector register save area | 3412 // | +-----------------------------------+ 3413 // | | Local variable space | 3414 // | +-----------------------------------+ 3415 // | | Parameter list area | 3416 // | +-----------------------------------+ 3417 // | | LR save word | 3418 // | +-----------------------------------+ 3419 // SP--> +--- | Back chain | 3420 // 
+-----------------------------------+ 3421 // 3422 // Specifications: 3423 // System V Application Binary Interface PowerPC Processor Supplement 3424 // AltiVec Technology Programming Interface Manual 3425 3426 MachineFunction &MF = DAG.getMachineFunction(); 3427 MachineFrameInfo &MFI = MF.getFrameInfo(); 3428 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3429 3430 EVT PtrVT = getPointerTy(MF.getDataLayout()); 3431 // Potential tail calls could cause overwriting of argument stack slots. 3432 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 3433 (CallConv == CallingConv::Fast)); 3434 unsigned PtrByteSize = 4; 3435 3436 // Assign locations to all of the incoming arguments. 3437 SmallVector<CCValAssign, 16> ArgLocs; 3438 PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 3439 *DAG.getContext()); 3440 3441 // Reserve space for the linkage area on the stack. 3442 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3443 CCInfo.AllocateStack(LinkageSize, PtrByteSize); 3444 if (useSoftFloat() || hasSPE()) 3445 CCInfo.PreAnalyzeFormalArguments(Ins); 3446 3447 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4); 3448 CCInfo.clearWasPPCF128(); 3449 3450 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 3451 CCValAssign &VA = ArgLocs[i]; 3452 3453 // Arguments stored in registers. 3454 if (VA.isRegLoc()) { 3455 const TargetRegisterClass *RC; 3456 EVT ValVT = VA.getValVT(); 3457 3458 switch (ValVT.getSimpleVT().SimpleTy) { 3459 default: 3460 llvm_unreachable("ValVT not supported by formal arguments Lowering"); 3461 case MVT::i1: 3462 case MVT::i32: 3463 RC = &PPC::GPRCRegClass; 3464 break; 3465 case MVT::f32: 3466 if (Subtarget.hasP8Vector()) 3467 RC = &PPC::VSSRCRegClass; 3468 else if (Subtarget.hasSPE()) 3469 RC = &PPC::SPE4RCRegClass; 3470 else 3471 RC = &PPC::F4RCRegClass; 3472 break; 3473 case MVT::f64: 3474 if (Subtarget.hasVSX()) 3475 RC = &PPC::VSFRCRegClass; 3476 else if (Subtarget.hasSPE()) 3477 RC = &PPC::SPERCRegClass; 3478 else 3479 RC = &PPC::F8RCRegClass; 3480 break; 3481 case MVT::v16i8: 3482 case MVT::v8i16: 3483 case MVT::v4i32: 3484 RC = &PPC::VRRCRegClass; 3485 break; 3486 case MVT::v4f32: 3487 RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass; 3488 break; 3489 case MVT::v2f64: 3490 case MVT::v2i64: 3491 RC = &PPC::VRRCRegClass; 3492 break; 3493 case MVT::v4f64: 3494 RC = &PPC::QFRCRegClass; 3495 break; 3496 case MVT::v4i1: 3497 RC = &PPC::QBRCRegClass; 3498 break; 3499 } 3500 3501 // Transform the arguments stored in physical registers into virtual ones. 3502 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 3503 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, 3504 ValVT == MVT::i1 ? MVT::i32 : ValVT); 3505 3506 if (ValVT == MVT::i1) 3507 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue); 3508 3509 InVals.push_back(ArgValue); 3510 } else { 3511 // Argument stored in memory. 3512 assert(VA.isMemLoc()); 3513 3514 // Get the extended size of the argument type in stack 3515 unsigned ArgSize = VA.getLocVT().getStoreSize(); 3516 // Get the actual size of the argument type 3517 unsigned ObjSize = VA.getValVT().getStoreSize(); 3518 unsigned ArgOffset = VA.getLocMemOffset(); 3519 // Stack objects in PPC32 are right justified. 3520 ArgOffset += ArgSize - ObjSize; 3521 int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, isImmutable); 3522 3523 // Create load nodes to retrieve arguments from the stack. 
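      // (Illustrative sketch for a promoted sub-word argument: with
      // ArgSize == 4 and ObjSize == 1, the offset above is biased by 3, so
      // the load below reads the least-significant byte of the big-endian
      // stack slot.)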
3524 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3525 InVals.push_back( 3526 DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo())); 3527 } 3528 } 3529 3530 // Assign locations to all of the incoming aggregate by value arguments. 3531 // Aggregates passed by value are stored in the local variable space of the 3532 // caller's stack frame, right above the parameter list area. 3533 SmallVector<CCValAssign, 16> ByValArgLocs; 3534 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), 3535 ByValArgLocs, *DAG.getContext()); 3536 3537 // Reserve stack space for the allocations in CCInfo. 3538 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 3539 3540 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal); 3541 3542 // Area that is at least reserved in the caller of this function. 3543 unsigned MinReservedArea = CCByValInfo.getNextStackOffset(); 3544 MinReservedArea = std::max(MinReservedArea, LinkageSize); 3545 3546 // Set the size that is at least reserved in caller of this function. Tail 3547 // call optimized function's reserved stack space needs to be aligned so that 3548 // taking the difference between two stack areas will result in an aligned 3549 // stack. 3550 MinReservedArea = 3551 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 3552 FuncInfo->setMinReservedArea(MinReservedArea); 3553 3554 SmallVector<SDValue, 8> MemOps; 3555 3556 // If the function takes variable number of arguments, make a frame index for 3557 // the start of the first vararg value... for expansion of llvm.va_start. 3558 if (isVarArg) { 3559 static const MCPhysReg GPArgRegs[] = { 3560 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 3561 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 3562 }; 3563 const unsigned NumGPArgRegs = array_lengthof(GPArgRegs); 3564 3565 static const MCPhysReg FPArgRegs[] = { 3566 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 3567 PPC::F8 3568 }; 3569 unsigned NumFPArgRegs = array_lengthof(FPArgRegs); 3570 3571 if (useSoftFloat() || hasSPE()) 3572 NumFPArgRegs = 0; 3573 3574 FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs)); 3575 FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs)); 3576 3577 // Make room for NumGPArgRegs and NumFPArgRegs. 3578 int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 + 3579 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8; 3580 3581 FuncInfo->setVarArgsStackOffset( 3582 MFI.CreateFixedObject(PtrVT.getSizeInBits()/8, 3583 CCInfo.getNextStackOffset(), true)); 3584 3585 FuncInfo->setVarArgsFrameIndex(MFI.CreateStackObject(Depth, 8, false)); 3586 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 3587 3588 // The fixed integer arguments of a variadic function are stored to the 3589 // VarArgsFrameIndex on the stack so that they may be loaded by 3590 // dereferencing the result of va_next. 3591 for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) { 3592 // Get an existing live-in vreg, or add a new one. 
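      // (getLiveInVirtReg returns 0 when no live-in copy of the physical
      // register exists yet, hence the addLiveIn fallback below.)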
3593 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]); 3594 if (!VReg) 3595 VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass); 3596 3597 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3598 SDValue Store = 3599 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 3600 MemOps.push_back(Store); 3601 // Increment the address by four for the next argument to store 3602 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT); 3603 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3604 } 3605 3606 // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6 3607 // is set. 3608 // The double arguments are stored to the VarArgsFrameIndex 3609 // on the stack. 3610 for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) { 3611 // Get an existing live-in vreg, or add a new one. 3612 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]); 3613 if (!VReg) 3614 VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass); 3615 3616 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64); 3617 SDValue Store = 3618 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 3619 MemOps.push_back(Store); 3620 // Increment the address by eight for the next argument to store 3621 SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl, 3622 PtrVT); 3623 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3624 } 3625 } 3626 3627 if (!MemOps.empty()) 3628 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 3629 3630 return Chain; 3631 } 3632 3633 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3634 // value to MVT::i64 and then truncate to the correct register size. 3635 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, 3636 EVT ObjectVT, SelectionDAG &DAG, 3637 SDValue ArgVal, 3638 const SDLoc &dl) const { 3639 if (Flags.isSExt()) 3640 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal, 3641 DAG.getValueType(ObjectVT)); 3642 else if (Flags.isZExt()) 3643 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal, 3644 DAG.getValueType(ObjectVT)); 3645 3646 return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal); 3647 } 3648 3649 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4( 3650 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3651 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3652 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3653 // TODO: add description of PPC stack frame format, or at least some docs. 3654 // 3655 bool isELFv2ABI = Subtarget.isELFv2ABI(); 3656 bool isLittleEndian = Subtarget.isLittleEndian(); 3657 MachineFunction &MF = DAG.getMachineFunction(); 3658 MachineFrameInfo &MFI = MF.getFrameInfo(); 3659 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3660 3661 assert(!(CallConv == CallingConv::Fast && isVarArg) && 3662 "fastcc not supported on varargs functions"); 3663 3664 EVT PtrVT = getPointerTy(MF.getDataLayout()); 3665 // Potential tail calls could cause overwriting of argument stack slots. 
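  // (A sketch of the rationale: under GuaranteedTailCallOpt with fastcc, a
  // tail call may reuse and rewrite this function's own incoming argument
  // area, so the fixed stack objects created below must not be marked
  // immutable.)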
3666 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 3667 (CallConv == CallingConv::Fast)); 3668 unsigned PtrByteSize = 8; 3669 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3670 3671 static const MCPhysReg GPR[] = { 3672 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 3673 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 3674 }; 3675 static const MCPhysReg VR[] = { 3676 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 3677 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 3678 }; 3679 3680 const unsigned Num_GPR_Regs = array_lengthof(GPR); 3681 const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13; 3682 const unsigned Num_VR_Regs = array_lengthof(VR); 3683 const unsigned Num_QFPR_Regs = Num_FPR_Regs; 3684 3685 // Do a first pass over the arguments to determine whether the ABI 3686 // guarantees that our caller has allocated the parameter save area 3687 // on its stack frame. In the ELFv1 ABI, this is always the case; 3688 // in the ELFv2 ABI, it is true if this is a vararg function or if 3689 // any parameter is located in a stack slot. 3690 3691 bool HasParameterArea = !isELFv2ABI || isVarArg; 3692 unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize; 3693 unsigned NumBytes = LinkageSize; 3694 unsigned AvailableFPRs = Num_FPR_Regs; 3695 unsigned AvailableVRs = Num_VR_Regs; 3696 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 3697 if (Ins[i].Flags.isNest()) 3698 continue; 3699 3700 if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags, 3701 PtrByteSize, LinkageSize, ParamAreaSize, 3702 NumBytes, AvailableFPRs, AvailableVRs, 3703 Subtarget.hasQPX())) 3704 HasParameterArea = true; 3705 } 3706 3707 // Add DAG nodes to load the arguments or copy them out of registers. On 3708 // entry to a function on PPC, the arguments start after the linkage area, 3709 // although the first ones are often in registers. 3710 3711 unsigned ArgOffset = LinkageSize; 3712 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 3713 unsigned &QFPR_idx = FPR_idx; 3714 SmallVector<SDValue, 8> MemOps; 3715 Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin(); 3716 unsigned CurArgIdx = 0; 3717 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 3718 SDValue ArgVal; 3719 bool needsLoad = false; 3720 EVT ObjectVT = Ins[ArgNo].VT; 3721 EVT OrigVT = Ins[ArgNo].ArgVT; 3722 unsigned ObjSize = ObjectVT.getStoreSize(); 3723 unsigned ArgSize = ObjSize; 3724 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 3725 if (Ins[ArgNo].isOrigArg()) { 3726 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); 3727 CurArgIdx = Ins[ArgNo].getOrigArgIndex(); 3728 } 3729 // We re-align the argument offset for each argument, except when using the 3730 // fast calling convention, when we need to make sure we do that only when 3731 // we'll actually use a stack slot. 3732 unsigned CurArgOffset, Align; 3733 auto ComputeArgOffset = [&]() { 3734 /* Respect alignment of argument on the stack. */ 3735 Align = CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize); 3736 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 3737 CurArgOffset = ArgOffset; 3738 }; 3739 3740 if (CallConv != CallingConv::Fast) { 3741 ComputeArgOffset(); 3742 3743 /* Compute GPR index associated with argument offset. */ 3744 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 3745 GPR_idx = std::min(GPR_idx, Num_GPR_Regs); 3746 } 3747 3748 // FIXME the codegen can be much improved in some cases. 3749 // We do not have to keep everything in memory. 
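    // Example of the byval sizing below (a sketch with a hypothetical type):
    // for `struct S { char c[13]; }` passed byval, ObjSize == 13 and ArgSize
    // rounds up to 16, i.e. two doublewords and, if available, two GPRs.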
3750 if (Flags.isByVal()) { 3751 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit"); 3752 3753 if (CallConv == CallingConv::Fast) 3754 ComputeArgOffset(); 3755 3756 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 3757 ObjSize = Flags.getByValSize(); 3758 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3759 // Empty aggregate parameters do not take up registers. Examples: 3760 // struct { } a; 3761 // union { } b; 3762 // int c[0]; 3763 // etc. However, we have to provide a place-holder in InVals, so 3764 // pretend we have an 8-byte item at the current address for that 3765 // purpose. 3766 if (!ObjSize) { 3767 int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true); 3768 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3769 InVals.push_back(FIN); 3770 continue; 3771 } 3772 3773 // Create a stack object covering all stack doublewords occupied 3774 // by the argument. If the argument is (fully or partially) on 3775 // the stack, or if the argument is fully in registers but the 3776 // caller has allocated the parameter save anyway, we can refer 3777 // directly to the caller's stack frame. Otherwise, create a 3778 // local copy in our own frame. 3779 int FI; 3780 if (HasParameterArea || 3781 ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize) 3782 FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true); 3783 else 3784 FI = MFI.CreateStackObject(ArgSize, Align, false); 3785 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3786 3787 // Handle aggregates smaller than 8 bytes. 3788 if (ObjSize < PtrByteSize) { 3789 // The value of the object is its address, which differs from the 3790 // address of the enclosing doubleword on big-endian systems. 3791 SDValue Arg = FIN; 3792 if (!isLittleEndian) { 3793 SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT); 3794 Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff); 3795 } 3796 InVals.push_back(Arg); 3797 3798 if (GPR_idx != Num_GPR_Regs) { 3799 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3800 FuncInfo->addLiveInAttr(VReg, Flags); 3801 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3802 SDValue Store; 3803 3804 if (ObjSize==1 || ObjSize==2 || ObjSize==4) { 3805 EVT ObjType = (ObjSize == 1 ? MVT::i8 : 3806 (ObjSize == 2 ? MVT::i16 : MVT::i32)); 3807 Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg, 3808 MachinePointerInfo(&*FuncArg), ObjType); 3809 } else { 3810 // For sizes that don't fit a truncating store (3, 5, 6, 7), 3811 // store the whole register as-is to the parameter save area 3812 // slot. 3813 Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 3814 MachinePointerInfo(&*FuncArg)); 3815 } 3816 3817 MemOps.push_back(Store); 3818 } 3819 // Whether we copied from a register or not, advance the offset 3820 // into the parameter save area by a full doubleword. 3821 ArgOffset += PtrByteSize; 3822 continue; 3823 } 3824 3825 // The value of the object is its address, which is the address of 3826 // its first stack doubleword. 3827 InVals.push_back(FIN); 3828 3829 // Store whatever pieces of the object are in registers to memory. 
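    // (E.g., a 24-byte aggregate arriving with only two GPRs still free
    // stores those two doublewords at offsets 0 and 8; the last doubleword
    // was already placed in the parameter save area by the caller.)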
3830 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 3831 if (GPR_idx == Num_GPR_Regs) 3832 break; 3833 3834 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3835 FuncInfo->addLiveInAttr(VReg, Flags); 3836 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3837 SDValue Addr = FIN; 3838 if (j) { 3839 SDValue Off = DAG.getConstant(j, dl, PtrVT); 3840 Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off); 3841 } 3842 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr, 3843 MachinePointerInfo(&*FuncArg, j)); 3844 MemOps.push_back(Store); 3845 ++GPR_idx; 3846 } 3847 ArgOffset += ArgSize; 3848 continue; 3849 } 3850 3851 switch (ObjectVT.getSimpleVT().SimpleTy) { 3852 default: llvm_unreachable("Unhandled argument type!"); 3853 case MVT::i1: 3854 case MVT::i32: 3855 case MVT::i64: 3856 if (Flags.isNest()) { 3857 // The 'nest' parameter, if any, is passed in R11. 3858 unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass); 3859 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3860 3861 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3862 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3863 3864 break; 3865 } 3866 3867 // These can be scalar arguments or elements of an integer array type 3868 // passed directly. Clang may use those instead of "byval" aggregate 3869 // types to avoid forcing arguments to memory unnecessarily. 3870 if (GPR_idx != Num_GPR_Regs) { 3871 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3872 FuncInfo->addLiveInAttr(VReg, Flags); 3873 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3874 3875 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3876 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3877 // value to MVT::i64 and then truncate to the correct register size. 3878 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3879 } else { 3880 if (CallConv == CallingConv::Fast) 3881 ComputeArgOffset(); 3882 3883 needsLoad = true; 3884 ArgSize = PtrByteSize; 3885 } 3886 if (CallConv != CallingConv::Fast || needsLoad) 3887 ArgOffset += 8; 3888 break; 3889 3890 case MVT::f32: 3891 case MVT::f64: 3892 // These can be scalar arguments or elements of a float array type 3893 // passed directly. The latter are used to implement ELFv2 homogenous 3894 // float aggregates. 3895 if (FPR_idx != Num_FPR_Regs) { 3896 unsigned VReg; 3897 3898 if (ObjectVT == MVT::f32) 3899 VReg = MF.addLiveIn(FPR[FPR_idx], 3900 Subtarget.hasP8Vector() 3901 ? &PPC::VSSRCRegClass 3902 : &PPC::F4RCRegClass); 3903 else 3904 VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX() 3905 ? &PPC::VSFRCRegClass 3906 : &PPC::F8RCRegClass); 3907 3908 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3909 ++FPR_idx; 3910 } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) { 3911 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8 3912 // once we support fp <-> gpr moves. 3913 3914 // This can only ever happen in the presence of f32 array types, 3915 // since otherwise we never run out of FPRs before running out 3916 // of GPRs. 3917 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3918 FuncInfo->addLiveInAttr(VReg, Flags); 3919 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3920 3921 if (ObjectVT == MVT::f32) { 3922 if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 
4 : 0)) 3923 ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal, 3924 DAG.getConstant(32, dl, MVT::i32)); 3925 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal); 3926 } 3927 3928 ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal); 3929 } else { 3930 if (CallConv == CallingConv::Fast) 3931 ComputeArgOffset(); 3932 3933 needsLoad = true; 3934 } 3935 3936 // When passing an array of floats, the array occupies consecutive 3937 // space in the argument area; only round up to the next doubleword 3938 // at the end of the array. Otherwise, each float takes 8 bytes. 3939 if (CallConv != CallingConv::Fast || needsLoad) { 3940 ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize; 3941 ArgOffset += ArgSize; 3942 if (Flags.isInConsecutiveRegsLast()) 3943 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3944 } 3945 break; 3946 case MVT::v4f32: 3947 case MVT::v4i32: 3948 case MVT::v8i16: 3949 case MVT::v16i8: 3950 case MVT::v2f64: 3951 case MVT::v2i64: 3952 case MVT::v1i128: 3953 case MVT::f128: 3954 if (!Subtarget.hasQPX()) { 3955 // These can be scalar arguments or elements of a vector array type 3956 // passed directly. The latter are used to implement ELFv2 homogenous 3957 // vector aggregates. 3958 if (VR_idx != Num_VR_Regs) { 3959 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 3960 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3961 ++VR_idx; 3962 } else { 3963 if (CallConv == CallingConv::Fast) 3964 ComputeArgOffset(); 3965 needsLoad = true; 3966 } 3967 if (CallConv != CallingConv::Fast || needsLoad) 3968 ArgOffset += 16; 3969 break; 3970 } // not QPX 3971 3972 assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 && 3973 "Invalid QPX parameter type"); 3974 LLVM_FALLTHROUGH; 3975 3976 case MVT::v4f64: 3977 case MVT::v4i1: 3978 // QPX vectors are treated like their scalar floating-point subregisters 3979 // (except that they're larger). 3980 unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32; 3981 if (QFPR_idx != Num_QFPR_Regs) { 3982 const TargetRegisterClass *RC; 3983 switch (ObjectVT.getSimpleVT().SimpleTy) { 3984 case MVT::v4f64: RC = &PPC::QFRCRegClass; break; 3985 case MVT::v4f32: RC = &PPC::QSRCRegClass; break; 3986 default: RC = &PPC::QBRCRegClass; break; 3987 } 3988 3989 unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC); 3990 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3991 ++QFPR_idx; 3992 } else { 3993 if (CallConv == CallingConv::Fast) 3994 ComputeArgOffset(); 3995 needsLoad = true; 3996 } 3997 if (CallConv != CallingConv::Fast || needsLoad) 3998 ArgOffset += Sz; 3999 break; 4000 } 4001 4002 // We need to load the argument to a virtual register if we determined 4003 // above that we ran out of physical registers of the appropriate type. 4004 if (needsLoad) { 4005 if (ObjSize < ArgSize && !isLittleEndian) 4006 CurArgOffset += ArgSize - ObjSize; 4007 int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable); 4008 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4009 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo()); 4010 } 4011 4012 InVals.push_back(ArgVal); 4013 } 4014 4015 // Area that is at least reserved in the caller of this function. 4016 unsigned MinReservedArea; 4017 if (HasParameterArea) 4018 MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize); 4019 else 4020 MinReservedArea = LinkageSize; 4021 4022 // Set the size that is at least reserved in caller of this function. 
Tail 4023 // call optimized functions' reserved stack space needs to be aligned so that 4024 // taking the difference between two stack areas will result in an aligned 4025 // stack. 4026 MinReservedArea = 4027 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 4028 FuncInfo->setMinReservedArea(MinReservedArea); 4029 4030 // If the function takes variable number of arguments, make a frame index for 4031 // the start of the first vararg value... for expansion of llvm.va_start. 4032 if (isVarArg) { 4033 int Depth = ArgOffset; 4034 4035 FuncInfo->setVarArgsFrameIndex( 4036 MFI.CreateFixedObject(PtrByteSize, Depth, true)); 4037 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 4038 4039 // If this function is vararg, store any remaining integer argument regs 4040 // to their spots on the stack so that they may be loaded by dereferencing 4041 // the result of va_next. 4042 for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 4043 GPR_idx < Num_GPR_Regs; ++GPR_idx) { 4044 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 4045 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 4046 SDValue Store = 4047 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 4048 MemOps.push_back(Store); 4049 // Increment the address by four for the next argument to store 4050 SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT); 4051 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 4052 } 4053 } 4054 4055 if (!MemOps.empty()) 4056 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 4057 4058 return Chain; 4059 } 4060 4061 SDValue PPCTargetLowering::LowerFormalArguments_Darwin( 4062 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 4063 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 4064 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 4065 // TODO: add description of PPC stack frame format, or at least some docs. 4066 // 4067 MachineFunction &MF = DAG.getMachineFunction(); 4068 MachineFrameInfo &MFI = MF.getFrameInfo(); 4069 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 4070 4071 EVT PtrVT = getPointerTy(MF.getDataLayout()); 4072 bool isPPC64 = PtrVT == MVT::i64; 4073 // Potential tail calls could cause overwriting of argument stack slots. 4074 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 4075 (CallConv == CallingConv::Fast)); 4076 unsigned PtrByteSize = isPPC64 ? 8 : 4; 4077 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 4078 unsigned ArgOffset = LinkageSize; 4079 // Area that is at least reserved in caller of this function. 4080 unsigned MinReservedArea = ArgOffset; 4081 4082 static const MCPhysReg GPR_32[] = { // 32-bit registers. 4083 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 4084 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 4085 }; 4086 static const MCPhysReg GPR_64[] = { // 64-bit registers. 4087 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 4088 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 4089 }; 4090 static const MCPhysReg VR[] = { 4091 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 4092 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 4093 }; 4094 4095 const unsigned Num_GPR_Regs = array_lengthof(GPR_32); 4096 const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13; 4097 const unsigned Num_VR_Regs = array_lengthof( VR); 4098 4099 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 4100 4101 const MCPhysReg *GPR = isPPC64 ? 
GPR_64 : GPR_32; 4102 4103 // In 32-bit non-varargs functions, the stack space for vectors is after the 4104 // stack space for non-vectors. We do not use this space unless we have 4105 // too many vectors to fit in registers, something that only occurs in 4106 // constructed examples:), but we have to walk the arglist to figure 4107 // that out...for the pathological case, compute VecArgOffset as the 4108 // start of the vector parameter area. Computing VecArgOffset is the 4109 // entire point of the following loop. 4110 unsigned VecArgOffset = ArgOffset; 4111 if (!isVarArg && !isPPC64) { 4112 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; 4113 ++ArgNo) { 4114 EVT ObjectVT = Ins[ArgNo].VT; 4115 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 4116 4117 if (Flags.isByVal()) { 4118 // ObjSize is the true size, ArgSize rounded up to multiple of regs. 4119 unsigned ObjSize = Flags.getByValSize(); 4120 unsigned ArgSize = 4121 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 4122 VecArgOffset += ArgSize; 4123 continue; 4124 } 4125 4126 switch(ObjectVT.getSimpleVT().SimpleTy) { 4127 default: llvm_unreachable("Unhandled argument type!"); 4128 case MVT::i1: 4129 case MVT::i32: 4130 case MVT::f32: 4131 VecArgOffset += 4; 4132 break; 4133 case MVT::i64: // PPC64 4134 case MVT::f64: 4135 // FIXME: We are guaranteed to be !isPPC64 at this point. 4136 // Does MVT::i64 apply? 4137 VecArgOffset += 8; 4138 break; 4139 case MVT::v4f32: 4140 case MVT::v4i32: 4141 case MVT::v8i16: 4142 case MVT::v16i8: 4143 // Nothing to do, we're only looking at Nonvector args here. 4144 break; 4145 } 4146 } 4147 } 4148 // We've found where the vector parameter area in memory is. Skip the 4149 // first 12 parameters; these don't use that memory. 4150 VecArgOffset = ((VecArgOffset+15)/16)*16; 4151 VecArgOffset += 12*16; 4152 4153 // Add DAG nodes to load the arguments or copy them out of registers. On 4154 // entry to a function on PPC, the arguments start after the linkage area, 4155 // although the first ones are often in registers. 4156 4157 SmallVector<SDValue, 8> MemOps; 4158 unsigned nAltivecParamsAtEnd = 0; 4159 Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin(); 4160 unsigned CurArgIdx = 0; 4161 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 4162 SDValue ArgVal; 4163 bool needsLoad = false; 4164 EVT ObjectVT = Ins[ArgNo].VT; 4165 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 4166 unsigned ArgSize = ObjSize; 4167 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 4168 if (Ins[ArgNo].isOrigArg()) { 4169 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); 4170 CurArgIdx = Ins[ArgNo].getOrigArgIndex(); 4171 } 4172 unsigned CurArgOffset = ArgOffset; 4173 4174 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary. 4175 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 || 4176 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) { 4177 if (isVarArg || isPPC64) { 4178 MinReservedArea = ((MinReservedArea+15)/16)*16; 4179 MinReservedArea += CalculateStackSlotSize(ObjectVT, 4180 Flags, 4181 PtrByteSize); 4182 } else nAltivecParamsAtEnd++; 4183 } else 4184 // Calculate min reserved area. 4185 MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT, 4186 Flags, 4187 PtrByteSize); 4188 4189 // FIXME the codegen can be much improved in some cases. 4190 // We do not have to keep everything in memory. 
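    // A sketch of the justification rule below: on 32-bit Darwin a 2-byte
    // byval is right justified within its 4-byte slot (CurArgOffset biased
    // by 2), while a 6-byte byval is left justified and occupies two slots.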
4191 if (Flags.isByVal()) { 4192 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit"); 4193 4194 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 4195 ObjSize = Flags.getByValSize(); 4196 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 4197 // Objects of size 1 and 2 are right justified, everything else is 4198 // left justified. This means the memory address is adjusted forwards. 4199 if (ObjSize==1 || ObjSize==2) { 4200 CurArgOffset = CurArgOffset + (4 - ObjSize); 4201 } 4202 // The value of the object is its address. 4203 int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, false, true); 4204 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4205 InVals.push_back(FIN); 4206 if (ObjSize==1 || ObjSize==2) { 4207 if (GPR_idx != Num_GPR_Regs) { 4208 unsigned VReg; 4209 if (isPPC64) 4210 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 4211 else 4212 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 4213 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 4214 EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16; 4215 SDValue Store = 4216 DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, 4217 MachinePointerInfo(&*FuncArg), ObjType); 4218 MemOps.push_back(Store); 4219 ++GPR_idx; 4220 } 4221 4222 ArgOffset += PtrByteSize; 4223 4224 continue; 4225 } 4226 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 4227 // Store whatever pieces of the object are in registers 4228 // to memory. ArgOffset will be the address of the beginning 4229 // of the object. 4230 if (GPR_idx != Num_GPR_Regs) { 4231 unsigned VReg; 4232 if (isPPC64) 4233 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 4234 else 4235 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 4236 int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true); 4237 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4238 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 4239 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 4240 MachinePointerInfo(&*FuncArg, j)); 4241 MemOps.push_back(Store); 4242 ++GPR_idx; 4243 ArgOffset += PtrByteSize; 4244 } else { 4245 ArgOffset += ArgSize - (ArgOffset-CurArgOffset); 4246 break; 4247 } 4248 } 4249 continue; 4250 } 4251 4252 switch (ObjectVT.getSimpleVT().SimpleTy) { 4253 default: llvm_unreachable("Unhandled argument type!"); 4254 case MVT::i1: 4255 case MVT::i32: 4256 if (!isPPC64) { 4257 if (GPR_idx != Num_GPR_Regs) { 4258 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 4259 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 4260 4261 if (ObjectVT == MVT::i1) 4262 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal); 4263 4264 ++GPR_idx; 4265 } else { 4266 needsLoad = true; 4267 ArgSize = PtrByteSize; 4268 } 4269 // All int arguments reserve stack space in the Darwin ABI. 4270 ArgOffset += PtrByteSize; 4271 break; 4272 } 4273 LLVM_FALLTHROUGH; 4274 case MVT::i64: // PPC64 4275 if (GPR_idx != Num_GPR_Regs) { 4276 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 4277 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 4278 4279 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 4280 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 4281 // value to MVT::i64 and then truncate to the correct register size. 4282 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 4283 4284 ++GPR_idx; 4285 } else { 4286 needsLoad = true; 4287 ArgSize = PtrByteSize; 4288 } 4289 // All int arguments reserve stack space in the Darwin ABI. 
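      // (On 64-bit Darwin the slot is a full 8 bytes even when the value
      // arrived in a GPR, which is why the offset advances unconditionally.)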
4290 ArgOffset += 8; 4291 break; 4292 4293 case MVT::f32: 4294 case MVT::f64: 4295 // Every 4 bytes of argument space consumes one of the GPRs available for 4296 // argument passing. 4297 if (GPR_idx != Num_GPR_Regs) { 4298 ++GPR_idx; 4299 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64) 4300 ++GPR_idx; 4301 } 4302 if (FPR_idx != Num_FPR_Regs) { 4303 unsigned VReg; 4304 4305 if (ObjectVT == MVT::f32) 4306 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass); 4307 else 4308 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass); 4309 4310 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 4311 ++FPR_idx; 4312 } else { 4313 needsLoad = true; 4314 } 4315 4316 // All FP arguments reserve stack space in the Darwin ABI. 4317 ArgOffset += isPPC64 ? 8 : ObjSize; 4318 break; 4319 case MVT::v4f32: 4320 case MVT::v4i32: 4321 case MVT::v8i16: 4322 case MVT::v16i8: 4323 // Note that vector arguments in registers don't reserve stack space, 4324 // except in varargs functions. 4325 if (VR_idx != Num_VR_Regs) { 4326 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 4327 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 4328 if (isVarArg) { 4329 while ((ArgOffset % 16) != 0) { 4330 ArgOffset += PtrByteSize; 4331 if (GPR_idx != Num_GPR_Regs) 4332 GPR_idx++; 4333 } 4334 ArgOffset += 16; 4335 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64? 4336 } 4337 ++VR_idx; 4338 } else { 4339 if (!isVarArg && !isPPC64) { 4340 // Vectors go after all the nonvectors. 4341 CurArgOffset = VecArgOffset; 4342 VecArgOffset += 16; 4343 } else { 4344 // Vectors are aligned. 4345 ArgOffset = ((ArgOffset+15)/16)*16; 4346 CurArgOffset = ArgOffset; 4347 ArgOffset += 16; 4348 } 4349 needsLoad = true; 4350 } 4351 break; 4352 } 4353 4354 // We need to load the argument to a virtual register if we determined above 4355 // that we ran out of physical registers of the appropriate type. 4356 if (needsLoad) { 4357 int FI = MFI.CreateFixedObject(ObjSize, 4358 CurArgOffset + (ArgSize - ObjSize), 4359 isImmutable); 4360 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4361 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo()); 4362 } 4363 4364 InVals.push_back(ArgVal); 4365 } 4366 4367 // Allow for Altivec parameters at the end, if needed. 4368 if (nAltivecParamsAtEnd) { 4369 MinReservedArea = ((MinReservedArea+15)/16)*16; 4370 MinReservedArea += 16*nAltivecParamsAtEnd; 4371 } 4372 4373 // Area that is at least reserved in the caller of this function. 4374 MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize); 4375 4376 // Set the size that is at least reserved in caller of this function. Tail 4377 // call optimized functions' reserved stack space needs to be aligned so that 4378 // taking the difference between two stack areas will result in an aligned 4379 // stack. 4380 MinReservedArea = 4381 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 4382 FuncInfo->setMinReservedArea(MinReservedArea); 4383 4384 // If the function takes variable number of arguments, make a frame index for 4385 // the start of the first vararg value... for expansion of llvm.va_start. 
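  // Sketch (hypothetical signature): for `void f(int a, double b, ...)`,
  // Depth below is the running ArgOffset left after lowering `a` and `b`,
  // so va_start points at the first anonymous argument's slot.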
4386 if (isVarArg) {
4387 int Depth = ArgOffset;
4388
4389 FuncInfo->setVarArgsFrameIndex(
4390 MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
4391 Depth, true));
4392 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4393
4394 // If this function is vararg, store any remaining integer argument regs
4395 // to their spots on the stack so that they may be loaded by dereferencing
4396 // the result of va_next.
4397 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
4398 unsigned VReg;
4399
4400 if (isPPC64)
4401 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4402 else
4403 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4404
4405 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4406 SDValue Store =
4407 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4408 MemOps.push_back(Store);
4409 // Increment the address by the pointer size for the next argument to store.
4410 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
4411 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4412 }
4413 }
4414
4415 if (!MemOps.empty())
4416 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4417
4418 return Chain;
4419 }
4420
4421 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
4422 /// adjusted to accommodate the arguments for the tailcall.
4423 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
4424 unsigned ParamSize) {
4425
4426 if (!isTailCall) return 0;
4427
4428 PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
4429 unsigned CallerMinReservedArea = FI->getMinReservedArea();
4430 int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
4431 // Remember only if the new adjustment is bigger.
4432 if (SPDiff < FI->getTailCallSPDelta())
4433 FI->setTailCallSPDelta(SPDiff);
4434
4435 return SPDiff;
4436 }
4437
4438 static bool isFunctionGlobalAddress(SDValue Callee);
4439
4440 static bool
4441 callsShareTOCBase(const Function *Caller, SDValue Callee,
4442 const TargetMachine &TM) {
4443 // If !G, Callee can be an external symbol.
4444 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4445 if (!G)
4446 return false;
4447
4448 // The medium and large code models are expected to provide a sufficiently
4449 // large TOC to satisfy all data addressing needs of a module with a
4450 // single TOC. Since each module is addressed with a single TOC, we
4451 // only need to check that caller and callee don't cross DSO boundaries.
4452 if (CodeModel::Medium == TM.getCodeModel() ||
4453 CodeModel::Large == TM.getCodeModel())
4454 return TM.shouldAssumeDSOLocal(*Caller->getParent(), G->getGlobal());
4455
4456 // Otherwise we need to ensure callee and caller are in the same section,
4457 // since the linker may allocate multiple TOCs, and we don't know which
4458 // sections will belong to the same TOC base.
4459
4460 const GlobalValue *GV = G->getGlobal();
4461 if (!GV->isStrongDefinitionForLinker())
4462 return false;
4463
4464 // Any explicitly-specified sections and section prefixes must also match.
4465 // Also, if we're using -ffunction-sections, then each function is always in
4466 // a different section (the same is true for COMDAT functions).
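// For example, a callee defined with __attribute__((section("A"))) and a
// caller in section("B") fail the getSection() comparison below, and under
// -ffunction-sections we give up immediately without comparing names.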
4467 if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
4468 GV->getSection() != Caller->getSection())
4469 return false;
4470 if (const auto *F = dyn_cast<Function>(GV)) {
4471 if (F->getSectionPrefix() != Caller->getSectionPrefix())
4472 return false;
4473 }
4474
4475 // If the callee might be interposed, then we can't assume the ultimate call
4476 // target will be in the same section. Even in cases where we can assume that
4477 // interposition won't happen, in any case where the linker might insert a
4478 // stub to allow for interposition, we must generate code as though
4479 // interposition might occur. To understand why this matters, consider a
4480 // situation where: a -> b -> c where the arrows indicate calls. b and c are
4481 // in the same section, but a is in a different module (i.e. has a different
4482 // TOC base pointer). If the linker allows for interposition between b and c,
4483 // then it will generate a stub for the call edge between b and c which will
4484 // save the TOC pointer into the designated stack slot allocated by b. If we
4485 // return true here, and therefore allow a tail call between b and c, that
4486 // stack slot won't exist and the b -> c stub will end up saving b's TOC base
4487 // pointer into the stack slot allocated by a (where the a -> b stub saved
4488 // a's TOC base pointer). If we're not considering a tail call, but rather,
4489 // whether a nop is needed after the call instruction in b, because the linker
4490 // will insert a stub, the linker might complain about a missing nop if we
4491 // omit it (although many linkers don't complain in this case).
4492 if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
4493 return false;
4494
4495 return true;
4496 }
4497
4498 static bool
4499 needStackSlotPassParameters(const PPCSubtarget &Subtarget,
4500 const SmallVectorImpl<ISD::OutputArg> &Outs) {
4501 assert(Subtarget.isSVR4ABI() && Subtarget.isPPC64());
4502
4503 const unsigned PtrByteSize = 8;
4504 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4505
4506 static const MCPhysReg GPR[] = {
4507 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4508 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4509 };
4510 static const MCPhysReg VR[] = {
4511 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4512 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4513 };
4514
4515 const unsigned NumGPRs = array_lengthof(GPR);
4516 const unsigned NumFPRs = 13;
4517 const unsigned NumVRs = array_lengthof(VR);
4518 const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
4519
4520 unsigned NumBytes = LinkageSize;
4521 unsigned AvailableFPRs = NumFPRs;
4522 unsigned AvailableVRs = NumVRs;
4523
4524 for (const ISD::OutputArg& Param : Outs) {
4525 if (Param.Flags.isNest()) continue;
4526
4527 if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags,
4528 PtrByteSize, LinkageSize, ParamAreaSize,
4529 NumBytes, AvailableFPRs, AvailableVRs,
4530 Subtarget.hasQPX()))
4531 return true;
4532 }
4533 return false;
4534 }
4535
4536 static bool
4537 hasSameArgumentList(const Function *CallerFn, ImmutableCallSite CS) {
4538 if (CS.arg_size() != CallerFn->arg_size())
4539 return false;
4540
4541 ImmutableCallSite::arg_iterator CalleeArgIter = CS.arg_begin();
4542 ImmutableCallSite::arg_iterator CalleeArgEnd = CS.arg_end();
4543 Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();
4544
4545 for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
4546 const Value* CalleeArg = *CalleeArgIter;
4547 const Value* CallerArg = &(*CallerArgIter);
4548 if (CalleeArg == CallerArg)
4549 continue;
4550
4551 // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
4552 // tail call @callee([4 x i64] undef, [4 x i64] %b)
4553 // }
4554 // The 1st argument of the callee is undef and has the same type as the caller's.
4555 if (CalleeArg->getType() == CallerArg->getType() &&
4556 isa<UndefValue>(CalleeArg))
4557 continue;
4558
4559 return false;
4560 }
4561
4562 return true;
4563 }
4564
4565 // Returns true if TCO is possible between the caller's and callee's
4566 // calling conventions.
4567 static bool
4568 areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
4569 CallingConv::ID CalleeCC) {
4570 // Tail calls are possible with fastcc and ccc.
4571 auto isTailCallableCC = [] (CallingConv::ID CC){
4572 return CC == CallingConv::C || CC == CallingConv::Fast;
4573 };
4574 if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
4575 return false;
4576
4577 // We can safely tail call both fastcc and ccc callees from a c calling
4578 // convention caller. If the caller is fastcc, we may have less stack space
4579 // than a non-fastcc caller with the same signature so disable tail-calls in
4580 // that case.
4581 return CallerCC == CallingConv::C || CallerCC == CalleeCC;
4582 }
4583
4584 bool
4585 PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
4586 SDValue Callee,
4587 CallingConv::ID CalleeCC,
4588 ImmutableCallSite CS,
4589 bool isVarArg,
4590 const SmallVectorImpl<ISD::OutputArg> &Outs,
4591 const SmallVectorImpl<ISD::InputArg> &Ins,
4592 SelectionDAG& DAG) const {
4593 bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
4594
4595 if (DisableSCO && !TailCallOpt) return false;
4596
4597 // Variadic argument functions are not supported.
4598 if (isVarArg) return false;
4599
4600 auto &Caller = DAG.getMachineFunction().getFunction();
4601 // Check that the calling conventions are compatible for TCO.
4602 if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
4603 return false;
4604
4605 // Callers with any byval parameter are not supported.
4606 if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
4607 return false;
4608
4609 // Callees with any byval parameter are not supported either.
4610 // Note: This is a quick workaround, because in some cases, e.g.
4611 // caller's stack size > callee's stack size, we are still able to apply
4612 // sibling call optimization. For example, gcc is able to do SCO for caller1
4613 // in the following example, but not for caller2.
4614 // struct test {
4615 // long int a;
4616 // char ary[56];
4617 // } gTest;
4618 // __attribute__((noinline)) int callee(struct test v, struct test *b) {
4619 // b->a = v.a;
4620 // return 0;
4621 // }
4622 // void caller1(struct test a, struct test c, struct test *b) {
4623 // callee(gTest, b); }
4624 // void caller2(struct test *b) { callee(gTest, b); }
4625 if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
4626 return false;
4627
4628 // If callee and caller use different calling conventions, we cannot pass
4629 // parameters on the stack since offsets for the parameter area may differ.
4630 if (Caller.getCallingConv() != CalleeCC &&
4631 needStackSlotPassParameters(Subtarget, Outs))
4632 return false;
4633
4634 // No TCO/SCO on indirect calls because the caller has to restore its TOC.
4635 if (!isFunctionGlobalAddress(Callee) &&
4636 !isa<ExternalSymbolSDNode>(Callee))
4637 return false;
4638
4639 // If the caller and callee potentially have different TOC bases then we
4640 // cannot tail call since we need to restore the TOC pointer after the call.
4641 // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
4642 if (!callsShareTOCBase(&Caller, Callee, getTargetMachine()))
4643 return false;
4644
4645 // TCO allows altering callee ABI, so we don't have to check further.
4646 if (CalleeCC == CallingConv::Fast && TailCallOpt)
4647 return true;
4648
4649 if (DisableSCO) return false;
4650
4651 // If the callee uses the same argument list as the caller, we can apply
4652 // SCO in this case. If not, we need to check whether the callee needs
4653 // stack slots for passing arguments.
4654 if (!hasSameArgumentList(&Caller, CS) &&
4655 needStackSlotPassParameters(Subtarget, Outs)) {
4656 return false;
4657 }
4658
4659 return true;
4660 }
4661
4662 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
4663 /// for tail call optimization. Targets which want to do tail call
4664 /// optimization should implement this function.
4665 bool
4666 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
4667 CallingConv::ID CalleeCC,
4668 bool isVarArg,
4669 const SmallVectorImpl<ISD::InputArg> &Ins,
4670 SelectionDAG& DAG) const {
4671 if (!getTargetMachine().Options.GuaranteedTailCallOpt)
4672 return false;
4673
4674 // Variable argument functions are not supported.
4675 if (isVarArg)
4676 return false;
4677
4678 MachineFunction &MF = DAG.getMachineFunction();
4679 CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
4680 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
4681 // Functions containing byval parameters are not supported.
4682 for (unsigned i = 0; i != Ins.size(); i++) {
4683 ISD::ArgFlagsTy Flags = Ins[i].Flags;
4684 if (Flags.isByVal()) return false;
4685 }
4686
4687 // Non-PIC/GOT tail calls are supported.
4688 if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
4689 return true;
4690
4691 // At the moment we can only do local tail calls (in same module, hidden
4692 // or protected) if we are generating PIC.
4693 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
4694 return G->getGlobal()->hasHiddenVisibility()
4695 || G->getGlobal()->hasProtectedVisibility();
4696 }
4697
4698 return false;
4699 }
4700
4701 /// isBLACompatibleAddress - Return the immediate to use if the specified
4702 /// 32-bit value is representable in the immediate field of a BxA instruction.
4703 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
4704 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
4705 if (!C) return nullptr;
4706
4707 int Addr = C->getZExtValue();
4708 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero.
4709 SignExtend32<26>(Addr) != Addr)
4710 return nullptr; // Top 6 bits have to be sext of immediate.
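// Worked example (illustrative values): Addr = 0x01FFFFFC is accepted, since
// its low two bits are clear and SignExtend32<26>(0x01FFFFFC) returns the
// value unchanged; it is encoded below as 0x01FFFFFC >> 2 == 0x007FFFFF.
// Addr = 0x02000000 is rejected because bit 25 is set, so the 26-bit sign
// extension yields 0xFE000000 != Addr.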
4711 4712 return DAG 4713 .getConstant( 4714 (int)C->getZExtValue() >> 2, SDLoc(Op), 4715 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout())) 4716 .getNode(); 4717 } 4718 4719 namespace { 4720 4721 struct TailCallArgumentInfo { 4722 SDValue Arg; 4723 SDValue FrameIdxOp; 4724 int FrameIdx = 0; 4725 4726 TailCallArgumentInfo() = default; 4727 }; 4728 4729 } // end anonymous namespace 4730 4731 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot. 4732 static void StoreTailCallArgumentsToStackSlot( 4733 SelectionDAG &DAG, SDValue Chain, 4734 const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs, 4735 SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) { 4736 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) { 4737 SDValue Arg = TailCallArgs[i].Arg; 4738 SDValue FIN = TailCallArgs[i].FrameIdxOp; 4739 int FI = TailCallArgs[i].FrameIdx; 4740 // Store relative to framepointer. 4741 MemOpChains.push_back(DAG.getStore( 4742 Chain, dl, Arg, FIN, 4743 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI))); 4744 } 4745 } 4746 4747 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to 4748 /// the appropriate stack slot for the tail call optimized function call. 4749 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain, 4750 SDValue OldRetAddr, SDValue OldFP, 4751 int SPDiff, const SDLoc &dl) { 4752 if (SPDiff) { 4753 // Calculate the new stack slot for the return address. 4754 MachineFunction &MF = DAG.getMachineFunction(); 4755 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); 4756 const PPCFrameLowering *FL = Subtarget.getFrameLowering(); 4757 bool isPPC64 = Subtarget.isPPC64(); 4758 int SlotSize = isPPC64 ? 8 : 4; 4759 int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset(); 4760 int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize, 4761 NewRetAddrLoc, true); 4762 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 4763 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT); 4764 Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx, 4765 MachinePointerInfo::getFixedStack(MF, NewRetAddr)); 4766 4767 // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack 4768 // slot as the FP is never overwritten. 4769 if (Subtarget.isDarwinABI()) { 4770 int NewFPLoc = SPDiff + FL->getFramePointerSaveOffset(); 4771 int NewFPIdx = MF.getFrameInfo().CreateFixedObject(SlotSize, NewFPLoc, 4772 true); 4773 SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT); 4774 Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx, 4775 MachinePointerInfo::getFixedStack( 4776 DAG.getMachineFunction(), NewFPIdx)); 4777 } 4778 } 4779 return Chain; 4780 } 4781 4782 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate 4783 /// the position of the argument. 4784 static void 4785 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, 4786 SDValue Arg, int SPDiff, unsigned ArgOffset, 4787 SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) { 4788 int Offset = ArgOffset + SPDiff; 4789 uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8; 4790 int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true); 4791 EVT VT = isPPC64 ? 
MVT::i64 : MVT::i32;
4792 SDValue FIN = DAG.getFrameIndex(FI, VT);
4793 TailCallArgumentInfo Info;
4794 Info.Arg = Arg;
4795 Info.FrameIdxOp = FIN;
4796 Info.FrameIdx = FI;
4797 TailCallArguments.push_back(Info);
4798 }
4799
4800 /// EmitTailCallLoadFPAndRetAddr - Emit loads of the return address and frame
4801 /// pointer from their stack slots. Returns the chain as result and the loaded
4802 /// values in LROpOut/FPOpOut. Used when tail calling.
4803 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
4804 SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
4805 SDValue &FPOpOut, const SDLoc &dl) const {
4806 if (SPDiff) {
4807 // Load the LR and FP stack slot for later adjusting.
4808 EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
4809 LROpOut = getReturnAddrFrameIndex(DAG);
4810 LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
4811 Chain = SDValue(LROpOut.getNode(), 1);
4812
4813 // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack
4814 // slot as the FP is never overwritten.
4815 if (Subtarget.isDarwinABI()) {
4816 FPOpOut = getFramePointerFrameIndex(DAG);
4817 FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo());
4818 Chain = SDValue(FPOpOut.getNode(), 1);
4819 }
4820 }
4821 return Chain;
4822 }
4823
4824 /// CreateCopyOfByValArgument - Make a copy of an aggregate at the address
4825 /// specified by "Src" to address "Dst". The size is taken from the byval
4826 /// flags, and the alignment information is specified by the parameter
4827 /// attribute. The copy will be passed as a byval function parameter.
4828 /// Sometimes what we are copying is the end of a larger object, the part that
4829 /// does not fit in registers.
4830 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
4831 SDValue Chain, ISD::ArgFlagsTy Flags,
4832 SelectionDAG &DAG, const SDLoc &dl) {
4833 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
4834 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
4835 false, false, false, MachinePointerInfo(),
4836 MachinePointerInfo());
4837 }
4838
4839 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case
4840 /// of tail calls.
4841 static void LowerMemOpCallTo(
4842 SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
4843 SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
4844 bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
4845 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
4846 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4847 if (!isTailCall) {
4848 if (isVector) {
4849 SDValue StackPtr;
4850 if (isPPC64)
4851 StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
4852 else
4853 StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
4854 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
4855 DAG.getConstant(ArgOffset, dl, PtrVT));
4856 }
4857 MemOpChains.push_back(
4858 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
4859 // Calculate and remember argument location.
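// For example (illustrative numbers): if the caller's minimum reserved area
// is 64 bytes but this call needs 96, CalculateTailCallSPDiff returns
// SPDiff == -32, and an argument at ArgOffset 48 is recorded by
// CalculateTailCallArgDest at fixed-object offset 48 + (-32) == 16.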
4860 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset, 4861 TailCallArguments); 4862 } 4863 4864 static void 4865 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, 4866 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp, 4867 SDValue FPOp, 4868 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) { 4869 // Emit a sequence of copyto/copyfrom virtual registers for arguments that 4870 // might overwrite each other in case of tail call optimization. 4871 SmallVector<SDValue, 8> MemOpChains2; 4872 // Do not flag preceding copytoreg stuff together with the following stuff. 4873 InFlag = SDValue(); 4874 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, 4875 MemOpChains2, dl); 4876 if (!MemOpChains2.empty()) 4877 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2); 4878 4879 // Store the return address to the appropriate stack slot. 4880 Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl); 4881 4882 // Emit callseq_end just before tailcall node. 4883 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 4884 DAG.getIntPtrConstant(0, dl, true), InFlag, dl); 4885 InFlag = Chain.getValue(1); 4886 } 4887 4888 // Is this global address that of a function that can be called by name? (as 4889 // opposed to something that must hold a descriptor for an indirect call). 4890 static bool isFunctionGlobalAddress(SDValue Callee) { 4891 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 4892 if (Callee.getOpcode() == ISD::GlobalTLSAddress || 4893 Callee.getOpcode() == ISD::TargetGlobalTLSAddress) 4894 return false; 4895 4896 return G->getGlobal()->getValueType()->isFunctionTy(); 4897 } 4898 4899 return false; 4900 } 4901 4902 static unsigned 4903 PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, SDValue &Chain, 4904 SDValue CallSeqStart, const SDLoc &dl, int SPDiff, bool isTailCall, 4905 bool isPatchPoint, bool hasNest, 4906 SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass, 4907 SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys, 4908 ImmutableCallSite CS, const PPCSubtarget &Subtarget) { 4909 bool isPPC64 = Subtarget.isPPC64(); 4910 bool isSVR4ABI = Subtarget.isSVR4ABI(); 4911 bool isELFv2ABI = Subtarget.isELFv2ABI(); 4912 4913 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4914 NodeTys.push_back(MVT::Other); // Returns a chain 4915 NodeTys.push_back(MVT::Glue); // Returns a flag for retval copy to use. 4916 4917 unsigned CallOpc = PPCISD::CALL; 4918 4919 bool needIndirectCall = true; 4920 if (!isSVR4ABI || !isPPC64) 4921 if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) { 4922 // If this is an absolute destination address, use the munged value. 4923 Callee = SDValue(Dest, 0); 4924 needIndirectCall = false; 4925 } 4926 4927 // PC-relative references to external symbols should go through $stub, unless 4928 // we're building with the leopard linker or later, which automatically 4929 // synthesizes these stubs. 
4930 const TargetMachine &TM = DAG.getTarget(); 4931 const Module *Mod = DAG.getMachineFunction().getFunction().getParent(); 4932 const GlobalValue *GV = nullptr; 4933 if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) 4934 GV = G->getGlobal(); 4935 bool Local = TM.shouldAssumeDSOLocal(*Mod, GV); 4936 bool UsePlt = !Local && Subtarget.isTargetELF() && !isPPC64; 4937 4938 if (isFunctionGlobalAddress(Callee)) { 4939 GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee); 4940 // A call to a TLS address is actually an indirect call to a 4941 // thread-specific pointer. 4942 unsigned OpFlags = 0; 4943 if (UsePlt) 4944 OpFlags = PPCII::MO_PLT; 4945 4946 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, 4947 // every direct call is) turn it into a TargetGlobalAddress / 4948 // TargetExternalSymbol node so that legalize doesn't hack it. 4949 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, 4950 Callee.getValueType(), 0, OpFlags); 4951 needIndirectCall = false; 4952 } 4953 4954 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 4955 unsigned char OpFlags = 0; 4956 4957 if (UsePlt) 4958 OpFlags = PPCII::MO_PLT; 4959 4960 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(), 4961 OpFlags); 4962 needIndirectCall = false; 4963 } 4964 4965 if (isPatchPoint) { 4966 // We'll form an invalid direct call when lowering a patchpoint; the full 4967 // sequence for an indirect call is complicated, and many of the 4968 // instructions introduced might have side effects (and, thus, can't be 4969 // removed later). The call itself will be removed as soon as the 4970 // argument/return lowering is complete, so the fact that it has the wrong 4971 // kind of operands should not really matter. 4972 needIndirectCall = false; 4973 } 4974 4975 if (needIndirectCall) { 4976 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair 4977 // to do the call, we can't use PPCISD::CALL. 4978 SDValue MTCTROps[] = {Chain, Callee, InFlag}; 4979 4980 if (isSVR4ABI && isPPC64 && !isELFv2ABI) { 4981 // Function pointers in the 64-bit SVR4 ABI do not point to the function 4982 // entry point, but to the function descriptor (the function entry point 4983 // address is part of the function descriptor though). 4984 // The function descriptor is a three doubleword structure with the 4985 // following fields: function entry point, TOC base address and 4986 // environment pointer. 4987 // Thus for a call through a function pointer, the following actions need 4988 // to be performed: 4989 // 1. Save the TOC of the caller in the TOC save area of its stack 4990 // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()). 4991 // 2. Load the address of the function entry point from the function 4992 // descriptor. 4993 // 3. Load the TOC of the callee from the function descriptor into r2. 4994 // 4. Load the environment pointer from the function descriptor into 4995 // r11. 4996 // 5. Branch to the function entry point address. 4997 // 6. On return of the callee, the TOC of the caller needs to be 4998 // restored (this is done in FinishCall()). 4999 // 5000 // The loads are scheduled at the beginning of the call sequence, and the 5001 // register copies are flagged together to ensure that no other 5002 // operations can be scheduled in between. E.g. 
without flagging the 5003 // copies together, a TOC access in the caller could be scheduled between 5004 // the assignment of the callee TOC and the branch to the callee, which 5005 // results in the TOC access going through the TOC of the callee instead 5006 // of going through the TOC of the caller, which leads to incorrect code. 5007 5008 // Load the address of the function entry point from the function 5009 // descriptor. 5010 SDValue LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-1); 5011 if (LDChain.getValueType() == MVT::Glue) 5012 LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-2); 5013 5014 auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors() 5015 ? (MachineMemOperand::MODereferenceable | 5016 MachineMemOperand::MOInvariant) 5017 : MachineMemOperand::MONone; 5018 5019 MachinePointerInfo MPI(CS ? CS.getCalledValue() : nullptr); 5020 SDValue LoadFuncPtr = DAG.getLoad(MVT::i64, dl, LDChain, Callee, MPI, 5021 /* Alignment = */ 8, MMOFlags); 5022 5023 // Load environment pointer into r11. 5024 SDValue PtrOff = DAG.getIntPtrConstant(16, dl); 5025 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff); 5026 SDValue LoadEnvPtr = 5027 DAG.getLoad(MVT::i64, dl, LDChain, AddPtr, MPI.getWithOffset(16), 5028 /* Alignment = */ 8, MMOFlags); 5029 5030 SDValue TOCOff = DAG.getIntPtrConstant(8, dl); 5031 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff); 5032 SDValue TOCPtr = 5033 DAG.getLoad(MVT::i64, dl, LDChain, AddTOC, MPI.getWithOffset(8), 5034 /* Alignment = */ 8, MMOFlags); 5035 5036 setUsesTOCBasePtr(DAG); 5037 SDValue TOCVal = DAG.getCopyToReg(Chain, dl, PPC::X2, TOCPtr, 5038 InFlag); 5039 Chain = TOCVal.getValue(0); 5040 InFlag = TOCVal.getValue(1); 5041 5042 // If the function call has an explicit 'nest' parameter, it takes the 5043 // place of the environment pointer. 5044 if (!hasNest) { 5045 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr, 5046 InFlag); 5047 5048 Chain = EnvVal.getValue(0); 5049 InFlag = EnvVal.getValue(1); 5050 } 5051 5052 MTCTROps[0] = Chain; 5053 MTCTROps[1] = LoadFuncPtr; 5054 MTCTROps[2] = InFlag; 5055 } 5056 5057 Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, 5058 makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2)); 5059 InFlag = Chain.getValue(1); 5060 5061 NodeTys.clear(); 5062 NodeTys.push_back(MVT::Other); 5063 NodeTys.push_back(MVT::Glue); 5064 Ops.push_back(Chain); 5065 CallOpc = PPCISD::BCTRL; 5066 Callee.setNode(nullptr); 5067 // Add use of X11 (holding environment pointer) 5068 if (isSVR4ABI && isPPC64 && !isELFv2ABI && !hasNest) 5069 Ops.push_back(DAG.getRegister(PPC::X11, PtrVT)); 5070 // Add CTR register as callee so a bctr can be emitted later. 5071 if (isTailCall) 5072 Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT)); 5073 } 5074 5075 // If this is a direct call, pass the chain and the callee. 5076 if (Callee.getNode()) { 5077 Ops.push_back(Chain); 5078 Ops.push_back(Callee); 5079 } 5080 // If this is a tail call add stack pointer delta. 5081 if (isTailCall) 5082 Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32)); 5083 5084 // Add argument registers to the end of the list so that they are known live 5085 // into the call. 5086 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 5087 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 5088 RegsToPass[i].second.getValueType())); 5089 5090 // All calls, in both the ELF V1 and V2 ABIs, need the TOC register live 5091 // into the call. 
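// As a sketch of what the preceding indirect-call lowering produces on
// ELFv1 (register numbers are illustrative; the real nodes are glued
// SelectionDAG operations rather than final machine code):
//   ld    rE, 0(fn_desc)    ; entry point
//   ld    r2, 8(fn_desc)    ; callee TOC
//   ld    r11, 16(fn_desc)  ; environment pointer (skipped for 'nest')
//   mtctr rE
//   bctrl
// and X2 is then added as an operand just below so the TOC register stays
// live across the call.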
5092 if (isSVR4ABI && isPPC64 && !isPatchPoint) { 5093 setUsesTOCBasePtr(DAG); 5094 Ops.push_back(DAG.getRegister(PPC::X2, PtrVT)); 5095 } 5096 5097 return CallOpc; 5098 } 5099 5100 SDValue PPCTargetLowering::LowerCallResult( 5101 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, 5102 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 5103 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 5104 SmallVector<CCValAssign, 16> RVLocs; 5105 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 5106 *DAG.getContext()); 5107 5108 CCRetInfo.AnalyzeCallResult( 5109 Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold) 5110 ? RetCC_PPC_Cold 5111 : RetCC_PPC); 5112 5113 // Copy all of the result registers out of their specified physreg. 5114 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 5115 CCValAssign &VA = RVLocs[i]; 5116 assert(VA.isRegLoc() && "Can only return in registers!"); 5117 5118 SDValue Val = DAG.getCopyFromReg(Chain, dl, 5119 VA.getLocReg(), VA.getLocVT(), InFlag); 5120 Chain = Val.getValue(1); 5121 InFlag = Val.getValue(2); 5122 5123 switch (VA.getLocInfo()) { 5124 default: llvm_unreachable("Unknown loc info!"); 5125 case CCValAssign::Full: break; 5126 case CCValAssign::AExt: 5127 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 5128 break; 5129 case CCValAssign::ZExt: 5130 Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val, 5131 DAG.getValueType(VA.getValVT())); 5132 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 5133 break; 5134 case CCValAssign::SExt: 5135 Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val, 5136 DAG.getValueType(VA.getValVT())); 5137 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 5138 break; 5139 } 5140 5141 InVals.push_back(Val); 5142 } 5143 5144 return Chain; 5145 } 5146 5147 SDValue PPCTargetLowering::FinishCall( 5148 CallingConv::ID CallConv, const SDLoc &dl, bool isTailCall, bool isVarArg, 5149 bool isPatchPoint, bool hasNest, SelectionDAG &DAG, 5150 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue InFlag, 5151 SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff, 5152 unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins, 5153 SmallVectorImpl<SDValue> &InVals, ImmutableCallSite CS) const { 5154 std::vector<EVT> NodeTys; 5155 SmallVector<SDValue, 8> Ops; 5156 unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, CallSeqStart, dl, 5157 SPDiff, isTailCall, isPatchPoint, hasNest, 5158 RegsToPass, Ops, NodeTys, CS, Subtarget); 5159 5160 // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls 5161 if (isVarArg && Subtarget.isSVR4ABI() && !Subtarget.isPPC64()) 5162 Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32)); 5163 5164 // When performing tail call optimization the callee pops its arguments off 5165 // the stack. Account for this here so these bytes can be pushed back on in 5166 // PPCFrameLowering::eliminateCallFramePseudoInstr. 5167 int BytesCalleePops = 5168 (CallConv == CallingConv::Fast && 5169 getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0; 5170 5171 // Add a register mask operand representing the call-preserved registers. 
5172 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
5173 const uint32_t *Mask =
5174 TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
5175 assert(Mask && "Missing call preserved mask for calling convention");
5176 Ops.push_back(DAG.getRegisterMask(Mask));
5177
5178 if (InFlag.getNode())
5179 Ops.push_back(InFlag);
5180
5181 // Emit tail call.
5182 if (isTailCall) {
5183 assert(((Callee.getOpcode() == ISD::Register &&
5184 cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
5185 Callee.getOpcode() == ISD::TargetExternalSymbol ||
5186 Callee.getOpcode() == ISD::TargetGlobalAddress ||
5187 isa<ConstantSDNode>(Callee)) &&
5188 "Expecting a global address, external symbol, absolute value or register");
5189
5190 DAG.getMachineFunction().getFrameInfo().setHasTailCall();
5191 return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, Ops);
5192 }
5193
5194 // Add a NOP immediately after the branch instruction when using the 64-bit
5195 // SVR4 ABI. At link time, if caller and callee are in different modules and
5196 // thus have different TOCs, the call will be replaced with a call to a stub
5197 // function which saves the current TOC, loads the TOC of the callee and
5198 // branches to the callee. The NOP will be replaced with a load instruction
5199 // which restores the TOC of the caller from the TOC save slot of the current
5200 // stack frame. If caller and callee belong to the same module (and have the
5201 // same TOC), the NOP will remain unchanged.
5202
5203 MachineFunction &MF = DAG.getMachineFunction();
5204 if (!isTailCall && Subtarget.isSVR4ABI() && Subtarget.isPPC64() &&
5205 !isPatchPoint) {
5206 if (CallOpc == PPCISD::BCTRL) {
5207 // This is a call through a function pointer.
5208 // Restore the caller TOC from the save area into R2.
5209 // See PrepareCall() for more information about calls through function
5210 // pointers in the 64-bit SVR4 ABI.
5211 // We are using a target-specific load with r2 hard coded, because the
5212 // result of a target-independent load would never go directly into r2,
5213 // since r2 is a reserved register (which prevents the register allocator
5214 // from allocating it), resulting in an additional register being
5215 // allocated and an unnecessary move instruction being generated.
5216 CallOpc = PPCISD::BCTRL_LOAD_TOC;
5217
5218 EVT PtrVT = getPointerTy(DAG.getDataLayout());
5219 SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT);
5220 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
5221 SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
5222 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff);
5223
5224 // The address needs to go after the chain input but before the flag (or
5225 // any other variadic arguments).
5226 Ops.insert(std::next(Ops.begin()), AddTOC);
5227 } else if (CallOpc == PPCISD::CALL &&
5228 !callsShareTOCBase(&MF.getFunction(), Callee, DAG.getTarget())) {
5229 // Otherwise insert NOP for non-local calls.
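// For example, a cross-module call that is emitted as
//   bl callee
//   nop
// may be rewritten by the linker into
//   bl callee_stub
//   ld r2, 40(r1)    ; TOC restore; the slot is at 24(r1) on ELFv2
// where the stub (a hypothetical name here) saves the caller's TOC before
// branching to the callee.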
5230 CallOpc = PPCISD::CALL_NOP; 5231 } 5232 } 5233 5234 Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops); 5235 InFlag = Chain.getValue(1); 5236 5237 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 5238 DAG.getIntPtrConstant(BytesCalleePops, dl, true), 5239 InFlag, dl); 5240 if (!Ins.empty()) 5241 InFlag = Chain.getValue(1); 5242 5243 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, 5244 Ins, dl, DAG, InVals); 5245 } 5246 5247 SDValue 5248 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 5249 SmallVectorImpl<SDValue> &InVals) const { 5250 SelectionDAG &DAG = CLI.DAG; 5251 SDLoc &dl = CLI.DL; 5252 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 5253 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 5254 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 5255 SDValue Chain = CLI.Chain; 5256 SDValue Callee = CLI.Callee; 5257 bool &isTailCall = CLI.IsTailCall; 5258 CallingConv::ID CallConv = CLI.CallConv; 5259 bool isVarArg = CLI.IsVarArg; 5260 bool isPatchPoint = CLI.IsPatchPoint; 5261 ImmutableCallSite CS = CLI.CS; 5262 5263 if (isTailCall) { 5264 if (Subtarget.useLongCalls() && !(CS && CS.isMustTailCall())) 5265 isTailCall = false; 5266 else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) 5267 isTailCall = 5268 IsEligibleForTailCallOptimization_64SVR4(Callee, CallConv, CS, 5269 isVarArg, Outs, Ins, DAG); 5270 else 5271 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, 5272 Ins, DAG); 5273 if (isTailCall) { 5274 ++NumTailCalls; 5275 if (!getTargetMachine().Options.GuaranteedTailCallOpt) 5276 ++NumSiblingCalls; 5277 5278 assert(isa<GlobalAddressSDNode>(Callee) && 5279 "Callee should be an llvm::Function object."); 5280 LLVM_DEBUG( 5281 const GlobalValue *GV = 5282 cast<GlobalAddressSDNode>(Callee)->getGlobal(); 5283 const unsigned Width = 5284 80 - strlen("TCO caller: ") - strlen(", callee linkage: 0, 0"); 5285 dbgs() << "TCO caller: " 5286 << left_justify(DAG.getMachineFunction().getName(), Width) 5287 << ", callee linkage: " << GV->getVisibility() << ", " 5288 << GV->getLinkage() << "\n"); 5289 } 5290 } 5291 5292 if (!isTailCall && CS && CS.isMustTailCall()) 5293 report_fatal_error("failed to perform tail call elimination on a call " 5294 "site marked musttail"); 5295 5296 // When long calls (i.e. indirect calls) are always used, calls are always 5297 // made via function pointer. If we have a function name, first translate it 5298 // into a pointer. 
5299 if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
5300 !isTailCall)
5301 Callee = LowerGlobalAddress(Callee, DAG);
5302
5303 if (Subtarget.isSVR4ABI()) {
5304 if (Subtarget.isPPC64())
5305 return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
5306 isTailCall, isPatchPoint, Outs, OutVals, Ins,
5307 dl, DAG, InVals, CS);
5308 else
5309 return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg,
5310 isTailCall, isPatchPoint, Outs, OutVals, Ins,
5311 dl, DAG, InVals, CS);
5312 }
5313
5314 return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
5315 isTailCall, isPatchPoint, Outs, OutVals, Ins,
5316 dl, DAG, InVals, CS);
5317 }
5318
5319 SDValue PPCTargetLowering::LowerCall_32SVR4(
5320 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
5321 bool isTailCall, bool isPatchPoint,
5322 const SmallVectorImpl<ISD::OutputArg> &Outs,
5323 const SmallVectorImpl<SDValue> &OutVals,
5324 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5325 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5326 ImmutableCallSite CS) const {
5327 // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
5328 // of the 32-bit SVR4 ABI stack frame layout.
5329
5330 assert((CallConv == CallingConv::C ||
5331 CallConv == CallingConv::Cold ||
5332 CallConv == CallingConv::Fast) && "Unknown calling convention!");
5333
5334 unsigned PtrByteSize = 4;
5335
5336 MachineFunction &MF = DAG.getMachineFunction();
5337
5338 // Mark this function as potentially containing a tail call. As a
5339 // consequence, the frame pointer will be used for dynamic allocations and
5340 // for restoring the caller's stack pointer in this function's epilog. This
5341 // is done because, by tail calling, the called function might overwrite the
5342 // value in this function's (MF) stack pointer stack slot 0(SP).
5343 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5344 CallConv == CallingConv::Fast)
5345 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5346
5347 // Count how many bytes are to be pushed on the stack, including the linkage
5348 // area, the parameter list area and the part of the local variable space
5349 // which contains copies of aggregates which are passed by value.
5350
5351 // Assign locations to all of the outgoing arguments.
5352 SmallVector<CCValAssign, 16> ArgLocs;
5353 PPCCCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
5354
5355 // Reserve space for the linkage area on the stack.
5356 CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
5357 PtrByteSize);
5358 if (useSoftFloat())
5359 CCInfo.PreAnalyzeCallOperands(Outs);
5360
5361 if (isVarArg) {
5362 // Handle fixed and variable vector arguments differently.
5363 // Fixed vector arguments go into registers as long as registers are
5364 // available. Variable vector arguments always go into memory.
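// For example, for a hypothetical callee
//   void f(<4 x i32> %fixed, ...);
// a fixed vector operand is assigned by CC_PPC32_SVR4 below and can be
// given a vector register, while a <4 x i32> passed through the "..." is
// routed through CC_PPC32_SVR4_VarArg and always receives a memory slot.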
5365 unsigned NumArgs = Outs.size(); 5366 5367 for (unsigned i = 0; i != NumArgs; ++i) { 5368 MVT ArgVT = Outs[i].VT; 5369 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 5370 bool Result; 5371 5372 if (Outs[i].IsFixed) { 5373 Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, 5374 CCInfo); 5375 } else { 5376 Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full, 5377 ArgFlags, CCInfo); 5378 } 5379 5380 if (Result) { 5381 #ifndef NDEBUG 5382 errs() << "Call operand #" << i << " has unhandled type " 5383 << EVT(ArgVT).getEVTString() << "\n"; 5384 #endif 5385 llvm_unreachable(nullptr); 5386 } 5387 } 5388 } else { 5389 // All arguments are treated the same. 5390 CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4); 5391 } 5392 CCInfo.clearWasPPCF128(); 5393 5394 // Assign locations to all of the outgoing aggregate by value arguments. 5395 SmallVector<CCValAssign, 16> ByValArgLocs; 5396 CCState CCByValInfo(CallConv, isVarArg, MF, ByValArgLocs, *DAG.getContext()); 5397 5398 // Reserve stack space for the allocations in CCInfo. 5399 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 5400 5401 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal); 5402 5403 // Size of the linkage area, parameter list area and the part of the local 5404 // space variable where copies of aggregates which are passed by value are 5405 // stored. 5406 unsigned NumBytes = CCByValInfo.getNextStackOffset(); 5407 5408 // Calculate by how many bytes the stack has to be adjusted in case of tail 5409 // call optimization. 5410 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 5411 5412 // Adjust the stack pointer for the new arguments... 5413 // These operations are automatically eliminated by the prolog/epilog pass 5414 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl); 5415 SDValue CallSeqStart = Chain; 5416 5417 // Load the return address and frame pointer so it can be moved somewhere else 5418 // later. 5419 SDValue LROp, FPOp; 5420 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl); 5421 5422 // Set up a copy of the stack pointer for use loading and storing any 5423 // arguments that may not fit in the registers available for argument 5424 // passing. 5425 SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 5426 5427 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 5428 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 5429 SmallVector<SDValue, 8> MemOpChains; 5430 5431 bool seenFloatArg = false; 5432 // Walk the register/memloc assignments, inserting copies/loads. 5433 for (unsigned i = 0, j = 0, e = ArgLocs.size(); 5434 i != e; 5435 ++i) { 5436 CCValAssign &VA = ArgLocs[i]; 5437 SDValue Arg = OutVals[i]; 5438 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5439 5440 if (Flags.isByVal()) { 5441 // Argument is an aggregate which is passed by value, thus we need to 5442 // create a copy of it in the local variable space of the current stack 5443 // frame (which is the stack frame of the caller) and pass the address of 5444 // this copy to the callee. 5445 assert((j < ByValArgLocs.size()) && "Index out of bounds!"); 5446 CCValAssign &ByValVA = ByValArgLocs[j++]; 5447 assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!"); 5448 5449 // Memory reserved in the local variable space of the callers stack frame. 
5450 unsigned LocMemOffset = ByValVA.getLocMemOffset(); 5451 5452 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 5453 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()), 5454 StackPtr, PtrOff); 5455 5456 // Create a copy of the argument in the local area of the current 5457 // stack frame. 5458 SDValue MemcpyCall = 5459 CreateCopyOfByValArgument(Arg, PtrOff, 5460 CallSeqStart.getNode()->getOperand(0), 5461 Flags, DAG, dl); 5462 5463 // This must go outside the CALLSEQ_START..END. 5464 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0, 5465 SDLoc(MemcpyCall)); 5466 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 5467 NewCallSeqStart.getNode()); 5468 Chain = CallSeqStart = NewCallSeqStart; 5469 5470 // Pass the address of the aggregate copy on the stack either in a 5471 // physical register or in the parameter list area of the current stack 5472 // frame to the callee. 5473 Arg = PtrOff; 5474 } 5475 5476 // When useCRBits() is true, there can be i1 arguments. 5477 // It is because getRegisterType(MVT::i1) => MVT::i1, 5478 // and for other integer types getRegisterType() => MVT::i32. 5479 // Extend i1 and ensure callee will get i32. 5480 if (Arg.getValueType() == MVT::i1) 5481 Arg = DAG.getNode(Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, 5482 dl, MVT::i32, Arg); 5483 5484 if (VA.isRegLoc()) { 5485 seenFloatArg |= VA.getLocVT().isFloatingPoint(); 5486 // Put argument in a physical register. 5487 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 5488 } else { 5489 // Put argument in the parameter list area of the current stack frame. 5490 assert(VA.isMemLoc()); 5491 unsigned LocMemOffset = VA.getLocMemOffset(); 5492 5493 if (!isTailCall) { 5494 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 5495 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()), 5496 StackPtr, PtrOff); 5497 5498 MemOpChains.push_back( 5499 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo())); 5500 } else { 5501 // Calculate and remember argument location. 5502 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset, 5503 TailCallArguments); 5504 } 5505 } 5506 } 5507 5508 if (!MemOpChains.empty()) 5509 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 5510 5511 // Build a sequence of copy-to-reg nodes chained together with token chain 5512 // and flag operands which copy the outgoing args into the appropriate regs. 5513 SDValue InFlag; 5514 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 5515 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 5516 RegsToPass[i].second, InFlag); 5517 InFlag = Chain.getValue(1); 5518 } 5519 5520 // Set CR bit 6 to true if this is a vararg call with floating args passed in 5521 // registers. 5522 if (isVarArg) { 5523 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); 5524 SDValue Ops[] = { Chain, InFlag }; 5525 5526 Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET, 5527 dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 
2 : 1));
5528
5529 InFlag = Chain.getValue(1);
5530 }
5531
5532 if (isTailCall)
5533 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
5534 TailCallArguments);
5535
5536 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
5537 /* unused except on PPC64 ELFv1 */ false, DAG,
5538 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
5539 NumBytes, Ins, InVals, CS);
5540 }
5541
5542 // Copy an argument into memory, being careful to do this outside the
5543 // call sequence for the call to which the argument belongs.
5544 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
5545 SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
5546 SelectionDAG &DAG, const SDLoc &dl) const {
5547 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
5548 CallSeqStart.getNode()->getOperand(0),
5549 Flags, DAG, dl);
5550 // The MEMCPY must go outside the CALLSEQ_START..END.
5551 int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
5552 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
5553 SDLoc(MemcpyCall));
5554 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5555 NewCallSeqStart.getNode());
5556 return NewCallSeqStart;
5557 }
5558
5559 SDValue PPCTargetLowering::LowerCall_64SVR4(
5560 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
5561 bool isTailCall, bool isPatchPoint,
5562 const SmallVectorImpl<ISD::OutputArg> &Outs,
5563 const SmallVectorImpl<SDValue> &OutVals,
5564 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5565 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5566 ImmutableCallSite CS) const {
5567 bool isELFv2ABI = Subtarget.isELFv2ABI();
5568 bool isLittleEndian = Subtarget.isLittleEndian();
5569 unsigned NumOps = Outs.size();
5570 bool hasNest = false;
5571 bool IsSibCall = false;
5572
5573 EVT PtrVT = getPointerTy(DAG.getDataLayout());
5574 unsigned PtrByteSize = 8;
5575
5576 MachineFunction &MF = DAG.getMachineFunction();
5577
5578 if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
5579 IsSibCall = true;
5580
5581 // Mark this function as potentially containing a tail call. As a
5582 // consequence, the frame pointer will be used for dynamic allocations and
5583 // for restoring the caller's stack pointer in this function's epilog. This
5584 // is done because, by tail calling, the called function might overwrite the
5585 // value in this function's (MF) stack pointer stack slot 0(SP).
5586 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5587 CallConv == CallingConv::Fast)
5588 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5589
5590 assert(!(CallConv == CallingConv::Fast && isVarArg) &&
5591 "fastcc not supported on varargs functions");
5592
5593 // Count how many bytes are to be pushed on the stack, including the linkage
5594 // area, and parameter passing area. On ELFv1, the linkage area is 48 bytes
5595 // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
5596 // area is 32 bytes reserved space for [SP][CR][LR][TOC].
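// Laid out from the stack pointer, the reserved doublewords are roughly:
//   ELFv1: 0 back chain, 8 CR save, 16 LR save, 24 and 32 reserved,
//          40 TOC save  (LinkageSize == 48)
//   ELFv2: 0 back chain, 8 CR save, 16 LR save, 24 TOC save
//          (LinkageSize == 32)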
5597 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 5598 unsigned NumBytes = LinkageSize; 5599 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 5600 unsigned &QFPR_idx = FPR_idx; 5601 5602 static const MCPhysReg GPR[] = { 5603 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 5604 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 5605 }; 5606 static const MCPhysReg VR[] = { 5607 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 5608 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 5609 }; 5610 5611 const unsigned NumGPRs = array_lengthof(GPR); 5612 const unsigned NumFPRs = useSoftFloat() ? 0 : 13; 5613 const unsigned NumVRs = array_lengthof(VR); 5614 const unsigned NumQFPRs = NumFPRs; 5615 5616 // On ELFv2, we can avoid allocating the parameter area if all the arguments 5617 // can be passed to the callee in registers. 5618 // For the fast calling convention, there is another check below. 5619 // Note: We should keep consistent with LowerFormalArguments_64SVR4() 5620 bool HasParameterArea = !isELFv2ABI || isVarArg || CallConv == CallingConv::Fast; 5621 if (!HasParameterArea) { 5622 unsigned ParamAreaSize = NumGPRs * PtrByteSize; 5623 unsigned AvailableFPRs = NumFPRs; 5624 unsigned AvailableVRs = NumVRs; 5625 unsigned NumBytesTmp = NumBytes; 5626 for (unsigned i = 0; i != NumOps; ++i) { 5627 if (Outs[i].Flags.isNest()) continue; 5628 if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags, 5629 PtrByteSize, LinkageSize, ParamAreaSize, 5630 NumBytesTmp, AvailableFPRs, AvailableVRs, 5631 Subtarget.hasQPX())) 5632 HasParameterArea = true; 5633 } 5634 } 5635 5636 // When using the fast calling convention, we don't provide backing for 5637 // arguments that will be in registers. 5638 unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0; 5639 5640 // Avoid allocating parameter area for fastcc functions if all the arguments 5641 // can be passed in the registers. 5642 if (CallConv == CallingConv::Fast) 5643 HasParameterArea = false; 5644 5645 // Add up all the space actually used. 5646 for (unsigned i = 0; i != NumOps; ++i) { 5647 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5648 EVT ArgVT = Outs[i].VT; 5649 EVT OrigVT = Outs[i].ArgVT; 5650 5651 if (Flags.isNest()) 5652 continue; 5653 5654 if (CallConv == CallingConv::Fast) { 5655 if (Flags.isByVal()) { 5656 NumGPRsUsed += (Flags.getByValSize()+7)/8; 5657 if (NumGPRsUsed > NumGPRs) 5658 HasParameterArea = true; 5659 } else { 5660 switch (ArgVT.getSimpleVT().SimpleTy) { 5661 default: llvm_unreachable("Unexpected ValueType for argument!"); 5662 case MVT::i1: 5663 case MVT::i32: 5664 case MVT::i64: 5665 if (++NumGPRsUsed <= NumGPRs) 5666 continue; 5667 break; 5668 case MVT::v4i32: 5669 case MVT::v8i16: 5670 case MVT::v16i8: 5671 case MVT::v2f64: 5672 case MVT::v2i64: 5673 case MVT::v1i128: 5674 case MVT::f128: 5675 if (++NumVRsUsed <= NumVRs) 5676 continue; 5677 break; 5678 case MVT::v4f32: 5679 // When using QPX, this is handled like a FP register, otherwise, it 5680 // is an Altivec register. 5681 if (Subtarget.hasQPX()) { 5682 if (++NumFPRsUsed <= NumFPRs) 5683 continue; 5684 } else { 5685 if (++NumVRsUsed <= NumVRs) 5686 continue; 5687 } 5688 break; 5689 case MVT::f32: 5690 case MVT::f64: 5691 case MVT::v4f64: // QPX 5692 case MVT::v4i1: // QPX 5693 if (++NumFPRsUsed <= NumFPRs) 5694 continue; 5695 break; 5696 } 5697 HasParameterArea = true; 5698 } 5699 } 5700 5701 /* Respect alignment of argument on the stack. 
*/
5702 unsigned Align =
5703 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
5704 NumBytes = ((NumBytes + Align - 1) / Align) * Align;
5705
5706 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
5707 if (Flags.isInConsecutiveRegsLast())
5708 NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
5709 }
5710
5711 unsigned NumBytesActuallyUsed = NumBytes;
5712
5713 // In the old ELFv1 ABI,
5714 // the prolog code of the callee may store up to 8 GPR argument registers to
5715 // the stack, allowing va_start to index over them in memory if the function
5716 // is varargs. Because we cannot tell if this is needed on the caller side,
5717 // we have to conservatively assume that it is needed. As such, make sure we
5718 // have at least enough stack space for the caller to store the 8 GPRs.
5719 // In the ELFv2 ABI, we allocate the parameter area iff a callee
5720 // really requires memory operands, e.g. a vararg function.
5721 if (HasParameterArea)
5722 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
5723 else
5724 NumBytes = LinkageSize;
5725
5726 // Tail call needs the stack to be aligned.
5727 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5728 CallConv == CallingConv::Fast)
5729 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
5730
5731 int SPDiff = 0;
5732
5733 // Calculate by how many bytes the stack has to be adjusted in case of tail
5734 // call optimization.
5735 if (!IsSibCall)
5736 SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
5737
5738 // To protect arguments on the stack from being clobbered in a tail call,
5739 // force all the loads to happen before doing any other lowering.
5740 if (isTailCall)
5741 Chain = DAG.getStackArgumentTokenFactor(Chain);
5742
5743 // Adjust the stack pointer for the new arguments...
5744 // These operations are automatically eliminated by the prolog/epilog pass.
5745 if (!IsSibCall)
5746 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5747 SDValue CallSeqStart = Chain;
5748
5749 // Load the return address and frame pointer so they can be moved somewhere
5750 // else later.
5751 SDValue LROp, FPOp;
5752 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5753
5754 // Set up a copy of the stack pointer for use loading and storing any
5755 // arguments that may not fit in the registers available for argument
5756 // passing.
5757 SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
5758
5759 // Figure out which arguments are going to go in registers, and which in
5760 // memory. Also, if this is a vararg function, floating point operations
5761 // must be stored to our stack, and loaded into integer regs as well, if
5762 // any integer regs are available for argument passing.
5763 unsigned ArgOffset = LinkageSize;
5764
5765 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5766 SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5767
5768 SmallVector<SDValue, 8> MemOpChains;
5769 for (unsigned i = 0; i != NumOps; ++i) {
5770 SDValue Arg = OutVals[i];
5771 ISD::ArgFlagsTy Flags = Outs[i].Flags;
5772 EVT ArgVT = Outs[i].VT;
5773 EVT OrigVT = Outs[i].ArgVT;
5774
5775 // PtrOff will be used to store the current argument to the stack if a
5776 // register cannot be found for it.
5777 SDValue PtrOff;
5778
5779 // We re-align the argument offset for each argument, except when using the
5780 // fast calling convention, when we need to make sure we do that only when
5781 // we'll actually use a stack slot.
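// For example (illustrative numbers): with ArgOffset == 56 and a vector
// argument whose stack slot alignment is 16, ComputePtrOff below rounds
// ArgOffset up to ((56 + 15) / 16) * 16 == 64 before forming the address.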
5782 auto ComputePtrOff = [&]() { 5783 /* Respect alignment of argument on the stack. */ 5784 unsigned Align = 5785 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 5786 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 5787 5788 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType()); 5789 5790 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 5791 }; 5792 5793 if (CallConv != CallingConv::Fast) { 5794 ComputePtrOff(); 5795 5796 /* Compute GPR index associated with argument offset. */ 5797 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 5798 GPR_idx = std::min(GPR_idx, NumGPRs); 5799 } 5800 5801 // Promote integers to 64-bit values. 5802 if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) { 5803 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 5804 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 5805 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 5806 } 5807 5808 // FIXME memcpy is used way more than necessary. Correctness first. 5809 // Note: "by value" is code for passing a structure by value, not 5810 // basic types. 5811 if (Flags.isByVal()) { 5812 // Note: Size includes alignment padding, so 5813 // struct x { short a; char b; } 5814 // will have Size = 4. With #pragma pack(1), it will have Size = 3. 5815 // These are the proper values we need for right-justifying the 5816 // aggregate in a parameter register. 5817 unsigned Size = Flags.getByValSize(); 5818 5819 // An empty aggregate parameter takes up no storage and no 5820 // registers. 5821 if (Size == 0) 5822 continue; 5823 5824 if (CallConv == CallingConv::Fast) 5825 ComputePtrOff(); 5826 5827 // All aggregates smaller than 8 bytes must be passed right-justified. 5828 if (Size==1 || Size==2 || Size==4) { 5829 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32); 5830 if (GPR_idx != NumGPRs) { 5831 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 5832 MachinePointerInfo(), VT); 5833 MemOpChains.push_back(Load.getValue(1)); 5834 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5835 5836 ArgOffset += PtrByteSize; 5837 continue; 5838 } 5839 } 5840 5841 if (GPR_idx == NumGPRs && Size < 8) { 5842 SDValue AddPtr = PtrOff; 5843 if (!isLittleEndian) { 5844 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 5845 PtrOff.getValueType()); 5846 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5847 } 5848 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5849 CallSeqStart, 5850 Flags, DAG, dl); 5851 ArgOffset += PtrByteSize; 5852 continue; 5853 } 5854 // Copy entire object into memory. There are cases where gcc-generated 5855 // code assumes it is there, even if it could be put entirely into 5856 // registers. (This is not what the doc says.) 5857 5858 // FIXME: The above statement is likely due to a misunderstanding of the 5859 // documents. All arguments must be copied into the parameter area BY 5860 // THE CALLEE in the event that the callee takes the address of any 5861 // formal argument. That has not yet been implemented. However, it is 5862 // reasonable to use the stack area as a staging area for the register 5863 // load. 5864 5865 // Skip this for small aggregates, as we will use the same slot for a 5866 // right-justified copy, below. 5867 if (Size >= 8) 5868 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 5869 CallSeqStart, 5870 Flags, DAG, dl); 5871 5872 // When a register is available, pass a small aggregate right-justified. 
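// Illustrative example (assuming big-endian and PtrByteSize == 8): for a
// 3-byte aggregate, the memcpy below targets PtrOff + (8 - 3) = PtrOff + 5,
// so the subsequent doubleword load leaves the aggregate in the
// least-significant bytes of the GPR, i.e. right-justified.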
5873 if (Size < 8 && GPR_idx != NumGPRs) { 5874 // The easiest way to get this right-justified in a register 5875 // is to copy the structure into the rightmost portion of a 5876 // local variable slot, then load the whole slot into the 5877 // register. 5878 // FIXME: The memcpy seems to produce pretty awful code for 5879 // small aggregates, particularly for packed ones. 5880 // FIXME: It would be preferable to use the slot in the 5881 // parameter save area instead of a new local variable. 5882 SDValue AddPtr = PtrOff; 5883 if (!isLittleEndian) { 5884 SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType()); 5885 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5886 } 5887 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5888 CallSeqStart, 5889 Flags, DAG, dl); 5890 5891 // Load the slot into the register. 5892 SDValue Load = 5893 DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo()); 5894 MemOpChains.push_back(Load.getValue(1)); 5895 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5896 5897 // Done with this argument. 5898 ArgOffset += PtrByteSize; 5899 continue; 5900 } 5901 5902 // For aggregates larger than PtrByteSize, copy the pieces of the 5903 // object that fit into registers from the parameter save area. 5904 for (unsigned j=0; j<Size; j+=PtrByteSize) { 5905 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 5906 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 5907 if (GPR_idx != NumGPRs) { 5908 SDValue Load = 5909 DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo()); 5910 MemOpChains.push_back(Load.getValue(1)); 5911 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5912 ArgOffset += PtrByteSize; 5913 } else { 5914 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 5915 break; 5916 } 5917 } 5918 continue; 5919 } 5920 5921 switch (Arg.getSimpleValueType().SimpleTy) { 5922 default: llvm_unreachable("Unexpected ValueType for argument!"); 5923 case MVT::i1: 5924 case MVT::i32: 5925 case MVT::i64: 5926 if (Flags.isNest()) { 5927 // The 'nest' parameter, if any, is passed in R11. 5928 RegsToPass.push_back(std::make_pair(PPC::X11, Arg)); 5929 hasNest = true; 5930 break; 5931 } 5932 5933 // These can be scalar arguments or elements of an integer array type 5934 // passed directly. Clang may use those instead of "byval" aggregate 5935 // types to avoid forcing arguments to memory unnecessarily. 5936 if (GPR_idx != NumGPRs) { 5937 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 5938 } else { 5939 if (CallConv == CallingConv::Fast) 5940 ComputePtrOff(); 5941 5942 assert(HasParameterArea && 5943 "Parameter area must exist to pass an argument in memory."); 5944 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5945 true, isTailCall, false, MemOpChains, 5946 TailCallArguments, dl); 5947 if (CallConv == CallingConv::Fast) 5948 ArgOffset += PtrByteSize; 5949 } 5950 if (CallConv != CallingConv::Fast) 5951 ArgOffset += PtrByteSize; 5952 break; 5953 case MVT::f32: 5954 case MVT::f64: { 5955 // These can be scalar arguments or elements of a float array type 5956 // passed directly. The latter are used to implement ELFv2 homogenous 5957 // float aggregates. 5958 5959 // Named arguments go into FPRs first, and once they overflow, the 5960 // remaining arguments go into GPRs and then the parameter save area. 5961 // Unnamed arguments for vararg functions always go to GPRs and 5962 // then the parameter save area. 
For now, put all arguments to vararg 5963 // routines always in both locations (FPR *and* GPR or stack slot). 5964 bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs; 5965 bool NeededLoad = false; 5966 5967 // First load the argument into the next available FPR. 5968 if (FPR_idx != NumFPRs) 5969 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 5970 5971 // Next, load the argument into a GPR or stack slot if needed. 5972 if (!NeedGPROrStack) 5973 ; 5974 else if (GPR_idx != NumGPRs && CallConv != CallingConv::Fast) { 5975 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8 5976 // once we support fp <-> gpr moves. 5977 5978 // In the non-vararg case, this can only ever happen in the 5979 // presence of f32 array types, since otherwise we never run 5980 // out of FPRs before running out of GPRs. 5981 SDValue ArgVal; 5982 5983 // Double values are always passed in a single GPR. 5984 if (Arg.getValueType() != MVT::f32) { 5985 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg); 5986 5987 // Non-array float values are extended and passed in a GPR. 5988 } else if (!Flags.isInConsecutiveRegs()) { 5989 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg); 5990 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal); 5991 5992 // If we have an array of floats, we collect every odd element 5993 // together with its predecessor into one GPR. 5994 } else if (ArgOffset % PtrByteSize != 0) { 5995 SDValue Lo, Hi; 5996 Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]); 5997 Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg); 5998 if (!isLittleEndian) 5999 std::swap(Lo, Hi); 6000 ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 6001 6002 // The final element, if even, goes into the first half of a GPR. 6003 } else if (Flags.isInConsecutiveRegsLast()) { 6004 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg); 6005 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal); 6006 if (!isLittleEndian) 6007 ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal, 6008 DAG.getConstant(32, dl, MVT::i32)); 6009 6010 // Non-final even elements are skipped; they will be handled 6011 // together with the subsequent argument on the next go-around. 6012 } else 6013 ArgVal = SDValue(); 6014 6015 if (ArgVal.getNode()) 6016 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal)); 6017 } else { 6018 if (CallConv == CallingConv::Fast) 6019 ComputePtrOff(); 6020 6021 // Single-precision floating-point values are mapped to the 6022 // second (rightmost) word of the stack doubleword. 6023 if (Arg.getValueType() == MVT::f32 && 6024 !isLittleEndian && !Flags.isInConsecutiveRegs()) { 6025 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType()); 6026 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 6027 } 6028 6029 assert(HasParameterArea && 6030 "Parameter area must exist to pass an argument in memory."); 6031 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6032 true, isTailCall, false, MemOpChains, 6033 TailCallArguments, dl); 6034 6035 NeededLoad = true; 6036 } 6037 // When passing an array of floats, the array occupies consecutive 6038 // space in the argument area; only round up to the next doubleword 6039 // at the end of the array. Otherwise, each float takes 8 bytes. 6040 if (CallConv != CallingConv::Fast || NeededLoad) { 6041 ArgOffset += (Arg.getValueType() == MVT::f32 && 6042 Flags.isInConsecutiveRegs()) ?
4 : 8; 6043 if (Flags.isInConsecutiveRegsLast()) 6044 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 6045 } 6046 break; 6047 } 6048 case MVT::v4f32: 6049 case MVT::v4i32: 6050 case MVT::v8i16: 6051 case MVT::v16i8: 6052 case MVT::v2f64: 6053 case MVT::v2i64: 6054 case MVT::v1i128: 6055 case MVT::f128: 6056 if (!Subtarget.hasQPX()) { 6057 // These can be scalar arguments or elements of a vector array type 6058 // passed directly. The latter are used to implement ELFv2 homogenous 6059 // vector aggregates. 6060 6061 // For a varargs call, named arguments go into VRs or on the stack as 6062 // usual; unnamed arguments always go to the stack or the corresponding 6063 // GPRs when within range. For now, we always put the value in both 6064 // locations (or even all three). 6065 if (isVarArg) { 6066 assert(HasParameterArea && 6067 "Parameter area must exist if we have a varargs call."); 6068 // We could elide this store in the case where the object fits 6069 // entirely in R registers. Maybe later. 6070 SDValue Store = 6071 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 6072 MemOpChains.push_back(Store); 6073 if (VR_idx != NumVRs) { 6074 SDValue Load = 6075 DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo()); 6076 MemOpChains.push_back(Load.getValue(1)); 6077 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 6078 } 6079 ArgOffset += 16; 6080 for (unsigned i=0; i<16; i+=PtrByteSize) { 6081 if (GPR_idx == NumGPRs) 6082 break; 6083 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 6084 DAG.getConstant(i, dl, PtrVT)); 6085 SDValue Load = 6086 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo()); 6087 MemOpChains.push_back(Load.getValue(1)); 6088 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6089 } 6090 break; 6091 } 6092 6093 // Non-varargs Altivec params go into VRs or on the stack. 6094 if (VR_idx != NumVRs) { 6095 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 6096 } else { 6097 if (CallConv == CallingConv::Fast) 6098 ComputePtrOff(); 6099 6100 assert(HasParameterArea && 6101 "Parameter area must exist to pass an argument in memory."); 6102 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6103 true, isTailCall, true, MemOpChains, 6104 TailCallArguments, dl); 6105 if (CallConv == CallingConv::Fast) 6106 ArgOffset += 16; 6107 } 6108 6109 if (CallConv != CallingConv::Fast) 6110 ArgOffset += 16; 6111 break; 6112 } // not QPX 6113 6114 assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 && 6115 "Invalid QPX parameter type"); 6116 6117 LLVM_FALLTHROUGH; 6118 case MVT::v4f64: 6119 case MVT::v4i1: { 6120 bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32; 6121 if (isVarArg) { 6122 assert(HasParameterArea && 6123 "Parameter area must exist if we have a varargs call."); 6124 // We could elide this store in the case where the object fits 6125 // entirely in R registers. Maybe later. 6126 SDValue Store = 6127 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 6128 MemOpChains.push_back(Store); 6129 if (QFPR_idx != NumQFPRs) { 6130 SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, Store, 6131 PtrOff, MachinePointerInfo()); 6132 MemOpChains.push_back(Load.getValue(1)); 6133 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load)); 6134 } 6135 ArgOffset += (IsF32 ? 16 : 32); 6136 for (unsigned i = 0; i < (IsF32 ? 
16U : 32U); i += PtrByteSize) { 6137 if (GPR_idx == NumGPRs) 6138 break; 6139 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 6140 DAG.getConstant(i, dl, PtrVT)); 6141 SDValue Load = 6142 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo()); 6143 MemOpChains.push_back(Load.getValue(1)); 6144 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6145 } 6146 break; 6147 } 6148 6149 // Non-varargs QPX params go into registers or on the stack. 6150 if (QFPR_idx != NumQFPRs) { 6151 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg)); 6152 } else { 6153 if (CallConv == CallingConv::Fast) 6154 ComputePtrOff(); 6155 6156 assert(HasParameterArea && 6157 "Parameter area must exist to pass an argument in memory."); 6158 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6159 true, isTailCall, true, MemOpChains, 6160 TailCallArguments, dl); 6161 if (CallConv == CallingConv::Fast) 6162 ArgOffset += (IsF32 ? 16 : 32); 6163 } 6164 6165 if (CallConv != CallingConv::Fast) 6166 ArgOffset += (IsF32 ? 16 : 32); 6167 break; 6168 } 6169 } 6170 } 6171 6172 assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) && 6173 "mismatch in size of parameter area"); 6174 (void)NumBytesActuallyUsed; 6175 6176 if (!MemOpChains.empty()) 6177 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 6178 6179 // Check if this is an indirect call (MTCTR/BCTRL). 6180 // See PrepareCall() for more information about calls through function 6181 // pointers in the 64-bit SVR4 ABI. 6182 if (!isTailCall && !isPatchPoint && 6183 !isFunctionGlobalAddress(Callee) && 6184 !isa<ExternalSymbolSDNode>(Callee)) { 6185 // Load r2 into a virtual register and store it to the TOC save area. 6186 setUsesTOCBasePtr(DAG); 6187 SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64); 6188 // TOC save area offset. 6189 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset(); 6190 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl); 6191 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 6192 Chain = DAG.getStore( 6193 Val.getValue(1), dl, Val, AddPtr, 6194 MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset)); 6195 // In the ELFv2 ABI, R12 must contain the address of an indirect callee. 6196 // This does not mean the MTCTR instruction must use R12; it's easier 6197 // to model this as an extra parameter, so do that. 6198 if (isELFv2ABI && !isPatchPoint) 6199 RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee)); 6200 } 6201 6202 // Build a sequence of copy-to-reg nodes chained together with token chain 6203 // and flag operands which copy the outgoing args into the appropriate regs. 
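// Each CopyToReg below consumes the glue (InFlag) produced by the previous
// copy, so the scheduler cannot separate the argument copies from one
// another or from the call that consumes the final glue value.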
6204 SDValue InFlag; 6205 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 6206 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 6207 RegsToPass[i].second, InFlag); 6208 InFlag = Chain.getValue(1); 6209 } 6210 6211 if (isTailCall && !IsSibCall) 6212 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp, 6213 TailCallArguments); 6214 6215 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, hasNest, 6216 DAG, RegsToPass, InFlag, Chain, CallSeqStart, Callee, 6217 SPDiff, NumBytes, Ins, InVals, CS); 6218 } 6219 6220 SDValue PPCTargetLowering::LowerCall_Darwin( 6221 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg, 6222 bool isTailCall, bool isPatchPoint, 6223 const SmallVectorImpl<ISD::OutputArg> &Outs, 6224 const SmallVectorImpl<SDValue> &OutVals, 6225 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 6226 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, 6227 ImmutableCallSite CS) const { 6228 unsigned NumOps = Outs.size(); 6229 6230 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6231 bool isPPC64 = PtrVT == MVT::i64; 6232 unsigned PtrByteSize = isPPC64 ? 8 : 4; 6233 6234 MachineFunction &MF = DAG.getMachineFunction(); 6235 6236 // Mark this function as potentially containing a call that may be tail-call 6237 // optimized. As a consequence the frame pointer will be used for dynamic 6238 // stack allocation and for restoring the caller's stack pointer in this 6239 // function's epilogue. This is done because, by tail calling, the called 6240 // function might overwrite the stack pointer value saved in this function's (MF) stack slot 0(SP). 6241 if (getTargetMachine().Options.GuaranteedTailCallOpt && 6242 CallConv == CallingConv::Fast) 6243 MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); 6244 6245 // Count how many bytes are to be pushed on the stack, including the linkage 6246 // area, and parameter passing area. We start with 24/48 bytes, which is 6247 // pre-reserved space for [SP][CR][LR][3 x unused]. 6248 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 6249 unsigned NumBytes = LinkageSize; 6250 6251 // Add up all the space actually used. 6252 // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually 6253 // they all go in registers, but we must reserve stack space for them for 6254 // possible use by the caller. In varargs or 64-bit calls, parameters are 6255 // assigned stack space in order, with padding so Altivec parameters are 6256 // 16-byte aligned. 6257 unsigned nAltivecParamsAtEnd = 0; 6258 for (unsigned i = 0; i != NumOps; ++i) { 6259 ISD::ArgFlagsTy Flags = Outs[i].Flags; 6260 EVT ArgVT = Outs[i].VT; 6261 // Varargs Altivec parameters are padded to a 16-byte boundary. 6262 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 6263 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 6264 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) { 6265 if (!isVarArg && !isPPC64) { 6266 // Non-varargs Altivec parameters go after all the non-Altivec 6267 // parameters; handle those later so we know how much padding we need. 6268 nAltivecParamsAtEnd++; 6269 continue; 6270 } 6271 // Varargs and 64-bit Altivec parameters are padded to a 16-byte boundary. 6272 NumBytes = ((NumBytes+15)/16)*16; 6273 } 6274 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 6275 } 6276 6277 // Allow for Altivec parameters at the end, if needed.
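// Illustrative example (values chosen arbitrarily): with NumBytes == 56 and
// nAltivecParamsAtEnd == 2, the code below first rounds 56 up to 64 via
// ((56 + 15) / 16) * 16 and then adds 2 * 16, for a final NumBytes of 96.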
6278 if (nAltivecParamsAtEnd) { 6279 NumBytes = ((NumBytes+15)/16)*16; 6280 NumBytes += 16*nAltivecParamsAtEnd; 6281 } 6282 6283 // The prolog code of the callee may store up to 8 GPR argument registers to 6284 // the stack, allowing va_start to index over them in memory if it is varargs. 6285 // Because we cannot tell if this is needed on the caller side, we have to 6286 // conservatively assume that it is needed. As such, make sure we have at 6287 // least enough stack space for the caller to store the 8 GPRs. 6288 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize); 6289 6290 // Tail call needs the stack to be aligned. 6291 if (getTargetMachine().Options.GuaranteedTailCallOpt && 6292 CallConv == CallingConv::Fast) 6293 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes); 6294 6295 // Calculate by how many bytes the stack has to be adjusted in case of tail 6296 // call optimization. 6297 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 6298 6299 // To protect arguments on the stack from being clobbered in a tail call, 6300 // force all the loads to happen before doing any other lowering. 6301 if (isTailCall) 6302 Chain = DAG.getStackArgumentTokenFactor(Chain); 6303 6304 // Adjust the stack pointer for the new arguments... 6305 // These operations are automatically eliminated by the prolog/epilog pass 6306 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl); 6307 SDValue CallSeqStart = Chain; 6308 6309 // Load the return address and frame pointer so they can be moved somewhere 6310 // else later. 6311 SDValue LROp, FPOp; 6312 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl); 6313 6314 // Set up a copy of the stack pointer for use loading and storing any 6315 // arguments that may not fit in the registers available for argument 6316 // passing. 6317 SDValue StackPtr; 6318 if (isPPC64) 6319 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 6320 else 6321 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 6322 6323 // Figure out which arguments are going to go in registers, and which in 6324 // memory. Also, if this is a vararg function, floating point arguments 6325 // must be stored to our stack, and loaded into integer regs as well, if 6326 // any integer regs are available for argument passing. 6327 unsigned ArgOffset = LinkageSize; 6328 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 6329 6330 static const MCPhysReg GPR_32[] = { // 32-bit registers. 6331 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 6332 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 6333 }; 6334 static const MCPhysReg GPR_64[] = { // 64-bit registers. 6335 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 6336 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 6337 }; 6338 static const MCPhysReg VR[] = { 6339 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 6340 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 6341 }; 6342 const unsigned NumGPRs = array_lengthof(GPR_32); 6343 const unsigned NumFPRs = 13; 6344 const unsigned NumVRs = array_lengthof(VR); 6345 6346 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32; 6347 6348 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 6349 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 6350 6351 SmallVector<SDValue, 8> MemOpChains; 6352 for (unsigned i = 0; i != NumOps; ++i) { 6353 SDValue Arg = OutVals[i]; 6354 ISD::ArgFlagsTy Flags = Outs[i].Flags; 6355 6356 // PtrOff will be used to store the current argument to the stack if a 6357 // register cannot be found for it.
6358 SDValue PtrOff; 6359 6360 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType()); 6361 6362 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 6363 6364 // On PPC64, promote integers to 64-bit values. 6365 if (isPPC64 && Arg.getValueType() == MVT::i32) { 6366 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 6367 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 6368 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 6369 } 6370 6371 // FIXME memcpy is used way more than necessary. Correctness first. 6372 // Note: "by value" is code for passing a structure by value, not 6373 // basic types. 6374 if (Flags.isByVal()) { 6375 unsigned Size = Flags.getByValSize(); 6376 // Very small objects are passed right-justified. Everything else is 6377 // passed left-justified. 6378 if (Size==1 || Size==2) { 6379 EVT VT = (Size==1) ? MVT::i8 : MVT::i16; 6380 if (GPR_idx != NumGPRs) { 6381 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 6382 MachinePointerInfo(), VT); 6383 MemOpChains.push_back(Load.getValue(1)); 6384 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6385 6386 ArgOffset += PtrByteSize; 6387 } else { 6388 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 6389 PtrOff.getValueType()); 6390 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 6391 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 6392 CallSeqStart, 6393 Flags, DAG, dl); 6394 ArgOffset += PtrByteSize; 6395 } 6396 continue; 6397 } 6398 // Copy entire object into memory. There are cases where gcc-generated 6399 // code assumes it is there, even if it could be put entirely into 6400 // registers. (This is not what the doc says.) 6401 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 6402 CallSeqStart, 6403 Flags, DAG, dl); 6404 6405 // For small aggregates (Darwin only) and aggregates >= PtrByteSize, 6406 // copy the pieces of the object that fit into registers from the 6407 // parameter save area. 
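// Illustrative walk of the loop below (assuming Size == 20 and
// PtrByteSize == 8): the pieces at j == 0 and j == 8 are loaded into GPRs
// while registers remain; if the GPRs run out at j == 16, ArgOffset
// advances by ((20 - 16 + 7) / 8) * 8 == 8 to account for the tail that
// stays in the memory copy made above.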
6408 for (unsigned j=0; j<Size; j+=PtrByteSize) { 6409 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 6410 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 6411 if (GPR_idx != NumGPRs) { 6412 SDValue Load = 6413 DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo()); 6414 MemOpChains.push_back(Load.getValue(1)); 6415 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6416 ArgOffset += PtrByteSize; 6417 } else { 6418 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 6419 break; 6420 } 6421 } 6422 continue; 6423 } 6424 6425 switch (Arg.getSimpleValueType().SimpleTy) { 6426 default: llvm_unreachable("Unexpected ValueType for argument!"); 6427 case MVT::i1: 6428 case MVT::i32: 6429 case MVT::i64: 6430 if (GPR_idx != NumGPRs) { 6431 if (Arg.getValueType() == MVT::i1) 6432 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg); 6433 6434 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 6435 } else { 6436 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6437 isPPC64, isTailCall, false, MemOpChains, 6438 TailCallArguments, dl); 6439 } 6440 ArgOffset += PtrByteSize; 6441 break; 6442 case MVT::f32: 6443 case MVT::f64: 6444 if (FPR_idx != NumFPRs) { 6445 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 6446 6447 if (isVarArg) { 6448 SDValue Store = 6449 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 6450 MemOpChains.push_back(Store); 6451 6452 // Float varargs are always shadowed in available integer registers 6453 if (GPR_idx != NumGPRs) { 6454 SDValue Load = 6455 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo()); 6456 MemOpChains.push_back(Load.getValue(1)); 6457 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6458 } 6459 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){ 6460 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType()); 6461 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 6462 SDValue Load = 6463 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo()); 6464 MemOpChains.push_back(Load.getValue(1)); 6465 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6466 } 6467 } else { 6468 // If we have any FPRs remaining, we may also have GPRs remaining. 6469 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 6470 // GPRs. 6471 if (GPR_idx != NumGPRs) 6472 ++GPR_idx; 6473 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 6474 !isPPC64) // PPC64 has 64-bit GPR's obviously :) 6475 ++GPR_idx; 6476 } 6477 } else 6478 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6479 isPPC64, isTailCall, false, MemOpChains, 6480 TailCallArguments, dl); 6481 if (isPPC64) 6482 ArgOffset += 8; 6483 else 6484 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8; 6485 break; 6486 case MVT::v4f32: 6487 case MVT::v4i32: 6488 case MVT::v8i16: 6489 case MVT::v16i8: 6490 if (isVarArg) { 6491 // These go aligned on the stack, or in the corresponding R registers 6492 // when within range. The Darwin PPC ABI doc claims they also go in 6493 // V registers; in fact gcc does this only for arguments that are 6494 // prototyped, not for those that match the ... We do it for all 6495 // arguments, seems to work. 6496 while (ArgOffset % 16 !=0) { 6497 ArgOffset += PtrByteSize; 6498 if (GPR_idx != NumGPRs) 6499 GPR_idx++; 6500 } 6501 // We could elide this store in the case where the object fits 6502 // entirely in R registers. Maybe later. 
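// The sequence below stores the vector once to the parameter area and then
// reloads it both into a VR and, piecewise, into any remaining GPRs, so a
// varargs callee finds the value wherever it chooses to look for it.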
6503 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 6504 DAG.getConstant(ArgOffset, dl, PtrVT)); 6505 SDValue Store = 6506 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 6507 MemOpChains.push_back(Store); 6508 if (VR_idx != NumVRs) { 6509 SDValue Load = 6510 DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo()); 6511 MemOpChains.push_back(Load.getValue(1)); 6512 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 6513 } 6514 ArgOffset += 16; 6515 for (unsigned i=0; i<16; i+=PtrByteSize) { 6516 if (GPR_idx == NumGPRs) 6517 break; 6518 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 6519 DAG.getConstant(i, dl, PtrVT)); 6520 SDValue Load = 6521 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo()); 6522 MemOpChains.push_back(Load.getValue(1)); 6523 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6524 } 6525 break; 6526 } 6527 6528 // Non-varargs Altivec params generally go in registers, but have 6529 // stack space allocated at the end. 6530 if (VR_idx != NumVRs) { 6531 // Doesn't have GPR space allocated. 6532 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 6533 } else if (nAltivecParamsAtEnd==0) { 6534 // We are emitting Altivec params in order. 6535 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6536 isPPC64, isTailCall, true, MemOpChains, 6537 TailCallArguments, dl); 6538 ArgOffset += 16; 6539 } 6540 break; 6541 } 6542 } 6543 // If all Altivec parameters fit in registers, as they usually do, 6544 // they get stack space following the non-Altivec parameters. We 6545 // don't track this here because nobody below needs it. 6546 // If there are more Altivec parameters than fit in registers emit 6547 // the stores here. 6548 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) { 6549 unsigned j = 0; 6550 // Offset is aligned; skip 1st 12 params which go in V registers. 6551 ArgOffset = ((ArgOffset+15)/16)*16; 6552 ArgOffset += 12*16; 6553 for (unsigned i = 0; i != NumOps; ++i) { 6554 SDValue Arg = OutVals[i]; 6555 EVT ArgType = Outs[i].VT; 6556 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || 6557 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { 6558 if (++j > NumVRs) { 6559 SDValue PtrOff; 6560 // We are emitting Altivec params in order. 6561 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6562 isPPC64, isTailCall, true, MemOpChains, 6563 TailCallArguments, dl); 6564 ArgOffset += 16; 6565 } 6566 } 6567 } 6568 } 6569 6570 if (!MemOpChains.empty()) 6571 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 6572 6573 // On Darwin, R12 must contain the address of an indirect callee. This does 6574 // not mean the MTCTR instruction must use R12; it's easier to model this as 6575 // an extra parameter, so do that. 6576 if (!isTailCall && 6577 !isFunctionGlobalAddress(Callee) && 6578 !isa<ExternalSymbolSDNode>(Callee) && 6579 !isBLACompatibleAddress(Callee, DAG)) 6580 RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 : 6581 PPC::R12), Callee)); 6582 6583 // Build a sequence of copy-to-reg nodes chained together with token chain 6584 // and flag operands which copy the outgoing args into the appropriate regs. 
6585 SDValue InFlag; 6586 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 6587 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 6588 RegsToPass[i].second, InFlag); 6589 InFlag = Chain.getValue(1); 6590 } 6591 6592 if (isTailCall) 6593 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp, 6594 TailCallArguments); 6595 6596 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, 6597 /* unused except on PPC64 ELFv1 */ false, DAG, 6598 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff, 6599 NumBytes, Ins, InVals, CS); 6600 } 6601 6602 bool 6603 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv, 6604 MachineFunction &MF, bool isVarArg, 6605 const SmallVectorImpl<ISD::OutputArg> &Outs, 6606 LLVMContext &Context) const { 6607 SmallVector<CCValAssign, 16> RVLocs; 6608 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); 6609 return CCInfo.CheckReturn( 6610 Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold) 6611 ? RetCC_PPC_Cold 6612 : RetCC_PPC); 6613 } 6614 6615 SDValue 6616 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 6617 bool isVarArg, 6618 const SmallVectorImpl<ISD::OutputArg> &Outs, 6619 const SmallVectorImpl<SDValue> &OutVals, 6620 const SDLoc &dl, SelectionDAG &DAG) const { 6621 SmallVector<CCValAssign, 16> RVLocs; 6622 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 6623 *DAG.getContext()); 6624 CCInfo.AnalyzeReturn(Outs, 6625 (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold) 6626 ? RetCC_PPC_Cold 6627 : RetCC_PPC); 6628 6629 SDValue Flag; 6630 SmallVector<SDValue, 4> RetOps(1, Chain); 6631 6632 // Copy the result values into the output registers. 6633 for (unsigned i = 0; i != RVLocs.size(); ++i) { 6634 CCValAssign &VA = RVLocs[i]; 6635 assert(VA.isRegLoc() && "Can only return in registers!"); 6636 6637 SDValue Arg = OutVals[i]; 6638 6639 switch (VA.getLocInfo()) { 6640 default: llvm_unreachable("Unknown loc info!"); 6641 case CCValAssign::Full: break; 6642 case CCValAssign::AExt: 6643 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 6644 break; 6645 case CCValAssign::ZExt: 6646 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 6647 break; 6648 case CCValAssign::SExt: 6649 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 6650 break; 6651 } 6652 6653 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 6654 Flag = Chain.getValue(1); 6655 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 6656 } 6657 6658 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 6659 const MCPhysReg *I = 6660 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction()); 6661 if (I) { 6662 for (; *I; ++I) { 6663 6664 if (PPC::G8RCRegClass.contains(*I)) 6665 RetOps.push_back(DAG.getRegister(*I, MVT::i64)); 6666 else if (PPC::F8RCRegClass.contains(*I)) 6667 RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64))); 6668 else if (PPC::CRRCRegClass.contains(*I)) 6669 RetOps.push_back(DAG.getRegister(*I, MVT::i1)); 6670 else if (PPC::VRRCRegClass.contains(*I)) 6671 RetOps.push_back(DAG.getRegister(*I, MVT::Other)); 6672 else 6673 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 6674 } 6675 } 6676 6677 RetOps[0] = Chain; // Update chain. 6678 6679 // Add the flag if we have it. 
6680 if (Flag.getNode()) 6681 RetOps.push_back(Flag); 6682 6683 return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps); 6684 } 6685 6686 SDValue 6687 PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, 6688 SelectionDAG &DAG) const { 6689 SDLoc dl(Op); 6690 6691 // Get the correct type for integers. 6692 EVT IntVT = Op.getValueType(); 6693 6694 // Get the inputs. 6695 SDValue Chain = Op.getOperand(0); 6696 SDValue FPSIdx = getFramePointerFrameIndex(DAG); 6697 // Build a DYNAREAOFFSET node. 6698 SDValue Ops[2] = {Chain, FPSIdx}; 6699 SDVTList VTs = DAG.getVTList(IntVT); 6700 return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops); 6701 } 6702 6703 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, 6704 SelectionDAG &DAG) const { 6705 // When we pop the dynamic allocation, we need to restore the SP link. 6706 SDLoc dl(Op); 6707 6708 // Get the correct type for pointers. 6709 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6710 6711 // Construct the stack pointer operand. 6712 bool isPPC64 = Subtarget.isPPC64(); 6713 unsigned SP = isPPC64 ? PPC::X1 : PPC::R1; 6714 SDValue StackPtr = DAG.getRegister(SP, PtrVT); 6715 6716 // Get the operands for the STACKRESTORE. 6717 SDValue Chain = Op.getOperand(0); 6718 SDValue SaveSP = Op.getOperand(1); 6719 6720 // Load the old link SP. 6721 SDValue LoadLinkSP = 6722 DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo()); 6723 6724 // Restore the stack pointer. 6725 Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP); 6726 6727 // Store the old link SP. 6728 return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo()); 6729 } 6730 6731 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const { 6732 MachineFunction &MF = DAG.getMachineFunction(); 6733 bool isPPC64 = Subtarget.isPPC64(); 6734 EVT PtrVT = getPointerTy(MF.getDataLayout()); 6735 6736 // Get the current return address save index. The users of this index will 6737 // be primarily the tail call lowering code. 6738 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 6739 int RASI = FI->getReturnAddrSaveIndex(); 6740 6741 // If the return address save index hasn't been defined yet. 6742 if (!RASI) { 6743 // Find out the fixed offset of the return address save area. 6744 int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset(); 6745 // Allocate the frame index for the return address save area. 6746 RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false); 6747 // Save the result. 6748 FI->setReturnAddrSaveIndex(RASI); 6749 } 6750 return DAG.getFrameIndex(RASI, PtrVT); 6751 } 6752 6753 SDValue 6754 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const { 6755 MachineFunction &MF = DAG.getMachineFunction(); 6756 bool isPPC64 = Subtarget.isPPC64(); 6757 EVT PtrVT = getPointerTy(MF.getDataLayout()); 6758 6759 // Get the current frame pointer save index. The users of this index will be 6760 // primarily DYNALLOC instructions. 6761 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 6762 int FPSI = FI->getFramePointerSaveIndex(); 6763 6764 // If the frame pointer save index hasn't been defined yet. 6765 if (!FPSI) { 6766 // Find out the fixed offset of the frame pointer save area. 6767 int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset(); 6768 // Allocate the frame index for the frame pointer save area. 6769 FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true); 6770 // Save the result.
6771 FI->setFramePointerSaveIndex(FPSI); 6772 } 6773 return DAG.getFrameIndex(FPSI, PtrVT); 6774 } 6775 6776 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, 6777 SelectionDAG &DAG) const { 6778 // Get the inputs. 6779 SDValue Chain = Op.getOperand(0); 6780 SDValue Size = Op.getOperand(1); 6781 SDLoc dl(Op); 6782 6783 // Get the correct type for pointers. 6784 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6785 // Negate the size. 6786 SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT, 6787 DAG.getConstant(0, dl, PtrVT), Size); 6788 // Construct a node for the frame pointer save index. 6789 SDValue FPSIdx = getFramePointerFrameIndex(DAG); 6790 // Build a DYNALLOC node. 6791 SDValue Ops[3] = { Chain, NegSize, FPSIdx }; 6792 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other); 6793 return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops); 6794 } 6795 6796 SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op, 6797 SelectionDAG &DAG) const { 6798 MachineFunction &MF = DAG.getMachineFunction(); 6799 6800 bool isPPC64 = Subtarget.isPPC64(); 6801 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6802 6803 int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false); 6804 return DAG.getFrameIndex(FI, PtrVT); 6805 } 6806 6807 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, 6808 SelectionDAG &DAG) const { 6809 SDLoc DL(Op); 6810 return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL, 6811 DAG.getVTList(MVT::i32, MVT::Other), 6812 Op.getOperand(0), Op.getOperand(1)); 6813 } 6814 6815 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op, 6816 SelectionDAG &DAG) const { 6817 SDLoc DL(Op); 6818 return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other, 6819 Op.getOperand(0), Op.getOperand(1)); 6820 } 6821 6822 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { 6823 if (Op.getValueType().isVector()) 6824 return LowerVectorLoad(Op, DAG); 6825 6826 assert(Op.getValueType() == MVT::i1 && 6827 "Custom lowering only for i1 loads"); 6828 6829 // First, load 8 bits into 32 bits, then truncate to 1 bit. 6830 6831 SDLoc dl(Op); 6832 LoadSDNode *LD = cast<LoadSDNode>(Op); 6833 6834 SDValue Chain = LD->getChain(); 6835 SDValue BasePtr = LD->getBasePtr(); 6836 MachineMemOperand *MMO = LD->getMemOperand(); 6837 6838 SDValue NewLD = 6839 DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain, 6840 BasePtr, MVT::i8, MMO); 6841 SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD); 6842 6843 SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) }; 6844 return DAG.getMergeValues(Ops, dl); 6845 } 6846 6847 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { 6848 if (Op.getOperand(1).getValueType().isVector()) 6849 return LowerVectorStore(Op, DAG); 6850 6851 assert(Op.getOperand(1).getValueType() == MVT::i1 && 6852 "Custom lowering only for i1 stores"); 6853 6854 // First, zero extend to 32 bits, then use a truncating store to 8 bits. 
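// In DAG terms, the transformation below is roughly
//   (store i1:V)  -->  (truncstore i8 (zero_extend V))
// so only the low byte of the widened value ever reaches memory.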
6855 6856 SDLoc dl(Op); 6857 StoreSDNode *ST = cast<StoreSDNode>(Op); 6858 6859 SDValue Chain = ST->getChain(); 6860 SDValue BasePtr = ST->getBasePtr(); 6861 SDValue Value = ST->getValue(); 6862 MachineMemOperand *MMO = ST->getMemOperand(); 6863 6864 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()), 6865 Value); 6866 return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO); 6867 } 6868 6869 // FIXME: Remove this once the ANDI glue bug is fixed: 6870 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { 6871 assert(Op.getValueType() == MVT::i1 && 6872 "Custom lowering only for i1 results"); 6873 6874 SDLoc DL(Op); 6875 return DAG.getNode(PPCISD::ANDIo_1_GT_BIT, DL, MVT::i1, 6876 Op.getOperand(0)); 6877 } 6878 6879 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when 6880 /// possible. 6881 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 6882 // Not FP? Not a fsel. 6883 if (!Op.getOperand(0).getValueType().isFloatingPoint() || 6884 !Op.getOperand(2).getValueType().isFloatingPoint()) 6885 return Op; 6886 6887 // We might be able to do better than this under some circumstances, but in 6888 // general, fsel-based lowering of select is a finite-math-only optimization. 6889 // For more information, see section F.3 of the 2.06 ISA specification. 6890 if (!DAG.getTarget().Options.NoInfsFPMath || 6891 !DAG.getTarget().Options.NoNaNsFPMath) 6892 return Op; 6893 // TODO: Propagate flags from the select rather than global settings. 6894 SDNodeFlags Flags; 6895 Flags.setNoInfs(true); 6896 Flags.setNoNaNs(true); 6897 6898 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 6899 6900 EVT ResVT = Op.getValueType(); 6901 EVT CmpVT = Op.getOperand(0).getValueType(); 6902 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 6903 SDValue TV = Op.getOperand(2), FV = Op.getOperand(3); 6904 SDLoc dl(Op); 6905 6906 // If the RHS of the comparison is a 0.0, we don't need to do the 6907 // subtraction at all. 6908 SDValue Sel1; 6909 if (isFloatingPointZero(RHS)) 6910 switch (CC) { 6911 default: break; // SETUO etc aren't handled by fsel. 
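// Recall the fsel semantics: FSEL(A, TV, FV) == (A >= 0.0 ? TV : FV), so
// with RHS known to be 0.0, "LHS >= RHS ? TV : FV" maps directly onto
// FSEL(LHS, TV, FV). The cases below derive the other comparisons from
// that one primitive by swapping TV and FV, negating LHS, or (for the
// equality forms) combining two fsels.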
6912 case ISD::SETNE: 6913 std::swap(TV, FV); 6914 LLVM_FALLTHROUGH; 6915 case ISD::SETEQ: 6916 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 6917 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 6918 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV); 6919 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 6920 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 6921 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 6922 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV); 6923 case ISD::SETULT: 6924 case ISD::SETLT: 6925 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 6926 LLVM_FALLTHROUGH; 6927 case ISD::SETOGE: 6928 case ISD::SETGE: 6929 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 6930 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 6931 return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV); 6932 case ISD::SETUGT: 6933 case ISD::SETGT: 6934 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 6935 LLVM_FALLTHROUGH; 6936 case ISD::SETOLE: 6937 case ISD::SETLE: 6938 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 6939 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 6940 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 6941 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV); 6942 } 6943 6944 SDValue Cmp; 6945 switch (CC) { 6946 default: break; // SETUO etc aren't handled by fsel. 6947 case ISD::SETNE: 6948 std::swap(TV, FV); 6949 LLVM_FALLTHROUGH; 6950 case ISD::SETEQ: 6951 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags); 6952 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6953 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6954 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6955 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 6956 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 6957 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 6958 DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV); 6959 case ISD::SETULT: 6960 case ISD::SETLT: 6961 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags); 6962 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6963 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6964 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 6965 case ISD::SETOGE: 6966 case ISD::SETGE: 6967 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags); 6968 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6969 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6970 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6971 case ISD::SETUGT: 6972 case ISD::SETGT: 6973 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags); 6974 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6975 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6976 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 6977 case ISD::SETOLE: 6978 case ISD::SETLE: 6979 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags); 6980 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6981 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6982 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6983 } 6984 return Op; 6985 } 6986 6987 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI, 6988 SelectionDAG &DAG, 6989 const SDLoc &dl) const { 6990 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 6991 SDValue Src = Op.getOperand(0); 6992 if (Src.getValueType() == 
MVT::f32) 6993 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 6994 6995 SDValue Tmp; 6996 switch (Op.getSimpleValueType().SimpleTy) { 6997 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 6998 case MVT::i32: 6999 Tmp = DAG.getNode( 7000 Op.getOpcode() == ISD::FP_TO_SINT 7001 ? PPCISD::FCTIWZ 7002 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 7003 dl, MVT::f64, Src); 7004 break; 7005 case MVT::i64: 7006 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 7007 "i64 FP_TO_UINT is supported only with FPCVT"); 7008 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 7009 PPCISD::FCTIDUZ, 7010 dl, MVT::f64, Src); 7011 break; 7012 } 7013 7014 // Convert the FP value to an int value through memory. 7015 bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() && 7016 (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()); 7017 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64); 7018 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex(); 7019 MachinePointerInfo MPI = 7020 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI); 7021 7022 // Emit a store to the stack slot. 7023 SDValue Chain; 7024 if (i32Stack) { 7025 MachineFunction &MF = DAG.getMachineFunction(); 7026 MachineMemOperand *MMO = 7027 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4); 7028 SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr }; 7029 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 7030 DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO); 7031 } else 7032 Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI); 7033 7034 // Result is a load from the stack slot. If loading 4 bytes, make sure to 7035 // add in a bias on big endian. 7036 if (Op.getValueType() == MVT::i32 && !i32Stack) { 7037 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 7038 DAG.getConstant(4, dl, FIPtr.getValueType())); 7039 MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4); 7040 } 7041 7042 RLI.Chain = Chain; 7043 RLI.Ptr = FIPtr; 7044 RLI.MPI = MPI; 7045 } 7046 7047 /// Custom lowers floating point to integer conversions to use 7048 /// the direct move instructions available in ISA 2.07 to avoid the 7049 /// need for load/store combinations. 7050 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op, 7051 SelectionDAG &DAG, 7052 const SDLoc &dl) const { 7053 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 7054 SDValue Src = Op.getOperand(0); 7055 7056 if (Src.getValueType() == MVT::f32) 7057 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 7058 7059 SDValue Tmp; 7060 switch (Op.getSimpleValueType().SimpleTy) { 7061 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 7062 case MVT::i32: 7063 Tmp = DAG.getNode( 7064 Op.getOpcode() == ISD::FP_TO_SINT 7065 ? PPCISD::FCTIWZ 7066 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 7067 dl, MVT::f64, Src); 7068 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp); 7069 break; 7070 case MVT::i64: 7071 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 7072 "i64 FP_TO_UINT is supported only with FPCVT"); 7073 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? 
PPCISD::FCTIDZ : 7074 PPCISD::FCTIDUZ, 7075 dl, MVT::f64, Src); 7076 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp); 7077 break; 7078 } 7079 return Tmp; 7080 } 7081 7082 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 7083 const SDLoc &dl) const { 7084 7085 // FP to INT conversions are legal for f128. 7086 if (EnableQuadPrecision && (Op->getOperand(0).getValueType() == MVT::f128)) 7087 return Op; 7088 7089 // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on 7090 // PPC (the libcall is not available). 7091 if (Op.getOperand(0).getValueType() == MVT::ppcf128) { 7092 if (Op.getValueType() == MVT::i32) { 7093 if (Op.getOpcode() == ISD::FP_TO_SINT) { 7094 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 7095 MVT::f64, Op.getOperand(0), 7096 DAG.getIntPtrConstant(0, dl)); 7097 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 7098 MVT::f64, Op.getOperand(0), 7099 DAG.getIntPtrConstant(1, dl)); 7100 7101 // Add the two halves of the long double in round-to-zero mode. 7102 SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi); 7103 7104 // Now use a smaller FP_TO_SINT. 7105 return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res); 7106 } 7107 if (Op.getOpcode() == ISD::FP_TO_UINT) { 7108 const uint64_t TwoE31[] = {0x41e0000000000000LL, 0}; 7109 APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31)); 7110 SDValue Tmp = DAG.getConstantFP(APF, dl, MVT::ppcf128); 7111 // X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X 7112 // FIXME: generated code sucks. 7113 // TODO: Are there fast-math-flags to propagate to this FSUB? 7114 SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128, 7115 Op.getOperand(0), Tmp); 7116 True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True); 7117 True = DAG.getNode(ISD::ADD, dl, MVT::i32, True, 7118 DAG.getConstant(0x80000000, dl, MVT::i32)); 7119 SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, 7120 Op.getOperand(0)); 7121 return DAG.getSelectCC(dl, Op.getOperand(0), Tmp, True, False, 7122 ISD::SETGE); 7123 } 7124 } 7125 7126 return SDValue(); 7127 } 7128 7129 if (Subtarget.hasDirectMove() && Subtarget.isPPC64()) 7130 return LowerFP_TO_INTDirectMove(Op, DAG, dl); 7131 7132 ReuseLoadInfo RLI; 7133 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 7134 7135 return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI, 7136 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges); 7137 } 7138 7139 // We're trying to insert a regular store, S, and then a load, L. If the 7140 // incoming value, O, is a load, we might just be able to have our load use the 7141 // address used by O. However, we don't know if anything else will store to 7142 // that address before we can load from it. To prevent this situation, we need 7143 // to insert our load, L, into the chain as a peer of O. To do this, we give L 7144 // the same chain operand as O, we create a token factor from the chain results 7145 // of O and L, and we replace all uses of O's chain result with that token 7146 // factor (see spliceIntoChain below for this last part). 
7147 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT, 7148 ReuseLoadInfo &RLI, 7149 SelectionDAG &DAG, 7150 ISD::LoadExtType ET) const { 7151 SDLoc dl(Op); 7152 if (ET == ISD::NON_EXTLOAD && 7153 (Op.getOpcode() == ISD::FP_TO_UINT || 7154 Op.getOpcode() == ISD::FP_TO_SINT) && 7155 isOperationLegalOrCustom(Op.getOpcode(), 7156 Op.getOperand(0).getValueType())) { 7157 7158 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 7159 return true; 7160 } 7161 7162 LoadSDNode *LD = dyn_cast<LoadSDNode>(Op); 7163 if (!LD || LD->getExtensionType() != ET || LD->isVolatile() || 7164 LD->isNonTemporal()) 7165 return false; 7166 if (LD->getMemoryVT() != MemVT) 7167 return false; 7168 7169 RLI.Ptr = LD->getBasePtr(); 7170 if (LD->isIndexed() && !LD->getOffset().isUndef()) { 7171 assert(LD->getAddressingMode() == ISD::PRE_INC && 7172 "Non-pre-inc AM on PPC?"); 7173 RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr, 7174 LD->getOffset()); 7175 } 7176 7177 RLI.Chain = LD->getChain(); 7178 RLI.MPI = LD->getPointerInfo(); 7179 RLI.IsDereferenceable = LD->isDereferenceable(); 7180 RLI.IsInvariant = LD->isInvariant(); 7181 RLI.Alignment = LD->getAlignment(); 7182 RLI.AAInfo = LD->getAAInfo(); 7183 RLI.Ranges = LD->getRanges(); 7184 7185 RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1); 7186 return true; 7187 } 7188 7189 // Given the head of the old chain, ResChain, insert a token factor containing 7190 // it and NewResChain, and make users of ResChain now be users of that token 7191 // factor. 7192 // TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead. 7193 void PPCTargetLowering::spliceIntoChain(SDValue ResChain, 7194 SDValue NewResChain, 7195 SelectionDAG &DAG) const { 7196 if (!ResChain) 7197 return; 7198 7199 SDLoc dl(NewResChain); 7200 7201 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 7202 NewResChain, DAG.getUNDEF(MVT::Other)); 7203 assert(TF.getNode() != NewResChain.getNode() && 7204 "A new TF really is required here"); 7205 7206 DAG.ReplaceAllUsesOfValueWith(ResChain, TF); 7207 DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain); 7208 } 7209 7210 /// Analyze the profitability of a direct move: prefer a float load over an 7211 /// int load plus a direct move when there is no integer use of the int 7212 /// load. 7213 bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const { 7214 SDNode *Origin = Op.getOperand(0).getNode(); 7215 if (Origin->getOpcode() != ISD::LOAD) 7216 return true; 7217 7218 // If the subtarget has no LXSIBZX/LXSIHZX (e.g. on Power8), prefer a 7219 // direct move if the memory size is 1 or 2 bytes. 7220 MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand(); 7221 if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2) 7222 return true; 7223 7224 for (SDNode::use_iterator UI = Origin->use_begin(), 7225 UE = Origin->use_end(); 7226 UI != UE; ++UI) { 7227 7228 // Only look at the users of the loaded value. 7229 if (UI.getUse().get().getResNo() != 0) 7230 continue; 7231 7232 if (UI->getOpcode() != ISD::SINT_TO_FP && 7233 UI->getOpcode() != ISD::UINT_TO_FP) 7234 return true; 7235 } 7236 7237 return false; 7238 } 7239 7240 /// Custom lowers integer to floating point conversions to use 7241 /// the direct move instructions available in ISA 2.07 to avoid the 7242 /// need for load/store combinations.
SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
                                                    SelectionDAG &DAG,
                                                    const SDLoc &dl) const {
  assert((Op.getValueType() == MVT::f32 ||
          Op.getValueType() == MVT::f64) &&
         "Invalid floating point type as target of conversion");
  assert(Subtarget.hasFPCVT() &&
         "Int to FP conversions with direct moves require FPCVT");
  SDValue FP;
  SDValue Src = Op.getOperand(0);
  bool SinglePrec = Op.getValueType() == MVT::f32;
  bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
  bool Signed = Op.getOpcode() == ISD::SINT_TO_FP;
  unsigned ConvOp = Signed ? (SinglePrec ? PPCISD::FCFIDS : PPCISD::FCFID) :
                             (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU);

  // A 32-bit integer needs the sign/zero-extending form of the move; a
  // 64-bit integer can be moved as-is.
  if (WordInt)
    FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ,
                     dl, MVT::f64, Src);
  else
    FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src);
  FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);

  return FP;
}

static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl) {

  EVT VecVT = Vec.getValueType();
  assert(VecVT.isVector() && "Expected a vector type.");
  assert(VecVT.getSizeInBits() < 128 && "Vector is already full width.");

  EVT EltVT = VecVT.getVectorElementType();
  unsigned WideNumElts = 128 / EltVT.getSizeInBits();
  EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);

  unsigned NumConcat = WideNumElts / VecVT.getVectorNumElements();
  SmallVector<SDValue, 16> Ops(NumConcat);
  Ops[0] = Vec;
  SDValue UndefVec = DAG.getUNDEF(VecVT);
  for (unsigned i = 1; i < NumConcat; ++i)
    Ops[i] = UndefVec;

  return DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Ops);
}

SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op,
                                                SelectionDAG &DAG,
                                                const SDLoc &dl) const {

  unsigned Opc = Op.getOpcode();
  assert((Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP) &&
         "Unexpected conversion type");
  assert(Op.getValueType() == MVT::v2f64 && "Supports v2f64 only.");

  // CPUs prior to P9 don't have a way to sign-extend in vectors.
  bool SignedConv = Opc == ISD::SINT_TO_FP;
  if (SignedConv && !Subtarget.hasP9Altivec())
    return SDValue();

  SDValue Wide = widenVec(DAG, Op.getOperand(0), dl);
  EVT WideVT = Wide.getValueType();
  unsigned WideNumElts = WideVT.getVectorNumElements();

  SmallVector<int, 16> ShuffV;
  for (unsigned i = 0; i < WideNumElts; ++i)
    ShuffV.push_back(i + WideNumElts);

  if (Subtarget.isLittleEndian()) {
    ShuffV[0] = 0;
    ShuffV[WideNumElts / 2] = 1;
  } else {
    ShuffV[WideNumElts / 2 - 1] = 0;
    ShuffV[WideNumElts - 1] = 1;
  }

  SDValue ShuffleSrc2 = SignedConv ? DAG.getUNDEF(WideVT) :
                                     DAG.getConstant(0, dl, WideVT);
  SDValue Arrange = DAG.getVectorShuffle(WideVT, dl, Wide, ShuffleSrc2, ShuffV);
  unsigned ExtendOp = SignedConv ? (unsigned) PPCISD::SExtVElems :
                                   (unsigned) ISD::BITCAST;
  SDValue Extend = DAG.getNode(ExtendOp, dl, MVT::v2i64, Arrange);

  return DAG.getNode(Opc, dl, Op.getValueType(), Extend);
}

SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc dl(Op);

  if (Op.getValueType() == MVT::v2f64 &&
      Op.getOperand(0).getValueType() == MVT::v2i16)
    return LowerINT_TO_FPVector(Op, DAG, dl);

  // Conversions to f128 are legal.
  if (EnableQuadPrecision && (Op.getValueType() == MVT::f128))
    return Op;

  if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) {
    if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64)
      return SDValue();

    SDValue Value = Op.getOperand(0);
    // The values are now known to be -1 (false) or 1 (true). To convert this
    // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by
    // 0.5). This can be done with an fma and the 0.5 constant:
    //   (V+1.0)*0.5 = 0.5*V+0.5
    Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);

    SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);

    Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);

    if (Op.getValueType() != MVT::v4f64)
      Value = DAG.getNode(ISD::FP_ROUND, dl,
                          Op.getValueType(), Value,
                          DAG.getIntPtrConstant(1, dl));
    return Value;
  }

  // Don't handle ppc_fp128 here; let it be lowered to a libcall.
  if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
    return SDValue();

  if (Op.getOperand(0).getValueType() == MVT::i1)
    return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0),
                       DAG.getConstantFP(1.0, dl, Op.getValueType()),
                       DAG.getConstantFP(0.0, dl, Op.getValueType()));

  // If we have direct moves, we can do the whole conversion with them and
  // skip the store/load; without FPCVT, however, we can't do most
  // conversions.
  if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
      Subtarget.isPPC64() && Subtarget.hasFPCVT())
    return LowerINT_TO_FPDirectMove(Op, DAG, dl);

  assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
         "UINT_TO_FP is supported only with FPCVT");

  // If we have FCFIDS, then use it when converting to single-precision.
  // Otherwise, convert to double-precision and then round.
  unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                       ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
                                                            : PPCISD::FCFIDS)
                       : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
                                                            : PPCISD::FCFID);
  MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                  ? MVT::f32
                  : MVT::f64;

  if (Op.getOperand(0).getValueType() == MVT::i64) {
    SDValue SINT = Op.getOperand(0);
    // When converting to single-precision, we actually need to convert
    // to double-precision first and then round to single-precision.
    // To avoid double-rounding effects during that operation, we have
    // to prepare the input operand.  Bits that might be truncated when
    // converting to double-precision are replaced by a bit that won't
    // be lost at this stage, but is below the single-precision rounding
    // position.
    //
    // However, if -enable-unsafe-fp-math is in effect, accept double
    // rounding to avoid the extra overhead.
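    // For example, converting the i64 value 2^53 + 2^29 + 1 directly to f32
    // rounds up to 2^53 + 2^30, but rounding it to f64 first yields
    // 2^53 + 2^29 (ties-to-even), which the subsequent f32 rounding takes
    // down to 2^53.  The sticky-bit adjustment below preserves enough
    // information to get the correctly rounded result.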
7407 if (Op.getValueType() == MVT::f32 && 7408 !Subtarget.hasFPCVT() && 7409 !DAG.getTarget().Options.UnsafeFPMath) { 7410 7411 // Twiddle input to make sure the low 11 bits are zero. (If this 7412 // is the case, we are guaranteed the value will fit into the 53 bit 7413 // mantissa of an IEEE double-precision value without rounding.) 7414 // If any of those low 11 bits were not zero originally, make sure 7415 // bit 12 (value 2048) is set instead, so that the final rounding 7416 // to single-precision gets the correct result. 7417 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64, 7418 SINT, DAG.getConstant(2047, dl, MVT::i64)); 7419 Round = DAG.getNode(ISD::ADD, dl, MVT::i64, 7420 Round, DAG.getConstant(2047, dl, MVT::i64)); 7421 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT); 7422 Round = DAG.getNode(ISD::AND, dl, MVT::i64, 7423 Round, DAG.getConstant(-2048, dl, MVT::i64)); 7424 7425 // However, we cannot use that value unconditionally: if the magnitude 7426 // of the input value is small, the bit-twiddling we did above might 7427 // end up visibly changing the output. Fortunately, in that case, we 7428 // don't need to twiddle bits since the original input will convert 7429 // exactly to double-precision floating-point already. Therefore, 7430 // construct a conditional to use the original value if the top 11 7431 // bits are all sign-bit copies, and use the rounded value computed 7432 // above otherwise. 7433 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64, 7434 SINT, DAG.getConstant(53, dl, MVT::i32)); 7435 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64, 7436 Cond, DAG.getConstant(1, dl, MVT::i64)); 7437 Cond = DAG.getSetCC(dl, MVT::i32, 7438 Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT); 7439 7440 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT); 7441 } 7442 7443 ReuseLoadInfo RLI; 7444 SDValue Bits; 7445 7446 MachineFunction &MF = DAG.getMachineFunction(); 7447 if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) { 7448 Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI, 7449 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges); 7450 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 7451 } else if (Subtarget.hasLFIWAX() && 7452 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) { 7453 MachineMemOperand *MMO = 7454 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 7455 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 7456 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 7457 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl, 7458 DAG.getVTList(MVT::f64, MVT::Other), 7459 Ops, MVT::i32, MMO); 7460 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 7461 } else if (Subtarget.hasFPCVT() && 7462 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) { 7463 MachineMemOperand *MMO = 7464 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 7465 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 7466 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 7467 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl, 7468 DAG.getVTList(MVT::f64, MVT::Other), 7469 Ops, MVT::i32, MMO); 7470 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 7471 } else if (((Subtarget.hasLFIWAX() && 7472 SINT.getOpcode() == ISD::SIGN_EXTEND) || 7473 (Subtarget.hasFPCVT() && 7474 SINT.getOpcode() == ISD::ZERO_EXTEND)) && 7475 SINT.getOperand(0).getValueType() == MVT::i32) { 7476 MachineFrameInfo &MFI = MF.getFrameInfo(); 7477 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 7478 7479 int FrameIdx = MFI.CreateStackObject(4, 4, false); 7480 SDValue FIdx = 
DAG.getFrameIndex(FrameIdx, PtrVT); 7481 7482 SDValue Store = 7483 DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx, 7484 MachinePointerInfo::getFixedStack( 7485 DAG.getMachineFunction(), FrameIdx)); 7486 7487 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 7488 "Expected an i32 store"); 7489 7490 RLI.Ptr = FIdx; 7491 RLI.Chain = Store; 7492 RLI.MPI = 7493 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 7494 RLI.Alignment = 4; 7495 7496 MachineMemOperand *MMO = 7497 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 7498 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 7499 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 7500 Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ? 7501 PPCISD::LFIWZX : PPCISD::LFIWAX, 7502 dl, DAG.getVTList(MVT::f64, MVT::Other), 7503 Ops, MVT::i32, MMO); 7504 } else 7505 Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT); 7506 7507 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits); 7508 7509 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) 7510 FP = DAG.getNode(ISD::FP_ROUND, dl, 7511 MVT::f32, FP, DAG.getIntPtrConstant(0, dl)); 7512 return FP; 7513 } 7514 7515 assert(Op.getOperand(0).getValueType() == MVT::i32 && 7516 "Unhandled INT_TO_FP type in custom expander!"); 7517 // Since we only generate this in 64-bit mode, we can take advantage of 7518 // 64-bit registers. In particular, sign extend the input value into the 7519 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 7520 // then lfd it and fcfid it. 7521 MachineFunction &MF = DAG.getMachineFunction(); 7522 MachineFrameInfo &MFI = MF.getFrameInfo(); 7523 EVT PtrVT = getPointerTy(MF.getDataLayout()); 7524 7525 SDValue Ld; 7526 if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) { 7527 ReuseLoadInfo RLI; 7528 bool ReusingLoad; 7529 if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI, 7530 DAG))) { 7531 int FrameIdx = MFI.CreateStackObject(4, 4, false); 7532 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7533 7534 SDValue Store = 7535 DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 7536 MachinePointerInfo::getFixedStack( 7537 DAG.getMachineFunction(), FrameIdx)); 7538 7539 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 7540 "Expected an i32 store"); 7541 7542 RLI.Ptr = FIdx; 7543 RLI.Chain = Store; 7544 RLI.MPI = 7545 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 7546 RLI.Alignment = 4; 7547 } 7548 7549 MachineMemOperand *MMO = 7550 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 7551 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 7552 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 7553 Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ? 7554 PPCISD::LFIWZX : PPCISD::LFIWAX, 7555 dl, DAG.getVTList(MVT::f64, MVT::Other), 7556 Ops, MVT::i32, MMO); 7557 if (ReusingLoad) 7558 spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG); 7559 } else { 7560 assert(Subtarget.isPPC64() && 7561 "i32->FP without LFIWAX supported only on PPC64"); 7562 7563 int FrameIdx = MFI.CreateStackObject(8, 8, false); 7564 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7565 7566 SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, 7567 Op.getOperand(0)); 7568 7569 // STD the extended value into the stack slot. 7570 SDValue Store = DAG.getStore( 7571 DAG.getEntryNode(), dl, Ext64, FIdx, 7572 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx)); 7573 7574 // Load the value as a double. 
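    // The stack slot was just written with an 8-byte STD of the
    // sign-extended value, so the full-width load below reads back exactly
    // what was stored.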
    Ld = DAG.getLoad(
        MVT::f64, dl, Store, FIdx,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
  }

  // FCFID it and return it.
  SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
  if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
    FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
                     DAG.getIntPtrConstant(0, dl));
  return FP;
}

SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc dl(Op);
  /*
   The rounding mode is in bits 30:31 of FPSCR, and has the following
   settings:
     00 Round to nearest
     01 Round to 0
     10 Round to +inf
     11 Round to -inf

  FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined
     0 Round to 0
     1 Round to nearest
     2 Round to +inf
     3 Round to -inf

  To perform the conversion, we do:
    ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
  */

  MachineFunction &MF = DAG.getMachineFunction();
  EVT VT = Op.getValueType();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Save FP Control Word to register
  EVT NodeTys[] = {
    MVT::f64,    // return register
    MVT::Glue    // unused in this context
  };
  SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None);

  // Save FP register to stack slot
  int SSFI = MF.getFrameInfo().CreateStackObject(8, 8, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain, StackSlot,
                               MachinePointerInfo());

  // Load FP Control Word from low 32 bits of stack slot.
  SDValue Four = DAG.getConstant(4, dl, PtrVT);
  SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
  SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo());

  // Transform as necessary
  SDValue CWD1 =
    DAG.getNode(ISD::AND, dl, MVT::i32,
                CWD, DAG.getConstant(3, dl, MVT::i32));
  SDValue CWD2 =
    DAG.getNode(ISD::SRL, dl, MVT::i32,
                DAG.getNode(ISD::AND, dl, MVT::i32,
                            DAG.getNode(ISD::XOR, dl, MVT::i32,
                                        CWD, DAG.getConstant(3, dl, MVT::i32)),
                            DAG.getConstant(3, dl, MVT::i32)),
                DAG.getConstant(1, dl, MVT::i32));

  SDValue RetVal =
    DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);

  return DAG.getNode((VT.getSizeInBits() < 16 ?
                      ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
}

SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  unsigned BitWidth = VT.getSizeInBits();
  SDLoc dl(Op);
  assert(Op.getNumOperands() == 3 &&
         VT == Op.getOperand(1).getValueType() &&
         "Unexpected SHL!");

  // Expand into a bunch of logical ops.  Note that these ops
  // depend on the PPC behavior for oversized shift amounts.
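  // For a shift amount Amt below BitWidth, this computes
  //   OutLo = Lo << Amt
  //   OutHi = (Hi << Amt) | (Lo >> (BitWidth - Amt)) | (Lo << (Amt - BitWidth))
  // where the last term is zero because PPC shifts by amounts in
  // [BitWidth, 2*BitWidth) produce zero; once Amt reaches BitWidth, that
  // last term is the one that supplies the result bits instead.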
7661 SDValue Lo = Op.getOperand(0); 7662 SDValue Hi = Op.getOperand(1); 7663 SDValue Amt = Op.getOperand(2); 7664 EVT AmtVT = Amt.getValueType(); 7665 7666 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 7667 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 7668 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt); 7669 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1); 7670 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3); 7671 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 7672 DAG.getConstant(-BitWidth, dl, AmtVT)); 7673 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5); 7674 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 7675 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt); 7676 SDValue OutOps[] = { OutLo, OutHi }; 7677 return DAG.getMergeValues(OutOps, dl); 7678 } 7679 7680 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { 7681 EVT VT = Op.getValueType(); 7682 SDLoc dl(Op); 7683 unsigned BitWidth = VT.getSizeInBits(); 7684 assert(Op.getNumOperands() == 3 && 7685 VT == Op.getOperand(1).getValueType() && 7686 "Unexpected SRL!"); 7687 7688 // Expand into a bunch of logical ops. Note that these ops 7689 // depend on the PPC behavior for oversized shift amounts. 7690 SDValue Lo = Op.getOperand(0); 7691 SDValue Hi = Op.getOperand(1); 7692 SDValue Amt = Op.getOperand(2); 7693 EVT AmtVT = Amt.getValueType(); 7694 7695 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 7696 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 7697 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 7698 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 7699 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 7700 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 7701 DAG.getConstant(-BitWidth, dl, AmtVT)); 7702 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5); 7703 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 7704 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt); 7705 SDValue OutOps[] = { OutLo, OutHi }; 7706 return DAG.getMergeValues(OutOps, dl); 7707 } 7708 7709 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { 7710 SDLoc dl(Op); 7711 EVT VT = Op.getValueType(); 7712 unsigned BitWidth = VT.getSizeInBits(); 7713 assert(Op.getNumOperands() == 3 && 7714 VT == Op.getOperand(1).getValueType() && 7715 "Unexpected SRA!"); 7716 7717 // Expand into a bunch of logical ops, followed by a select_cc. 7718 SDValue Lo = Op.getOperand(0); 7719 SDValue Hi = Op.getOperand(1); 7720 SDValue Amt = Op.getOperand(2); 7721 EVT AmtVT = Amt.getValueType(); 7722 7723 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 7724 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 7725 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 7726 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 7727 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 7728 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 7729 DAG.getConstant(-BitWidth, dl, AmtVT)); 7730 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5); 7731 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt); 7732 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT), 7733 Tmp4, Tmp6, ISD::SETLE); 7734 SDValue OutOps[] = { OutLo, OutHi }; 7735 return DAG.getMergeValues(OutOps, dl); 7736 } 7737 7738 //===----------------------------------------------------------------------===// 7739 // Vector related lowering. 
7740 // 7741 7742 /// BuildSplatI - Build a canonical splati of Val with an element size of 7743 /// SplatSize. Cast the result to VT. 7744 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, 7745 SelectionDAG &DAG, const SDLoc &dl) { 7746 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); 7747 7748 static const MVT VTys[] = { // canonical VT to use for each size. 7749 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 7750 }; 7751 7752 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 7753 7754 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 7755 if (Val == -1) 7756 SplatSize = 1; 7757 7758 EVT CanonicalVT = VTys[SplatSize-1]; 7759 7760 // Build a canonical splat for this value. 7761 return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT)); 7762 } 7763 7764 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the 7765 /// specified intrinsic ID. 7766 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG, 7767 const SDLoc &dl, EVT DestVT = MVT::Other) { 7768 if (DestVT == MVT::Other) DestVT = Op.getValueType(); 7769 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 7770 DAG.getConstant(IID, dl, MVT::i32), Op); 7771 } 7772 7773 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the 7774 /// specified intrinsic ID. 7775 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, 7776 SelectionDAG &DAG, const SDLoc &dl, 7777 EVT DestVT = MVT::Other) { 7778 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 7779 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 7780 DAG.getConstant(IID, dl, MVT::i32), LHS, RHS); 7781 } 7782 7783 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 7784 /// specified intrinsic ID. 7785 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, 7786 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl, 7787 EVT DestVT = MVT::Other) { 7788 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 7789 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 7790 DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2); 7791 } 7792 7793 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 7794 /// amount. The result has the specified value type. 7795 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT, 7796 SelectionDAG &DAG, const SDLoc &dl) { 7797 // Force LHS/RHS to be the right type. 7798 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS); 7799 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS); 7800 7801 int Ops[16]; 7802 for (unsigned i = 0; i != 16; ++i) 7803 Ops[i] = i + Amt; 7804 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops); 7805 return DAG.getNode(ISD::BITCAST, dl, VT, T); 7806 } 7807 7808 /// Do we have an efficient pattern in a .td file for this node? 7809 /// 7810 /// \param V - pointer to the BuildVectorSDNode being matched 7811 /// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves? 7812 /// 7813 /// There are some patterns where it is beneficial to keep a BUILD_VECTOR 7814 /// node as a BUILD_VECTOR node rather than expanding it. The patterns where 7815 /// the opposite is true (expansion is beneficial) are: 7816 /// - The node builds a vector out of integers that are not 32 or 64-bits 7817 /// - The node builds a vector out of constants 7818 /// - The node is a "load-and-splat" 7819 /// In all other cases, we will choose to keep the BUILD_VECTOR. 
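/// For example, (build_vector (load %p), (load %p), (load %p), (load %p)) is
/// a "load-and-splat", so it is expanded rather than kept as a BUILD_VECTOR.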
7820 static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V, 7821 bool HasDirectMove, 7822 bool HasP8Vector) { 7823 EVT VecVT = V->getValueType(0); 7824 bool RightType = VecVT == MVT::v2f64 || 7825 (HasP8Vector && VecVT == MVT::v4f32) || 7826 (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32)); 7827 if (!RightType) 7828 return false; 7829 7830 bool IsSplat = true; 7831 bool IsLoad = false; 7832 SDValue Op0 = V->getOperand(0); 7833 7834 // This function is called in a block that confirms the node is not a constant 7835 // splat. So a constant BUILD_VECTOR here means the vector is built out of 7836 // different constants. 7837 if (V->isConstant()) 7838 return false; 7839 for (int i = 0, e = V->getNumOperands(); i < e; ++i) { 7840 if (V->getOperand(i).isUndef()) 7841 return false; 7842 // We want to expand nodes that represent load-and-splat even if the 7843 // loaded value is a floating point truncation or conversion to int. 7844 if (V->getOperand(i).getOpcode() == ISD::LOAD || 7845 (V->getOperand(i).getOpcode() == ISD::FP_ROUND && 7846 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) || 7847 (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT && 7848 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) || 7849 (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT && 7850 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD)) 7851 IsLoad = true; 7852 // If the operands are different or the input is not a load and has more 7853 // uses than just this BV node, then it isn't a splat. 7854 if (V->getOperand(i) != Op0 || 7855 (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode()))) 7856 IsSplat = false; 7857 } 7858 return !(IsSplat && IsLoad); 7859 } 7860 7861 // Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128. 7862 SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const { 7863 7864 SDLoc dl(Op); 7865 SDValue Op0 = Op->getOperand(0); 7866 7867 if (!EnableQuadPrecision || 7868 (Op.getValueType() != MVT::f128 ) || 7869 (Op0.getOpcode() != ISD::BUILD_PAIR) || 7870 (Op0.getOperand(0).getValueType() != MVT::i64) || 7871 (Op0.getOperand(1).getValueType() != MVT::i64)) 7872 return SDValue(); 7873 7874 return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0), 7875 Op0.getOperand(1)); 7876 } 7877 7878 // If this is a case we can't handle, return null and let the default 7879 // expansion code take care of it. If we CAN select this case, and if it 7880 // selects to a single instruction, return Op. Otherwise, if we can codegen 7881 // this case more efficiently than a constant pool load, lower it to the 7882 // sequence of ops that should be used. 7883 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, 7884 SelectionDAG &DAG) const { 7885 SDLoc dl(Op); 7886 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 7887 assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR"); 7888 7889 if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) { 7890 // We first build an i32 vector, load it into a QPX register, 7891 // then convert it to a floating-point vector and compare it 7892 // to a zero vector to get the boolean result. 
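    // (QPX has no direct way to materialize a v4i1: the boolean vector lives
    // in a floating-point register, so we round-trip the values through
    // memory and a floating-point compare.)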
7893 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 7894 int FrameIdx = MFI.CreateStackObject(16, 16, false); 7895 MachinePointerInfo PtrInfo = 7896 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 7897 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 7898 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7899 7900 assert(BVN->getNumOperands() == 4 && 7901 "BUILD_VECTOR for v4i1 does not have 4 operands"); 7902 7903 bool IsConst = true; 7904 for (unsigned i = 0; i < 4; ++i) { 7905 if (BVN->getOperand(i).isUndef()) continue; 7906 if (!isa<ConstantSDNode>(BVN->getOperand(i))) { 7907 IsConst = false; 7908 break; 7909 } 7910 } 7911 7912 if (IsConst) { 7913 Constant *One = 7914 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0); 7915 Constant *NegOne = 7916 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0); 7917 7918 Constant *CV[4]; 7919 for (unsigned i = 0; i < 4; ++i) { 7920 if (BVN->getOperand(i).isUndef()) 7921 CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext())); 7922 else if (isNullConstant(BVN->getOperand(i))) 7923 CV[i] = NegOne; 7924 else 7925 CV[i] = One; 7926 } 7927 7928 Constant *CP = ConstantVector::get(CV); 7929 SDValue CPIdx = DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()), 7930 16 /* alignment */); 7931 7932 SDValue Ops[] = {DAG.getEntryNode(), CPIdx}; 7933 SDVTList VTs = DAG.getVTList({MVT::v4i1, /*chain*/ MVT::Other}); 7934 return DAG.getMemIntrinsicNode( 7935 PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32, 7936 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 7937 } 7938 7939 SmallVector<SDValue, 4> Stores; 7940 for (unsigned i = 0; i < 4; ++i) { 7941 if (BVN->getOperand(i).isUndef()) continue; 7942 7943 unsigned Offset = 4*i; 7944 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 7945 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 7946 7947 unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize(); 7948 if (StoreSize > 4) { 7949 Stores.push_back( 7950 DAG.getTruncStore(DAG.getEntryNode(), dl, BVN->getOperand(i), Idx, 7951 PtrInfo.getWithOffset(Offset), MVT::i32)); 7952 } else { 7953 SDValue StoreValue = BVN->getOperand(i); 7954 if (StoreSize < 4) 7955 StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue); 7956 7957 Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, StoreValue, Idx, 7958 PtrInfo.getWithOffset(Offset))); 7959 } 7960 } 7961 7962 SDValue StoreChain; 7963 if (!Stores.empty()) 7964 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 7965 else 7966 StoreChain = DAG.getEntryNode(); 7967 7968 // Now load from v4i32 into the QPX register; this will extend it to 7969 // v4i64 but not yet convert it to a floating point. Nevertheless, this 7970 // is typed as v4f64 because the QPX register integer states are not 7971 // explicitly represented. 7972 7973 SDValue Ops[] = {StoreChain, 7974 DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32), 7975 FIdx}; 7976 SDVTList VTs = DAG.getVTList({MVT::v4f64, /*chain*/ MVT::Other}); 7977 7978 SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, 7979 dl, VTs, Ops, MVT::v4i32, PtrInfo); 7980 LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 7981 DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32), 7982 LoadedVect); 7983 7984 SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::v4f64); 7985 7986 return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ); 7987 } 7988 7989 // All other QPX vectors are handled by generic code. 
7990 if (Subtarget.hasQPX()) 7991 return SDValue(); 7992 7993 // Check if this is a splat of a constant value. 7994 APInt APSplatBits, APSplatUndef; 7995 unsigned SplatBitSize; 7996 bool HasAnyUndefs; 7997 if (! BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize, 7998 HasAnyUndefs, 0, !Subtarget.isLittleEndian()) || 7999 SplatBitSize > 32) { 8000 // BUILD_VECTOR nodes that are not constant splats of up to 32-bits can be 8001 // lowered to VSX instructions under certain conditions. 8002 // Without VSX, there is no pattern more efficient than expanding the node. 8003 if (Subtarget.hasVSX() && 8004 haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(), 8005 Subtarget.hasP8Vector())) 8006 return Op; 8007 return SDValue(); 8008 } 8009 8010 unsigned SplatBits = APSplatBits.getZExtValue(); 8011 unsigned SplatUndef = APSplatUndef.getZExtValue(); 8012 unsigned SplatSize = SplatBitSize / 8; 8013 8014 // First, handle single instruction cases. 8015 8016 // All zeros? 8017 if (SplatBits == 0) { 8018 // Canonicalize all zero vectors to be v4i32. 8019 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) { 8020 SDValue Z = DAG.getConstant(0, dl, MVT::v4i32); 8021 Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z); 8022 } 8023 return Op; 8024 } 8025 8026 // We have XXSPLTIB for constant splats one byte wide 8027 if (Subtarget.hasP9Vector() && SplatSize == 1) { 8028 // This is a splat of 1-byte elements with some elements potentially undef. 8029 // Rather than trying to match undef in the SDAG patterns, ensure that all 8030 // elements are the same constant. 8031 if (HasAnyUndefs || ISD::isBuildVectorAllOnes(BVN)) { 8032 SmallVector<SDValue, 16> Ops(16, DAG.getConstant(SplatBits, 8033 dl, MVT::i32)); 8034 SDValue NewBV = DAG.getBuildVector(MVT::v16i8, dl, Ops); 8035 if (Op.getValueType() != MVT::v16i8) 8036 return DAG.getBitcast(Op.getValueType(), NewBV); 8037 return NewBV; 8038 } 8039 8040 // BuildVectorSDNode::isConstantSplat() is actually pretty smart. It'll 8041 // detect that constant splats like v8i16: 0xABAB are really just splats 8042 // of a 1-byte constant. In this case, we need to convert the node to a 8043 // splat of v16i8 and a bitcast. 8044 if (Op.getValueType() != MVT::v16i8) 8045 return DAG.getBitcast(Op.getValueType(), 8046 DAG.getConstant(SplatBits, dl, MVT::v16i8)); 8047 8048 return Op; 8049 } 8050 8051 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw]. 8052 int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >> 8053 (32-SplatBitSize)); 8054 if (SextVal >= -16 && SextVal <= 15) 8055 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl); 8056 8057 // Two instruction sequences. 8058 8059 // If this value is in the range [-32,30] and is even, use: 8060 // VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2) 8061 // If this value is in the range [17,31] and is odd, use: 8062 // VSPLTI[bhw](val-16) - VSPLTI[bhw](-16) 8063 // If this value is in the range [-31,-17] and is odd, use: 8064 // VSPLTI[bhw](val+16) + VSPLTI[bhw](-16) 8065 // Note the last two are three-instruction sequences. 8066 if (SextVal >= -32 && SextVal <= 31) { 8067 // To avoid having these optimizations undone by constant folding, 8068 // we convert to a pseudo that will be expanded later into one of 8069 // the above forms. 8070 SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32); 8071 EVT VT = (SplatSize == 1 ? MVT::v16i8 : 8072 (SplatSize == 2 ? 
MVT::v8i16 : MVT::v4i32));
    SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
    SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
    if (VT == Op.getValueType())
      return RetVal;
    else
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
  }

  // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
  // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
  // for fneg/fabs.
  if (SplatSize == 4 && SplatBits == (0x7FFFFFFF & ~SplatUndef)) {
    // Make -1 and vspltisw -1:
    SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);

    // Make the VSLW intrinsic, computing 0x8000_0000.
    SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
                                   OnesV, DAG, dl);

    // xor by OnesV to invert it.
    Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
    return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
  }

  // Check to see if this matches any of a wide variety of "vsplti*, binop
  // self" cases.
  static const signed char SplatCsts[] = {
    -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
    -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
  };

  for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
    // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
    // cases which are ambiguous (e.g. formation of 0x8000_0000): 'vsplti -1'
    // is tried first.
    int i = SplatCsts[idx];

    // Figure out what shift amount will be used by altivec if shifted by i in
    // this splat size.
    unsigned TypeShiftAmt = i & (SplatBitSize-1);

    // vsplti + shl self.
    if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
        Intrinsic::ppc_altivec_vslw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + srl self.
    if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
        Intrinsic::ppc_altivec_vsrw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + sra self.  (Note the arithmetic shift here: the logical-shift
    // matches are already covered by the srl case above, so without it this
    // case could never fire.)
    if (SextVal == i >> TypeShiftAmt) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
        Intrinsic::ppc_altivec_vsraw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + rol self.
    if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
                         ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
8150 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0, 8151 Intrinsic::ppc_altivec_vrlw 8152 }; 8153 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 8154 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 8155 } 8156 8157 // t = vsplti c, result = vsldoi t, t, 1 8158 if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) { 8159 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 8160 unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1; 8161 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 8162 } 8163 // t = vsplti c, result = vsldoi t, t, 2 8164 if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) { 8165 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 8166 unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2; 8167 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 8168 } 8169 // t = vsplti c, result = vsldoi t, t, 3 8170 if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) { 8171 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 8172 unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3; 8173 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 8174 } 8175 } 8176 8177 return SDValue(); 8178 } 8179 8180 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 8181 /// the specified operations to build the shuffle. 8182 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 8183 SDValue RHS, SelectionDAG &DAG, 8184 const SDLoc &dl) { 8185 unsigned OpNum = (PFEntry >> 26) & 0x0F; 8186 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 8187 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 8188 8189 enum { 8190 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 8191 OP_VMRGHW, 8192 OP_VMRGLW, 8193 OP_VSPLTISW0, 8194 OP_VSPLTISW1, 8195 OP_VSPLTISW2, 8196 OP_VSPLTISW3, 8197 OP_VSLDOI4, 8198 OP_VSLDOI8, 8199 OP_VSLDOI12 8200 }; 8201 8202 if (OpNum == OP_COPY) { 8203 if (LHSID == (1*9+2)*9+3) return LHS; 8204 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 8205 return RHS; 8206 } 8207 8208 SDValue OpLHS, OpRHS; 8209 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 8210 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 8211 8212 int ShufIdxs[16]; 8213 switch (OpNum) { 8214 default: llvm_unreachable("Unknown i32 permute!"); 8215 case OP_VMRGHW: 8216 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 8217 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 8218 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 8219 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 8220 break; 8221 case OP_VMRGLW: 8222 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 8223 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 8224 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 8225 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 8226 break; 8227 case OP_VSPLTISW0: 8228 for (unsigned i = 0; i != 16; ++i) 8229 ShufIdxs[i] = (i&3)+0; 8230 break; 8231 case OP_VSPLTISW1: 8232 for (unsigned i = 0; i != 16; ++i) 8233 ShufIdxs[i] = (i&3)+4; 8234 break; 8235 case OP_VSPLTISW2: 8236 for (unsigned i = 0; i != 16; ++i) 8237 ShufIdxs[i] = (i&3)+8; 8238 break; 8239 case OP_VSPLTISW3: 8240 for (unsigned i = 0; i != 16; ++i) 8241 ShufIdxs[i] = (i&3)+12; 8242 break; 8243 case OP_VSLDOI4: 8244 return 
BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
  case OP_VSLDOI8:
    return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
  case OP_VSLDOI12:
    return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
  }
  EVT VT = OpLHS.getValueType();
  OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
  OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
  SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
  return DAG.getNode(ISD::BITCAST, dl, VT, T);
}

/// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
/// by the VINSERTB instruction introduced in ISA 3.0; otherwise return a
/// default (empty) SDValue.
SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
                                           SelectionDAG &DAG) const {
  const unsigned BytesInVector = 16;
  bool IsLE = Subtarget.isLittleEndian();
  SDLoc dl(N);
  SDValue V1 = N->getOperand(0);
  SDValue V2 = N->getOperand(1);
  unsigned ShiftElts = 0, InsertAtByte = 0;
  bool Swap = false;

  // Shifts required to get the byte we want at the VINSERTB source element
  // (7 for big endian, 8 for little endian).
  unsigned LittleEndianShifts[] = {8, 7, 6, 5, 4, 3, 2, 1,
                                   0, 15, 14, 13, 12, 11, 10, 9};
  unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
                                1, 2, 3, 4, 5, 6, 7, 8};

  ArrayRef<int> Mask = N->getMask();
  int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};

  // For each mask element, find out if we're just inserting something
  // from V2 into V1 or vice versa.
  // Possible permutations inserting an element from V2 into V1:
  //   X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
  //   0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
  //   ...
  //   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
  // Inserting from V1 into V2 will be similar, except the mask range will be
  // [16,31].

  bool FoundCandidate = false;
  // If both vector operands for the shuffle are the same vector, the mask
  // will contain only elements from the first one and the second one will be
  // undef.
  unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
  // Go through the mask of bytes to find an element that's being moved from
  // one vector to the other.
  for (unsigned i = 0; i < BytesInVector; ++i) {
    unsigned CurrentElement = Mask[i];
    // If the 2nd operand is undefined, we should only look for the VINSERTB
    // source element in the Mask.
    if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
      continue;

    bool OtherElementsInOrder = true;
    // Examine the other elements in the Mask to see if they're in original
    // order.
    for (unsigned j = 0; j < BytesInVector; ++j) {
      if (j == i)
        continue;
      // If CurrentElement is from V1 [0,15], then we expect the rest of the
      // Mask to be from V2 [16,31] and vice versa, unless the 2nd operand is
      // undefined, in which case we assume we're always picking from the 1st
      // operand.
      int MaskOffset =
          (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
      if (Mask[j] != OriginalOrder[j] + MaskOffset) {
        OtherElementsInOrder = false;
        break;
      }
    }
    // If the other elements are in original order, we record the number of
    // shifts we need to get the element we want into element 7 (big endian)
    // or 8 (little endian).  Also record which byte in the vector we should
    // insert into.
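    // For example, the mask
    //   <16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15>
    // takes byte 0 from V2 and leaves the rest of V1 in its original order,
    // so it qualifies.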
    if (OtherElementsInOrder) {
      // If the 2nd operand is undefined, we assume no shifts and no swapping.
      if (V2.isUndef()) {
        ShiftElts = 0;
        Swap = false;
      } else {
        // Only need the last 4 bits for shifts because operands will be
        // swapped if CurrentElement is >= 2^4.
        ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
                         : BigEndianShifts[CurrentElement & 0xF];
        Swap = CurrentElement < BytesInVector;
      }
      InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
      FoundCandidate = true;
      break;
    }
  }

  if (!FoundCandidate)
    return SDValue();

  // Candidate found, construct the proper SDAG sequence with VINSERTB,
  // optionally with VECSHL if a shift is required.
  if (Swap)
    std::swap(V1, V2);
  if (V2.isUndef())
    V2 = V1;
  if (ShiftElts) {
    SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
                              DAG.getConstant(ShiftElts, dl, MVT::i32));
    return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl,
                       DAG.getConstant(InsertAtByte, dl, MVT::i32));
  }
  return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2,
                     DAG.getConstant(InsertAtByte, dl, MVT::i32));
}

/// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled
/// by the VINSERTH instruction introduced in ISA 3.0; otherwise return a
/// default (empty) SDValue.
SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
                                           SelectionDAG &DAG) const {
  const unsigned NumHalfWords = 8;
  const unsigned BytesInVector = NumHalfWords * 2;
  // Check that the shuffle is on half-words.
  if (!isNByteElemShuffleMask(N, 2, 1))
    return SDValue();

  bool IsLE = Subtarget.isLittleEndian();
  SDLoc dl(N);
  SDValue V1 = N->getOperand(0);
  SDValue V2 = N->getOperand(1);
  unsigned ShiftElts = 0, InsertAtByte = 0;
  bool Swap = false;

  // Shifts required to get the half-word we want at the VINSERTH source
  // element (3 for big endian, 4 for little endian).
  unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
  unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};

  uint32_t Mask = 0;
  uint32_t OriginalOrderLow = 0x01234567;
  uint32_t OriginalOrderHigh = 0x89ABCDEF;
  // Now we look at mask elements 0,2,4,6,8,10,12,14.  Pack the mask into a
  // 32-bit word; each element only needs a 4-bit nibble.
  for (unsigned i = 0; i < NumHalfWords; ++i) {
    unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
    Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift);
  }

  // For each mask element, find out if we're just inserting something
  // from V2 into V1 or vice versa.  Possible permutations inserting an
  // element from V2 into V1:
  //   X, 1, 2, 3, 4, 5, 6, 7
  //   0, X, 2, 3, 4, 5, 6, 7
  //   0, 1, X, 3, 4, 5, 6, 7
  //   0, 1, 2, X, 4, 5, 6, 7
  //   0, 1, 2, 3, X, 5, 6, 7
  //   0, 1, 2, 3, 4, X, 6, 7
  //   0, 1, 2, 3, 4, 5, X, 7
  //   0, 1, 2, 3, 4, 5, 6, X
  // Inserting from V1 into V2 will be similar, except the mask range will be
  // [8,15].

  bool FoundCandidate = false;
  // Go through the mask of half-words to find an element that's being moved
  // from one vector to the other.
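  // (Recall the packing above: the identity half-word order <0,1,2,3,4,5,6,7>
  // packs to 0x01234567, which is exactly OriginalOrderLow.)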
8406 for (unsigned i = 0; i < NumHalfWords; ++i) { 8407 unsigned MaskShift = (NumHalfWords - 1 - i) * 4; 8408 uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF; 8409 uint32_t MaskOtherElts = ~(0xF << MaskShift); 8410 uint32_t TargetOrder = 0x0; 8411 8412 // If both vector operands for the shuffle are the same vector, the mask 8413 // will contain only elements from the first one and the second one will be 8414 // undef. 8415 if (V2.isUndef()) { 8416 ShiftElts = 0; 8417 unsigned VINSERTHSrcElem = IsLE ? 4 : 3; 8418 TargetOrder = OriginalOrderLow; 8419 Swap = false; 8420 // Skip if not the correct element or mask of other elements don't equal 8421 // to our expected order. 8422 if (MaskOneElt == VINSERTHSrcElem && 8423 (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) { 8424 InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2; 8425 FoundCandidate = true; 8426 break; 8427 } 8428 } else { // If both operands are defined. 8429 // Target order is [8,15] if the current mask is between [0,7]. 8430 TargetOrder = 8431 (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow; 8432 // Skip if mask of other elements don't equal our expected order. 8433 if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) { 8434 // We only need the last 3 bits for the number of shifts. 8435 ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7] 8436 : BigEndianShifts[MaskOneElt & 0x7]; 8437 InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2; 8438 Swap = MaskOneElt < NumHalfWords; 8439 FoundCandidate = true; 8440 break; 8441 } 8442 } 8443 } 8444 8445 if (!FoundCandidate) 8446 return SDValue(); 8447 8448 // Candidate found, construct the proper SDAG sequence with VINSERTH, 8449 // optionally with VECSHL if shift is required. 8450 if (Swap) 8451 std::swap(V1, V2); 8452 if (V2.isUndef()) 8453 V2 = V1; 8454 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 8455 if (ShiftElts) { 8456 // Double ShiftElts because we're left shifting on v16i8 type. 8457 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2, 8458 DAG.getConstant(2 * ShiftElts, dl, MVT::i32)); 8459 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl); 8460 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2, 8461 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 8462 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 8463 } 8464 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2); 8465 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2, 8466 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 8467 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 8468 } 8469 8470 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this 8471 /// is a shuffle we can handle in a single instruction, return it. Otherwise, 8472 /// return the code it can be lowered into. Worst case, it can always be 8473 /// lowered into a vperm. 
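/// For example, a splat of 4-byte elements can be selected as a single
/// vspltw, while an arbitrary two-input byte permutation ends up as a VPERM
/// whose control vector is loaded from the constant pool.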
8474 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 8475 SelectionDAG &DAG) const { 8476 SDLoc dl(Op); 8477 SDValue V1 = Op.getOperand(0); 8478 SDValue V2 = Op.getOperand(1); 8479 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 8480 EVT VT = Op.getValueType(); 8481 bool isLittleEndian = Subtarget.isLittleEndian(); 8482 8483 unsigned ShiftElts, InsertAtByte; 8484 bool Swap = false; 8485 if (Subtarget.hasP9Vector() && 8486 PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap, 8487 isLittleEndian)) { 8488 if (Swap) 8489 std::swap(V1, V2); 8490 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 8491 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2); 8492 if (ShiftElts) { 8493 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2, 8494 DAG.getConstant(ShiftElts, dl, MVT::i32)); 8495 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl, 8496 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 8497 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 8498 } 8499 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2, 8500 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 8501 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 8502 } 8503 8504 if (Subtarget.hasP9Altivec()) { 8505 SDValue NewISDNode; 8506 if ((NewISDNode = lowerToVINSERTH(SVOp, DAG))) 8507 return NewISDNode; 8508 8509 if ((NewISDNode = lowerToVINSERTB(SVOp, DAG))) 8510 return NewISDNode; 8511 } 8512 8513 if (Subtarget.hasVSX() && 8514 PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) { 8515 if (Swap) 8516 std::swap(V1, V2); 8517 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 8518 SDValue Conv2 = 8519 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2); 8520 8521 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2, 8522 DAG.getConstant(ShiftElts, dl, MVT::i32)); 8523 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl); 8524 } 8525 8526 if (Subtarget.hasVSX() && 8527 PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) { 8528 if (Swap) 8529 std::swap(V1, V2); 8530 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1); 8531 SDValue Conv2 = 8532 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? 
V1 : V2); 8533 8534 SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2, 8535 DAG.getConstant(ShiftElts, dl, MVT::i32)); 8536 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI); 8537 } 8538 8539 if (Subtarget.hasP9Vector()) { 8540 if (PPC::isXXBRHShuffleMask(SVOp)) { 8541 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 8542 SDValue ReveHWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v8i16, Conv); 8543 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord); 8544 } else if (PPC::isXXBRWShuffleMask(SVOp)) { 8545 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 8546 SDValue ReveWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v4i32, Conv); 8547 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord); 8548 } else if (PPC::isXXBRDShuffleMask(SVOp)) { 8549 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1); 8550 SDValue ReveDWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v2i64, Conv); 8551 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord); 8552 } else if (PPC::isXXBRQShuffleMask(SVOp)) { 8553 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1); 8554 SDValue ReveQWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v1i128, Conv); 8555 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord); 8556 } 8557 } 8558 8559 if (Subtarget.hasVSX()) { 8560 if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) { 8561 int SplatIdx = PPC::getVSPLTImmediate(SVOp, 4, DAG); 8562 8563 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 8564 SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv, 8565 DAG.getConstant(SplatIdx, dl, MVT::i32)); 8566 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat); 8567 } 8568 8569 // Left shifts of 8 bytes are actually swaps. Convert accordingly. 8570 if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) { 8571 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1); 8572 SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv); 8573 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap); 8574 } 8575 } 8576 8577 if (Subtarget.hasQPX()) { 8578 if (VT.getVectorNumElements() != 4) 8579 return SDValue(); 8580 8581 if (V2.isUndef()) V2 = V1; 8582 8583 int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp); 8584 if (AlignIdx != -1) { 8585 return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2, 8586 DAG.getConstant(AlignIdx, dl, MVT::i32)); 8587 } else if (SVOp->isSplat()) { 8588 int SplatIdx = SVOp->getSplatIndex(); 8589 if (SplatIdx >= 4) { 8590 std::swap(V1, V2); 8591 SplatIdx -= 4; 8592 } 8593 8594 return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1, 8595 DAG.getConstant(SplatIdx, dl, MVT::i32)); 8596 } 8597 8598 // Lower this into a qvgpci/qvfperm pair. 8599 8600 // Compute the qvgpci literal 8601 unsigned idx = 0; 8602 for (unsigned i = 0; i < 4; ++i) { 8603 int m = SVOp->getMaskElt(i); 8604 unsigned mm = m >= 0 ? (unsigned) m : i; 8605 idx |= mm << (3-i)*3; 8606 } 8607 8608 SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64, 8609 DAG.getConstant(idx, dl, MVT::i32)); 8610 return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3); 8611 } 8612 8613 // Cases that are handled by instructions that take permute immediates 8614 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 8615 // selected by the instruction selector. 
8616 if (V2.isUndef()) { 8617 if (PPC::isSplatShuffleMask(SVOp, 1) || 8618 PPC::isSplatShuffleMask(SVOp, 2) || 8619 PPC::isSplatShuffleMask(SVOp, 4) || 8620 PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) || 8621 PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) || 8622 PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 || 8623 PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) || 8624 PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) || 8625 PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) || 8626 PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) || 8627 PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) || 8628 PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) || 8629 (Subtarget.hasP8Altivec() && ( 8630 PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) || 8631 PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) || 8632 PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) { 8633 return Op; 8634 } 8635 } 8636 8637 // Altivec has a variety of "shuffle immediates" that take two vector inputs 8638 // and produce a fixed permutation. If any of these match, do not lower to 8639 // VPERM. 8640 unsigned int ShuffleKind = isLittleEndian ? 2 : 0; 8641 if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) || 8642 PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) || 8643 PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 || 8644 PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) || 8645 PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) || 8646 PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) || 8647 PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) || 8648 PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) || 8649 PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) || 8650 (Subtarget.hasP8Altivec() && ( 8651 PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) || 8652 PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) || 8653 PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG)))) 8654 return Op; 8655 8656 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 8657 // perfect shuffle table to emit an optimal matching sequence. 8658 ArrayRef<int> PermMask = SVOp->getMask(); 8659 8660 unsigned PFIndexes[4]; 8661 bool isFourElementShuffle = true; 8662 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number 8663 unsigned EltNo = 8; // Start out undef. 8664 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 8665 if (PermMask[i*4+j] < 0) 8666 continue; // Undef, ignore it. 8667 8668 unsigned ByteSource = PermMask[i*4+j]; 8669 if ((ByteSource & 3) != j) { 8670 isFourElementShuffle = false; 8671 break; 8672 } 8673 8674 if (EltNo == 8) { 8675 EltNo = ByteSource/4; 8676 } else if (EltNo != ByteSource/4) { 8677 isFourElementShuffle = false; 8678 break; 8679 } 8680 } 8681 PFIndexes[i] = EltNo; 8682 } 8683 8684 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 8685 // perfect shuffle vector to determine if it is cost effective to do this as 8686 // discrete instructions, or whether we should use a vperm. 8687 // For now, we skip this for little endian until such time as we have a 8688 // little-endian perfect shuffle table. 8689 if (isFourElementShuffle && !isLittleEndian) { 8690 // Compute the index in the perfect shuffle table. 8691 unsigned PFTableIndex = 8692 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 8693 8694 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 8695 unsigned Cost = (PFEntry >> 30); 8696 8697 // Determining when to avoid vperm is tricky. Many things affect the cost 8698 // of vperm, particularly how many times the perm mask needs to be computed. 
    // For example, if the perm mask can be hoisted out of a loop or is already
    // used (perhaps because there are multiple permutes with the same shuffle
    // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of
    // the loop requires an extra register.
    //
    // As a compromise, we only emit discrete instructions if the shuffle can be
    // generated in 3 or fewer operations. When we have loop information
    // available, if this block is within a loop, we should avoid using vperm
    // for 3-operation perms and use a constant pool load instead.
    if (Cost < 3)
      return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
  }

  // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
  // vector that will get spilled to the constant pool.
  if (V2.isUndef()) V2 = V1;

  // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
  // that it is in input element units, not in bytes. Convert now.

  // For little endian, the order of the input vectors is reversed, and
  // the permutation mask is complemented with respect to 31. This is
  // necessary to produce proper semantics with the big-endian-biased vperm
  // instruction.
  EVT EltVT = V1.getValueType().getVectorElementType();
  unsigned BytesPerElement = EltVT.getSizeInBits()/8;

  SmallVector<SDValue, 16> ResultMask;
  for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
    unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];

    for (unsigned j = 0; j != BytesPerElement; ++j)
      if (isLittleEndian)
        ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
                                             dl, MVT::i32));
      else
        ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
                                             MVT::i32));
  }

  SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
  if (isLittleEndian)
    return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
                       V2, V1, VPermMask);
  else
    return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
                       V1, V2, VPermMask);
}

/// getVectorCompareInfo - Given an intrinsic, return false if it is not a
/// vector comparison. If it is, return true and fill in CompareOpc/isDot with
/// information about the intrinsic.
static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
                                 bool &isDot, const PPCSubtarget &Subtarget) {
  unsigned IntrinsicID =
    cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
  CompareOpc = -1;
  isDot = false;
  switch (IntrinsicID) {
  default:
    return false;
  // Comparison predicates.
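  // The CompareOpc values below correspond to the extended-opcode fields of
  // the matching compare instructions (e.g. 6 for vcmpequb); the "_p"
  // predicate intrinsics additionally set isDot so that the record (dot)
  // form is used and CR6 is updated.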
  case Intrinsic::ppc_altivec_vcmpbfp_p:
    CompareOpc = 966;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpeqfp_p:
    CompareOpc = 198;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpequb_p:
    CompareOpc = 6;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpequh_p:
    CompareOpc = 70;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpequw_p:
    CompareOpc = 134;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpequd_p:
    if (Subtarget.hasP8Altivec()) {
      CompareOpc = 199;
      isDot = true;
    } else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpneb_p:
  case Intrinsic::ppc_altivec_vcmpneh_p:
  case Intrinsic::ppc_altivec_vcmpnew_p:
  case Intrinsic::ppc_altivec_vcmpnezb_p:
  case Intrinsic::ppc_altivec_vcmpnezh_p:
  case Intrinsic::ppc_altivec_vcmpnezw_p:
    if (Subtarget.hasP9Altivec()) {
      switch (IntrinsicID) {
      default:
        llvm_unreachable("Unknown comparison intrinsic.");
      case Intrinsic::ppc_altivec_vcmpneb_p:
        CompareOpc = 7;
        break;
      case Intrinsic::ppc_altivec_vcmpneh_p:
        CompareOpc = 71;
        break;
      case Intrinsic::ppc_altivec_vcmpnew_p:
        CompareOpc = 135;
        break;
      case Intrinsic::ppc_altivec_vcmpnezb_p:
        CompareOpc = 263;
        break;
      case Intrinsic::ppc_altivec_vcmpnezh_p:
        CompareOpc = 327;
        break;
      case Intrinsic::ppc_altivec_vcmpnezw_p:
        CompareOpc = 391;
        break;
      }
      isDot = true;
    } else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpgefp_p:
    CompareOpc = 454;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtfp_p:
    CompareOpc = 710;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsb_p:
    CompareOpc = 774;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsh_p:
    CompareOpc = 838;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsw_p:
    CompareOpc = 902;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsd_p:
    if (Subtarget.hasP8Altivec()) {
      CompareOpc = 967;
      isDot = true;
    } else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpgtub_p:
    CompareOpc = 518;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtuh_p:
    CompareOpc = 582;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtuw_p:
    CompareOpc = 646;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtud_p:
    if (Subtarget.hasP8Altivec()) {
      CompareOpc = 711;
      isDot = true;
    } else
      return false;
    break;

  // VSX predicate comparisons use the same infrastructure.
  case Intrinsic::ppc_vsx_xvcmpeqdp_p:
  case Intrinsic::ppc_vsx_xvcmpgedp_p:
  case Intrinsic::ppc_vsx_xvcmpgtdp_p:
  case Intrinsic::ppc_vsx_xvcmpeqsp_p:
  case Intrinsic::ppc_vsx_xvcmpgesp_p:
  case Intrinsic::ppc_vsx_xvcmpgtsp_p:
    if (Subtarget.hasVSX()) {
      switch (IntrinsicID) {
      case Intrinsic::ppc_vsx_xvcmpeqdp_p:
        CompareOpc = 99;
        break;
      case Intrinsic::ppc_vsx_xvcmpgedp_p:
        CompareOpc = 115;
        break;
      case Intrinsic::ppc_vsx_xvcmpgtdp_p:
        CompareOpc = 107;
        break;
      case Intrinsic::ppc_vsx_xvcmpeqsp_p:
        CompareOpc = 67;
        break;
      case Intrinsic::ppc_vsx_xvcmpgesp_p:
        CompareOpc = 83;
        break;
      case Intrinsic::ppc_vsx_xvcmpgtsp_p:
        CompareOpc = 75;
        break;
      }
      isDot = true;
    } else
      return false;
    break;

  // Normal Comparisons.
  case Intrinsic::ppc_altivec_vcmpbfp:
    CompareOpc = 966;
    break;
  case Intrinsic::ppc_altivec_vcmpeqfp:
    CompareOpc = 198;
    break;
  case Intrinsic::ppc_altivec_vcmpequb:
    CompareOpc = 6;
    break;
  case Intrinsic::ppc_altivec_vcmpequh:
    CompareOpc = 70;
    break;
  case Intrinsic::ppc_altivec_vcmpequw:
    CompareOpc = 134;
    break;
  case Intrinsic::ppc_altivec_vcmpequd:
    if (Subtarget.hasP8Altivec())
      CompareOpc = 199;
    else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpneb:
  case Intrinsic::ppc_altivec_vcmpneh:
  case Intrinsic::ppc_altivec_vcmpnew:
  case Intrinsic::ppc_altivec_vcmpnezb:
  case Intrinsic::ppc_altivec_vcmpnezh:
  case Intrinsic::ppc_altivec_vcmpnezw:
    if (Subtarget.hasP9Altivec())
      switch (IntrinsicID) {
      default:
        llvm_unreachable("Unknown comparison intrinsic.");
      case Intrinsic::ppc_altivec_vcmpneb:
        CompareOpc = 7;
        break;
      case Intrinsic::ppc_altivec_vcmpneh:
        CompareOpc = 71;
        break;
      case Intrinsic::ppc_altivec_vcmpnew:
        CompareOpc = 135;
        break;
      case Intrinsic::ppc_altivec_vcmpnezb:
        CompareOpc = 263;
        break;
      case Intrinsic::ppc_altivec_vcmpnezh:
        CompareOpc = 327;
        break;
      case Intrinsic::ppc_altivec_vcmpnezw:
        CompareOpc = 391;
        break;
      }
    else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpgefp:
    CompareOpc = 454;
    break;
  case Intrinsic::ppc_altivec_vcmpgtfp:
    CompareOpc = 710;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsb:
    CompareOpc = 774;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsh:
    CompareOpc = 838;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsw:
    CompareOpc = 902;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsd:
    if (Subtarget.hasP8Altivec())
      CompareOpc = 967;
    else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpgtub:
    CompareOpc = 518;
    break;
  case Intrinsic::ppc_altivec_vcmpgtuh:
    CompareOpc = 582;
    break;
  case Intrinsic::ppc_altivec_vcmpgtuw:
    CompareOpc = 646;
    break;
  case Intrinsic::ppc_altivec_vcmpgtud:
    if (Subtarget.hasP8Altivec())
      CompareOpc = 711;
    else
      return false;
    break;
  }
  return true;
}

/// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
/// lower, do it, otherwise return null.
SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                   SelectionDAG &DAG) const {
  unsigned IntrinsicID =
    cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  SDLoc dl(Op);

  if (IntrinsicID == Intrinsic::thread_pointer) {
    // Reads the thread pointer register, used for __builtin_thread_pointer.
    if (Subtarget.isPPC64())
      return DAG.getRegister(PPC::X13, MVT::i64);
    return DAG.getRegister(PPC::R2, MVT::i32);
  }

  // If this is a lowered altivec predicate compare, CompareOpc is set to the
  // opcode number of the comparison.
  int CompareOpc;
  bool isDot;
  if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget))
    return SDValue();    // Don't custom lower most intrinsics.

  // If this is a non-dot comparison, make the VCMP node and we are done.
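  // A non-dot comparison only produces the vector result; the dot form
  // handled further below also updates CR6, which is then read back into a
  // GPR via PPCISD::MFOCRF.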
  if (!isDot) {
    SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
                              Op.getOperand(1), Op.getOperand(2),
                              DAG.getConstant(CompareOpc, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
  }

  // Create the PPCISD altivec 'dot' comparison node.
  SDValue Ops[] = {
    Op.getOperand(2),  // LHS
    Op.getOperand(3),  // RHS
    DAG.getConstant(CompareOpc, dl, MVT::i32)
  };
  EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
  SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);

  // Now that we have the comparison, emit a copy from the CR to a GPR.
  // This is flagged to the above dot comparison.
  SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
                              DAG.getRegister(PPC::CR6, MVT::i32),
                              CompNode.getValue(1));

  // Unpack the result based on how the target uses it.
  unsigned BitNo;   // Bit # of CR6.
  bool InvertBit;   // Invert result?
  switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
  default:  // Can't happen, don't crash on invalid number though.
  case 0:   // Return the value of the EQ bit of CR6.
    BitNo = 0; InvertBit = false;
    break;
  case 1:   // Return the inverted value of the EQ bit of CR6.
    BitNo = 0; InvertBit = true;
    break;
  case 2:   // Return the value of the LT bit of CR6.
    BitNo = 2; InvertBit = false;
    break;
  case 3:   // Return the inverted value of the LT bit of CR6.
    BitNo = 2; InvertBit = true;
    break;
  }

  // Shift the bit into the low position.
  Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
                      DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
  // Isolate the bit.
  Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
                      DAG.getConstant(1, dl, MVT::i32));

  // If we are supposed to, toggle the bit.
  if (InvertBit)
    Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
                        DAG.getConstant(1, dl, MVT::i32));
  return Flags;
}

SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
                                               SelectionDAG &DAG) const {
  // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to
  // the beginning of the argument list.
  int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
  SDLoc DL(Op);
  switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) {
  case Intrinsic::ppc_cfence: {
    assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
    assert(Subtarget.isPPC64() && "Only 64-bit is supported for now.");
    return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other,
                                      DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
                                                  Op.getOperand(ArgStart + 1)),
                                      Op.getOperand(0)),
                   0);
  }
  default:
    break;
  }
  return SDValue();
}

SDValue PPCTargetLowering::LowerREM(SDValue Op, SelectionDAG &DAG) const {
  // Check for a DIV with the same operands as this REM.
  for (auto UI : Op.getOperand(1)->uses()) {
    if ((Op.getOpcode() == ISD::SREM && UI->getOpcode() == ISD::SDIV) ||
        (Op.getOpcode() == ISD::UREM && UI->getOpcode() == ISD::UDIV))
      if (UI->getOperand(0) == Op.getOperand(0) &&
          UI->getOperand(1) == Op.getOperand(1))
        return SDValue();
  }
  return Op;
}

// Lower scalar BSWAP64 to xxbrd.
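// The i64 input is splatted into both doublewords of a v2i64 (mtvsrdd), the
// whole vector is byte-reversed (xxbrd), and the appropriate doubleword is
// moved back to a GPR (mfvsrd): element 0 on big-endian targets, element 1
// on little-endian ones.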
SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // MTVSRDD
  Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0),
                   Op.getOperand(0));
  // XXBRD
  Op = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v2i64, Op);
  // MFVSRD
  int VectorIndex = 0;
  if (Subtarget.isLittleEndian())
    VectorIndex = 1;
  Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op,
                   DAG.getTargetConstant(VectorIndex, dl, MVT::i32));
  return Op;
}

// ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be
// compared to a value that is atomically loaded (atomic loads zero-extend).
SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op,
                                                SelectionDAG &DAG) const {
  assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP &&
         "Expecting an atomic compare-and-swap here.");
  SDLoc dl(Op);
  auto *AtomicNode = cast<AtomicSDNode>(Op.getNode());
  EVT MemVT = AtomicNode->getMemoryVT();
  if (MemVT.getSizeInBits() >= 32)
    return Op;

  SDValue CmpOp = Op.getOperand(2);
  // If this is already correctly zero-extended, leave it alone.
  auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits());
  if (DAG.MaskedValueIsZero(CmpOp, HighBits))
    return Op;

  // Clear the high bits of the compare operand.
  unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1;
  SDValue NewCmpOp =
    DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp,
                DAG.getConstant(MaskVal, dl, MVT::i32));

  // Replace the existing compare operand with the properly zero-extended one.
  SmallVector<SDValue, 4> Ops;
  for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
    Ops.push_back(AtomicNode->getOperand(i));
  Ops[2] = NewCmpOp;
  MachineMemOperand *MMO = AtomicNode->getMemOperand();
  SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other);
  auto NodeTy =
    (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16;
  return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO);
}

SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // Create a stack slot that is 16-byte aligned.
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = MFI.CreateStackObject(16, 16, false);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  // Store the input value into Value#0 of the stack slot.
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
                               MachinePointerInfo());
  // Load it out.
  return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
                                                  SelectionDAG &DAG) const {
  assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
         "Should only be called for ISD::INSERT_VECTOR_ELT");

  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));
  // We have legal lowering for constant indices but not for variable ones.
  if (!C)
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types.
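  // For example, inserting into element 3 of a v8i16 gives byte offset 6 on
  // big-endian; on little-endian the offset is mirrored within the register,
  // (16 - 2) - 6 == 8, which is what the adjustment below computes.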
  if (VT == MVT::v8i16 || VT == MVT::v16i8) {
    SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2);
    unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8;
    unsigned InsertAtElement = C->getZExtValue();
    unsigned InsertAtByte = InsertAtElement * BytesInEachElement;
    if (Subtarget.isLittleEndian()) {
      InsertAtByte = (16 - BytesInEachElement) - InsertAtByte;
    }
    return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz,
                       DAG.getConstant(InsertAtByte, dl, MVT::i32));
  }
  return Op;
}

SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDNode *N = Op.getNode();

  assert(N->getOperand(0).getValueType() == MVT::v4i1 &&
         "Unknown extract_vector_elt type");

  SDValue Value = N->getOperand(0);

  // The first part of this is like the store lowering except that we don't
  // need to track the chain.

  // The values are now known to be -1 (false) or 1 (true). To convert this
  // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
  // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
  Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);

  // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
  // understand how to form the extending load.
  SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);

  Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);

  // Now convert to an integer and store.
  Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
                      DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
                      Value);

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = MFI.CreateStackObject(16, 16, false);
  MachinePointerInfo PtrInfo =
    MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  SDValue StoreChain = DAG.getEntryNode();
  SDValue Ops[] = {StoreChain,
                   DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
                   Value, FIdx};
  SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);

  StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
                                       dl, VTs, Ops, MVT::v4i32, PtrInfo);

  // Extract the value requested.
  unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
  SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
  Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);

  SDValue IntVal =
    DAG.getLoad(MVT::i32, dl, StoreChain, Idx, PtrInfo.getWithOffset(Offset));

  if (!Subtarget.useCRBits())
    return IntVal;

  return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal);
}

/// Lowering for QPX v4i1 loads
SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc dl(Op);
  LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
  SDValue LoadChain = LN->getChain();
  SDValue BasePtr = LN->getBasePtr();

  if (Op.getValueType() == MVT::v4f64 ||
      Op.getValueType() == MVT::v4f32) {
    EVT MemVT = LN->getMemoryVT();
    unsigned Alignment = LN->getAlignment();

    // If this load is properly aligned, then it is legal.
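    // Otherwise, split the under-aligned vector load into four scalar loads
    // below and glue their chains back together with a TokenFactor.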
    if (Alignment >= MemVT.getStoreSize())
      return Op;

    EVT ScalarVT = Op.getValueType().getScalarType(),
        ScalarMemVT = MemVT.getScalarType();
    unsigned Stride = ScalarMemVT.getStoreSize();

    SDValue Vals[4], LoadChains[4];
    for (unsigned Idx = 0; Idx < 4; ++Idx) {
      SDValue Load;
      if (ScalarVT != ScalarMemVT)
        Load = DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain,
                              BasePtr,
                              LN->getPointerInfo().getWithOffset(Idx * Stride),
                              ScalarMemVT, MinAlign(Alignment, Idx * Stride),
                              LN->getMemOperand()->getFlags(), LN->getAAInfo());
      else
        Load = DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr,
                           LN->getPointerInfo().getWithOffset(Idx * Stride),
                           MinAlign(Alignment, Idx * Stride),
                           LN->getMemOperand()->getFlags(), LN->getAAInfo());

      if (Idx == 0 && LN->isIndexed()) {
        assert(LN->getAddressingMode() == ISD::PRE_INC &&
               "Unknown addressing mode on vector load");
        Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(),
                                  LN->getAddressingMode());
      }

      Vals[Idx] = Load;
      LoadChains[Idx] = Load.getValue(1);

      BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
                            DAG.getConstant(Stride, dl,
                                            BasePtr.getValueType()));
    }

    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
    SDValue Value = DAG.getBuildVector(Op.getValueType(), dl, Vals);

    if (LN->isIndexed()) {
      SDValue RetOps[] = { Value, Vals[0].getValue(1), TF };
      return DAG.getMergeValues(RetOps, dl);
    }

    SDValue RetOps[] = { Value, TF };
    return DAG.getMergeValues(RetOps, dl);
  }

  assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower");
  assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported");

  // To lower v4i1 from a byte array, we load the byte elements of the
  // vector and then reuse the BUILD_VECTOR logic.

  SDValue VectElmts[4], VectElmtChains[4];
  for (unsigned i = 0; i < 4; ++i) {
    SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
    Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);

    VectElmts[i] = DAG.getExtLoad(
        ISD::EXTLOAD, dl, MVT::i32, LoadChain, Idx,
        LN->getPointerInfo().getWithOffset(i), MVT::i8,
        /* Alignment = */ 1, LN->getMemOperand()->getFlags(), LN->getAAInfo());
    VectElmtChains[i] = VectElmts[i].getValue(1);
  }

  LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains);
  SDValue Value = DAG.getBuildVector(MVT::v4i1, dl, VectElmts);

  SDValue RVals[] = { Value, LoadChain };
  return DAG.getMergeValues(RVals, dl);
}

/// Lowering for QPX v4i1 stores
SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc dl(Op);
  StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
  SDValue StoreChain = SN->getChain();
  SDValue BasePtr = SN->getBasePtr();
  SDValue Value = SN->getValue();

  if (Value.getValueType() == MVT::v4f64 ||
      Value.getValueType() == MVT::v4f32) {
    EVT MemVT = SN->getMemoryVT();
    unsigned Alignment = SN->getAlignment();

    // If this store is properly aligned, then it is legal.
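    // Otherwise, mirror the load case: extract and store each element as a
    // scalar below, merging the store chains with a TokenFactor.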
    if (Alignment >= MemVT.getStoreSize())
      return Op;

    EVT ScalarVT = Value.getValueType().getScalarType(),
        ScalarMemVT = MemVT.getScalarType();
    unsigned Stride = ScalarMemVT.getStoreSize();

    SDValue Stores[4];
    for (unsigned Idx = 0; Idx < 4; ++Idx) {
      SDValue Ex = DAG.getNode(
          ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value,
          DAG.getConstant(Idx, dl, getVectorIdxTy(DAG.getDataLayout())));
      SDValue Store;
      if (ScalarVT != ScalarMemVT)
        Store =
            DAG.getTruncStore(StoreChain, dl, Ex, BasePtr,
                              SN->getPointerInfo().getWithOffset(Idx * Stride),
                              ScalarMemVT, MinAlign(Alignment, Idx * Stride),
                              SN->getMemOperand()->getFlags(), SN->getAAInfo());
      else
        Store = DAG.getStore(StoreChain, dl, Ex, BasePtr,
                             SN->getPointerInfo().getWithOffset(Idx * Stride),
                             MinAlign(Alignment, Idx * Stride),
                             SN->getMemOperand()->getFlags(), SN->getAAInfo());

      if (Idx == 0 && SN->isIndexed()) {
        assert(SN->getAddressingMode() == ISD::PRE_INC &&
               "Unknown addressing mode on vector store");
        Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(),
                                    SN->getAddressingMode());
      }

      BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
                            DAG.getConstant(Stride, dl,
                                            BasePtr.getValueType()));
      Stores[Idx] = Store;
    }

    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);

    if (SN->isIndexed()) {
      SDValue RetOps[] = { TF, Stores[0].getValue(1) };
      return DAG.getMergeValues(RetOps, dl);
    }

    return TF;
  }

  assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported");
  assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower");

  // The values are now known to be -1 (false) or 1 (true). To convert this
  // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
  // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
  Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);

  // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
  // understand how to form the extending load.
  SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);

  Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);

  // Now convert to an integer and store.
  Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
                      DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
                      Value);

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = MFI.CreateStackObject(16, 16, false);
  MachinePointerInfo PtrInfo =
    MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  SDValue Ops[] = {StoreChain,
                   DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
                   Value, FIdx};
  SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);

  StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
                                       dl, VTs, Ops, MVT::v4i32, PtrInfo);

  // Move data into the byte array.
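  // qvstfiw wrote the four converted words to the stack slot; reload each
  // 32-bit word and truncating-store its low byte into the destination.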
  SDValue Loads[4], LoadChains[4];
  for (unsigned i = 0; i < 4; ++i) {
    unsigned Offset = 4*i;
    SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
    Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);

    Loads[i] = DAG.getLoad(MVT::i32, dl, StoreChain, Idx,
                           PtrInfo.getWithOffset(Offset));
    LoadChains[i] = Loads[i].getValue(1);
  }

  StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);

  SDValue Stores[4];
  for (unsigned i = 0; i < 4; ++i) {
    SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
    Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);

    Stores[i] = DAG.getTruncStore(
        StoreChain, dl, Loads[i], Idx, SN->getPointerInfo().getWithOffset(i),
        MVT::i8, /* Alignment = */ 1, SN->getMemOperand()->getFlags(),
        SN->getAAInfo());
  }

  StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);

  return StoreChain;
}

SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  if (Op.getValueType() == MVT::v4i32) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    SDValue Zero  = BuildSplatI( 0, 1, MVT::v4i32, DAG, dl);
    SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl); // +16 as shift amt.

    SDValue RHSSwap =   // = vrlw RHS, 16
      BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);

    // Shrinkify inputs to v8i16.
    LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
    RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
    RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);

    // Low parts multiplied together, generating 32-bit results (we ignore the
    // top parts).
    SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
                                      LHS, RHS, DAG, dl, MVT::v4i32);

    SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
                                      LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
    // Shift the high parts up 16 bits.
    HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
                              Neg16, DAG, dl);
    return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
  } else if (Op.getValueType() == MVT::v8i16) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl);

    return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
                            LHS, RHS, Zero, DAG, dl);
  } else if (Op.getValueType() == MVT::v16i8) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
    bool isLittleEndian = Subtarget.isLittleEndian();

    // Multiply the even 8-bit parts, producing 16-bit sums.
    SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
                                         LHS, RHS, DAG, dl, MVT::v8i16);
    EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);

    // Multiply the odd 8-bit parts, producing 16-bit sums.
    SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
                                        LHS, RHS, DAG, dl, MVT::v8i16);
    OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);

    // Merge the results together. Because vmuleub and vmuloub are
    // instructions with a big-endian bias, we must reverse the
    // element numbering and reverse the meaning of "odd" and "even"
    // when generating little endian code.
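    // On big-endian the truncated (low) byte of 16-bit product i sits at
    // byte offset 2*i+1, so the merge mask is <1,17,3,19,...>; on
    // little-endian the operands are swapped and the even byte offsets are
    // used instead.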
    int Ops[16];
    for (unsigned i = 0; i != 8; ++i) {
      if (isLittleEndian) {
        Ops[i*2  ] = 2*i;
        Ops[i*2+1] = 2*i+16;
      } else {
        Ops[i*2  ] = 2*i+1;
        Ops[i*2+1] = 2*i+1+16;
      }
    }
    if (isLittleEndian)
      return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
    else
      return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
  } else {
    llvm_unreachable("Unknown mul to lower!");
  }
}

SDValue PPCTargetLowering::LowerABS(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getOpcode() == ISD::ABS && "Should only be called for ISD::ABS");

  EVT VT = Op.getValueType();
  assert(VT.isVector() &&
         "Only set vector abs as custom, scalar abs shouldn't reach here!");
  assert((VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
          VT == MVT::v16i8) &&
         "Unexpected vector element type!");
  assert((VT != MVT::v2i64 || Subtarget.hasP8Altivec()) &&
         "Current subtarget doesn't support smax v2i64!");

  // For vector abs, it can be lowered to:
  // abs x
  // ==>
  // y = -x
  // smax(x, y)

  SDLoc dl(Op);
  SDValue X = Op.getOperand(0);
  SDValue Zero = DAG.getConstant(0, dl, VT);
  SDValue Y = DAG.getNode(ISD::SUB, dl, VT, Zero, X);

  // The SMAX patch (https://reviews.llvm.org/D47332) hasn't landed yet, so
  // use the vmaxs* intrinsics for now.
  // TODO: Use ISD::SMAX directly once that patch has landed.
  Intrinsic::ID BifID = Intrinsic::ppc_altivec_vmaxsw;
  if (VT == MVT::v2i64)
    BifID = Intrinsic::ppc_altivec_vmaxsd;
  else if (VT == MVT::v8i16)
    BifID = Intrinsic::ppc_altivec_vmaxsh;
  else if (VT == MVT::v16i8)
    BifID = Intrinsic::ppc_altivec_vmaxsb;

  return BuildIntrinsicOp(BifID, X, Y, DAG, dl, VT);
}

/// LowerOperation - Provide custom lowering hooks for some operations.
///
SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Wasn't expecting to be able to lower this!");
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
  case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
  case ISD::SETCC:              return LowerSETCC(Op, DAG);
  case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);

  // Variable argument lowering.
  case ISD::VASTART:            return LowerVASTART(Op, DAG);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::VACOPY:             return LowerVACOPY(Op, DAG);

  case ISD::STACKRESTORE:       return LowerSTACKRESTORE(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::GET_DYNAMIC_AREA_OFFSET:
    return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);

  // Exception handling lowering.
  case ISD::EH_DWARF_CFA:       return LowerEH_DWARF_CFA(Op, DAG);
  case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
  case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);

  case ISD::LOAD:               return LowerLOAD(Op, DAG);
  case ISD::STORE:              return LowerSTORE(Op, DAG);
  case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
  case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
  case ISD::FP_TO_UINT:
  case ISD::FP_TO_SINT:         return LowerFP_TO_INT(Op, DAG, SDLoc(Op));
  case ISD::UINT_TO_FP:
  case ISD::SINT_TO_FP:         return LowerINT_TO_FP(Op, DAG);
  case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);

  // Lower 64-bit shifts.
  case ISD::SHL_PARTS:          return LowerSHL_PARTS(Op, DAG);
  case ISD::SRL_PARTS:          return LowerSRL_PARTS(Op, DAG);
  case ISD::SRA_PARTS:          return LowerSRA_PARTS(Op, DAG);

  // Vector-related lowering.
  case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::MUL:                return LowerMUL(Op, DAG);
  case ISD::ABS:                return LowerABS(Op, DAG);

  // For counter-based loop handling.
  case ISD::INTRINSIC_W_CHAIN:  return SDValue();

  case ISD::BITCAST:            return LowerBITCAST(Op, DAG);

  // Frame & Return address.
  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);

  case ISD::INTRINSIC_VOID:
    return LowerINTRINSIC_VOID(Op, DAG);
  case ISD::SREM:
  case ISD::UREM:
    return LowerREM(Op, DAG);
  case ISD::BSWAP:
    return LowerBSWAP(Op, DAG);
  case ISD::ATOMIC_CMP_SWAP:
    return LowerATOMIC_CMP_SWAP(Op, DAG);
  }
}

void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue>&Results,
                                           SelectionDAG &DAG) const {
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Do not know how to custom type legalize this operation!");
  case ISD::READCYCLECOUNTER: {
    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
    SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0));

    Results.push_back(RTB);
    Results.push_back(RTB.getValue(1));
    Results.push_back(RTB.getValue(2));
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
        Intrinsic::ppc_is_decremented_ctr_nonzero)
      break;

    assert(N->getValueType(0) == MVT::i1 &&
           "Unexpected result type for CTR decrement intrinsic");
    EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
                                 N->getValueType(0));
    SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
    SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
                                 N->getOperand(1));

    Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt));
    Results.push_back(NewInt.getValue(1));
    break;
  }
  case ISD::VAARG: {
    if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
      return;

    EVT VT = N->getValueType(0);

    if (VT == MVT::i64) {
      SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);

      Results.push_back(NewNode);
      Results.push_back(NewNode.getValue(1));
    }
    return;
  }
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    // LowerFP_TO_INT() can only handle f32 and f64.
    if (N->getOperand(0).getValueType() == MVT::ppcf128)
      return;
    Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
    return;
  case ISD::BITCAST:
    // Don't handle bitcast here.
    return;
  }
}

//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *Func = Intrinsic::getDeclaration(M, Id);
  return Builder.CreateCall(Func, {});
}

// The mappings for emitLeading/TrailingFence are taken from
// http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                 Instruction *Inst,
                                                 AtomicOrdering Ord) const {
  if (Ord == AtomicOrdering::SequentiallyConsistent)
    return callIntrinsic(Builder, Intrinsic::ppc_sync);
  if (isReleaseOrStronger(Ord))
    return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
  return nullptr;
}

Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                  Instruction *Inst,
                                                  AtomicOrdering Ord) const {
  if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
    // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
    // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
    // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
    if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
      return Builder.CreateCall(
          Intrinsic::getDeclaration(
              Builder.GetInsertBlock()->getParent()->getParent(),
              Intrinsic::ppc_cfence, {Inst->getType()}),
          {Inst});
    // FIXME: Can use isync for rmw operation.
    return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
  }
  return nullptr;
}

MachineBasicBlock *
PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
                                    unsigned AtomicSize,
                                    unsigned BinOpcode,
                                    unsigned CmpOpcode,
                                    unsigned CmpPred) const {
  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
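  // When BinOpcode == 0 there is no arithmetic to do: TmpReg below aliases
  // the incoming value, and the loop degenerates to a load-reserve /
  // store-conditional of 'incr', i.e. an atomic swap.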
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  auto LoadMnemonic = PPC::LDARX;
  auto StoreMnemonic = PPC::STDCX;
  switch (AtomicSize) {
  default:
    llvm_unreachable("Unexpected size of atomic entity");
  case 1:
    LoadMnemonic = PPC::LBARX;
    StoreMnemonic = PPC::STBCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "No support for partword atomics.");
    break;
  case 2:
    LoadMnemonic = PPC::LHARX;
    StoreMnemonic = PPC::STHCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "No support for partword atomics.");
    break;
  case 4:
    LoadMnemonic = PPC::LWARX;
    StoreMnemonic = PPC::STWCX;
    break;
  case 8:
    LoadMnemonic = PPC::LDARX;
    StoreMnemonic = PPC::STDCX;
    break;
  }

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineFunction::iterator It = ++BB->getIterator();

  unsigned dest = MI.getOperand(0).getReg();
  unsigned ptrA = MI.getOperand(1).getReg();
  unsigned ptrB = MI.getOperand(2).getReg();
  unsigned incr = MI.getOperand(3).getReg();
  DebugLoc dl = MI.getDebugLoc();

  MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *loop2MBB =
    CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
  MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, loopMBB);
  if (CmpOpcode)
    F->insert(It, loop2MBB);
  F->insert(It, exitMBB);
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  MachineRegisterInfo &RegInfo = F->getRegInfo();
  unsigned TmpReg = (!BinOpcode) ? incr :
    RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass
                                                   : &PPC::GPRCRegClass);

  //  thisMBB:
  //   ...
  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  //  loopMBB:
  //   l[wd]arx dest, ptr
  //   add r0, dest, incr
  //   st[wd]cx. r0, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB

  //  For max/min...
  //  loopMBB:
  //   l[wd]arx dest, ptr
  //   cmpl?[wd] incr, dest
  //   bgt exitMBB
  //  loop2MBB:
  //   st[wd]cx. dest, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB

  BB = loopMBB;
  BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
    .addReg(ptrA).addReg(ptrB);
  if (BinOpcode)
    BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
  if (CmpOpcode) {
    // Signed comparisons of byte or halfword values must be sign-extended.
    if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
      unsigned ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
      BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
              ExtReg).addReg(dest);
      BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
        .addReg(incr).addReg(ExtReg);
    } else
      BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
        .addReg(incr).addReg(dest);

    BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(exitMBB);
    BB = loop2MBB;
  }
  BuildMI(BB, dl, TII->get(StoreMnemonic))
    .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
  BuildMI(BB, dl, TII->get(PPC::BCC))
    .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
  BB = exitMBB;
  return BB;
}

MachineBasicBlock *
PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr &MI,
                                            MachineBasicBlock *BB,
                                            bool is8bit, // operation
                                            unsigned BinOpcode,
                                            unsigned CmpOpcode,
                                            unsigned CmpPred) const {
  // If we support part-word atomic mnemonics, just use them.
  if (Subtarget.hasPartwordAtomics())
    return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode,
                            CmpOpcode, CmpPred);

  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  // In 64-bit mode we have to use 64-bit addresses, even though the
  // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address
  // registers without caring whether they're 32 or 64, but here we're
  // doing actual arithmetic on the addresses.
  bool is64bit = Subtarget.isPPC64();
  bool isLittleEndian = Subtarget.isLittleEndian();
  unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineFunction::iterator It = ++BB->getIterator();

  unsigned dest = MI.getOperand(0).getReg();
  unsigned ptrA = MI.getOperand(1).getReg();
  unsigned ptrB = MI.getOperand(2).getReg();
  unsigned incr = MI.getOperand(3).getReg();
  DebugLoc dl = MI.getDebugLoc();

  MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *loop2MBB =
    CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
  MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, loopMBB);
  if (CmpOpcode)
    F->insert(It, loop2MBB);
  F->insert(It, exitMBB);
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  MachineRegisterInfo &RegInfo = F->getRegInfo();
  const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass
                                          : &PPC::GPRCRegClass;
  unsigned PtrReg = RegInfo.createVirtualRegister(RC);
  unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
  unsigned ShiftReg =
    isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(RC);
  unsigned Incr2Reg = RegInfo.createVirtualRegister(RC);
  unsigned MaskReg = RegInfo.createVirtualRegister(RC);
  unsigned Mask2Reg = RegInfo.createVirtualRegister(RC);
  unsigned Mask3Reg = RegInfo.createVirtualRegister(RC);
  unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC);
  unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC);
  unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC);
  unsigned TmpDestReg = RegInfo.createVirtualRegister(RC);
  unsigned Ptr1Reg;
  unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC);

  //  thisMBB:
  //   ...
  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  // The 4-byte load must be aligned, while a char or short may be
  // anywhere in the word. Hence all this nasty bookkeeping code.
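  // For example, a big-endian halfword at byte offset 2 of its word lives in
  // the low 16 bits of that word: shift1 = (ptr & 2) * 8 = 16, and the xori
  // by 16 below turns that into the required shift amount of 0. On
  // little-endian targets no xori is needed, so ShiftReg aliases Shift1Reg.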
  //   add ptr1, ptrA, ptrB [copy if ptrA==0]
  //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
  //   xori shift, shift1, 24 [16]
  //   rlwinm ptr, ptr1, 0, 0, 29
  //   slw incr2, incr, shift
  //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
  //   slw mask, mask2, shift
  //  loopMBB:
  //   lwarx tmpDest, ptr
  //   add tmp, tmpDest, incr2
  //   andc tmp2, tmpDest, mask
  //   and tmp3, tmp, mask
  //   or tmp4, tmp3, tmp2
  //   stwcx. tmp4, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB
  //   srw dest, tmpDest, shift
  if (ptrA != ZeroReg) {
    Ptr1Reg = RegInfo.createVirtualRegister(RC);
    BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
      .addReg(ptrA).addReg(ptrB);
  } else {
    Ptr1Reg = ptrB;
  }
  BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
    .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
  if (!isLittleEndian)
    BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
      .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
  if (is64bit)
    BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
      .addReg(Ptr1Reg).addImm(0).addImm(61);
  else
    BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
      .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
  BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg)
    .addReg(incr).addReg(ShiftReg);
  if (is8bit)
    BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
  else {
    BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
    BuildMI(BB, dl, TII->get(PPC::ORI),Mask2Reg).addReg(Mask3Reg).addImm(65535);
  }
  BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
    .addReg(Mask2Reg).addReg(ShiftReg);

  BB = loopMBB;
  BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
    .addReg(ZeroReg).addReg(PtrReg);
  if (BinOpcode)
    BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
      .addReg(Incr2Reg).addReg(TmpDestReg);
  BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg)
    .addReg(TmpDestReg).addReg(MaskReg);
  BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg)
    .addReg(TmpReg).addReg(MaskReg);
  if (CmpOpcode) {
    // For unsigned comparisons, we can directly compare the shifted values.
    // For signed comparisons we shift and sign extend.
    unsigned SReg = RegInfo.createVirtualRegister(RC);
    BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), SReg)
      .addReg(TmpDestReg).addReg(MaskReg);
    unsigned ValueReg = SReg;
    unsigned CmpReg = Incr2Reg;
    if (CmpOpcode == PPC::CMPW) {
      ValueReg = RegInfo.createVirtualRegister(RC);
      BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
        .addReg(SReg).addReg(ShiftReg);
      unsigned ValueSReg = RegInfo.createVirtualRegister(RC);
      BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
        .addReg(ValueReg);
      ValueReg = ValueSReg;
      CmpReg = incr;
    }
    BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
      .addReg(CmpReg).addReg(ValueReg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(exitMBB);
    BB = loop2MBB;
  }
  BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg)
    .addReg(Tmp3Reg).addReg(Tmp2Reg);
  BuildMI(BB, dl, TII->get(PPC::STWCX))
    .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg);
  BuildMI(BB, dl, TII->get(PPC::BCC))
    .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
  BB = exitMBB;
  BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg)
    .addReg(ShiftReg);
  return BB;
}

llvm::MachineBasicBlock *
PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
                                    MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();

  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  const BasicBlock *BB = MBB->getBasicBlock();
  MachineFunction::iterator I = ++MBB->getIterator();

  unsigned DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
  assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
  unsigned mainDstReg = MRI.createVirtualRegister(RC);
  unsigned restoreDstReg = MRI.createVirtualRegister(RC);

  MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");
  // For v = setjmp(buf), we generate
  //
  // thisMBB:
  //  SjLjSetup mainMBB
  //  bl mainMBB
  //  v_restore = 1
  //  b sinkMBB
  //
  // mainMBB:
  //  buf[LabelOffset] = LR
  //  v_main = 0
  //
  // sinkMBB:
  //  v = phi(main, restore)
  //

  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
  MF->insert(I, mainMBB);
  MF->insert(I, sinkMBB);

  MachineInstrBuilder MIB;

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // Note that the structure of the jmp_buf used here is not compatible
  // with that used by libc, and is not designed to be. Specifically, it
  // stores only those 'reserved' registers that LLVM does not otherwise
  // understand how to spill. Also, by convention, by the time this
  // intrinsic is called, Clang has already stored the frame address in the
  // first slot of the buffer and stack address in the third. Following the
  // X86 target code, we'll store the jump address in the second slot. We also
  // need to save the TOC pointer (R2) to handle jumps between shared
  // libraries, and that will be stored in the fourth slot. The thread
  // identifier (R13) is not affected.

  // thisMBB:
  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  const int64_t TOCOffset   = 3 * PVT.getStoreSize();
  const int64_t BPOffset    = 4 * PVT.getStoreSize();

  // Prepare the instruction pointer in a register.
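  // In pointer-sized units, that places the jump address at slot 1, the TOC
  // pointer at slot 3, and the base pointer at slot 4, matching the layout
  // described in the note above (slots 0 and 2 are filled in by Clang).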
  const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
  unsigned LabelReg = MRI.createVirtualRegister(PtrRC);
  unsigned BufReg = MI.getOperand(1).getReg();

  if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) {
    setUsesTOCBasePtr(*MBB->getParent());
    MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
            .addReg(PPC::X2)
            .addImm(TOCOffset)
            .addReg(BufReg)
            .cloneMemRefs(MI);
  }

  // Naked functions never have a base pointer, and so we use r1. For all
  // other functions, this decision must be delayed until PEI.
  unsigned BaseReg;
  if (MF->getFunction().hasFnAttribute(Attribute::Naked))
    BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
  else
    BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;

  MIB = BuildMI(*thisMBB, MI, DL,
                TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
          .addReg(BaseReg)
          .addImm(BPOffset)
          .addReg(BufReg)
          .cloneMemRefs(MI);

  // Setup
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
  MIB.addRegMask(TRI->getNoPreservedMask());

  BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);

  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
          .addMBB(mainMBB);
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);

  thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
  thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());

  // mainMBB:
  //  mainDstReg = 0
  MIB =
      BuildMI(mainMBB, DL,
              TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);

  // Store IP
  if (Subtarget.isPPC64()) {
    MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
            .addReg(LabelReg)
            .addImm(LabelOffset)
            .addReg(BufReg);
  } else {
    MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
            .addReg(LabelReg)
            .addImm(LabelOffset)
            .addReg(BufReg);
  }
  MIB.cloneMemRefs(MI);

  BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
  mainMBB->addSuccessor(sinkMBB);

  // sinkMBB:
  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
          TII->get(PPC::PHI), DstReg)
    .addReg(mainDstReg).addMBB(mainMBB)
    .addReg(restoreDstReg).addMBB(thisMBB);

  MI.eraseFromParent();
  return sinkMBB;
}

MachineBasicBlock *
PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
                                     MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");

  const TargetRegisterClass *RC =
    (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
  unsigned Tmp = MRI.createVirtualRegister(RC);
  // Since FP is only updated here but NOT referenced, it's treated as GPR.
  unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
  unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
  unsigned BP =
      (PVT == MVT::i64)
          ? PPC::X30
          : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
                                                              : PPC::R30);
  MachineInstrBuilder MIB;

  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  const int64_t SPOffset    = 2 * PVT.getStoreSize();
  const int64_t TOCOffset   = 3 * PVT.getStoreSize();
  const int64_t BPOffset    = 4 * PVT.getStoreSize();

  unsigned BufReg = MI.getOperand(0).getReg();

  // Reload FP (the jumped-to function may not have had a
  // frame pointer, and if so, then its r31 will be restored
  // as necessary).
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
            .addImm(0)
            .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
            .addImm(0)
            .addReg(BufReg);
  }
  MIB.cloneMemRefs(MI);

  // Reload IP
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
            .addImm(LabelOffset)
            .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
            .addImm(LabelOffset)
            .addReg(BufReg);
  }
  MIB.cloneMemRefs(MI);

  // Reload SP
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
            .addImm(SPOffset)
            .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
            .addImm(SPOffset)
            .addReg(BufReg);
  }
  MIB.cloneMemRefs(MI);

  // Reload BP
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
            .addImm(BPOffset)
            .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
            .addImm(BPOffset)
            .addReg(BufReg);
  }
  MIB.cloneMemRefs(MI);

  // Reload TOC
  if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
    setUsesTOCBasePtr(*MBB->getParent());
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
            .addImm(TOCOffset)
            .addReg(BufReg)
            .cloneMemRefs(MI);
  }

  // Jump
  BuildMI(*MBB, MI, DL,
          TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
  BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));

  MI.eraseFromParent();
  return MBB;
}

MachineBasicBlock *
PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                               MachineBasicBlock *BB) const {
  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT) {
    if (Subtarget.isPPC64() && Subtarget.isSVR4ABI() &&
        MI.getOpcode() == TargetOpcode::PATCHPOINT) {
      // Call lowering should have added an r2 operand to indicate a dependence
      // on the TOC base pointer value. It can't, however, because there is no
      // way to mark the dependence as implicit there, and so the stackmap code
      // will confuse it with a regular operand. Instead, add the dependence
      // here.
10308 setUsesTOCBasePtr(*BB->getParent()); 10309 MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true)); 10310 } 10311 10312 return emitPatchPoint(MI, BB); 10313 } 10314 10315 if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 || 10316 MI.getOpcode() == PPC::EH_SjLj_SetJmp64) { 10317 return emitEHSjLjSetJmp(MI, BB); 10318 } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 || 10319 MI.getOpcode() == PPC::EH_SjLj_LongJmp64) { 10320 return emitEHSjLjLongJmp(MI, BB); 10321 } 10322 10323 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 10324 10325 // To "insert" these instructions we actually have to insert their 10326 // control-flow patterns. 10327 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 10328 MachineFunction::iterator It = ++BB->getIterator(); 10329 10330 MachineFunction *F = BB->getParent(); 10331 10332 if (MI.getOpcode() == PPC::SELECT_CC_I4 || 10333 MI.getOpcode() == PPC::SELECT_CC_I8 || 10334 MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8) { 10335 SmallVector<MachineOperand, 2> Cond; 10336 if (MI.getOpcode() == PPC::SELECT_CC_I4 || 10337 MI.getOpcode() == PPC::SELECT_CC_I8) 10338 Cond.push_back(MI.getOperand(4)); 10339 else 10340 Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET)); 10341 Cond.push_back(MI.getOperand(1)); 10342 10343 DebugLoc dl = MI.getDebugLoc(); 10344 TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond, 10345 MI.getOperand(2).getReg(), MI.getOperand(3).getReg()); 10346 } else if (MI.getOpcode() == PPC::SELECT_CC_I4 || 10347 MI.getOpcode() == PPC::SELECT_CC_I8 || 10348 MI.getOpcode() == PPC::SELECT_CC_F4 || 10349 MI.getOpcode() == PPC::SELECT_CC_F8 || 10350 MI.getOpcode() == PPC::SELECT_CC_F16 || 10351 MI.getOpcode() == PPC::SELECT_CC_QFRC || 10352 MI.getOpcode() == PPC::SELECT_CC_QSRC || 10353 MI.getOpcode() == PPC::SELECT_CC_QBRC || 10354 MI.getOpcode() == PPC::SELECT_CC_VRRC || 10355 MI.getOpcode() == PPC::SELECT_CC_VSFRC || 10356 MI.getOpcode() == PPC::SELECT_CC_VSSRC || 10357 MI.getOpcode() == PPC::SELECT_CC_VSRC || 10358 MI.getOpcode() == PPC::SELECT_CC_SPE4 || 10359 MI.getOpcode() == PPC::SELECT_CC_SPE || 10360 MI.getOpcode() == PPC::SELECT_I4 || 10361 MI.getOpcode() == PPC::SELECT_I8 || 10362 MI.getOpcode() == PPC::SELECT_F4 || 10363 MI.getOpcode() == PPC::SELECT_F8 || 10364 MI.getOpcode() == PPC::SELECT_F16 || 10365 MI.getOpcode() == PPC::SELECT_QFRC || 10366 MI.getOpcode() == PPC::SELECT_QSRC || 10367 MI.getOpcode() == PPC::SELECT_QBRC || 10368 MI.getOpcode() == PPC::SELECT_SPE || 10369 MI.getOpcode() == PPC::SELECT_SPE4 || 10370 MI.getOpcode() == PPC::SELECT_VRRC || 10371 MI.getOpcode() == PPC::SELECT_VSFRC || 10372 MI.getOpcode() == PPC::SELECT_VSSRC || 10373 MI.getOpcode() == PPC::SELECT_VSRC) { 10374 // The incoming instruction knows the destination vreg to set, the 10375 // condition code register to branch on, the true/false values to 10376 // select between, and a branch opcode to use. 10377 10378 // thisMBB: 10379 // ... 10380 // TrueVal = ... 10381 // cmpTY ccX, r1, r2 10382 // bCC copy1MBB 10383 // fallthrough --> copy0MBB 10384 MachineBasicBlock *thisMBB = BB; 10385 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 10386 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 10387 DebugLoc dl = MI.getDebugLoc(); 10388 F->insert(It, copy0MBB); 10389 F->insert(It, sinkMBB); 10390 10391 // Transfer the remainder of BB and its successor edges to sinkMBB. 
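// (After the splice below, every instruction that followed the pseudo in BB
// lives in sinkMBB, and transferSuccessorsAndUpdatePHIs redirects BB's
// successor edges, and any PHIs referring to BB, to sinkMBB.)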
10392 sinkMBB->splice(sinkMBB->begin(), BB, 10393 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 10394 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 10395 10396 // Next, add the true and fallthrough blocks as its successors. 10397 BB->addSuccessor(copy0MBB); 10398 BB->addSuccessor(sinkMBB); 10399 10400 if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 || 10401 MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 || 10402 MI.getOpcode() == PPC::SELECT_F16 || 10403 MI.getOpcode() == PPC::SELECT_SPE4 || 10404 MI.getOpcode() == PPC::SELECT_SPE || 10405 MI.getOpcode() == PPC::SELECT_QFRC || 10406 MI.getOpcode() == PPC::SELECT_QSRC || 10407 MI.getOpcode() == PPC::SELECT_QBRC || 10408 MI.getOpcode() == PPC::SELECT_VRRC || 10409 MI.getOpcode() == PPC::SELECT_VSFRC || 10410 MI.getOpcode() == PPC::SELECT_VSSRC || 10411 MI.getOpcode() == PPC::SELECT_VSRC) { 10412 BuildMI(BB, dl, TII->get(PPC::BC)) 10413 .addReg(MI.getOperand(1).getReg()) 10414 .addMBB(sinkMBB); 10415 } else { 10416 unsigned SelectPred = MI.getOperand(4).getImm(); 10417 BuildMI(BB, dl, TII->get(PPC::BCC)) 10418 .addImm(SelectPred) 10419 .addReg(MI.getOperand(1).getReg()) 10420 .addMBB(sinkMBB); 10421 } 10422 10423 // copy0MBB: 10424 // %FalseValue = ... 10425 // # fallthrough to sinkMBB 10426 BB = copy0MBB; 10427 10428 // Update machine-CFG edges 10429 BB->addSuccessor(sinkMBB); 10430 10431 // sinkMBB: 10432 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 10433 // ... 10434 BB = sinkMBB; 10435 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg()) 10436 .addReg(MI.getOperand(3).getReg()) 10437 .addMBB(copy0MBB) 10438 .addReg(MI.getOperand(2).getReg()) 10439 .addMBB(thisMBB); 10440 } else if (MI.getOpcode() == PPC::ReadTB) { 10441 // To read the 64-bit time-base register on a 32-bit target, we read the 10442 // two halves. Should the counter have wrapped while it was being read, we 10443 // need to try again. 10444 // ... 10445 // readLoop: 10446 // mfspr Rx,TBU # load from TBU 10447 // mfspr Ry,TB # load from TB 10448 // mfspr Rz,TBU # load from TBU 10449 // cmpw crX,Rx,Rz # check if 'old'='new' 10450 // bne readLoop # branch if they're not equal 10451 // ... 10452 10453 MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB); 10454 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 10455 DebugLoc dl = MI.getDebugLoc(); 10456 F->insert(It, readMBB); 10457 F->insert(It, sinkMBB); 10458 10459 // Transfer the remainder of BB and its successor edges to sinkMBB. 
10460 sinkMBB->splice(sinkMBB->begin(), BB, 10461 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 10462 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 10463 10464 BB->addSuccessor(readMBB); 10465 BB = readMBB; 10466 10467 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10468 unsigned ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 10469 unsigned LoReg = MI.getOperand(0).getReg(); 10470 unsigned HiReg = MI.getOperand(1).getReg(); 10471 10472 BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269); 10473 BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268); 10474 BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269); 10475 10476 unsigned CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 10477 10478 BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg) 10479 .addReg(HiReg).addReg(ReadAgainReg); 10480 BuildMI(BB, dl, TII->get(PPC::BCC)) 10481 .addImm(PPC::PRED_NE).addReg(CmpReg).addMBB(readMBB); 10482 10483 BB->addSuccessor(readMBB); 10484 BB->addSuccessor(sinkMBB); 10485 } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8) 10486 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4); 10487 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16) 10488 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4); 10489 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32) 10490 BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4); 10491 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64) 10492 BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8); 10493 10494 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8) 10495 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND); 10496 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16) 10497 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND); 10498 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32) 10499 BB = EmitAtomicBinary(MI, BB, 4, PPC::AND); 10500 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64) 10501 BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8); 10502 10503 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8) 10504 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR); 10505 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16) 10506 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR); 10507 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32) 10508 BB = EmitAtomicBinary(MI, BB, 4, PPC::OR); 10509 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64) 10510 BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8); 10511 10512 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8) 10513 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR); 10514 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16) 10515 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR); 10516 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32) 10517 BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR); 10518 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64) 10519 BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8); 10520 10521 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8) 10522 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND); 10523 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16) 10524 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND); 10525 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32) 10526 BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND); 10527 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64) 10528 BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8); 10529 10530 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8) 10531 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF); 10532 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16) 10533 BB = 
EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF); 10534 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32) 10535 BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF); 10536 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64) 10537 BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8); 10538 10539 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8) 10540 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE); 10541 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16) 10542 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE); 10543 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32) 10544 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE); 10545 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64) 10546 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE); 10547 10548 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8) 10549 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE); 10550 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16) 10551 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE); 10552 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32) 10553 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE); 10554 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64) 10555 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE); 10556 10557 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8) 10558 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE); 10559 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16) 10560 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE); 10561 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32) 10562 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE); 10563 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64) 10564 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE); 10565 10566 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8) 10567 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE); 10568 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16) 10569 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE); 10570 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32) 10571 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE); 10572 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64) 10573 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE); 10574 10575 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8) 10576 BB = EmitPartwordAtomicBinary(MI, BB, true, 0); 10577 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16) 10578 BB = EmitPartwordAtomicBinary(MI, BB, false, 0); 10579 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32) 10580 BB = EmitAtomicBinary(MI, BB, 4, 0); 10581 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64) 10582 BB = EmitAtomicBinary(MI, BB, 8, 0); 10583 else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 || 10584 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 || 10585 (Subtarget.hasPartwordAtomics() && 10586 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) || 10587 (Subtarget.hasPartwordAtomics() && 10588 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) { 10589 bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64; 10590 10591 auto LoadMnemonic = PPC::LDARX; 10592 auto StoreMnemonic = PPC::STDCX; 10593 switch (MI.getOpcode()) { 10594 default: 10595 llvm_unreachable("Compare and swap of unknown size"); 10596 case PPC::ATOMIC_CMP_SWAP_I8: 10597 LoadMnemonic = PPC::LBARX; 10598 StoreMnemonic = PPC::STBCX; 10599 assert(Subtarget.hasPartwordAtomics() 
&& "No support partword atomics."); 10600 break; 10601 case PPC::ATOMIC_CMP_SWAP_I16: 10602 LoadMnemonic = PPC::LHARX; 10603 StoreMnemonic = PPC::STHCX; 10604 assert(Subtarget.hasPartwordAtomics() && "No support partword atomics."); 10605 break; 10606 case PPC::ATOMIC_CMP_SWAP_I32: 10607 LoadMnemonic = PPC::LWARX; 10608 StoreMnemonic = PPC::STWCX; 10609 break; 10610 case PPC::ATOMIC_CMP_SWAP_I64: 10611 LoadMnemonic = PPC::LDARX; 10612 StoreMnemonic = PPC::STDCX; 10613 break; 10614 } 10615 unsigned dest = MI.getOperand(0).getReg(); 10616 unsigned ptrA = MI.getOperand(1).getReg(); 10617 unsigned ptrB = MI.getOperand(2).getReg(); 10618 unsigned oldval = MI.getOperand(3).getReg(); 10619 unsigned newval = MI.getOperand(4).getReg(); 10620 DebugLoc dl = MI.getDebugLoc(); 10621 10622 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 10623 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 10624 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 10625 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 10626 F->insert(It, loop1MBB); 10627 F->insert(It, loop2MBB); 10628 F->insert(It, midMBB); 10629 F->insert(It, exitMBB); 10630 exitMBB->splice(exitMBB->begin(), BB, 10631 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 10632 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 10633 10634 // thisMBB: 10635 // ... 10636 // fallthrough --> loopMBB 10637 BB->addSuccessor(loop1MBB); 10638 10639 // loop1MBB: 10640 // l[bhwd]arx dest, ptr 10641 // cmp[wd] dest, oldval 10642 // bne- midMBB 10643 // loop2MBB: 10644 // st[bhwd]cx. newval, ptr 10645 // bne- loopMBB 10646 // b exitBB 10647 // midMBB: 10648 // st[bhwd]cx. dest, ptr 10649 // exitBB: 10650 BB = loop1MBB; 10651 BuildMI(BB, dl, TII->get(LoadMnemonic), dest) 10652 .addReg(ptrA).addReg(ptrB); 10653 BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0) 10654 .addReg(oldval).addReg(dest); 10655 BuildMI(BB, dl, TII->get(PPC::BCC)) 10656 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 10657 BB->addSuccessor(loop2MBB); 10658 BB->addSuccessor(midMBB); 10659 10660 BB = loop2MBB; 10661 BuildMI(BB, dl, TII->get(StoreMnemonic)) 10662 .addReg(newval).addReg(ptrA).addReg(ptrB); 10663 BuildMI(BB, dl, TII->get(PPC::BCC)) 10664 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 10665 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 10666 BB->addSuccessor(loop1MBB); 10667 BB->addSuccessor(exitMBB); 10668 10669 BB = midMBB; 10670 BuildMI(BB, dl, TII->get(StoreMnemonic)) 10671 .addReg(dest).addReg(ptrA).addReg(ptrB); 10672 BB->addSuccessor(exitMBB); 10673 10674 // exitMBB: 10675 // ... 10676 BB = exitMBB; 10677 } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 || 10678 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) { 10679 // We must use 64-bit registers for addresses when targeting 64-bit, 10680 // since we're actually doing arithmetic on them. Other registers 10681 // can be 32-bit. 
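// (Concretely, Ptr1Reg and PtrReg below hold addresses, so on 64-bit
// targets they are computed with ADD8/RLDICR on 64-bit registers, while the
// incoming dest/oldval/newval values may remain 32-bit.)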
10682 bool is64bit = Subtarget.isPPC64(); 10683 bool isLittleEndian = Subtarget.isLittleEndian(); 10684 bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8; 10685 10686 unsigned dest = MI.getOperand(0).getReg(); 10687 unsigned ptrA = MI.getOperand(1).getReg(); 10688 unsigned ptrB = MI.getOperand(2).getReg(); 10689 unsigned oldval = MI.getOperand(3).getReg(); 10690 unsigned newval = MI.getOperand(4).getReg(); 10691 DebugLoc dl = MI.getDebugLoc(); 10692 10693 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 10694 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 10695 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 10696 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 10697 F->insert(It, loop1MBB); 10698 F->insert(It, loop2MBB); 10699 F->insert(It, midMBB); 10700 F->insert(It, exitMBB); 10701 exitMBB->splice(exitMBB->begin(), BB, 10702 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 10703 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 10704 10705 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10706 const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass 10707 : &PPC::GPRCRegClass; 10708 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 10709 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 10710 unsigned ShiftReg = 10711 isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(RC); 10712 unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC); 10713 unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC); 10714 unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC); 10715 unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC); 10716 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 10717 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 10718 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 10719 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 10720 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 10721 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 10722 unsigned Ptr1Reg; 10723 unsigned TmpReg = RegInfo.createVirtualRegister(RC); 10724 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 10725 // thisMBB: 10726 // ... 10727 // fallthrough --> loopMBB 10728 BB->addSuccessor(loop1MBB); 10729 10730 // The 4-byte load must be aligned, while a char or short may be 10731 // anywhere in the word. Hence all this nasty bookkeeping code. 10732 // add ptr1, ptrA, ptrB [copy if ptrA==0] 10733 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 10734 // xori shift, shift1, 24 [16] 10735 // rlwinm ptr, ptr1, 0, 0, 29 10736 // slw newval2, newval, shift 10737 // slw oldval2, oldval,shift 10738 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 10739 // slw mask, mask2, shift 10740 // and newval3, newval2, mask 10741 // and oldval3, oldval2, mask 10742 // loop1MBB: 10743 // lwarx tmpDest, ptr 10744 // and tmp, tmpDest, mask 10745 // cmpw tmp, oldval3 10746 // bne- midMBB 10747 // loop2MBB: 10748 // andc tmp2, tmpDest, mask 10749 // or tmp4, tmp2, newval3 10750 // stwcx. tmp4, ptr 10751 // bne- loop1MBB 10752 // b exitBB 10753 // midMBB: 10754 // stwcx. tmpDest, ptr 10755 // exitBB: 10756 // srw dest, tmpDest, shift 10757 if (ptrA != ZeroReg) { 10758 Ptr1Reg = RegInfo.createVirtualRegister(RC); 10759 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 10760 .addReg(ptrA).addReg(ptrB); 10761 } else { 10762 Ptr1Reg = ptrB; 10763 } 10764 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 10765 .addImm(3).addImm(27).addImm(is8bit ? 
28 : 27); 10766 if (!isLittleEndian) 10767 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 10768 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16); 10769 if (is64bit) 10770 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 10771 .addReg(Ptr1Reg).addImm(0).addImm(61); 10772 else 10773 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 10774 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 10775 BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg) 10776 .addReg(newval).addReg(ShiftReg); 10777 BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg) 10778 .addReg(oldval).addReg(ShiftReg); 10779 if (is8bit) 10780 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 10781 else { 10782 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 10783 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg) 10784 .addReg(Mask3Reg).addImm(65535); 10785 } 10786 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 10787 .addReg(Mask2Reg).addReg(ShiftReg); 10788 BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg) 10789 .addReg(NewVal2Reg).addReg(MaskReg); 10790 BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg) 10791 .addReg(OldVal2Reg).addReg(MaskReg); 10792 10793 BB = loop1MBB; 10794 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 10795 .addReg(ZeroReg).addReg(PtrReg); 10796 BuildMI(BB, dl, TII->get(PPC::AND),TmpReg) 10797 .addReg(TmpDestReg).addReg(MaskReg); 10798 BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0) 10799 .addReg(TmpReg).addReg(OldVal3Reg); 10800 BuildMI(BB, dl, TII->get(PPC::BCC)) 10801 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 10802 BB->addSuccessor(loop2MBB); 10803 BB->addSuccessor(midMBB); 10804 10805 BB = loop2MBB; 10806 BuildMI(BB, dl, TII->get(PPC::ANDC),Tmp2Reg) 10807 .addReg(TmpDestReg).addReg(MaskReg); 10808 BuildMI(BB, dl, TII->get(PPC::OR),Tmp4Reg) 10809 .addReg(Tmp2Reg).addReg(NewVal3Reg); 10810 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg) 10811 .addReg(ZeroReg).addReg(PtrReg); 10812 BuildMI(BB, dl, TII->get(PPC::BCC)) 10813 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 10814 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 10815 BB->addSuccessor(loop1MBB); 10816 BB->addSuccessor(exitMBB); 10817 10818 BB = midMBB; 10819 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg) 10820 .addReg(ZeroReg).addReg(PtrReg); 10821 BB->addSuccessor(exitMBB); 10822 10823 // exitMBB: 10824 // ... 10825 BB = exitMBB; 10826 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW),dest).addReg(TmpReg) 10827 .addReg(ShiftReg); 10828 } else if (MI.getOpcode() == PPC::FADDrtz) { 10829 // This pseudo performs an FADD with rounding mode temporarily forced 10830 // to round-to-zero. We emit this via custom inserter since the FPSCR 10831 // is not modeled at the SelectionDAG level. 10832 unsigned Dest = MI.getOperand(0).getReg(); 10833 unsigned Src1 = MI.getOperand(1).getReg(); 10834 unsigned Src2 = MI.getOperand(2).getReg(); 10835 DebugLoc dl = MI.getDebugLoc(); 10836 10837 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10838 unsigned MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); 10839 10840 // Save FPSCR value. 10841 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg); 10842 10843 // Set rounding mode to round-to-zero. 10844 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31); 10845 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30); 10846 10847 // Perform addition. 10848 BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2); 10849 10850 // Restore FPSCR value. 
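// (An illustrative note: the mtfsb1 31 / mtfsb0 30 pair above sets the RN
// field to 0b01, i.e. round toward zero; the mtfsf with a field mask of 1
// below rewrites FPSCR field 7, which contains RN, from the saved copy.)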
10851 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg); 10852 } else if (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT || 10853 MI.getOpcode() == PPC::ANDIo_1_GT_BIT || 10854 MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 || 10855 MI.getOpcode() == PPC::ANDIo_1_GT_BIT8) { 10856 unsigned Opcode = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 || 10857 MI.getOpcode() == PPC::ANDIo_1_GT_BIT8) 10858 ? PPC::ANDIo8 10859 : PPC::ANDIo; 10860 bool isEQ = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT || 10861 MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8); 10862 10863 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10864 unsigned Dest = RegInfo.createVirtualRegister(Opcode == PPC::ANDIo ? 10865 &PPC::GPRCRegClass : 10866 &PPC::G8RCRegClass); 10867 10868 DebugLoc dl = MI.getDebugLoc(); 10869 BuildMI(*BB, MI, dl, TII->get(Opcode), Dest) 10870 .addReg(MI.getOperand(1).getReg()) 10871 .addImm(1); 10872 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), 10873 MI.getOperand(0).getReg()) 10874 .addReg(isEQ ? PPC::CR0EQ : PPC::CR0GT); 10875 } else if (MI.getOpcode() == PPC::TCHECK_RET) { 10876 DebugLoc Dl = MI.getDebugLoc(); 10877 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10878 unsigned CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 10879 BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg); 10880 return BB; 10881 } else { 10882 llvm_unreachable("Unexpected instr type to insert"); 10883 } 10884 10885 MI.eraseFromParent(); // The pseudo instruction is gone now. 10886 return BB; 10887 } 10888 10889 //===----------------------------------------------------------------------===// 10890 // Target Optimization Hooks 10891 //===----------------------------------------------------------------------===// 10892 10893 static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) { 10894 // For the estimates, convergence is quadratic, so we essentially double the 10895 // number of digits correct after every iteration. For both FRE and FRSQRTE, 10896 // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(), 10897 // this is 2^-14. IEEE float has 23 digits and double has 52 digits. 10898 int RefinementSteps = Subtarget.hasRecipPrec() ? 
1 : 3;
10899 if (VT.getScalarType() == MVT::f64)
10900 RefinementSteps++;
10901 return RefinementSteps;
10902 }
10903
10904 SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
10905 int Enabled, int &RefinementSteps,
10906 bool &UseOneConstNR,
10907 bool Reciprocal) const {
10908 EVT VT = Operand.getValueType();
10909 if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
10910 (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
10911 (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
10912 (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
10913 (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
10914 (VT == MVT::v4f64 && Subtarget.hasQPX())) {
10915 if (RefinementSteps == ReciprocalEstimate::Unspecified)
10916 RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
10917
10918 UseOneConstNR = true;
10919 return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
10920 }
10921 return SDValue();
10922 }
10923
10924 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
10925 int Enabled,
10926 int &RefinementSteps) const {
10927 EVT VT = Operand.getValueType();
10928 if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
10929 (VT == MVT::f64 && Subtarget.hasFRE()) ||
10930 (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
10931 (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
10932 (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
10933 (VT == MVT::v4f64 && Subtarget.hasQPX())) {
10934 if (RefinementSteps == ReciprocalEstimate::Unspecified)
10935 RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
10936 return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
10937 }
10938 return SDValue();
10939 }
10940
10941 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
10942 // Note: This functionality is used only when unsafe-fp-math is enabled, and
10943 // on cores with reciprocal estimates (which are used when unsafe-fp-math is
10944 // enabled for division), this functionality is redundant with the default
10945 // combiner logic (once the division -> reciprocal/multiply transformation
10946 // has taken place). As a result, this matters more for older cores than for
10947 // newer ones.
10948
10949 // Combine multiple FDIVs with the same divisor into multiple FMULs by the
10950 // reciprocal if there are two or more FDIVs (for embedded cores with only
10951 // one FP pipeline) or three or more FDIVs (for generic OOO cores).
10952 switch (Subtarget.getDarwinDirective()) {
10953 default:
10954 return 3;
10955 case PPC::DIR_440:
10956 case PPC::DIR_A2:
10957 case PPC::DIR_E500:
10958 case PPC::DIR_E500mc:
10959 case PPC::DIR_E5500:
10960 return 2;
10961 }
10962 }
10963
10964 // isConsecutiveLSLoc needs to work even if all adds have not yet been
10965 // collapsed, and so we need to look through chains of them.
10966 static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
10967 int64_t& Offset, SelectionDAG &DAG) {
10968 if (DAG.isBaseWithConstantOffset(Loc)) {
10969 Base = Loc.getOperand(0);
10970 Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
10971
10972 // The base might itself be a base plus an offset, and if so, accumulate
10973 // that as well.
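// For example, for (add (add X, 16), 8) the code above records
// Base = (add X, 16) and adds 8 to Offset; the recursive call below then
// walks into the inner add, leaving Base = X and Offset increased by 24.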
10974 getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG); 10975 } 10976 } 10977 10978 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base, 10979 unsigned Bytes, int Dist, 10980 SelectionDAG &DAG) { 10981 if (VT.getSizeInBits() / 8 != Bytes) 10982 return false; 10983 10984 SDValue BaseLoc = Base->getBasePtr(); 10985 if (Loc.getOpcode() == ISD::FrameIndex) { 10986 if (BaseLoc.getOpcode() != ISD::FrameIndex) 10987 return false; 10988 const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 10989 int FI = cast<FrameIndexSDNode>(Loc)->getIndex(); 10990 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex(); 10991 int FS = MFI.getObjectSize(FI); 10992 int BFS = MFI.getObjectSize(BFI); 10993 if (FS != BFS || FS != (int)Bytes) return false; 10994 return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes); 10995 } 10996 10997 SDValue Base1 = Loc, Base2 = BaseLoc; 10998 int64_t Offset1 = 0, Offset2 = 0; 10999 getBaseWithConstantOffset(Loc, Base1, Offset1, DAG); 11000 getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG); 11001 if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes)) 11002 return true; 11003 11004 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 11005 const GlobalValue *GV1 = nullptr; 11006 const GlobalValue *GV2 = nullptr; 11007 Offset1 = 0; 11008 Offset2 = 0; 11009 bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1); 11010 bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2); 11011 if (isGA1 && isGA2 && GV1 == GV2) 11012 return Offset1 == (Offset2 + Dist*Bytes); 11013 return false; 11014 } 11015 11016 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does 11017 // not enforce equality of the chain operands. 11018 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base, 11019 unsigned Bytes, int Dist, 11020 SelectionDAG &DAG) { 11021 if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) { 11022 EVT VT = LS->getMemoryVT(); 11023 SDValue Loc = LS->getBasePtr(); 11024 return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG); 11025 } 11026 11027 if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) { 11028 EVT VT; 11029 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 11030 default: return false; 11031 case Intrinsic::ppc_qpx_qvlfd: 11032 case Intrinsic::ppc_qpx_qvlfda: 11033 VT = MVT::v4f64; 11034 break; 11035 case Intrinsic::ppc_qpx_qvlfs: 11036 case Intrinsic::ppc_qpx_qvlfsa: 11037 VT = MVT::v4f32; 11038 break; 11039 case Intrinsic::ppc_qpx_qvlfcd: 11040 case Intrinsic::ppc_qpx_qvlfcda: 11041 VT = MVT::v2f64; 11042 break; 11043 case Intrinsic::ppc_qpx_qvlfcs: 11044 case Intrinsic::ppc_qpx_qvlfcsa: 11045 VT = MVT::v2f32; 11046 break; 11047 case Intrinsic::ppc_qpx_qvlfiwa: 11048 case Intrinsic::ppc_qpx_qvlfiwz: 11049 case Intrinsic::ppc_altivec_lvx: 11050 case Intrinsic::ppc_altivec_lvxl: 11051 case Intrinsic::ppc_vsx_lxvw4x: 11052 case Intrinsic::ppc_vsx_lxvw4x_be: 11053 VT = MVT::v4i32; 11054 break; 11055 case Intrinsic::ppc_vsx_lxvd2x: 11056 case Intrinsic::ppc_vsx_lxvd2x_be: 11057 VT = MVT::v2f64; 11058 break; 11059 case Intrinsic::ppc_altivec_lvebx: 11060 VT = MVT::i8; 11061 break; 11062 case Intrinsic::ppc_altivec_lvehx: 11063 VT = MVT::i16; 11064 break; 11065 case Intrinsic::ppc_altivec_lvewx: 11066 VT = MVT::i32; 11067 break; 11068 } 11069 11070 return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG); 11071 } 11072 11073 if (N->getOpcode() == ISD::INTRINSIC_VOID) { 11074 EVT VT; 11075 switch 
(cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
11076 default: return false;
11077 case Intrinsic::ppc_qpx_qvstfd:
11078 case Intrinsic::ppc_qpx_qvstfda:
11079 VT = MVT::v4f64;
11080 break;
11081 case Intrinsic::ppc_qpx_qvstfs:
11082 case Intrinsic::ppc_qpx_qvstfsa:
11083 VT = MVT::v4f32;
11084 break;
11085 case Intrinsic::ppc_qpx_qvstfcd:
11086 case Intrinsic::ppc_qpx_qvstfcda:
11087 VT = MVT::v2f64;
11088 break;
11089 case Intrinsic::ppc_qpx_qvstfcs:
11090 case Intrinsic::ppc_qpx_qvstfcsa:
11091 VT = MVT::v2f32;
11092 break;
11093 case Intrinsic::ppc_qpx_qvstfiw:
11094 case Intrinsic::ppc_qpx_qvstfiwa:
11095 case Intrinsic::ppc_altivec_stvx:
11096 case Intrinsic::ppc_altivec_stvxl:
11097 case Intrinsic::ppc_vsx_stxvw4x:
11098 VT = MVT::v4i32;
11099 break;
11100 case Intrinsic::ppc_vsx_stxvd2x:
11101 VT = MVT::v2f64;
11102 break;
11103 case Intrinsic::ppc_vsx_stxvw4x_be:
11104 VT = MVT::v4i32;
11105 break;
11106 case Intrinsic::ppc_vsx_stxvd2x_be:
11107 VT = MVT::v2f64;
11108 break;
11109 case Intrinsic::ppc_altivec_stvebx:
11110 VT = MVT::i8;
11111 break;
11112 case Intrinsic::ppc_altivec_stvehx:
11113 VT = MVT::i16;
11114 break;
11115 case Intrinsic::ppc_altivec_stvewx:
11116 VT = MVT::i32;
11117 break;
11118 }
11119
11120 return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
11121 }
11122
11123 return false;
11124 }
11125
11126 // Return true if there is a nearby consecutive load to the one provided
11127 // (regardless of alignment). We search up and down the chain, looking through
11128 // token factors and other loads (but nothing else). Consequently, a true result
11129 // indicates that it is safe to create a new consecutive load adjacent to the
11130 // load provided.
11131 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
11132 SDValue Chain = LD->getChain();
11133 EVT VT = LD->getMemoryVT();
11134
11135 SmallSet<SDNode *, 16> LoadRoots;
11136 SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
11137 SmallSet<SDNode *, 16> Visited;
11138
11139 // First, search up the chain, branching to follow all token-factor operands.
11140 // If we find a consecutive load, then we're done; otherwise, record all
11141 // nodes just above the top-level loads and token factors.
11142 while (!Queue.empty()) {
11143 SDNode *ChainNext = Queue.pop_back_val();
11144 if (!Visited.insert(ChainNext).second)
11145 continue;
11146
11147 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
11148 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
11149 return true;
11150
11151 if (!Visited.count(ChainLD->getChain().getNode()))
11152 Queue.push_back(ChainLD->getChain().getNode());
11153 } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
11154 for (const SDUse &O : ChainNext->ops())
11155 if (!Visited.count(O.getNode()))
11156 Queue.push_back(O.getNode());
11157 } else
11158 LoadRoots.insert(ChainNext);
11159 }
11160
11161 // Second, search down the chain, starting from the top-level nodes recorded
11162 // in the first phase. These top-level nodes are the nodes just above all
11163 // loads and token factors. Starting with their uses, recursively look through
11164 // all loads (just the chain uses) and token factors to find a consecutive
11165 // load.
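// For example, with the chain graph
//
//        Root
//       /    \
//     LD1    LD2
//       \    /
//    TokenFactor
//         |
//         LD
//
// the first (upward) phase starting at LD visits the TokenFactor, tests LD1
// and LD2, and records Root; the second (downward) phase then walks Root's
// chain uses, catching loads that hang off Root but do not lie on LD's
// upward path.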
11166 Visited.clear(); 11167 Queue.clear(); 11168 11169 for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(), 11170 IE = LoadRoots.end(); I != IE; ++I) { 11171 Queue.push_back(*I); 11172 11173 while (!Queue.empty()) { 11174 SDNode *LoadRoot = Queue.pop_back_val(); 11175 if (!Visited.insert(LoadRoot).second) 11176 continue; 11177 11178 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot)) 11179 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG)) 11180 return true; 11181 11182 for (SDNode::use_iterator UI = LoadRoot->use_begin(), 11183 UE = LoadRoot->use_end(); UI != UE; ++UI) 11184 if (((isa<MemSDNode>(*UI) && 11185 cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) || 11186 UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI)) 11187 Queue.push_back(*UI); 11188 } 11189 } 11190 11191 return false; 11192 } 11193 11194 /// This function is called when we have proved that a SETCC node can be replaced 11195 /// by subtraction (and other supporting instructions) so that the result of 11196 /// comparison is kept in a GPR instead of CR. This function is purely for 11197 /// codegen purposes and has some flags to guide the codegen process. 11198 static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement, 11199 bool Swap, SDLoc &DL, SelectionDAG &DAG) { 11200 assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected."); 11201 11202 // Zero extend the operands to the largest legal integer. Originally, they 11203 // must be of a strictly smaller size. 11204 auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0), 11205 DAG.getConstant(Size, DL, MVT::i32)); 11206 auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1), 11207 DAG.getConstant(Size, DL, MVT::i32)); 11208 11209 // Swap if needed. Depends on the condition code. 11210 if (Swap) 11211 std::swap(Op0, Op1); 11212 11213 // Subtract extended integers. 11214 auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1); 11215 11216 // Move the sign bit to the least significant position and zero out the rest. 11217 // Now the least significant bit carries the result of original comparison. 11218 auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode, 11219 DAG.getConstant(Size - 1, DL, MVT::i32)); 11220 auto Final = Shifted; 11221 11222 // Complement the result if needed. Based on the condition code. 11223 if (Complement) 11224 Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted, 11225 DAG.getConstant(1, DL, MVT::i64)); 11226 11227 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final); 11228 } 11229 11230 SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N, 11231 DAGCombinerInfo &DCI) const { 11232 assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected."); 11233 11234 SelectionDAG &DAG = DCI.DAG; 11235 SDLoc DL(N); 11236 11237 // Size of integers being compared has a critical role in the following 11238 // analysis, so we prefer to do this when all types are legal. 
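// As an illustrative sketch, with i64 as the largest legal integer type, a
// 32-bit (setult %a, %b) is rewritten by generateEquivalentSub into roughly:
//   t0 = zero_extend %a to i64
//   t1 = zero_extend %b to i64
//   t2 = sub t0, t1        ; negative exactly when %a <u %b
//   t3 = srl t2, 63        ; move the sign bit down to bit 0
//   %r = truncate t3 to i1
// The other unsigned predicates are handled by swapping the operands and/or
// complementing the low bit, as the switch below selects.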
11239 if (!DCI.isAfterLegalizeDAG())
11240 return SDValue();
11241
11242 // If all users of the SETCC extend its value to a legal integer type,
11243 // then we replace the SETCC with a subtraction.
11244 for (SDNode::use_iterator UI = N->use_begin(),
11245 UE = N->use_end(); UI != UE; ++UI) {
11246 if (UI->getOpcode() != ISD::ZERO_EXTEND)
11247 return SDValue();
11248 }
11249
11250 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
11251 auto OpSize = N->getOperand(0).getValueSizeInBits();
11252
11253 unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();
11254
11255 if (OpSize < Size) {
11256 switch (CC) {
11257 default: break;
11258 case ISD::SETULT:
11259 return generateEquivalentSub(N, Size, false, false, DL, DAG);
11260 case ISD::SETULE:
11261 return generateEquivalentSub(N, Size, true, true, DL, DAG);
11262 case ISD::SETUGT:
11263 return generateEquivalentSub(N, Size, false, true, DL, DAG);
11264 case ISD::SETUGE:
11265 return generateEquivalentSub(N, Size, true, false, DL, DAG);
11266 }
11267 }
11268
11269 return SDValue();
11270 }
11271
11272 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
11273 DAGCombinerInfo &DCI) const {
11274 SelectionDAG &DAG = DCI.DAG;
11275 SDLoc dl(N);
11276
11277 assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
11278 // If we're tracking CR bits, we need to be careful that we don't have:
11279 // trunc(binary-ops(zext(x), zext(y)))
11280 // or
11281 // trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
11282 // such that we're unnecessarily moving things into GPRs when it would be
11283 // better to keep them in CR bits.
11284
11285 // Note that trunc here can be an actual i1 trunc, or can be the effective
11286 // truncation that comes from a setcc or select_cc.
11287 if (N->getOpcode() == ISD::TRUNCATE &&
11288 N->getValueType(0) != MVT::i1)
11289 return SDValue();
11290
11291 if (N->getOperand(0).getValueType() != MVT::i32 &&
11292 N->getOperand(0).getValueType() != MVT::i64)
11293 return SDValue();
11294
11295 if (N->getOpcode() == ISD::SETCC ||
11296 N->getOpcode() == ISD::SELECT_CC) {
11297 // If we're looking at a comparison, then we need to make sure that the
11298 // high bits (all except for the first) don't affect the result.
11299 ISD::CondCode CC =
11300 cast<CondCodeSDNode>(N->getOperand(
11301 N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
11302 unsigned OpBits = N->getOperand(0).getValueSizeInBits();
11303
11304 if (ISD::isSignedIntSetCC(CC)) {
11305 if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
11306 DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
11307 return SDValue();
11308 } else if (ISD::isUnsignedIntSetCC(CC)) {
11309 if (!DAG.MaskedValueIsZero(N->getOperand(0),
11310 APInt::getHighBitsSet(OpBits, OpBits-1)) ||
11311 !DAG.MaskedValueIsZero(N->getOperand(1),
11312 APInt::getHighBitsSet(OpBits, OpBits-1)))
11313 return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
11314 : SDValue());
11315 } else {
11316 // This is neither a signed nor an unsigned comparison; just make sure
11317 // that the high bits are equal.
11318 KnownBits Op1Known, Op2Known;
11319 DAG.computeKnownBits(N->getOperand(0), Op1Known);
11320 DAG.computeKnownBits(N->getOperand(1), Op2Known);
11321
11322 // We don't really care about what is known about the first bit (if
11323 // anything), so clear it in all masks prior to comparing them.
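// (With bit 0 cleared, the comparison below only requires the two operands
// to have identical known-zero/known-one information in the high bits; the
// low bit may be completely unknown.)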
11324 Op1Known.Zero.clearBit(0); Op1Known.One.clearBit(0); 11325 Op2Known.Zero.clearBit(0); Op2Known.One.clearBit(0); 11326 11327 if (Op1Known.Zero != Op2Known.Zero || Op1Known.One != Op2Known.One) 11328 return SDValue(); 11329 } 11330 } 11331 11332 // We now know that the higher-order bits are irrelevant, we just need to 11333 // make sure that all of the intermediate operations are bit operations, and 11334 // all inputs are extensions. 11335 if (N->getOperand(0).getOpcode() != ISD::AND && 11336 N->getOperand(0).getOpcode() != ISD::OR && 11337 N->getOperand(0).getOpcode() != ISD::XOR && 11338 N->getOperand(0).getOpcode() != ISD::SELECT && 11339 N->getOperand(0).getOpcode() != ISD::SELECT_CC && 11340 N->getOperand(0).getOpcode() != ISD::TRUNCATE && 11341 N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND && 11342 N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND && 11343 N->getOperand(0).getOpcode() != ISD::ANY_EXTEND) 11344 return SDValue(); 11345 11346 if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) && 11347 N->getOperand(1).getOpcode() != ISD::AND && 11348 N->getOperand(1).getOpcode() != ISD::OR && 11349 N->getOperand(1).getOpcode() != ISD::XOR && 11350 N->getOperand(1).getOpcode() != ISD::SELECT && 11351 N->getOperand(1).getOpcode() != ISD::SELECT_CC && 11352 N->getOperand(1).getOpcode() != ISD::TRUNCATE && 11353 N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND && 11354 N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND && 11355 N->getOperand(1).getOpcode() != ISD::ANY_EXTEND) 11356 return SDValue(); 11357 11358 SmallVector<SDValue, 4> Inputs; 11359 SmallVector<SDValue, 8> BinOps, PromOps; 11360 SmallPtrSet<SDNode *, 16> Visited; 11361 11362 for (unsigned i = 0; i < 2; ++i) { 11363 if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 11364 N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 11365 N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 11366 N->getOperand(i).getOperand(0).getValueType() == MVT::i1) || 11367 isa<ConstantSDNode>(N->getOperand(i))) 11368 Inputs.push_back(N->getOperand(i)); 11369 else 11370 BinOps.push_back(N->getOperand(i)); 11371 11372 if (N->getOpcode() == ISD::TRUNCATE) 11373 break; 11374 } 11375 11376 // Visit all inputs, collect all binary operations (and, or, xor and 11377 // select) that are all fed by extensions. 11378 while (!BinOps.empty()) { 11379 SDValue BinOp = BinOps.back(); 11380 BinOps.pop_back(); 11381 11382 if (!Visited.insert(BinOp.getNode()).second) 11383 continue; 11384 11385 PromOps.push_back(BinOp); 11386 11387 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 11388 // The condition of the select is not promoted. 
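// (For SELECT that is operand 0; for SELECT_CC, operands 0 and 1 are the
// compared values and operand 4 is the condition code, so only operands 2
// and 3, the selected values, participate in the promotion.)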
11389 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 11390 continue; 11391 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 11392 continue; 11393 11394 if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 11395 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 11396 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 11397 BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) || 11398 isa<ConstantSDNode>(BinOp.getOperand(i))) { 11399 Inputs.push_back(BinOp.getOperand(i)); 11400 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 11401 BinOp.getOperand(i).getOpcode() == ISD::OR || 11402 BinOp.getOperand(i).getOpcode() == ISD::XOR || 11403 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 11404 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC || 11405 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 11406 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 11407 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 11408 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) { 11409 BinOps.push_back(BinOp.getOperand(i)); 11410 } else { 11411 // We have an input that is not an extension or another binary 11412 // operation; we'll abort this transformation. 11413 return SDValue(); 11414 } 11415 } 11416 } 11417 11418 // Make sure that this is a self-contained cluster of operations (which 11419 // is not quite the same thing as saying that everything has only one 11420 // use). 11421 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 11422 if (isa<ConstantSDNode>(Inputs[i])) 11423 continue; 11424 11425 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(), 11426 UE = Inputs[i].getNode()->use_end(); 11427 UI != UE; ++UI) { 11428 SDNode *User = *UI; 11429 if (User != N && !Visited.count(User)) 11430 return SDValue(); 11431 11432 // Make sure that we're not going to promote the non-output-value 11433 // operand(s) or SELECT or SELECT_CC. 11434 // FIXME: Although we could sometimes handle this, and it does occur in 11435 // practice that one of the condition inputs to the select is also one of 11436 // the outputs, we currently can't deal with this. 11437 if (User->getOpcode() == ISD::SELECT) { 11438 if (User->getOperand(0) == Inputs[i]) 11439 return SDValue(); 11440 } else if (User->getOpcode() == ISD::SELECT_CC) { 11441 if (User->getOperand(0) == Inputs[i] || 11442 User->getOperand(1) == Inputs[i]) 11443 return SDValue(); 11444 } 11445 } 11446 } 11447 11448 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 11449 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(), 11450 UE = PromOps[i].getNode()->use_end(); 11451 UI != UE; ++UI) { 11452 SDNode *User = *UI; 11453 if (User != N && !Visited.count(User)) 11454 return SDValue(); 11455 11456 // Make sure that we're not going to promote the non-output-value 11457 // operand(s) or SELECT or SELECT_CC. 11458 // FIXME: Although we could sometimes handle this, and it does occur in 11459 // practice that one of the condition inputs to the select is also one of 11460 // the outputs, we currently can't deal with this. 11461 if (User->getOpcode() == ISD::SELECT) { 11462 if (User->getOperand(0) == PromOps[i]) 11463 return SDValue(); 11464 } else if (User->getOpcode() == ISD::SELECT_CC) { 11465 if (User->getOperand(0) == PromOps[i] || 11466 User->getOperand(1) == PromOps[i]) 11467 return SDValue(); 11468 } 11469 } 11470 } 11471 11472 // Replace all inputs with the extension operand. 
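// For example, an input (zero_extend i1 %c to i32) is replaced everywhere
// by %c itself; the bit operations consuming it are rebuilt with type i1 in
// the loop that follows.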
11473 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 11474 // Constants may have users outside the cluster of to-be-promoted nodes, 11475 // and so we need to replace those as we do the promotions. 11476 if (isa<ConstantSDNode>(Inputs[i])) 11477 continue; 11478 else 11479 DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0)); 11480 } 11481 11482 std::list<HandleSDNode> PromOpHandles; 11483 for (auto &PromOp : PromOps) 11484 PromOpHandles.emplace_back(PromOp); 11485 11486 // Replace all operations (these are all the same, but have a different 11487 // (i1) return type). DAG.getNode will validate that the types of 11488 // a binary operator match, so go through the list in reverse so that 11489 // we've likely promoted both operands first. Any intermediate truncations or 11490 // extensions disappear. 11491 while (!PromOpHandles.empty()) { 11492 SDValue PromOp = PromOpHandles.back().getValue(); 11493 PromOpHandles.pop_back(); 11494 11495 if (PromOp.getOpcode() == ISD::TRUNCATE || 11496 PromOp.getOpcode() == ISD::SIGN_EXTEND || 11497 PromOp.getOpcode() == ISD::ZERO_EXTEND || 11498 PromOp.getOpcode() == ISD::ANY_EXTEND) { 11499 if (!isa<ConstantSDNode>(PromOp.getOperand(0)) && 11500 PromOp.getOperand(0).getValueType() != MVT::i1) { 11501 // The operand is not yet ready (see comment below). 11502 PromOpHandles.emplace_front(PromOp); 11503 continue; 11504 } 11505 11506 SDValue RepValue = PromOp.getOperand(0); 11507 if (isa<ConstantSDNode>(RepValue)) 11508 RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue); 11509 11510 DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue); 11511 continue; 11512 } 11513 11514 unsigned C; 11515 switch (PromOp.getOpcode()) { 11516 default: C = 0; break; 11517 case ISD::SELECT: C = 1; break; 11518 case ISD::SELECT_CC: C = 2; break; 11519 } 11520 11521 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 11522 PromOp.getOperand(C).getValueType() != MVT::i1) || 11523 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 11524 PromOp.getOperand(C+1).getValueType() != MVT::i1)) { 11525 // The to-be-promoted operands of this node have not yet been 11526 // promoted (this should be rare because we're going through the 11527 // list backward, but if one of the operands has several users in 11528 // this cluster of to-be-promoted nodes, it is possible). 11529 PromOpHandles.emplace_front(PromOp); 11530 continue; 11531 } 11532 11533 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 11534 PromOp.getNode()->op_end()); 11535 11536 // If there are any constant inputs, make sure they're replaced now. 11537 for (unsigned i = 0; i < 2; ++i) 11538 if (isa<ConstantSDNode>(Ops[C+i])) 11539 Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]); 11540 11541 DAG.ReplaceAllUsesOfValueWith(PromOp, 11542 DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops)); 11543 } 11544 11545 // Now we're left with the initial truncation itself. 11546 if (N->getOpcode() == ISD::TRUNCATE) 11547 return N->getOperand(0); 11548 11549 // Otherwise, this is a comparison. The operands to be compared have just 11550 // changed type (to i1), but everything else is the same. 11551 return SDValue(N, 0); 11552 } 11553 11554 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N, 11555 DAGCombinerInfo &DCI) const { 11556 SelectionDAG &DAG = DCI.DAG; 11557 SDLoc dl(N); 11558 11559 // If we're tracking CR bits, we need to be careful that we don't have: 11560 // zext(binary-ops(trunc(x), trunc(y))) 11561 // or 11562 // zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...) 
11563 // such that we're unnecessarily moving things into CR bits that can more 11564 // efficiently stay in GPRs. Note that if we're not certain that the high 11565 // bits are set as required by the final extension, we still may need to do 11566 // some masking to get the proper behavior. 11567 11568 // This same functionality is important on PPC64 when dealing with 11569 // 32-to-64-bit extensions; these occur often when 32-bit values are used as 11570 // the return values of functions. Because it is so similar, it is handled 11571 // here as well. 11572 11573 if (N->getValueType(0) != MVT::i32 && 11574 N->getValueType(0) != MVT::i64) 11575 return SDValue(); 11576 11577 if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) || 11578 (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64()))) 11579 return SDValue(); 11580 11581 if (N->getOperand(0).getOpcode() != ISD::AND && 11582 N->getOperand(0).getOpcode() != ISD::OR && 11583 N->getOperand(0).getOpcode() != ISD::XOR && 11584 N->getOperand(0).getOpcode() != ISD::SELECT && 11585 N->getOperand(0).getOpcode() != ISD::SELECT_CC) 11586 return SDValue(); 11587 11588 SmallVector<SDValue, 4> Inputs; 11589 SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps; 11590 SmallPtrSet<SDNode *, 16> Visited; 11591 11592 // Visit all inputs, collect all binary operations (and, or, xor and 11593 // select) that are all fed by truncations. 11594 while (!BinOps.empty()) { 11595 SDValue BinOp = BinOps.back(); 11596 BinOps.pop_back(); 11597 11598 if (!Visited.insert(BinOp.getNode()).second) 11599 continue; 11600 11601 PromOps.push_back(BinOp); 11602 11603 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 11604 // The condition of the select is not promoted. 11605 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 11606 continue; 11607 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 11608 continue; 11609 11610 if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 11611 isa<ConstantSDNode>(BinOp.getOperand(i))) { 11612 Inputs.push_back(BinOp.getOperand(i)); 11613 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 11614 BinOp.getOperand(i).getOpcode() == ISD::OR || 11615 BinOp.getOperand(i).getOpcode() == ISD::XOR || 11616 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 11617 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) { 11618 BinOps.push_back(BinOp.getOperand(i)); 11619 } else { 11620 // We have an input that is not a truncation or another binary 11621 // operation; we'll abort this transformation. 11622 return SDValue(); 11623 } 11624 } 11625 } 11626 11627 // The operands of a select that must be truncated when the select is 11628 // promoted because the operand is actually part of the to-be-promoted set. 11629 DenseMap<SDNode *, EVT> SelectTruncOp[2]; 11630 11631 // Make sure that this is a self-contained cluster of operations (which 11632 // is not quite the same thing as saying that everything has only one 11633 // use). 11634 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 11635 if (isa<ConstantSDNode>(Inputs[i])) 11636 continue; 11637 11638 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(), 11639 UE = Inputs[i].getNode()->use_end(); 11640 UI != UE; ++UI) { 11641 SDNode *User = *UI; 11642 if (User != N && !Visited.count(User)) 11643 return SDValue(); 11644 11645 // If we're going to promote the non-output-value operand(s) or SELECT or 11646 // SELECT_CC, record them for truncation. 
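// (SelectTruncOp[0] and SelectTruncOp[1] remember the original value type
// of a select's condition or compared operand, so that a TRUNCATE back to
// that type can be inserted once the cluster has been promoted.)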
11647 if (User->getOpcode() == ISD::SELECT) { 11648 if (User->getOperand(0) == Inputs[i]) 11649 SelectTruncOp[0].insert(std::make_pair(User, 11650 User->getOperand(0).getValueType())); 11651 } else if (User->getOpcode() == ISD::SELECT_CC) { 11652 if (User->getOperand(0) == Inputs[i]) 11653 SelectTruncOp[0].insert(std::make_pair(User, 11654 User->getOperand(0).getValueType())); 11655 if (User->getOperand(1) == Inputs[i]) 11656 SelectTruncOp[1].insert(std::make_pair(User, 11657 User->getOperand(1).getValueType())); 11658 } 11659 } 11660 } 11661 11662 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 11663 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(), 11664 UE = PromOps[i].getNode()->use_end(); 11665 UI != UE; ++UI) { 11666 SDNode *User = *UI; 11667 if (User != N && !Visited.count(User)) 11668 return SDValue(); 11669 11670 // If we're going to promote the non-output-value operand(s) or SELECT or 11671 // SELECT_CC, record them for truncation. 11672 if (User->getOpcode() == ISD::SELECT) { 11673 if (User->getOperand(0) == PromOps[i]) 11674 SelectTruncOp[0].insert(std::make_pair(User, 11675 User->getOperand(0).getValueType())); 11676 } else if (User->getOpcode() == ISD::SELECT_CC) { 11677 if (User->getOperand(0) == PromOps[i]) 11678 SelectTruncOp[0].insert(std::make_pair(User, 11679 User->getOperand(0).getValueType())); 11680 if (User->getOperand(1) == PromOps[i]) 11681 SelectTruncOp[1].insert(std::make_pair(User, 11682 User->getOperand(1).getValueType())); 11683 } 11684 } 11685 } 11686 11687 unsigned PromBits = N->getOperand(0).getValueSizeInBits(); 11688 bool ReallyNeedsExt = false; 11689 if (N->getOpcode() != ISD::ANY_EXTEND) { 11690 // If all of the inputs are not already sign/zero extended, then 11691 // we'll still need to do that at the end. 11692 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 11693 if (isa<ConstantSDNode>(Inputs[i])) 11694 continue; 11695 11696 unsigned OpBits = 11697 Inputs[i].getOperand(0).getValueSizeInBits(); 11698 assert(PromBits < OpBits && "Truncation not to a smaller bit count?"); 11699 11700 if ((N->getOpcode() == ISD::ZERO_EXTEND && 11701 !DAG.MaskedValueIsZero(Inputs[i].getOperand(0), 11702 APInt::getHighBitsSet(OpBits, 11703 OpBits-PromBits))) || 11704 (N->getOpcode() == ISD::SIGN_EXTEND && 11705 DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) < 11706 (OpBits-(PromBits-1)))) { 11707 ReallyNeedsExt = true; 11708 break; 11709 } 11710 } 11711 } 11712 11713 // Replace all inputs, either with the truncation operand, or a 11714 // truncation or extension to the final output type. 11715 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 11716 // Constant inputs need to be replaced with the to-be-promoted nodes that 11717 // use them because they might have users outside of the cluster of 11718 // promoted nodes. 
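// (They are therefore skipped here; the per-operation loop below instead
// extends or truncates constant operands in place as needed.)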
11719 if (isa<ConstantSDNode>(Inputs[i])) 11720 continue; 11721 11722 SDValue InSrc = Inputs[i].getOperand(0); 11723 if (Inputs[i].getValueType() == N->getValueType(0)) 11724 DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc); 11725 else if (N->getOpcode() == ISD::SIGN_EXTEND) 11726 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 11727 DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0))); 11728 else if (N->getOpcode() == ISD::ZERO_EXTEND) 11729 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 11730 DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0))); 11731 else 11732 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 11733 DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0))); 11734 } 11735 11736 std::list<HandleSDNode> PromOpHandles; 11737 for (auto &PromOp : PromOps) 11738 PromOpHandles.emplace_back(PromOp); 11739 11740 // Replace all operations (these are all the same, but have a different 11741 // (promoted) return type). DAG.getNode will validate that the types of 11742 // a binary operator match, so go through the list in reverse so that 11743 // we've likely promoted both operands first. 11744 while (!PromOpHandles.empty()) { 11745 SDValue PromOp = PromOpHandles.back().getValue(); 11746 PromOpHandles.pop_back(); 11747 11748 unsigned C; 11749 switch (PromOp.getOpcode()) { 11750 default: C = 0; break; 11751 case ISD::SELECT: C = 1; break; 11752 case ISD::SELECT_CC: C = 2; break; 11753 } 11754 11755 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 11756 PromOp.getOperand(C).getValueType() != N->getValueType(0)) || 11757 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 11758 PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) { 11759 // The to-be-promoted operands of this node have not yet been 11760 // promoted (this should be rare because we're going through the 11761 // list backward, but if one of the operands has several users in 11762 // this cluster of to-be-promoted nodes, it is possible). 11763 PromOpHandles.emplace_front(PromOp); 11764 continue; 11765 } 11766 11767 // For SELECT and SELECT_CC nodes, we do a similar check for any 11768 // to-be-promoted comparison inputs. 11769 if (PromOp.getOpcode() == ISD::SELECT || 11770 PromOp.getOpcode() == ISD::SELECT_CC) { 11771 if ((SelectTruncOp[0].count(PromOp.getNode()) && 11772 PromOp.getOperand(0).getValueType() != N->getValueType(0)) || 11773 (SelectTruncOp[1].count(PromOp.getNode()) && 11774 PromOp.getOperand(1).getValueType() != N->getValueType(0))) { 11775 PromOpHandles.emplace_front(PromOp); 11776 continue; 11777 } 11778 } 11779 11780 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 11781 PromOp.getNode()->op_end()); 11782 11783 // If this node has constant inputs, then they'll need to be promoted here. 11784 for (unsigned i = 0; i < 2; ++i) { 11785 if (!isa<ConstantSDNode>(Ops[C+i])) 11786 continue; 11787 if (Ops[C+i].getValueType() == N->getValueType(0)) 11788 continue; 11789 11790 if (N->getOpcode() == ISD::SIGN_EXTEND) 11791 Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 11792 else if (N->getOpcode() == ISD::ZERO_EXTEND) 11793 Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 11794 else 11795 Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 11796 } 11797 11798 // If we've promoted the comparison inputs of a SELECT or SELECT_CC, 11799 // truncate them again to the original value type. 
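    // For example, an i1 select condition that was promoted along with this
    // cluster gets an explicit truncation back to i1 re-inserted on
    // operand 0 here.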
11800 if (PromOp.getOpcode() == ISD::SELECT || 11801 PromOp.getOpcode() == ISD::SELECT_CC) { 11802 auto SI0 = SelectTruncOp[0].find(PromOp.getNode()); 11803 if (SI0 != SelectTruncOp[0].end()) 11804 Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]); 11805 auto SI1 = SelectTruncOp[1].find(PromOp.getNode()); 11806 if (SI1 != SelectTruncOp[1].end()) 11807 Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]); 11808 } 11809 11810 DAG.ReplaceAllUsesOfValueWith(PromOp, 11811 DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops)); 11812 } 11813 11814 // Now we're left with the initial extension itself. 11815 if (!ReallyNeedsExt) 11816 return N->getOperand(0); 11817 11818 // To zero extend, just mask off everything except for the first bit (in the 11819 // i1 case). 11820 if (N->getOpcode() == ISD::ZERO_EXTEND) 11821 return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0), 11822 DAG.getConstant(APInt::getLowBitsSet( 11823 N->getValueSizeInBits(0), PromBits), 11824 dl, N->getValueType(0))); 11825 11826 assert(N->getOpcode() == ISD::SIGN_EXTEND && 11827 "Invalid extension type"); 11828 EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout()); 11829 SDValue ShiftCst = 11830 DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy); 11831 return DAG.getNode( 11832 ISD::SRA, dl, N->getValueType(0), 11833 DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst), 11834 ShiftCst); 11835 } 11836 11837 SDValue PPCTargetLowering::combineSetCC(SDNode *N, 11838 DAGCombinerInfo &DCI) const { 11839 assert(N->getOpcode() == ISD::SETCC && 11840 "Should be called with a SETCC node"); 11841 11842 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); 11843 if (CC == ISD::SETNE || CC == ISD::SETEQ) { 11844 SDValue LHS = N->getOperand(0); 11845 SDValue RHS = N->getOperand(1); 11846 11847 // If there is a '0 - y' pattern, canonicalize the pattern to the RHS. 11848 if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) && 11849 LHS.hasOneUse()) 11850 std::swap(LHS, RHS); 11851 11852 // x == 0-y --> x+y == 0 11853 // x != 0-y --> x+y != 0 11854 if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) && 11855 RHS.hasOneUse()) { 11856 SDLoc DL(N); 11857 SelectionDAG &DAG = DCI.DAG; 11858 EVT VT = N->getValueType(0); 11859 EVT OpVT = LHS.getValueType(); 11860 SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1)); 11861 return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC); 11862 } 11863 } 11864 11865 return DAGCombineTruncBoolExt(N, DCI); 11866 } 11867 11868 // Is this an extending load from an f32 to an f64? 11869 static bool isFPExtLoad(SDValue Op) { 11870 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode())) 11871 return LD->getExtensionType() == ISD::EXTLOAD && 11872 Op.getValueType() == MVT::f64; 11873 return false; 11874 } 11875 11876 /// Reduces the number of fp-to-int conversion when building a vector. 11877 /// 11878 /// If this vector is built out of floating to integer conversions, 11879 /// transform it to a vector built out of floating point values followed by a 11880 /// single floating to integer conversion of the vector. 11881 /// Namely (build_vector (fptosi $A), (fptosi $B), ...) 
11882 /// becomes (fptosi (build_vector ($A, $B, ...)))
11883 SDValue PPCTargetLowering::
11884 combineElementTruncationToVectorTruncation(SDNode *N,
11885                                            DAGCombinerInfo &DCI) const {
11886   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
11887          "Should be called with a BUILD_VECTOR node");
11888
11889   SelectionDAG &DAG = DCI.DAG;
11890   SDLoc dl(N);
11891
11892   SDValue FirstInput = N->getOperand(0);
11893   assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
11894          "The input operand must be an fp-to-int conversion.");
11895
11896   // This combine happens after legalization so the fp_to_[su]i nodes are
11897   // already converted to PPCISD nodes.
11898   unsigned FirstConversion = FirstInput.getOperand(0).getOpcode();
11899   if (FirstConversion == PPCISD::FCTIDZ ||
11900       FirstConversion == PPCISD::FCTIDUZ ||
11901       FirstConversion == PPCISD::FCTIWZ ||
11902       FirstConversion == PPCISD::FCTIWUZ) {
11903     bool IsSplat = true;
11904     bool Is32Bit = FirstConversion == PPCISD::FCTIWZ ||
11905                    FirstConversion == PPCISD::FCTIWUZ;
11906     EVT SrcVT = FirstInput.getOperand(0).getValueType();
11907     SmallVector<SDValue, 4> Ops;
11908     EVT TargetVT = N->getValueType(0);
11909     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
11910       SDValue NextOp = N->getOperand(i);
11911       if (NextOp.getOpcode() != PPCISD::MFVSR)
11912         return SDValue();
11913       unsigned NextConversion = NextOp.getOperand(0).getOpcode();
11914       if (NextConversion != FirstConversion)
11915         return SDValue();
11916       // If we are converting to 32-bit integers, we need to add an FP_ROUND.
11917       // This is not valid if the input was originally double precision. It is
11918       // also not profitable to do unless this is an extending load in which
11919       // case doing this combine will allow us to combine consecutive loads.
11920       if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0)))
11921         return SDValue();
11922       if (N->getOperand(i) != FirstInput)
11923         IsSplat = false;
11924     }
11925
11926     // If this is a splat, we leave it as-is since there will be only a single
11927     // fp-to-int conversion followed by a splat of the integer. This is better
11928     // for 32-bit and smaller ints and neutral for 64-bit ints.
11929     if (IsSplat)
11930       return SDValue();
11931
11932     // Now that we know we have the right type of node, get its operands.
11933     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
11934       SDValue In = N->getOperand(i).getOperand(0);
11935       if (Is32Bit) {
11936         // For 32-bit values, we need to add an FP_ROUND node (if we made it
11937         // here, we know that all inputs are extending loads so this is safe).
11938         if (In.isUndef())
11939           Ops.push_back(DAG.getUNDEF(SrcVT));
11940         else {
11941           SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl,
11942                                       MVT::f32, In.getOperand(0),
11943                                       DAG.getIntPtrConstant(1, dl));
11944           Ops.push_back(Trunc);
11945         }
11946       } else
11947         Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
11948     }
11949
11950     unsigned Opcode;
11951     if (FirstConversion == PPCISD::FCTIDZ ||
11952         FirstConversion == PPCISD::FCTIWZ)
11953       Opcode = ISD::FP_TO_SINT;
11954     else
11955       Opcode = ISD::FP_TO_UINT;
11956
11957     EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;
11958     SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
11959     return DAG.getNode(Opcode, dl, TargetVT, BV);
11960   }
11961   return SDValue();
11962 }
11963
11964 /// Reduce the number of loads when building a vector.
11965 ///
11966 /// Building a vector out of multiple loads can be converted to a load
11967 /// of the vector type if the loads are consecutive.
If the loads are 11968 /// consecutive but in descending order, a shuffle is added at the end 11969 /// to reorder the vector. 11970 static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) { 11971 assert(N->getOpcode() == ISD::BUILD_VECTOR && 11972 "Should be called with a BUILD_VECTOR node"); 11973 11974 SDLoc dl(N); 11975 bool InputsAreConsecutiveLoads = true; 11976 bool InputsAreReverseConsecutive = true; 11977 unsigned ElemSize = N->getValueType(0).getScalarSizeInBits() / 8; 11978 SDValue FirstInput = N->getOperand(0); 11979 bool IsRoundOfExtLoad = false; 11980 11981 if (FirstInput.getOpcode() == ISD::FP_ROUND && 11982 FirstInput.getOperand(0).getOpcode() == ISD::LOAD) { 11983 LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0)); 11984 IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD; 11985 } 11986 // Not a build vector of (possibly fp_rounded) loads. 11987 if ((!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) || 11988 N->getNumOperands() == 1) 11989 return SDValue(); 11990 11991 for (int i = 1, e = N->getNumOperands(); i < e; ++i) { 11992 // If any inputs are fp_round(extload), they all must be. 11993 if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND) 11994 return SDValue(); 11995 11996 SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) : 11997 N->getOperand(i); 11998 if (NextInput.getOpcode() != ISD::LOAD) 11999 return SDValue(); 12000 12001 SDValue PreviousInput = 12002 IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1); 12003 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput); 12004 LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput); 12005 12006 // If any inputs are fp_round(extload), they all must be. 12007 if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD) 12008 return SDValue(); 12009 12010 if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG)) 12011 InputsAreConsecutiveLoads = false; 12012 if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG)) 12013 InputsAreReverseConsecutive = false; 12014 12015 // Exit early if the loads are neither consecutive nor reverse consecutive. 12016 if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive) 12017 return SDValue(); 12018 } 12019 12020 assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) && 12021 "The loads cannot be both consecutive and reverse consecutive."); 12022 12023 SDValue FirstLoadOp = 12024 IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput; 12025 SDValue LastLoadOp = 12026 IsRoundOfExtLoad ? 
N->getOperand(N->getNumOperands()-1).getOperand(0) :
12027                        N->getOperand(N->getNumOperands()-1);
12028
12029   LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
12030   LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
12031   if (InputsAreConsecutiveLoads) {
12032     assert(LD1 && "Input needs to be a LoadSDNode.");
12033     return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
12034                        LD1->getBasePtr(), LD1->getPointerInfo(),
12035                        LD1->getAlignment());
12036   }
12037   if (InputsAreReverseConsecutive) {
12038     assert(LDL && "Input needs to be a LoadSDNode.");
12039     SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
12040                                LDL->getBasePtr(), LDL->getPointerInfo(),
12041                                LDL->getAlignment());
12042     SmallVector<int, 16> Ops;
12043     for (int i = N->getNumOperands() - 1; i >= 0; i--)
12044       Ops.push_back(i);
12045
12046     return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
12047                                 DAG.getUNDEF(N->getValueType(0)), Ops);
12048   }
12049   return SDValue();
12050 }
12051
12052 // This function adds the vector_shuffle needed to move the elements of
12053 // the vector extracts into the positions specified by the CorrectElems
12054 // encoding.
12055 static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
12056                                       SDValue Input, uint64_t Elems,
12057                                       uint64_t CorrectElems) {
12058   SDLoc dl(N);
12059
12060   unsigned NumElems = Input.getValueType().getVectorNumElements();
12061   SmallVector<int, 16> ShuffleMask(NumElems, -1);
12062
12063   // Knowing the element indices being extracted from the original
12064   // vector and the order in which they're being inserted, just put
12065   // them at element indices required for the instruction.
12066   for (unsigned i = 0; i < N->getNumOperands(); i++) {
12067     if (DAG.getDataLayout().isLittleEndian())
12068       ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
12069     else
12070       ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
12071     CorrectElems = CorrectElems >> 8;
12072     Elems = Elems >> 8;
12073   }
12074
12075   SDValue Shuffle =
12076       DAG.getVectorShuffle(Input.getValueType(), dl, Input,
12077                            DAG.getUNDEF(Input.getValueType()), ShuffleMask);
12078
12079   EVT Ty = N->getValueType(0);
12080   SDValue BV = DAG.getNode(PPCISD::SExtVElems, dl, Ty, Shuffle);
12081   return BV;
12082 }
12083
12084 // Look for build vector patterns where input operands come from sign
12085 // extended vector_extract elements of specific indices. If the correct indices
12086 // aren't used, add a vector shuffle to fix up the indices and create a new
12087 // PPCISD::SExtVElems node which selects the vector sign extend instructions
12088 // during instruction selection.
12089 static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
12090   // This array encodes the indices that the vector sign extend instructions
12091   // extract from when extending from one type to another for both BE and LE.
12092   // The right nibble of each byte corresponds to the LE indices,
12093   // and the left nibble of each byte corresponds to the BE indices.
12094   // For example: 0x3074B8FC byte->word
12095   // For LE: the allowed indices are: 0x0,0x4,0x8,0xC
12096   // For BE: the allowed indices are: 0x3,0x7,0xB,0xF
12097   // For example: 0x000070F8 byte->double word
12098   // For LE: the allowed indices are: 0x0,0x8
12099   // For BE: the allowed indices are: 0x7,0xF
12100   uint64_t TargetElems[] = {
12101       0x3074B8FC, // b->w
12102       0x000070F8, // b->d
12103       0x10325476, // h->w
12104       0x00003074, // h->d
12105       0x00001032, // w->d
12106   };
12107
12108   uint64_t Elems = 0;
12109   int Index;
12110   SDValue Input;
12111
12112   auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
12113     if (!Op)
12114       return false;
12115     if (Op.getOpcode() != ISD::SIGN_EXTEND &&
12116         Op.getOpcode() != ISD::SIGN_EXTEND_INREG)
12117       return false;
12118
12119     // A SIGN_EXTEND_INREG might be fed by an ANY_EXTEND to produce a value
12120     // of the right width.
12121     SDValue Extract = Op.getOperand(0);
12122     if (Extract.getOpcode() == ISD::ANY_EXTEND)
12123       Extract = Extract.getOperand(0);
12124     if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
12125       return false;
12126
12127     ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
12128     if (!ExtOp)
12129       return false;
12130
12131     Index = ExtOp->getZExtValue();
12132     if (Input && Input != Extract.getOperand(0))
12133       return false;
12134
12135     if (!Input)
12136       Input = Extract.getOperand(0);
12137
12138     Elems = Elems << 8;
12139     Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
12140     Elems |= Index;
12141
12142     return true;
12143   };
12144
12145   // If the build vector operands aren't sign-extended vector extracts
12146   // of the same input vector, then return.
12147   for (unsigned i = 0; i < N->getNumOperands(); i++) {
12148     if (!isSExtOfVecExtract(N->getOperand(i))) {
12149       return SDValue();
12150     }
12151   }
12152
12153   // If the vector extract indices are not correct, add the appropriate
12154   // vector_shuffle.
12155   int TgtElemArrayIdx;
12156   int InputSize = Input.getValueType().getScalarSizeInBits();
12157   int OutputSize = N->getValueType(0).getScalarSizeInBits();
12158   if (InputSize + OutputSize == 40)
12159     TgtElemArrayIdx = 0;
12160   else if (InputSize + OutputSize == 72)
12161     TgtElemArrayIdx = 1;
12162   else if (InputSize + OutputSize == 48)
12163     TgtElemArrayIdx = 2;
12164   else if (InputSize + OutputSize == 80)
12165     TgtElemArrayIdx = 3;
12166   else if (InputSize + OutputSize == 96)
12167     TgtElemArrayIdx = 4;
12168   else
12169     return SDValue();
12170
12171   uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
12172   CorrectElems = DAG.getDataLayout().isLittleEndian()
12173                      ? CorrectElems & 0x0F0F0F0F0F0F0F0F
12174                      : CorrectElems & 0xF0F0F0F0F0F0F0F0;
12175   if (Elems != CorrectElems) {
12176     return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
12177   }
12178
12179   // Regular lowering will catch cases where a shuffle is not needed.
12180   return SDValue();
12181 }
12182
12183 SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
12184                                                  DAGCombinerInfo &DCI) const {
12185   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
12186          "Should be called with a BUILD_VECTOR node");
12187
12188   SelectionDAG &DAG = DCI.DAG;
12189   SDLoc dl(N);
12190
12191   if (!Subtarget.hasVSX())
12192     return SDValue();
12193
12194   // The target-independent DAG combiner will leave a build_vector of
12195   // float-to-int conversions intact. We can generate MUCH better code for
12196   // a float-to-int conversion of a vector of floats.
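  // For example (this runs after legalization, so the conversions appear
  // here as PPCISD nodes):
  //   (build_vector (mfvsr (fctiwz $A)), (mfvsr (fctiwz $B)), ...)
  // can be rewritten as a single vector conversion
  //   (fp_to_sint (build_vector $A, $B, ...))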
12197   SDValue FirstInput = N->getOperand(0);
12198   if (FirstInput.getOpcode() == PPCISD::MFVSR) {
12199     SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
12200     if (Reduced)
12201       return Reduced;
12202   }
12203
12204   // If we're building a vector out of consecutive loads, just load that
12205   // vector type.
12206   SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG);
12207   if (Reduced)
12208     return Reduced;
12209
12210   // If we're building a vector out of extended elements from another vector
12211   // we have P9 vector integer extend instructions. The code assumes legal
12212   // input types (i.e. it can't handle things like v4i16) so do not run before
12213   // legalization.
12214   if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) {
12215     Reduced = combineBVOfVecSExt(N, DAG);
12216     if (Reduced)
12217       return Reduced;
12218   }
12219
12220
12221   if (N->getValueType(0) != MVT::v2f64)
12222     return SDValue();
12223
12224   // Looking for:
12225   // (build_vector ([su]int_to_fp (extractelt 0)), [su]int_to_fp (extractelt 1))
12226   if (FirstInput.getOpcode() != ISD::SINT_TO_FP &&
12227       FirstInput.getOpcode() != ISD::UINT_TO_FP)
12228     return SDValue();
12229   if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
12230       N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
12231     return SDValue();
12232   if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
12233     return SDValue();
12234
12235   SDValue Ext1 = FirstInput.getOperand(0);
12236   SDValue Ext2 = N->getOperand(1).getOperand(0);
12237   if (Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
12238       Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
12239     return SDValue();
12240
12241   ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
12242   ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
12243   if (!Ext1Op || !Ext2Op)
12244     return SDValue();
12245   if (Ext1.getValueType() != MVT::i32 || Ext2.getValueType() != MVT::i32)
12246     return SDValue();
12247   if (Ext1.getOperand(0) != Ext2.getOperand(0))
12248     return SDValue();
12249
12250   int FirstElem = Ext1Op->getZExtValue();
12251   int SecondElem = Ext2Op->getZExtValue();
12252   int SubvecIdx;
12253   if (FirstElem == 0 && SecondElem == 1)
12254     SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
12255   else if (FirstElem == 2 && SecondElem == 3)
12256     SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
12257   else
12258     return SDValue();
12259
12260   SDValue SrcVec = Ext1.getOperand(0);
12261   auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
12262                   PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
12263   return DAG.getNode(NodeType, dl, MVT::v2f64,
12264                      SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
12265 }
12266
12267 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
12268                                               DAGCombinerInfo &DCI) const {
12269   assert((N->getOpcode() == ISD::SINT_TO_FP ||
12270           N->getOpcode() == ISD::UINT_TO_FP) &&
12271          "Need an int -> FP conversion node here");
12272
12273   if (useSoftFloat() || !Subtarget.has64BitSupport())
12274     return SDValue();
12275
12276   SelectionDAG &DAG = DCI.DAG;
12277   SDLoc dl(N);
12278   SDValue Op(N, 0);
12279
12280   // Don't handle ppc_fp128 here, or conversions the hardware cannot
12281   // perform directly.
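  // Concretely: the result type must be f32 or f64, and the integer source
  // must be wider than i1 and no wider than i64.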
12282   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
12283     return SDValue();
12284   if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
12285       Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
12286     return SDValue();
12287
12288   SDValue FirstOperand(Op.getOperand(0));
12289   bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
12290                      (FirstOperand.getValueType() == MVT::i8 ||
12291                       FirstOperand.getValueType() == MVT::i16);
12292   if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
12293     bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
12294     bool DstDouble = Op.getValueType() == MVT::f64;
12295     unsigned ConvOp = Signed ?
12296                       (DstDouble ? PPCISD::FCFID : PPCISD::FCFIDS) :
12297                       (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
12298     SDValue WidthConst =
12299         DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
12300                               dl, false);
12301     LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
12302     SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
12303     SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
12304                                          DAG.getVTList(MVT::f64, MVT::Other),
12305                                          Ops, MVT::i8, LDN->getMemOperand());
12306
12307     // For signed conversion, we need to sign-extend the value in the VSR.
12308     if (Signed) {
12309       SDValue ExtOps[] = { Ld, WidthConst };
12310       SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
12311       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
12312     } else
12313       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
12314   }
12315
12316
12317   // For i32 intermediate values, unfortunately, the conversion functions
12318   // leave the upper 32 bits of the value undefined. Within the set of
12319   // scalar instructions, we have no method for zero- or sign-extending the
12320   // value. Thus, we cannot handle i32 intermediate values here.
12321   if (Op.getOperand(0).getValueType() == MVT::i32)
12322     return SDValue();
12323
12324   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
12325          "UINT_TO_FP is supported only with FPCVT");
12326
12327   // If we have FCFIDS, then use it when converting to single-precision.
12328   // Otherwise, convert to double-precision and then round.
12329   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
12330                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
12331                                                             : PPCISD::FCFIDS)
12332                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
12333                                                             : PPCISD::FCFID);
12334   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
12335                   ? MVT::f32
12336                   : MVT::f64;
12337
12338   // If we're converting from a float to an int, and back to a float again,
12339   // then we don't need the store/load pair at all.
12340   if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
12341        Subtarget.hasFPCVT()) ||
12342       (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
12343     SDValue Src = Op.getOperand(0).getOperand(0);
12344     if (Src.getValueType() == MVT::f32) {
12345       Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
12346       DCI.AddToWorklist(Src.getNode());
12347     } else if (Src.getValueType() != MVT::f64) {
12348       // Make sure that we don't pick up a ppc_fp128 source value.
12349       return SDValue();
12350     }
12351
12352     unsigned FCTOp =
12353         Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ?
PPCISD::FCTIDZ : 12354 PPCISD::FCTIDUZ; 12355 12356 SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src); 12357 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp); 12358 12359 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) { 12360 FP = DAG.getNode(ISD::FP_ROUND, dl, 12361 MVT::f32, FP, DAG.getIntPtrConstant(0, dl)); 12362 DCI.AddToWorklist(FP.getNode()); 12363 } 12364 12365 return FP; 12366 } 12367 12368 return SDValue(); 12369 } 12370 12371 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for 12372 // builtins) into loads with swaps. 12373 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N, 12374 DAGCombinerInfo &DCI) const { 12375 SelectionDAG &DAG = DCI.DAG; 12376 SDLoc dl(N); 12377 SDValue Chain; 12378 SDValue Base; 12379 MachineMemOperand *MMO; 12380 12381 switch (N->getOpcode()) { 12382 default: 12383 llvm_unreachable("Unexpected opcode for little endian VSX load"); 12384 case ISD::LOAD: { 12385 LoadSDNode *LD = cast<LoadSDNode>(N); 12386 Chain = LD->getChain(); 12387 Base = LD->getBasePtr(); 12388 MMO = LD->getMemOperand(); 12389 // If the MMO suggests this isn't a load of a full vector, leave 12390 // things alone. For a built-in, we have to make the change for 12391 // correctness, so if there is a size problem that will be a bug. 12392 if (MMO->getSize() < 16) 12393 return SDValue(); 12394 break; 12395 } 12396 case ISD::INTRINSIC_W_CHAIN: { 12397 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N); 12398 Chain = Intrin->getChain(); 12399 // Similarly to the store case below, Intrin->getBasePtr() doesn't get 12400 // us what we want. Get operand 2 instead. 12401 Base = Intrin->getOperand(2); 12402 MMO = Intrin->getMemOperand(); 12403 break; 12404 } 12405 } 12406 12407 MVT VecTy = N->getValueType(0).getSimpleVT(); 12408 12409 // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is 12410 // aligned and the type is a vector with elements up to 4 bytes 12411 if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment()%16) 12412 && VecTy.getScalarSizeInBits() <= 32 ) { 12413 return SDValue(); 12414 } 12415 12416 SDValue LoadOps[] = { Chain, Base }; 12417 SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl, 12418 DAG.getVTList(MVT::v2f64, MVT::Other), 12419 LoadOps, MVT::v2f64, MMO); 12420 12421 DCI.AddToWorklist(Load.getNode()); 12422 Chain = Load.getValue(1); 12423 SDValue Swap = DAG.getNode( 12424 PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load); 12425 DCI.AddToWorklist(Swap.getNode()); 12426 12427 // Add a bitcast if the resulting load type doesn't match v2f64. 12428 if (VecTy != MVT::v2f64) { 12429 SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap); 12430 DCI.AddToWorklist(N.getNode()); 12431 // Package {bitcast value, swap's chain} to match Load's shape. 12432 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other), 12433 N, Swap.getValue(1)); 12434 } 12435 12436 return Swap; 12437 } 12438 12439 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for 12440 // builtins) into stores with swaps. 
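// For example, on a little-endian subtarget a plain
//   (store v2f64:$v, $ptr)
// becomes
//   (stxvd2x (xxswapd $v), $ptr)
// so that the element order in memory matches big-endian element numbering.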
12441 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
12442                                                DAGCombinerInfo &DCI) const {
12443   SelectionDAG &DAG = DCI.DAG;
12444   SDLoc dl(N);
12445   SDValue Chain;
12446   SDValue Base;
12447   unsigned SrcOpnd;
12448   MachineMemOperand *MMO;
12449
12450   switch (N->getOpcode()) {
12451   default:
12452     llvm_unreachable("Unexpected opcode for little endian VSX store");
12453   case ISD::STORE: {
12454     StoreSDNode *ST = cast<StoreSDNode>(N);
12455     Chain = ST->getChain();
12456     Base = ST->getBasePtr();
12457     MMO = ST->getMemOperand();
12458     SrcOpnd = 1;
12459     // If the MMO suggests this isn't a store of a full vector, leave
12460     // things alone. For a built-in, we have to make the change for
12461     // correctness, so if there is a size problem that will be a bug.
12462     if (MMO->getSize() < 16)
12463       return SDValue();
12464     break;
12465   }
12466   case ISD::INTRINSIC_VOID: {
12467     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
12468     Chain = Intrin->getChain();
12469     // Intrin->getBasePtr() oddly does not get what we want.
12470     Base = Intrin->getOperand(3);
12471     MMO = Intrin->getMemOperand();
12472     SrcOpnd = 2;
12473     break;
12474   }
12475   }
12476
12477   SDValue Src = N->getOperand(SrcOpnd);
12478   MVT VecTy = Src.getValueType().getSimpleVT();
12479
12480   // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the store is
12481   // aligned and the type is a vector with elements up to 4 bytes.
12482   if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment() % 16) &&
12483       VecTy.getScalarSizeInBits() <= 32) {
12484     return SDValue();
12485   }
12486
12487   // All stores are done as v2f64, with a bitcast if needed.
12488   if (VecTy != MVT::v2f64) {
12489     Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
12490     DCI.AddToWorklist(Src.getNode());
12491   }
12492
12493   SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
12494                              DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
12495   DCI.AddToWorklist(Swap.getNode());
12496   Chain = Swap.getValue(1);
12497   SDValue StoreOps[] = { Chain, Swap, Base };
12498   SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
12499                                           DAG.getVTList(MVT::Other),
12500                                           StoreOps, VecTy, MMO);
12501   DCI.AddToWorklist(Store.getNode());
12502   return Store;
12503 }
12504
12505 // Handle DAG combine for STORE (FP_TO_INT F).
12506 SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
12507                                                DAGCombinerInfo &DCI) const {
12508
12509   SelectionDAG &DAG = DCI.DAG;
12510   SDLoc dl(N);
12511   unsigned Opcode = N->getOperand(1).getOpcode();
12512
12513   assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT)
12514          && "Not an FP_TO_INT Instruction!");
12515
12516   SDValue Val = N->getOperand(1).getOperand(0);
12517   EVT Op1VT = N->getOperand(1).getValueType();
12518   EVT ResVT = Val.getValueType();
12519
12520   // Floating point types smaller than 32 bits are not legal on Power.
12521   if (ResVT.getScalarSizeInBits() < 32)
12522     return SDValue();
12523
12524   // Only perform combine for conversion to i64/i32 or power9 i16/i8.
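  // The conversion result is kept in a VSR and stored directly from there
  // via PPCISD::ST_VSR_SCAL_INT, avoiding a round trip through a GPR.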
12525   bool ValidTypeForStoreFltAsInt =
12526       (Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
12527        (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));
12528
12529   if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Altivec() ||
12530       cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
12531     return SDValue();
12532
12533   // Extend f32 values to f64.
12534   if (ResVT.getScalarSizeInBits() == 32) {
12535     Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
12536     DCI.AddToWorklist(Val.getNode());
12537   }
12538
12539   // Set signed or unsigned conversion opcode.
12540   unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT) ?
12541                         PPCISD::FP_TO_SINT_IN_VSR :
12542                         PPCISD::FP_TO_UINT_IN_VSR;
12543
12544   Val = DAG.getNode(ConvOpcode,
12545                     dl, ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val);
12546   DCI.AddToWorklist(Val.getNode());
12547
12548   // Set number of bytes being converted.
12549   unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8;
12550   SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2),
12551                     DAG.getIntPtrConstant(ByteSize, dl, false),
12552                     DAG.getValueType(Op1VT) };
12553
12554   Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl,
12555                                 DAG.getVTList(MVT::Other), Ops,
12556                                 cast<StoreSDNode>(N)->getMemoryVT(),
12557                                 cast<StoreSDNode>(N)->getMemOperand());
12558
12559   DCI.AddToWorklist(Val.getNode());
12560   return Val;
12561 }
12562
12563 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
12564                                              DAGCombinerInfo &DCI) const {
12565   SelectionDAG &DAG = DCI.DAG;
12566   SDLoc dl(N);
12567   switch (N->getOpcode()) {
12568   default: break;
12569   case ISD::ADD:
12570     return combineADD(N, DCI);
12571   case ISD::SHL:
12572     return combineSHL(N, DCI);
12573   case ISD::SRA:
12574     return combineSRA(N, DCI);
12575   case ISD::SRL:
12576     return combineSRL(N, DCI);
12577   case PPCISD::SHL:
12578     if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
12579       return N->getOperand(0);
12580     break;
12581   case PPCISD::SRL:
12582     if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
12583       return N->getOperand(0);
12584     break;
12585   case PPCISD::SRA:
12586     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
12587       if (C->isNullValue() ||   //  0 >>s V -> 0.
12588           C->isAllOnesValue())  // -1 >>s V -> -1.
12589         return N->getOperand(0);
12590     }
12591     break;
12592   case ISD::SIGN_EXTEND:
12593   case ISD::ZERO_EXTEND:
12594   case ISD::ANY_EXTEND:
12595     return DAGCombineExtBoolTrunc(N, DCI);
12596   case ISD::TRUNCATE:
12597     return combineTRUNCATE(N, DCI);
12598   case ISD::SETCC:
12599     if (SDValue CSCC = combineSetCC(N, DCI))
12600       return CSCC;
12601     LLVM_FALLTHROUGH;
12602   case ISD::SELECT_CC:
12603     return DAGCombineTruncBoolExt(N, DCI);
12604   case ISD::SINT_TO_FP:
12605   case ISD::UINT_TO_FP:
12606     return combineFPToIntToFP(N, DCI);
12607   case ISD::STORE: {
12608
12609     EVT Op1VT = N->getOperand(1).getValueType();
12610     unsigned Opcode = N->getOperand(1).getOpcode();
12611
12612     if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) {
12613       SDValue Val = combineStoreFPToInt(N, DCI);
12614       if (Val)
12615         return Val;
12616     }
12617
12618     // Turn STORE (BSWAP) -> sthbrx/stwbrx.
12619     if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&
12620         N->getOperand(1).getNode()->hasOneUse() &&
12621         (Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
12622          (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {
12623
12624       // STBRX can only handle simple types, and it makes no sense to store
12625       // fewer than two bytes in byte-reversed order.
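      // For example, (store (bswap i32:$v), $ptr) is selected as stwbrx,
      // and an i16 bswap store as sthbrx.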
12626       EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
12627       if (mVT.isExtended() || mVT.getSizeInBits() < 16)
12628         break;
12629
12630       SDValue BSwapOp = N->getOperand(1).getOperand(0);
12631       // Do an any-extend to 32 bits if this is a half-word input.
12632       if (BSwapOp.getValueType() == MVT::i16)
12633         BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
12634
12635       // If the type of the BSWAP operand is wider than the stored memory
12636       // width, it needs to be shifted right before STBRX.
12637       if (Op1VT.bitsGT(mVT)) {
12638         int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
12639         BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
12640                               DAG.getConstant(Shift, dl, MVT::i32));
12641         // Need to truncate if this is a bswap of i64 stored as i32/i16.
12642         if (Op1VT == MVT::i64)
12643           BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
12644       }
12645
12646       SDValue Ops[] = {
12647         N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
12648       };
12649       return
12650         DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
12651                                 Ops, cast<StoreSDNode>(N)->getMemoryVT(),
12652                                 cast<StoreSDNode>(N)->getMemOperand());
12653     }
12654
12655     // STORE Constant:i32<0> -> STORE<trunc to i32> Constant:i64<0>
12656     // This can increase the chance of CSEing constant construction.
12657     if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
12658         isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
12659       // Need to sign-extend to 64 bits to handle negative values.
12660       EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
12661       uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
12662                                     MemVT.getSizeInBits());
12663       SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);
12664
12665       // DAG.getTruncStore() can't be used here because it doesn't accept
12666       // the general (base + offset) addressing mode.
12667       // So we use UpdateNodeOperands and setTruncatingStore instead.
12668       DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
12669                              N->getOperand(3));
12670       cast<StoreSDNode>(N)->setTruncatingStore(true);
12671       return SDValue(N, 0);
12672     }
12673
12674     // For little endian, VSX stores require generating xxswapd/stxvd2x.
12675     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
12676     if (Op1VT.isSimple()) {
12677       MVT StoreVT = Op1VT.getSimpleVT();
12678       if (Subtarget.needsSwapsForVSXMemOps() &&
12679           (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
12680            StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
12681         return expandVSXStoreForLE(N, DCI);
12682     }
12683     break;
12684   }
12685   case ISD::LOAD: {
12686     LoadSDNode *LD = cast<LoadSDNode>(N);
12687     EVT VT = LD->getValueType(0);
12688
12689     // For little endian, VSX loads require generating lxvd2x/xxswapd.
12690     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
12691     if (VT.isSimple()) {
12692       MVT LoadVT = VT.getSimpleVT();
12693       if (Subtarget.needsSwapsForVSXMemOps() &&
12694           (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
12695            LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
12696         return expandVSXLoadForLE(N, DCI);
12697     }
12698
12699     // We sometimes end up with a 64-bit integer load, from which we extract
12700     // two single-precision floating-point numbers. This happens with
12701     // std::complex<float>, and other similar structures, because of the way we
12702     // canonicalize structure copies.
However, if we lack direct moves, 12703 // then the final bitcasts from the extracted integer values to the 12704 // floating-point numbers turn into store/load pairs. Even with direct moves, 12705 // just loading the two floating-point numbers is likely better. 12706 auto ReplaceTwoFloatLoad = [&]() { 12707 if (VT != MVT::i64) 12708 return false; 12709 12710 if (LD->getExtensionType() != ISD::NON_EXTLOAD || 12711 LD->isVolatile()) 12712 return false; 12713 12714 // We're looking for a sequence like this: 12715 // t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64 12716 // t16: i64 = srl t13, Constant:i32<32> 12717 // t17: i32 = truncate t16 12718 // t18: f32 = bitcast t17 12719 // t19: i32 = truncate t13 12720 // t20: f32 = bitcast t19 12721 12722 if (!LD->hasNUsesOfValue(2, 0)) 12723 return false; 12724 12725 auto UI = LD->use_begin(); 12726 while (UI.getUse().getResNo() != 0) ++UI; 12727 SDNode *Trunc = *UI++; 12728 while (UI.getUse().getResNo() != 0) ++UI; 12729 SDNode *RightShift = *UI; 12730 if (Trunc->getOpcode() != ISD::TRUNCATE) 12731 std::swap(Trunc, RightShift); 12732 12733 if (Trunc->getOpcode() != ISD::TRUNCATE || 12734 Trunc->getValueType(0) != MVT::i32 || 12735 !Trunc->hasOneUse()) 12736 return false; 12737 if (RightShift->getOpcode() != ISD::SRL || 12738 !isa<ConstantSDNode>(RightShift->getOperand(1)) || 12739 RightShift->getConstantOperandVal(1) != 32 || 12740 !RightShift->hasOneUse()) 12741 return false; 12742 12743 SDNode *Trunc2 = *RightShift->use_begin(); 12744 if (Trunc2->getOpcode() != ISD::TRUNCATE || 12745 Trunc2->getValueType(0) != MVT::i32 || 12746 !Trunc2->hasOneUse()) 12747 return false; 12748 12749 SDNode *Bitcast = *Trunc->use_begin(); 12750 SDNode *Bitcast2 = *Trunc2->use_begin(); 12751 12752 if (Bitcast->getOpcode() != ISD::BITCAST || 12753 Bitcast->getValueType(0) != MVT::f32) 12754 return false; 12755 if (Bitcast2->getOpcode() != ISD::BITCAST || 12756 Bitcast2->getValueType(0) != MVT::f32) 12757 return false; 12758 12759 if (Subtarget.isLittleEndian()) 12760 std::swap(Bitcast, Bitcast2); 12761 12762 // Bitcast has the second float (in memory-layout order) and Bitcast2 12763 // has the first one. 12764 12765 SDValue BasePtr = LD->getBasePtr(); 12766 if (LD->isIndexed()) { 12767 assert(LD->getAddressingMode() == ISD::PRE_INC && 12768 "Non-pre-inc AM on PPC?"); 12769 BasePtr = 12770 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 12771 LD->getOffset()); 12772 } 12773 12774 auto MMOFlags = 12775 LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile; 12776 SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr, 12777 LD->getPointerInfo(), LD->getAlignment(), 12778 MMOFlags, LD->getAAInfo()); 12779 SDValue AddPtr = 12780 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), 12781 BasePtr, DAG.getIntPtrConstant(4, dl)); 12782 SDValue FloatLoad2 = DAG.getLoad( 12783 MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr, 12784 LD->getPointerInfo().getWithOffset(4), 12785 MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo()); 12786 12787 if (LD->isIndexed()) { 12788 // Note that DAGCombine should re-form any pre-increment load(s) from 12789 // what is produced here if that makes sense. 12790 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr); 12791 } 12792 12793 DCI.CombineTo(Bitcast2, FloatLoad); 12794 DCI.CombineTo(Bitcast, FloatLoad2); 12795 12796 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 
2 : 1), 12797 SDValue(FloatLoad2.getNode(), 1)); 12798 return true; 12799 }; 12800 12801 if (ReplaceTwoFloatLoad()) 12802 return SDValue(N, 0); 12803 12804 EVT MemVT = LD->getMemoryVT(); 12805 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext()); 12806 unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty); 12807 Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext()); 12808 unsigned ScalarABIAlignment = DAG.getDataLayout().getABITypeAlignment(STy); 12809 if (LD->isUnindexed() && VT.isVector() && 12810 ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) && 12811 // P8 and later hardware should just use LOAD. 12812 !Subtarget.hasP8Vector() && (VT == MVT::v16i8 || VT == MVT::v8i16 || 12813 VT == MVT::v4i32 || VT == MVT::v4f32)) || 12814 (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) && 12815 LD->getAlignment() >= ScalarABIAlignment)) && 12816 LD->getAlignment() < ABIAlignment) { 12817 // This is a type-legal unaligned Altivec or QPX load. 12818 SDValue Chain = LD->getChain(); 12819 SDValue Ptr = LD->getBasePtr(); 12820 bool isLittleEndian = Subtarget.isLittleEndian(); 12821 12822 // This implements the loading of unaligned vectors as described in 12823 // the venerable Apple Velocity Engine overview. Specifically: 12824 // https://developer.apple.com/hardwaredrivers/ve/alignment.html 12825 // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html 12826 // 12827 // The general idea is to expand a sequence of one or more unaligned 12828 // loads into an alignment-based permutation-control instruction (lvsl 12829 // or lvsr), a series of regular vector loads (which always truncate 12830 // their input address to an aligned address), and a series of 12831 // permutations. The results of these permutations are the requested 12832 // loaded values. The trick is that the last "extra" load is not taken 12833 // from the address you might suspect (sizeof(vector) bytes after the 12834 // last requested load), but rather sizeof(vector) - 1 bytes after the 12835 // last requested vector. The point of this is to avoid a page fault if 12836 // the base address happened to be aligned. This works because if the 12837 // base address is aligned, then adding less than a full vector length 12838 // will cause the last vector in the sequence to be (re)loaded. 12839 // Otherwise, the next vector will be fetched as you might suspect was 12840 // necessary. 12841 12842 // We might be able to reuse the permutation generation from 12843 // a different base address offset from this one by an aligned amount. 12844 // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this 12845 // optimization later. 12846 Intrinsic::ID Intr, IntrLD, IntrPerm; 12847 MVT PermCntlTy, PermTy, LDTy; 12848 if (Subtarget.hasAltivec()) { 12849 Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr : 12850 Intrinsic::ppc_altivec_lvsl; 12851 IntrLD = Intrinsic::ppc_altivec_lvx; 12852 IntrPerm = Intrinsic::ppc_altivec_vperm; 12853 PermCntlTy = MVT::v16i8; 12854 PermTy = MVT::v4i32; 12855 LDTy = MVT::v4i32; 12856 } else { 12857 Intr = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld : 12858 Intrinsic::ppc_qpx_qvlpcls; 12859 IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd : 12860 Intrinsic::ppc_qpx_qvlfs; 12861 IntrPerm = Intrinsic::ppc_qpx_qvfperm; 12862 PermCntlTy = MVT::v4f64; 12863 PermTy = MVT::v4f64; 12864 LDTy = MemVT.getSimpleVT(); 12865 } 12866 12867 SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy); 12868 12869 // Create the new MMO for the new base load. 
It is like the original MMO, 12870 // but represents an area in memory almost twice the vector size centered 12871 // on the original address. If the address is unaligned, we might start 12872 // reading up to (sizeof(vector)-1) bytes below the address of the 12873 // original unaligned load. 12874 MachineFunction &MF = DAG.getMachineFunction(); 12875 MachineMemOperand *BaseMMO = 12876 MF.getMachineMemOperand(LD->getMemOperand(), 12877 -(long)MemVT.getStoreSize()+1, 12878 2*MemVT.getStoreSize()-1); 12879 12880 // Create the new base load. 12881 SDValue LDXIntID = 12882 DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout())); 12883 SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr }; 12884 SDValue BaseLoad = 12885 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 12886 DAG.getVTList(PermTy, MVT::Other), 12887 BaseLoadOps, LDTy, BaseMMO); 12888 12889 // Note that the value of IncOffset (which is provided to the next 12890 // load's pointer info offset value, and thus used to calculate the 12891 // alignment), and the value of IncValue (which is actually used to 12892 // increment the pointer value) are different! This is because we 12893 // require the next load to appear to be aligned, even though it 12894 // is actually offset from the base pointer by a lesser amount. 12895 int IncOffset = VT.getSizeInBits() / 8; 12896 int IncValue = IncOffset; 12897 12898 // Walk (both up and down) the chain looking for another load at the real 12899 // (aligned) offset (the alignment of the other load does not matter in 12900 // this case). If found, then do not use the offset reduction trick, as 12901 // that will prevent the loads from being later combined (as they would 12902 // otherwise be duplicates). 12903 if (!findConsecutiveLoad(LD, DAG)) 12904 --IncValue; 12905 12906 SDValue Increment = 12907 DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout())); 12908 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 12909 12910 MachineMemOperand *ExtraMMO = 12911 MF.getMachineMemOperand(LD->getMemOperand(), 12912 1, 2*MemVT.getStoreSize()-1); 12913 SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr }; 12914 SDValue ExtraLoad = 12915 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 12916 DAG.getVTList(PermTy, MVT::Other), 12917 ExtraLoadOps, LDTy, ExtraMMO); 12918 12919 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 12920 BaseLoad.getValue(1), ExtraLoad.getValue(1)); 12921 12922 // Because vperm has a big-endian bias, we must reverse the order 12923 // of the input vectors and complement the permute control vector 12924 // when generating little endian code. We have already handled the 12925 // latter by using lvsr instead of lvsl, so just reverse BaseLoad 12926 // and ExtraLoad here. 12927 SDValue Perm; 12928 if (isLittleEndian) 12929 Perm = BuildIntrinsicOp(IntrPerm, 12930 ExtraLoad, BaseLoad, PermCntl, DAG, dl); 12931 else 12932 Perm = BuildIntrinsicOp(IntrPerm, 12933 BaseLoad, ExtraLoad, PermCntl, DAG, dl); 12934 12935 if (VT != PermTy) 12936 Perm = Subtarget.hasAltivec() ? 12937 DAG.getNode(ISD::BITCAST, dl, VT, Perm) : 12938 DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX 12939 DAG.getTargetConstant(1, dl, MVT::i64)); 12940 // second argument is 1 because this rounding 12941 // is always exact. 12942 12943 // The output of the permutation is our loaded result, the TokenFactor is 12944 // our new chain. 
12945 DCI.CombineTo(N, Perm, TF); 12946 return SDValue(N, 0); 12947 } 12948 } 12949 break; 12950 case ISD::INTRINSIC_WO_CHAIN: { 12951 bool isLittleEndian = Subtarget.isLittleEndian(); 12952 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 12953 Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr 12954 : Intrinsic::ppc_altivec_lvsl); 12955 if ((IID == Intr || 12956 IID == Intrinsic::ppc_qpx_qvlpcld || 12957 IID == Intrinsic::ppc_qpx_qvlpcls) && 12958 N->getOperand(1)->getOpcode() == ISD::ADD) { 12959 SDValue Add = N->getOperand(1); 12960 12961 int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ? 12962 5 /* 32 byte alignment */ : 4 /* 16 byte alignment */; 12963 12964 if (DAG.MaskedValueIsZero(Add->getOperand(1), 12965 APInt::getAllOnesValue(Bits /* alignment */) 12966 .zext(Add.getScalarValueSizeInBits()))) { 12967 SDNode *BasePtr = Add->getOperand(0).getNode(); 12968 for (SDNode::use_iterator UI = BasePtr->use_begin(), 12969 UE = BasePtr->use_end(); 12970 UI != UE; ++UI) { 12971 if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 12972 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) { 12973 // We've found another LVSL/LVSR, and this address is an aligned 12974 // multiple of that one. The results will be the same, so use the 12975 // one we've just found instead. 12976 12977 return SDValue(*UI, 0); 12978 } 12979 } 12980 } 12981 12982 if (isa<ConstantSDNode>(Add->getOperand(1))) { 12983 SDNode *BasePtr = Add->getOperand(0).getNode(); 12984 for (SDNode::use_iterator UI = BasePtr->use_begin(), 12985 UE = BasePtr->use_end(); UI != UE; ++UI) { 12986 if (UI->getOpcode() == ISD::ADD && 12987 isa<ConstantSDNode>(UI->getOperand(1)) && 12988 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() - 12989 cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) % 12990 (1ULL << Bits) == 0) { 12991 SDNode *OtherAdd = *UI; 12992 for (SDNode::use_iterator VI = OtherAdd->use_begin(), 12993 VE = OtherAdd->use_end(); VI != VE; ++VI) { 12994 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 12995 cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) { 12996 return SDValue(*VI, 0); 12997 } 12998 } 12999 } 13000 } 13001 } 13002 } 13003 13004 // Combine vmaxsw/h/b(a, a's negation) to abs(a) 13005 // Expose the vabsduw/h/b opportunity for down stream 13006 if (!DCI.isAfterLegalizeDAG() && Subtarget.hasP9Altivec() && 13007 (IID == Intrinsic::ppc_altivec_vmaxsw || 13008 IID == Intrinsic::ppc_altivec_vmaxsh || 13009 IID == Intrinsic::ppc_altivec_vmaxsb)) { 13010 SDValue V1 = N->getOperand(1); 13011 SDValue V2 = N->getOperand(2); 13012 if ((V1.getSimpleValueType() == MVT::v4i32 || 13013 V1.getSimpleValueType() == MVT::v8i16 || 13014 V1.getSimpleValueType() == MVT::v16i8) && 13015 V1.getSimpleValueType() == V2.getSimpleValueType()) { 13016 // (0-a, a) 13017 if (V1.getOpcode() == ISD::SUB && 13018 ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) && 13019 V1.getOperand(1) == V2) { 13020 return DAG.getNode(ISD::ABS, dl, V2.getValueType(), V2); 13021 } 13022 // (a, 0-a) 13023 if (V2.getOpcode() == ISD::SUB && 13024 ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) && 13025 V2.getOperand(1) == V1) { 13026 return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1); 13027 } 13028 // (x-y, y-x) 13029 if (V1.getOpcode() == ISD::SUB && V2.getOpcode() == ISD::SUB && 13030 V1.getOperand(0) == V2.getOperand(1) && 13031 V1.getOperand(1) == V2.getOperand(0)) { 13032 return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1); 13033 } 13034 } 13035 } 13036 } 13037 13038 
break; 13039 case ISD::INTRINSIC_W_CHAIN: 13040 // For little endian, VSX loads require generating lxvd2x/xxswapd. 13041 // Not needed on ISA 3.0 based CPUs since we have a non-permuting load. 13042 if (Subtarget.needsSwapsForVSXMemOps()) { 13043 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 13044 default: 13045 break; 13046 case Intrinsic::ppc_vsx_lxvw4x: 13047 case Intrinsic::ppc_vsx_lxvd2x: 13048 return expandVSXLoadForLE(N, DCI); 13049 } 13050 } 13051 break; 13052 case ISD::INTRINSIC_VOID: 13053 // For little endian, VSX stores require generating xxswapd/stxvd2x. 13054 // Not needed on ISA 3.0 based CPUs since we have a non-permuting store. 13055 if (Subtarget.needsSwapsForVSXMemOps()) { 13056 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 13057 default: 13058 break; 13059 case Intrinsic::ppc_vsx_stxvw4x: 13060 case Intrinsic::ppc_vsx_stxvd2x: 13061 return expandVSXStoreForLE(N, DCI); 13062 } 13063 } 13064 break; 13065 case ISD::BSWAP: 13066 // Turn BSWAP (LOAD) -> lhbrx/lwbrx. 13067 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) && 13068 N->getOperand(0).hasOneUse() && 13069 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 || 13070 (Subtarget.hasLDBRX() && Subtarget.isPPC64() && 13071 N->getValueType(0) == MVT::i64))) { 13072 SDValue Load = N->getOperand(0); 13073 LoadSDNode *LD = cast<LoadSDNode>(Load); 13074 // Create the byte-swapping load. 13075 SDValue Ops[] = { 13076 LD->getChain(), // Chain 13077 LD->getBasePtr(), // Ptr 13078 DAG.getValueType(N->getValueType(0)) // VT 13079 }; 13080 SDValue BSLoad = 13081 DAG.getMemIntrinsicNode(PPCISD::LBRX, dl, 13082 DAG.getVTList(N->getValueType(0) == MVT::i64 ? 13083 MVT::i64 : MVT::i32, MVT::Other), 13084 Ops, LD->getMemoryVT(), LD->getMemOperand()); 13085 13086 // If this is an i16 load, insert the truncate. 13087 SDValue ResVal = BSLoad; 13088 if (N->getValueType(0) == MVT::i16) 13089 ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad); 13090 13091 // First, combine the bswap away. This makes the value produced by the 13092 // load dead. 13093 DCI.CombineTo(N, ResVal); 13094 13095 // Next, combine the load away, we give it a bogus result value but a real 13096 // chain result. The result value is dead because the bswap is dead. 13097 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1)); 13098 13099 // Return N so it doesn't get rechecked! 13100 return SDValue(N, 0); 13101 } 13102 break; 13103 case PPCISD::VCMP: 13104 // If a VCMPo node already exists with exactly the same operands as this 13105 // node, use its result instead of this node (VCMPo computes both a CR6 and 13106 // a normal output). 13107 // 13108 if (!N->getOperand(0).hasOneUse() && 13109 !N->getOperand(1).hasOneUse() && 13110 !N->getOperand(2).hasOneUse()) { 13111 13112 // Scan all of the users of the LHS, looking for VCMPo's that match. 13113 SDNode *VCMPoNode = nullptr; 13114 13115 SDNode *LHSN = N->getOperand(0).getNode(); 13116 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end(); 13117 UI != E; ++UI) 13118 if (UI->getOpcode() == PPCISD::VCMPo && 13119 UI->getOperand(1) == N->getOperand(1) && 13120 UI->getOperand(2) == N->getOperand(2) && 13121 UI->getOperand(0) == N->getOperand(0)) { 13122 VCMPoNode = *UI; 13123 break; 13124 } 13125 13126 // If there is no VCMPo node, or if the flag value has a single use, don't 13127 // transform this. 13128 if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1)) 13129 break; 13130 13131 // Look at the (necessarily single) use of the flag value. 
If it has a 13132 // chain, this transformation is more complex. Note that multiple things 13133 // could use the value result, which we should ignore. 13134 SDNode *FlagUser = nullptr; 13135 for (SDNode::use_iterator UI = VCMPoNode->use_begin(); 13136 FlagUser == nullptr; ++UI) { 13137 assert(UI != VCMPoNode->use_end() && "Didn't find user!"); 13138 SDNode *User = *UI; 13139 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) { 13140 if (User->getOperand(i) == SDValue(VCMPoNode, 1)) { 13141 FlagUser = User; 13142 break; 13143 } 13144 } 13145 } 13146 13147 // If the user is a MFOCRF instruction, we know this is safe. 13148 // Otherwise we give up for right now. 13149 if (FlagUser->getOpcode() == PPCISD::MFOCRF) 13150 return SDValue(VCMPoNode, 0); 13151 } 13152 break; 13153 case ISD::BRCOND: { 13154 SDValue Cond = N->getOperand(1); 13155 SDValue Target = N->getOperand(2); 13156 13157 if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN && 13158 cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() == 13159 Intrinsic::ppc_is_decremented_ctr_nonzero) { 13160 13161 // We now need to make the intrinsic dead (it cannot be instruction 13162 // selected). 13163 DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0)); 13164 assert(Cond.getNode()->hasOneUse() && 13165 "Counter decrement has more than one use"); 13166 13167 return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other, 13168 N->getOperand(0), Target); 13169 } 13170 } 13171 break; 13172 case ISD::BR_CC: { 13173 // If this is a branch on an altivec predicate comparison, lower this so 13174 // that we don't have to do a MFOCRF: instead, branch directly on CR6. This 13175 // lowering is done pre-legalize, because the legalizer lowers the predicate 13176 // compare down to code that is difficult to reassemble. 13177 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 13178 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3); 13179 13180 // Sometimes the promoted value of the intrinsic is ANDed by some non-zero 13181 // value. If so, pass-through the AND to get to the intrinsic. 13182 if (LHS.getOpcode() == ISD::AND && 13183 LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN && 13184 cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() == 13185 Intrinsic::ppc_is_decremented_ctr_nonzero && 13186 isa<ConstantSDNode>(LHS.getOperand(1)) && 13187 !isNullConstant(LHS.getOperand(1))) 13188 LHS = LHS.getOperand(0); 13189 13190 if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN && 13191 cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() == 13192 Intrinsic::ppc_is_decremented_ctr_nonzero && 13193 isa<ConstantSDNode>(RHS)) { 13194 assert((CC == ISD::SETEQ || CC == ISD::SETNE) && 13195 "Counter decrement comparison is not EQ or NE"); 13196 13197 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 13198 bool isBDNZ = (CC == ISD::SETEQ && Val) || 13199 (CC == ISD::SETNE && !Val); 13200 13201 // We now need to make the intrinsic dead (it cannot be instruction 13202 // selected). 13203 DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0)); 13204 assert(LHS.getNode()->hasOneUse() && 13205 "Counter decrement has more than one use"); 13206 13207 return DAG.getNode(isBDNZ ? 
                         N->getOperand(0), N->getOperand(4));
    }

    int CompareOpc;
    bool isDot;

    if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
        isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
        getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
      assert(isDot && "Can't compare against a vector result!");

      // If this is a comparison against something other than 0/1, then we
      // know that the condition is never/always true.
      unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
      if (Val != 0 && Val != 1) {
        if (CC == ISD::SETEQ)  // Cond never true, remove branch.
          return N->getOperand(0);
        // Always !=, turn it into an unconditional branch.
        return DAG.getNode(ISD::BR, dl, MVT::Other,
                           N->getOperand(0), N->getOperand(4));
      }

      bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);

      // Create the PPCISD altivec 'dot' comparison node.
      SDValue Ops[] = {
        LHS.getOperand(2),  // LHS of compare
        LHS.getOperand(3),  // RHS of compare
        DAG.getConstant(CompareOpc, dl, MVT::i32)
      };
      EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
      SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);

      // Unpack the result based on how the target uses it.
      PPC::Predicate CompOpc;
      switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
      default:  // Can't happen, don't crash on invalid number though.
      case 0:   // Branch on the value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
        break;
      case 1:   // Branch on the inverted value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
        break;
      case 2:   // Branch on the value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
        break;
      case 3:   // Branch on the inverted value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
        break;
      }

      return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
                         DAG.getConstant(CompOpc, dl, MVT::i32),
                         DAG.getRegister(PPC::CR6, MVT::i32),
                         N->getOperand(4), CompNode.getValue(1));
    }
    break;
  }
  case ISD::BUILD_VECTOR:
    return DAGCombineBuildVector(N, DCI);
  case ISD::ABS:
    return combineABS(N, DCI);
  }

  return SDValue();
}

SDValue
PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                 SelectionDAG &DAG,
                                 SmallVectorImpl<SDNode *> &Created) const {
  // fold (sdiv X, pow2)
  EVT VT = N->getValueType(0);
  if (VT == MVT::i64 && !Subtarget.isPPC64())
    return SDValue();
  if ((VT != MVT::i32 && VT != MVT::i64) ||
      !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
    return SDValue();

  SDLoc DL(N);
  SDValue N0 = N->getOperand(0);

  bool IsNegPow2 = (-Divisor).isPowerOf2();
  unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
  SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);

  SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
  Created.push_back(Op.getNode());

  if (IsNegPow2) {
    Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
    Created.push_back(Op.getNode());
  }

  return Op;
}

//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//

void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      KnownBits &Known,
                                                      const APInt &DemandedElts,
                                                      const SelectionDAG &DAG,
                                                      unsigned Depth) const {
  Known.resetAll();
  switch (Op.getOpcode()) {
  default: break;
  case PPCISD::LBRX: {
    // lhbrx is known to have the top bits cleared out.
    if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
      Known.Zero = 0xFFFF0000;
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
    default: break;
    case Intrinsic::ppc_altivec_vcmpbfp_p:
    case Intrinsic::ppc_altivec_vcmpeqfp_p:
    case Intrinsic::ppc_altivec_vcmpequb_p:
    case Intrinsic::ppc_altivec_vcmpequh_p:
    case Intrinsic::ppc_altivec_vcmpequw_p:
    case Intrinsic::ppc_altivec_vcmpequd_p:
    case Intrinsic::ppc_altivec_vcmpgefp_p:
    case Intrinsic::ppc_altivec_vcmpgtfp_p:
    case Intrinsic::ppc_altivec_vcmpgtsb_p:
    case Intrinsic::ppc_altivec_vcmpgtsh_p:
    case Intrinsic::ppc_altivec_vcmpgtsw_p:
    case Intrinsic::ppc_altivec_vcmpgtsd_p:
    case Intrinsic::ppc_altivec_vcmpgtub_p:
    case Intrinsic::ppc_altivec_vcmpgtuh_p:
    case Intrinsic::ppc_altivec_vcmpgtuw_p:
    case Intrinsic::ppc_altivec_vcmpgtud_p:
      Known.Zero = ~1U;  // All bits but the low one are known to be zero.
      break;
    }
  }
  }
}

unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
  switch (Subtarget.getDarwinDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9: {
    if (!ML)
      break;

    const PPCInstrInfo *TII = Subtarget.getInstrInfo();

    // For small loops (between 5 and 8 instructions), align to a 32-byte
    // boundary so that the entire loop fits in one instruction-cache line.
    uint64_t LoopSize = 0;
    for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
      for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
        LoopSize += TII->getInstSizeInBytes(*J);
        if (LoopSize > 32)
          break;
      }

    if (LoopSize > 16 && LoopSize <= 32)
      return 5; // log2(32), i.e. a 32-byte boundary.

    break;
  }
  }

  return TargetLowering::getPrefLoopAlignment(ML);
}

/// getConstraintType - Given a constraint, return the type of
/// constraint it is for this target.
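/// For example (an illustrative summary of the cases below, not from the
/// original comments): "b", "r", "f", "d", "v", and "y" are register-class
/// constraints, the r+r memory constraint "Z" is a memory constraint, and
/// anything unrecognized is deferred to the generic implementation.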
PPCTargetLowering::ConstraintType
PPCTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'b':
    case 'r':
    case 'f':
    case 'd':
    case 'v':
    case 'y':
      return C_RegisterClass;
    case 'Z':
      // FIXME: While Z does indicate a memory constraint, it specifically
      // indicates an r+r address (used in conjunction with the 'y' modifier
      // in the replacement string). Currently, we're forcing the base
      // register to be r0 in the asm printer (which is interpreted as zero)
      // and forming the complete address in the second register. This is
      // suboptimal.
      return C_Memory;
    }
  } else if (Constraint == "wc") { // individual CR bits.
    return C_RegisterClass;
  } else if (Constraint == "wa" || Constraint == "wd" ||
             Constraint == "wf" || Constraint == "ws" ||
             Constraint == "wi") {
    return C_RegisterClass; // VSX registers.
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
PPCTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();

  // Look at the constraint type.
  if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
    return CW_Register; // an individual CR bit.
  else if ((StringRef(constraint) == "wa" ||
            StringRef(constraint) == "wd" ||
            StringRef(constraint) == "wf") &&
           type->isVectorTy())
    return CW_Register;
  else if (StringRef(constraint) == "ws" && type->isDoubleTy())
    return CW_Register;
  else if (StringRef(constraint) == "wi" && type->isIntegerTy(64))
    return CW_Register; // 'wi' just holds 64-bit integer data.
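
  // Illustrative example (assumed usage, not from the original source): for
  //   asm("fadds %0,%1,%2" : "=f"(r) : "f"(a), "f"(b))
  // with 'float' operands, the 'f' case below reports CW_Register, steering
  // the operands into the floating-point register classes.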

  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'b':
    if (type->isIntegerTy())
      weight = CW_Register;
    break;
  case 'f':
    if (type->isFloatTy())
      weight = CW_Register;
    break;
  case 'd':
    if (type->isDoubleTy())
      weight = CW_Register;
    break;
  case 'v':
    if (type->isVectorTy())
      weight = CW_Register;
    break;
  case 'y':
    weight = CW_Register;
    break;
  case 'Z':
    weight = CW_Memory;
    break;
  }
  return weight;
}

std::pair<unsigned, const TargetRegisterClass *>
PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC RS6000 Constraint Letters
    switch (Constraint[0]) {
    case 'b':   // R1-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
      return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
    case 'r':   // R0-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RCRegClass);
      return std::make_pair(0U, &PPC::GPRCRegClass);
    // 'd' and 'f' constraints are both defined to be "the floating point
    // registers", where one is for 32-bit and the other for 64-bit. We don't
    // really care overly much here so just give them all the same reg classes.
    case 'd':
    case 'f':
      if (Subtarget.hasSPE()) {
        if (VT == MVT::f32 || VT == MVT::i32)
          return std::make_pair(0U, &PPC::SPE4RCRegClass);
        if (VT == MVT::f64 || VT == MVT::i64)
          return std::make_pair(0U, &PPC::SPERCRegClass);
      } else {
        if (VT == MVT::f32 || VT == MVT::i32)
          return std::make_pair(0U, &PPC::F4RCRegClass);
        if (VT == MVT::f64 || VT == MVT::i64)
          return std::make_pair(0U, &PPC::F8RCRegClass);
        if (VT == MVT::v4f64 && Subtarget.hasQPX())
          return std::make_pair(0U, &PPC::QFRCRegClass);
        if (VT == MVT::v4f32 && Subtarget.hasQPX())
          return std::make_pair(0U, &PPC::QSRCRegClass);
      }
      break;
    case 'v':
      if (VT == MVT::v4f64 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QFRCRegClass);
      if (VT == MVT::v4f32 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QSRCRegClass);
      if (Subtarget.hasAltivec())
        return std::make_pair(0U, &PPC::VRRCRegClass);
      break;
    case 'y':   // crrc
      return std::make_pair(0U, &PPC::CRRCRegClass);
    }
  } else if (Constraint == "wc" && Subtarget.useCRBits()) {
    // An individual CR bit.
    return std::make_pair(0U, &PPC::CRBITRCRegClass);
  } else if ((Constraint == "wa" || Constraint == "wd" ||
              Constraint == "wf" || Constraint == "wi") &&
             Subtarget.hasVSX()) {
    return std::make_pair(0U, &PPC::VSRCRegClass);
  } else if (Constraint == "ws" && Subtarget.hasVSX()) {
    if (VT == MVT::f32 && Subtarget.hasP8Vector())
      return std::make_pair(0U, &PPC::VSSRCRegClass);
    else
      return std::make_pair(0U, &PPC::VSFRCRegClass);
  }

  std::pair<unsigned, const TargetRegisterClass *> R =
      TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);

  // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
  // (which we call X[0-9]+).
  // If a 64-bit value has been requested, and a 32-bit GPR has been selected,
  // then 'upgrade' it to the 64-bit parent register.
  // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
  // the AsmName field from *RegisterInfo.td, then this would not be necessary.
  if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
      PPC::GPRCRegClass.contains(R.first))
    return std::make_pair(TRI->getMatchingSuperReg(R.first,
                          PPC::sub_32, &PPC::G8RCRegClass),
                          &PPC::G8RCRegClass);

  // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
  if (!R.second && StringRef("{cc}").equals_lower(Constraint)) {
    R.first = PPC::CR0;
    R.second = &PPC::CRRCRegClass;
  }

  return R;
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;

  // Only support length 1 constraints.
  if (Constraint.length() > 1)
    return;

  char Letter = Constraint[0];
  switch (Letter) {
  default: break;
  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P': {
    ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
    if (!CST) return; // Must be an immediate to match.
    SDLoc dl(Op);
    int64_t Value = CST->getSExtValue();
    EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
                         // numbers are printed as such.
    switch (Letter) {
    default: llvm_unreachable("Unknown constraint letter!");
    case 'I':  // "I" is a signed 16-bit constant.
      if (isInt<16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
      if (isShiftedUInt<16, 16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
      if (isShiftedInt<16, 16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
      if (isUInt<16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'M':  // "M" is a constant that is greater than 31.
      if (Value > 31)
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'N':  // "N" is a positive constant that is an exact power of two.
      if (Value > 0 && isPowerOf2_64(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'O':  // "O" is the constant zero.
      if (Value == 0)
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
      if (isInt<16>(-Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    }
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  // Handle standard constraint letters.
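  // Illustrative example (assumed usage, not from this file): for
  //   asm("addi %0,%1,%2" : "=r"(d) : "r"(a), "I"(16))
  // the 'I' case above folds 16 into a target constant because it fits in a
  // signed 16-bit immediate; a value such as 70000 would fail isInt<16>,
  // leave Result empty, and fall through to the generic handling below.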
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS,
                                              Instruction *I) const {
  // PPC does not allow r+i addressing modes for vectors!
  if (Ty->isVectorTy() && AM.BaseOffs != 0)
    return false;

  // PPC allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16) - 1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // PPC only supports r+r addressing:
  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    // No other scales are supported.
    return false;
  }

  return true;
}

SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // Make sure the function does not optimize away the store of the RA to
  // the stack.
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setLRStoreRequired();
  bool isPPC64 = Subtarget.isPPC64();
  auto PtrVT = getPointerTy(MF.getDataLayout());

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset =
        DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
                        isPPC64 ? MVT::i64 : MVT::i32);
    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Just load the return address off the stack.
  SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
  return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
                     MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;

  // Naked functions never have a frame pointer, and so we use r1. For all
  // other functions, this decision must be delayed until during PEI.
  unsigned FrameReg;
  if (MF.getFunction().hasFnAttribute(Attribute::Naked))
    FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
  else
    FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;

  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
                                         PtrVT);
  while (Depth--)
    FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
                            FrameAddr, MachinePointerInfo());
  return FrameAddr;
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
unsigned PPCTargetLowering::getRegisterByName(const char *RegName, EVT VT,
                                              SelectionDAG &DAG) const {
  bool isPPC64 = Subtarget.isPPC64();
  bool isDarwinABI = Subtarget.isDarwinABI();

  if ((isPPC64 && VT != MVT::i64 && VT != MVT::i32) ||
      (!isPPC64 && VT != MVT::i32))
    report_fatal_error("Invalid register global variable type");

  bool is64Bit = isPPC64 && VT == MVT::i64;
  unsigned Reg = StringSwitch<unsigned>(RegName)
                   .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
                   .Case("r2", (isDarwinABI || isPPC64) ? 0 : PPC::R2)
                   .Case("r13", (!isPPC64 && isDarwinABI) ? 0 :
                                  (is64Bit ? PPC::X13 : PPC::R13))
                   .Default(0);

  if (Reg)
    return Reg;
  report_fatal_error("Invalid register name global variable");
}

bool PPCTargetLowering::isAccessedAsGotIndirect(SDValue GA) const {
  // The 32-bit SVR4 ABI accesses everything as got-indirect.
  if (Subtarget.isSVR4ABI() && !Subtarget.isPPC64())
    return true;

  CodeModel::Model CModel = getTargetMachine().getCodeModel();
  // If it is small or large code model, module locals are accessed
  // indirectly by loading their address from .toc/.got. The difference
  // is that for large code model we have ADDISTocHa + LDtocL and for
  // small code model we simply have LDtoc.
  if (CModel == CodeModel::Small || CModel == CodeModel::Large)
    return true;

  // JumpTable and BlockAddress are accessed as got-indirect.
  if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA))
    return true;

  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA)) {
    const GlobalValue *GV = G->getGlobal();
    unsigned char GVFlags = Subtarget.classifyGlobalReference(GV);
    // The NLP flag indicates that a global access has to use an
    // extra indirection.
    if (GVFlags & PPCII::MO_NLP_FLAG)
      return true;
  }

  return false;
}

bool
PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The PowerPC target isn't yet aware of offsets.
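  // In practice (an illustrative consequence, not from the original
  // comments), a reference such as &foo + 8 keeps the +8 as a separate add
  // instead of folding it into the global-address node.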
  return false;
}

bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           MachineFunction &MF,
                                           unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::ppc_qpx_qvlfd:
  case Intrinsic::ppc_qpx_qvlfs:
  case Intrinsic::ppc_qpx_qvlfcd:
  case Intrinsic::ppc_qpx_qvlfcs:
  case Intrinsic::ppc_qpx_qvlfiwa:
  case Intrinsic::ppc_qpx_qvlfiwz:
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::ppc_altivec_lvebx:
  case Intrinsic::ppc_altivec_lvehx:
  case Intrinsic::ppc_altivec_lvewx:
  case Intrinsic::ppc_vsx_lxvd2x:
  case Intrinsic::ppc_vsx_lxvw4x: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_lvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_lvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_lvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_lxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfd:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfs:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcd:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcs:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = -VT.getStoreSize() + 1;
    Info.size = 2 * VT.getStoreSize() - 1;
    Info.align = 1;
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::ppc_qpx_qvlfda:
  case Intrinsic::ppc_qpx_qvlfsa:
  case Intrinsic::ppc_qpx_qvlfcda:
  case Intrinsic::ppc_qpx_qvlfcsa:
  case Intrinsic::ppc_qpx_qvlfiwaa:
  case Intrinsic::ppc_qpx_qvlfiwza: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_qpx_qvlfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = 1;
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfd:
  case Intrinsic::ppc_qpx_qvstfs:
  case Intrinsic::ppc_qpx_qvstfcd:
  case Intrinsic::ppc_qpx_qvstfcs:
  case Intrinsic::ppc_qpx_qvstfiw:
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
  case Intrinsic::ppc_altivec_stvebx:
  case Intrinsic::ppc_altivec_stvehx:
  case Intrinsic::ppc_altivec_stvewx:
  case Intrinsic::ppc_vsx_stxvd2x:
  case Intrinsic::ppc_vsx_stxvw4x: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_stvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_stvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_stvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfd:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfs:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcd:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcs:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = -VT.getStoreSize() + 1;
    Info.size = 2 * VT.getStoreSize() - 1;
    Info.align = 1;
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfda:
  case Intrinsic::ppc_qpx_qvstfsa:
  case Intrinsic::ppc_qpx_qvstfcda:
  case Intrinsic::ppc_qpx_qvstfcsa:
  case Intrinsic::ppc_qpx_qvstfiwa: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_qpx_qvstfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = 1;
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  default:
    break;
  }

  return false;
}

/// getOptimalMemOpType - Returns the target-specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove lowering.
/// If DstAlign is zero, any destination alignment can be assumed to satisfy
/// the constraint. Similarly, if SrcAlign is zero, there is no need to check
/// it against an alignment requirement, probably because the source does not
/// need to be loaded. If 'IsMemset' is true, this is expanding a memset. If
/// 'ZeroMemset' is true, it is a memset of zero. 'MemcpyStrSrc' indicates
/// whether the memcpy source is constant so it does not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
                                           unsigned DstAlign, unsigned SrcAlign,
                                           bool IsMemset, bool ZeroMemset,
                                           bool MemcpyStrSrc,
                                           MachineFunction &MF) const {
  if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
    const Function &F = MF.getFunction();
    // When expanding a memset, require at least two QPX instructions to cover
    // the cost of loading the value to be stored from the constant pool.
    if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) &&
        (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) &&
        !F.hasFnAttribute(Attribute::NoImplicitFloat)) {
      return MVT::v4f64;
    }

    // We should use Altivec/VSX loads and stores when available. For unaligned
    // addresses, unaligned VSX loads are only fast starting with the P8.
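    // Illustrative example (assumed alignments, not from the original
    // comments): a 16-byte memcpy between buffers known to be 16-byte
    // aligned is expanded with a single v4i32 load/store pair, while the
    // same copy with unknown alignment uses v4i32 only on subtargets with
    // fast unaligned vector access (P8Vector, or VSX in the memset case).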
    if (Subtarget.hasAltivec() && Size >= 16 &&
        (((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) ||
         ((IsMemset && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
      return MVT::v4i32;
  }

  if (Subtarget.isPPC64()) {
    return MVT::i64;
  }

  return MVT::i32;
}

/// Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  return !(BitSize == 0 || BitSize > 64);
}

bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Generally speaking, zexts are not free, but they are free when they can
  // be folded with other operations.
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  // FIXME: Add other cases...
  //  - 32-bit shifts with a zext to i64
  //  - zext after ctlz, bswap, etc.
  //  - zext after and by a constant mask

  return TargetLowering::isZExtFree(Val, VT2);
}

bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
  assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
         "invalid fpext types");
  // Extending to float128 is not free.
  if (DestVT == MVT::f128)
    return false;
  return true;
}

bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                       unsigned,
                                                       unsigned,
                                                       bool *Fast) const {
  if (DisablePPCUnaligned)
    return false;

  // PowerPC supports unaligned memory access for simple non-vector types.
  // Although accessing unaligned addresses is not as efficient as accessing
  // aligned addresses, it is generally more efficient than manual expansion,
  // and generally only traps for software emulation when crossing page
  // boundaries.
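  // As an illustrative summary of the checks that follow: a misaligned
  // scalar i32 or f64 access is reported as fast, misaligned vector accesses
  // are accepted only for the 128-bit VSX types, and ppcf128 is always
  // rejected.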

  if (!VT.isSimple())
    return false;

  if (VT.getSimpleVT().isVector()) {
    if (Subtarget.hasVSX()) {
      if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
          VT != MVT::v4f32 && VT != MVT::v4i32)
        return false;
    } else {
      return false;
    }
  }

  if (VT == MVT::ppcf128)
    return false;

  if (Fast)
    *Fast = true;

  return true;
}

bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return (EnableQuadPrecision && Subtarget.hasP9Vector());
  default:
    break;
  }

  return false;
}

const MCPhysReg *
PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
  // LR is a callee-save register, but we must treat it as clobbered by any
  // call site. Hence we include LR in the scratch registers, which are in
  // turn added as implicit-defs for stackmaps and patchpoints. The same
  // reasoning applies to CTR, which is used by any indirect call.
  static const MCPhysReg ScratchRegs[] = {
    PPC::X12, PPC::LR8, PPC::CTR8, 0
  };

  return ScratchRegs;
}

unsigned PPCTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
}

unsigned PPCTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
}

bool
PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
    EVT VT, unsigned DefinedValues) const {
  if (VT == MVT::v2i64)
    return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves

  if (Subtarget.hasVSX() || Subtarget.hasQPX())
    return true;

  return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
}

Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
  if (DisableILPPref || Subtarget.enableMachineScheduler())
    return TargetLowering::getSchedulingPreference(N);

  return Sched::ILP;
}

// Create a fast isel object.
FastISel *
PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
                                  const TargetLibraryInfo *LibInfo) const {
  return PPC::createFastISel(FuncInfo, LibInfo);
}

void PPCTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  if (Subtarget.isDarwinABI()) return;
  if (!Subtarget.isPPC64()) return;

  // Update IsSplitCSR in PPCFunctionInfo.
  PPCFunctionInfo *PFI = Entry->getParent()->getInfo<PPCFunctionInfo>();
  PFI->setIsSplitCSR(true);
}

void PPCTargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return;

  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  for (const MCPhysReg *I = IStart; *I; ++I) {
    const TargetRegisterClass *RC = nullptr;
    if (PPC::G8RCRegClass.contains(*I))
      RC = &PPC::G8RCRegClass;
    else if (PPC::F8RCRegClass.contains(*I))
      RC = &PPC::F8RCRegClass;
    else if (PPC::CRRCRegClass.contains(*I))
      RC = &PPC::CRRCRegClass;
    else if (PPC::VRRCRegClass.contains(*I))
      RC = &PPC::VRRCRegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");

    unsigned NewVR = MRI->createVirtualRegister(RC);
    // Create copy from CSR to a virtual register.
    // FIXME: this currently does not emit CFI pseudo-instructions; it works
    // fine for CXX_FAST_TLS since the C++-style TLS access functions should
    // be nounwind. If we want to generalize this later, we may need to emit
    // CFI pseudo-instructions.
    assert(Entry->getParent()->getFunction().hasFnAttribute(
             Attribute::NoUnwind) &&
           "Function should be nounwind in insertCopiesSplitCSR!");
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
        .addReg(*I);

    // Insert the copy-back instructions right before the terminator.
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)
          .addReg(NewVR);
  }
}

// Override to enable LOAD_STACK_GUARD lowering on Linux.
bool PPCTargetLowering::useLoadStackGuardNode() const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::useLoadStackGuardNode();
  return true;
}

// Override to disable global variable loading on Linux.
void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::insertSSPDeclarations(M);
}

bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  if (!VT.isSimple() || !Subtarget.hasVSX())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  default:
    // For FP types that are currently not supported by the PPC backend,
    // return false. Examples: f16, f80.
    return false;
  case MVT::f32:
  case MVT::f64:
  case MVT::ppcf128:
    return Imm.isPosZero();
  }
}

// For vector shift operation op, fold
// (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
                                  SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();
  unsigned OpSizeInBits = VT.getScalarSizeInBits();
  unsigned Opcode = N->getOpcode();
  unsigned TargetOpcode;

  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected shift operation");
  case ISD::SHL:
    TargetOpcode = PPCISD::SHL;
    break;
  case ISD::SRL:
    TargetOpcode = PPCISD::SRL;
    break;
  case ISD::SRA:
    TargetOpcode = PPCISD::SRA;
    break;
  }

  if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
      N1->getOpcode() == ISD::AND)
    if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
      if (Mask->getZExtValue() == OpSizeInBits - 1)
        return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));

  return SDValue();
}

SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  SDValue N0 = N->getOperand(0);
  ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!Subtarget.isISA3_0() ||
      N0.getOpcode() != ISD::SIGN_EXTEND ||
      N0.getOperand(0).getValueType() != MVT::i32 ||
      CN1 == nullptr || N->getValueType(0) != MVT::i64)
    return SDValue();

  // We can't save an operation here if the value is already extended, and
  // the existing shift is easier to combine.
  SDValue ExtsSrc = N0.getOperand(0);
  if (ExtsSrc.getOpcode() == ISD::TRUNCATE &&
      ExtsSrc.getOperand(0).getOpcode() == ISD::AssertSext)
    return SDValue();

  SDLoc DL(N0);
  SDValue ShiftBy = SDValue(CN1, 0);
  // We want the shift amount to be i32 on the extswsli, but the shift could
  // have an i64.
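  // For example (illustrative, assuming an ISA 3.0 target):
  //   (shl (sext i32 %a to i64), 4)
  // is emitted as a single extswsli %a, 4 instead of an extsw followed by a
  // separate shift.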
  if (ShiftBy.getValueType() == MVT::i64)
    ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32);

  return DCI.DAG.getNode(PPCISD::EXTSWSLI, DL, MVT::i64, N0->getOperand(0),
                         ShiftBy);
}

SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}

SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}

// Transform (add X, (zext(setne Z, C))) -> (addze X, (addic (addi Z, -C), -1))
// Transform (add X, (zext(sete  Z, C))) -> (addze X, (subfic (addi Z, -C), 0))
// When C is zero, the equation (addi Z, -C) can be simplified to Z.
// Requirement: -C is in [-32768, 32767], and X and Z are MVT::i64 types.
static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG,
                                 const PPCSubtarget &Subtarget) {
  if (!Subtarget.isPPC64())
    return SDValue();

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  auto isZextOfCompareWithConstant = [](SDValue Op) {
    if (Op.getOpcode() != ISD::ZERO_EXTEND || !Op.hasOneUse() ||
        Op.getValueType() != MVT::i64)
      return false;

    SDValue Cmp = Op.getOperand(0);
    if (Cmp.getOpcode() != ISD::SETCC || !Cmp.hasOneUse() ||
        Cmp.getOperand(0).getValueType() != MVT::i64)
      return false;

    if (auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1))) {
      int64_t NegConstant = 0 - Constant->getSExtValue();
      // Due to the limitations of the addi instruction,
      // -C is required to be [-32768, 32767].
      return isInt<16>(NegConstant);
    }

    return false;
  };

  bool LHSHasPattern = isZextOfCompareWithConstant(LHS);
  bool RHSHasPattern = isZextOfCompareWithConstant(RHS);

  // If there is a pattern, canonicalize a zext operand to the RHS.
  if (LHSHasPattern && !RHSHasPattern)
    std::swap(LHS, RHS);
  else if (!LHSHasPattern && !RHSHasPattern)
    return SDValue();

  SDLoc DL(N);
  SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i64);
  SDValue Cmp = RHS.getOperand(0);
  SDValue Z = Cmp.getOperand(0);
  auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1));

  assert(Constant && "Constant should not be a null pointer.");
  int64_t NegConstant = 0 - Constant->getSExtValue();

  switch (cast<CondCodeSDNode>(Cmp.getOperand(2))->get()) {
  default: break;
  case ISD::SETNE: {
    //                                 when C == 0
    //                             --> addze X, (addic Z, -1).carry
    //                            /
    // add X, (zext(setne Z, C))--
    //                            \    when -32768 <= -C <= 32767 && C != 0
    //                             --> addze X, (addic (addi Z, -C), -1).carry
    SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
                              DAG.getConstant(NegConstant, DL, MVT::i64));
    SDValue AddOrZ = NegConstant != 0 ? Add : Z;
    SDValue Addc = DAG.getNode(ISD::ADDC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
                               AddOrZ, DAG.getConstant(-1ULL, DL, MVT::i64));
    return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
                       SDValue(Addc.getNode(), 1));
  }
  case ISD::SETEQ: {
    //                                 when C == 0
    //                             --> addze X, (subfic Z, 0).carry
    //                            /
    // add X, (zext(sete  Z, C))--
    //                            \    when -32768 <= -C <= 32767 && C != 0
    //                             --> addze X, (subfic (addi Z, -C), 0).carry
    SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
                              DAG.getConstant(NegConstant, DL, MVT::i64));
    SDValue AddOrZ = NegConstant != 0 ? Add : Z;
    SDValue Subc = DAG.getNode(ISD::SUBC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
                               DAG.getConstant(0, DL, MVT::i64), AddOrZ);
    return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
                       SDValue(Subc.getNode(), 1));
  }
  }

  return SDValue();
}

SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = combineADDToADDZE(N, DCI.DAG, Subtarget))
    return Value;

  return SDValue();
}

// Detect TRUNCATE operations on bitcasts of float128 values.
// What we are looking for here is the situation where we extract a subset
// of bits from a 128-bit float.
// This can be of two forms:
// 1) BITCAST of f128 feeding TRUNCATE
// 2) BITCAST of f128 feeding SRL (a shift) feeding TRUNCATE
// The reason this is required is because we do not have a legal i128 type
// and so we want to prevent having to store the f128 and then reload part
// of it.
SDValue PPCTargetLowering::combineTRUNCATE(SDNode *N,
                                           DAGCombinerInfo &DCI) const {
  // If we are using CRBits then try that first.
  if (Subtarget.useCRBits()) {
    // Check if CRBits did anything and return that if it did.
    if (SDValue CRTruncValue = DAGCombineTruncBoolExt(N, DCI))
      return CRTruncValue;
  }

  SDLoc dl(N);
  SDValue Op0 = N->getOperand(0);

  // Looking for a truncate of i128 to i64.
  if (Op0.getValueType() != MVT::i128 || N->getValueType(0) != MVT::i64)
    return SDValue();

  int EltToExtract = DCI.DAG.getDataLayout().isBigEndian() ? 1 : 0;

  // SRL feeding TRUNCATE.
  if (Op0.getOpcode() == ISD::SRL) {
    ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
    // The right shift has to be by 64 bits.
    if (!ConstNode || ConstNode->getZExtValue() != 64)
      return SDValue();

    // Switch the element number to extract.
    EltToExtract = EltToExtract ? 0 : 1;
    // Update Op0 past the SRL.
    Op0 = Op0.getOperand(0);
  }

  // BITCAST feeding a TRUNCATE possibly via SRL.
  if (Op0.getOpcode() == ISD::BITCAST &&
      Op0.getValueType() == MVT::i128 &&
      Op0.getOperand(0).getValueType() == MVT::f128) {
    SDValue Bitcast = DCI.DAG.getBitcast(MVT::v2i64, Op0.getOperand(0));
    return DCI.DAG.getNode(
        ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Bitcast,
        DCI.DAG.getTargetConstant(EltToExtract, dl, MVT::i32));
  }
  return SDValue();
}

bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  // Only duplicate to increase tail-calls for the 64-bit SysV ABIs.
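  // Sketch of the intent (simplified, assumed): duplicating a block that
  // ends in `%r = call i64 @f(); ret i64 %r` into its predecessors can turn
  // the call into a tail call, so we only report that the call may be
  // emitted as one when the checks below say TCO is actually possible.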
  if (!Subtarget.isSVR4ABI() || !Subtarget.isPPC64())
    return false;

  // If not a tail call then no need to proceed.
  if (!CI->isTailCall())
    return false;

  // If tail calls are disabled for the caller then we are done.
  const Function *Caller = CI->getParent()->getParent();
  auto Attr = Caller->getFnAttribute("disable-tail-calls");
  if (Attr.getValueAsString() == "true")
    return false;

  // If sibling calls have been disabled and tail-calls aren't guaranteed,
  // there is no reason to duplicate.
  auto &TM = getTargetMachine();
  if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
    return false;

  // Can't tail call a function called indirectly, or if it has variadic args.
  const Function *Callee = CI->getCalledFunction();
  if (!Callee || Callee->isVarArg())
    return false;

  // Make sure the callee and caller calling conventions are eligible for TCO.
  if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
                                           CI->getCallingConv()))
    return false;

  // If the function is local then we have a good chance at tail-calling it.
  return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
}

bool PPCTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
  if (!Subtarget.hasVSX())
    return false;
  if (Subtarget.hasP9Vector() && VT == MVT::f128)
    return true;
  return VT == MVT::f32 || VT == MVT::f64 ||
         VT == MVT::v4f32 || VT == MVT::v2f64;
}

bool PPCTargetLowering::
isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
  const Value *Mask = AndI.getOperand(1);
  // If the mask is suitable for andi. or andis. we should sink the and.
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
    // Can't handle constants wider than 64 bits.
    if (CI->getBitWidth() > 64)
      return false;
    int64_t ConstVal = CI->getZExtValue();
    return isUInt<16>(ConstVal) ||
           (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
  }

  // For non-constant masks, we can always use the record-form and.
  return true;
}

// Transform (abs (sub (zext a), (zext b))) to (vabsd a, b, 0)
// Transform (abs (sub (zext a), (zext_invec b))) to (vabsd a, b, 0)
// Transform (abs (sub (zext_invec a), (zext_invec b))) to (vabsd a, b, 0)
// Transform (abs (sub (zext_invec a), (zext b))) to (vabsd a, b, 0)
// Transform (abs (sub a, b)) to (vabsd a, b, 1) if a and b are of type v4i32
SDValue PPCTargetLowering::combineABS(SDNode *N, DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::ABS) && "Need ABS node here");
  assert(Subtarget.hasP9Altivec() &&
         "Only combine this when P9 altivec supported!");
  EVT VT = N->getValueType(0);
  if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  if (N->getOperand(0).getOpcode() == ISD::SUB) {
    // Even for signed integers, the result of the subtraction is known to
    // be non-negative (as a signed integer) when both inputs are
    // zero-extended.
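    // e.g. when both inputs of the sub are zero-extended, |a - b| equals
    // the unsigned absolute difference, so the node maps directly onto
    // vabsdub/vabsduh/vabsduw with the last VABSD operand set to 0 (an
    // illustrative reading of the transforms listed above).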
    unsigned SubOpcd0 = N->getOperand(0)->getOperand(0).getOpcode();
    unsigned SubOpcd1 = N->getOperand(0)->getOperand(1).getOpcode();
    if ((SubOpcd0 == ISD::ZERO_EXTEND ||
         SubOpcd0 == ISD::ZERO_EXTEND_VECTOR_INREG) &&
        (SubOpcd1 == ISD::ZERO_EXTEND ||
         SubOpcd1 == ISD::ZERO_EXTEND_VECTOR_INREG)) {
      return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
                         N->getOperand(0)->getOperand(0),
                         N->getOperand(0)->getOperand(1),
                         DAG.getTargetConstant(0, dl, MVT::i32));
    }

    // For type v4i32, it can be optimized with xvnegsp + vabsduw.
    if (N->getOperand(0).getValueType() == MVT::v4i32 &&
        N->getOperand(0).hasOneUse()) {
      return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
                         N->getOperand(0)->getOperand(0),
                         N->getOperand(0)->getOperand(1),
                         DAG.getTargetConstant(1, dl, MVT::i32));
    }
  }

  return SDValue();
}